From 46d44797c2c77431f7638daf84b0af3961bb5129 Mon Sep 17 00:00:00 2001 From: Stainless Bot Date: Fri, 14 Mar 2025 03:18:57 +0000 Subject: [PATCH] chore: add most doc strings to rbi type definitions --- .yardopts | 2 + lib/openai/base_client.rb | 35 +-- lib/openai/base_model.rb | 173 ++++--------- lib/openai/base_page.rb | 9 +- lib/openai/base_stream.rb | 11 +- lib/openai/client.rb | 4 +- lib/openai/cursor_page.rb | 6 +- lib/openai/errors.rb | 15 +- lib/openai/extern.rb | 3 +- lib/openai/models/audio/speech_model.rb | 1 - .../audio/transcription_create_params.rb | 1 - .../audio/translation_create_response.rb | 1 - lib/openai/models/audio_model.rb | 1 - lib/openai/models/beta/assistant_tool.rb | 1 - .../beta/thread_create_and_run_params.rb | 2 - .../models/beta/thread_create_params.rb | 1 - lib/openai/models/beta/threads/message.rb | 1 - .../beta/threads/message_create_params.rb | 1 - .../models/beta/threads/run_create_params.rb | 1 - .../beta/threads/runs/run_step_include.rb | 1 - ...chat_completion_assistant_message_param.rb | 1 - .../models/chat/chat_completion_chunk.rb | 1 - .../chat_completion_function_message_param.rb | 1 - .../models/chat/chat_completion_message.rb | 1 - .../models/chat/chat_completion_modality.rb | 1 - .../models/chat/completion_create_params.rb | 2 - lib/openai/models/chat_model.rb | 1 - lib/openai/models/embedding_model.rb | 1 - .../models/fine_tuning/job_create_params.rb | 1 - lib/openai/models/image_model.rb | 1 - lib/openai/models/moderation.rb | 13 - lib/openai/models/moderation_model.rb | 1 - .../response_file_search_tool_call.rb | 1 - .../models/vector_store_search_params.rb | 1 - .../models/vector_store_search_response.rb | 1 - .../vector_stores/file_batch_create_params.rb | 1 - .../vector_stores/file_create_params.rb | 1 - .../vector_stores/file_update_params.rb | 1 - .../models/vector_stores/vector_store_file.rb | 1 - lib/openai/page.rb | 6 +- lib/openai/pooled_net_requester.rb | 21 +- lib/openai/request_options.rb | 13 +- lib/openai/resources/audio.rb | 1 - lib/openai/resources/audio/speech.rb | 2 - lib/openai/resources/audio/transcriptions.rb | 2 - lib/openai/resources/audio/translations.rb | 2 - lib/openai/resources/batches.rb | 5 - lib/openai/resources/beta.rb | 1 - lib/openai/resources/beta/assistants.rb | 6 - lib/openai/resources/beta/threads.rb | 7 - lib/openai/resources/beta/threads/messages.rb | 6 - lib/openai/resources/beta/threads/runs.rb | 9 - .../resources/beta/threads/runs/steps.rb | 3 - lib/openai/resources/chat.rb | 1 - lib/openai/resources/chat/completions.rb | 7 - .../resources/chat/completions/messages.rb | 2 - lib/openai/resources/completions.rb | 3 - lib/openai/resources/embeddings.rb | 2 - lib/openai/resources/files.rb | 6 - lib/openai/resources/fine_tuning.rb | 1 - lib/openai/resources/fine_tuning/jobs.rb | 6 - .../resources/fine_tuning/jobs/checkpoints.rb | 2 - lib/openai/resources/images.rb | 4 - lib/openai/resources/models.rb | 4 - lib/openai/resources/moderations.rb | 2 - lib/openai/resources/responses.rb | 5 - lib/openai/resources/responses/input_items.rb | 2 - lib/openai/resources/uploads.rb | 4 - lib/openai/resources/uploads/parts.rb | 2 - lib/openai/resources/vector_stores.rb | 7 - .../resources/vector_stores/file_batches.rb | 5 - lib/openai/resources/vector_stores/files.rb | 7 - lib/openai/stream.rb | 5 +- lib/openai/util.rb | 102 +++----- rbi/lib/openai/base_client.rbi | 11 + rbi/lib/openai/base_model.rbi | 106 ++++++++ rbi/lib/openai/base_page.rbi | 2 + rbi/lib/openai/base_stream.rbi | 3 + rbi/lib/openai/client.rbi | 2 + 
rbi/lib/openai/errors.rbi | 5 + rbi/lib/openai/extern.rbi | 1 + .../models/audio/speech_create_params.rbi | 20 ++ rbi/lib/openai/models/audio/transcription.rbi | 3 + .../audio/transcription_create_params.rbi | 26 ++ .../audio/transcription_create_response.rbi | 3 + .../models/audio/transcription_segment.rbi | 13 + .../models/audio/transcription_verbose.rbi | 7 + .../models/audio/transcription_word.rbi | 3 + .../audio/translation_create_params.rbi | 18 ++ .../audio/translation_create_response.rbi | 1 + .../models/audio/translation_verbose.rbi | 4 + .../openai/models/audio_response_format.rbi | 2 + .../auto_file_chunking_strategy_param.rbi | 3 + rbi/lib/openai/models/batch.rbi | 25 ++ rbi/lib/openai/models/batch_create_params.rbi | 27 ++ rbi/lib/openai/models/batch_error.rbi | 4 + rbi/lib/openai/models/batch_list_params.rbi | 6 + .../openai/models/batch_request_counts.rbi | 4 + rbi/lib/openai/models/beta/assistant.rbi | 65 +++++ .../models/beta/assistant_create_params.rbi | 100 ++++++++ .../models/beta/assistant_list_params.rbi | 14 ++ .../beta/assistant_response_format_option.rbi | 21 ++ .../models/beta/assistant_stream_event.rbi | 124 ++++++++++ rbi/lib/openai/models/beta/assistant_tool.rbi | 1 + .../models/beta/assistant_tool_choice.rbi | 4 + .../beta/assistant_tool_choice_function.rbi | 1 + .../beta/assistant_tool_choice_option.rbi | 12 + .../models/beta/assistant_update_params.rbi | 79 ++++++ .../models/beta/code_interpreter_tool.rbi | 1 + .../openai/models/beta/file_search_tool.rbi | 29 +++ rbi/lib/openai/models/beta/function_tool.rbi | 1 + .../models/beta/message_stream_event.rbi | 29 +++ .../models/beta/run_step_stream_event.rbi | 33 +++ .../openai/models/beta/run_stream_event.rbi | 43 ++++ rbi/lib/openai/models/beta/thread.rbi | 26 ++ .../beta/thread_create_and_run_params.rbi | 180 ++++++++++++++ .../models/beta/thread_create_params.rbi | 76 ++++++ .../models/beta/thread_stream_event.rbi | 6 + .../models/beta/thread_update_params.rbi | 21 ++ .../openai/models/beta/threads/annotation.rbi | 4 + .../models/beta/threads/annotation_delta.rbi | 4 + .../beta/threads/file_citation_annotation.rbi | 6 + .../file_citation_delta_annotation.rbi | 8 + .../beta/threads/file_path_annotation.rbi | 5 + .../threads/file_path_delta_annotation.rbi | 6 + .../openai/models/beta/threads/image_file.rbi | 7 + .../beta/threads/image_file_content_block.rbi | 3 + .../models/beta/threads/image_file_delta.rbi | 7 + .../beta/threads/image_file_delta_block.rbi | 4 + .../openai/models/beta/threads/image_url.rbi | 6 + .../beta/threads/image_url_content_block.rbi | 2 + .../models/beta/threads/image_url_delta.rbi | 6 + .../beta/threads/image_url_delta_block.rbi | 3 + .../openai/models/beta/threads/message.rbi | 37 +++ .../models/beta/threads/message_content.rbi | 3 + .../beta/threads/message_content_delta.rbi | 3 + .../threads/message_content_part_param.rbi | 3 + .../beta/threads/message_create_params.rbi | 26 ++ .../models/beta/threads/message_delta.rbi | 4 + .../beta/threads/message_delta_event.rbi | 5 + .../beta/threads/message_list_params.rbi | 15 ++ .../beta/threads/message_update_params.rbi | 6 + .../beta/threads/refusal_content_block.rbi | 2 + .../beta/threads/refusal_delta_block.rbi | 3 + .../required_action_function_tool_call.rbi | 11 + rbi/lib/openai/models/beta/threads/run.rbi | 111 +++++++++ .../models/beta/threads/run_create_params.rbi | 128 ++++++++++ .../models/beta/threads/run_list_params.rbi | 14 ++ .../openai/models/beta/threads/run_status.rbi | 3 + .../run_submit_tool_outputs_params.rbi | 4 + 
.../models/beta/threads/run_update_params.rbi | 6 + .../threads/runs/code_interpreter_logs.rbi | 4 + .../runs/code_interpreter_output_image.rbi | 4 + .../runs/code_interpreter_tool_call.rbi | 18 ++ .../runs/code_interpreter_tool_call_delta.rbi | 13 + .../threads/runs/file_search_tool_call.rbi | 24 ++ .../runs/file_search_tool_call_delta.rbi | 5 + .../beta/threads/runs/function_tool_call.rbi | 10 + .../threads/runs/function_tool_call_delta.rbi | 11 + .../runs/message_creation_step_details.rbi | 3 + .../models/beta/threads/runs/run_step.rbi | 45 ++++ .../beta/threads/runs/run_step_delta.rbi | 4 + .../threads/runs/run_step_delta_event.rbi | 5 + .../runs/run_step_delta_message_delta.rbi | 3 + .../beta/threads/runs/step_list_params.rbi | 21 ++ .../threads/runs/step_retrieve_params.rbi | 7 + .../models/beta/threads/runs/tool_call.rbi | 2 + .../beta/threads/runs/tool_call_delta.rbi | 2 + .../threads/runs/tool_call_delta_object.rbi | 5 + .../threads/runs/tool_calls_step_details.rbi | 5 + rbi/lib/openai/models/beta/threads/text.rbi | 1 + .../beta/threads/text_content_block.rbi | 2 + .../beta/threads/text_content_block_param.rbi | 3 + .../openai/models/beta/threads/text_delta.rbi | 1 + .../models/beta/threads/text_delta_block.rbi | 3 + .../openai/models/chat/chat_completion.rbi | 33 +++ ...hat_completion_assistant_message_param.rbi | 28 +++ .../models/chat/chat_completion_audio.rbi | 9 + .../chat/chat_completion_audio_param.rbi | 11 + .../models/chat/chat_completion_chunk.rbi | 60 +++++ .../chat/chat_completion_content_part.rbi | 10 + .../chat_completion_content_part_image.rbi | 7 + ...at_completion_content_part_input_audio.rbi | 5 + .../chat_completion_content_part_refusal.rbi | 2 + .../chat_completion_content_part_text.rbi | 4 + .../models/chat/chat_completion_deleted.rbi | 3 + ...hat_completion_developer_message_param.rbi | 9 + .../chat_completion_function_call_option.rbi | 3 + ...chat_completion_function_message_param.rbi | 3 + .../models/chat/chat_completion_message.rbi | 27 ++ .../chat/chat_completion_message_param.rbi | 4 + .../chat_completion_message_tool_call.rbi | 9 + .../chat_completion_named_tool_choice.rbi | 4 + .../chat_completion_prediction_content.rbi | 11 + .../models/chat/chat_completion_role.rbi | 1 + .../chat/chat_completion_store_message.rbi | 2 + .../chat/chat_completion_stream_options.rbi | 5 + .../chat_completion_system_message_param.rbi | 9 + .../chat/chat_completion_token_logprob.rbi | 19 ++ .../models/chat/chat_completion_tool.rbi | 1 + .../chat_completion_tool_choice_option.rbi | 13 + .../chat_completion_tool_message_param.rbi | 5 + .../chat_completion_user_message_param.rbi | 8 + .../models/chat/completion_create_params.rbi | 234 ++++++++++++++++++ .../models/chat/completion_list_params.rbi | 10 + .../models/chat/completion_update_params.rbi | 6 + .../chat/completions/message_list_params.rbi | 6 + rbi/lib/openai/models/comparison_filter.rbi | 24 ++ rbi/lib/openai/models/completion.rbi | 12 + rbi/lib/openai/models/completion_choice.rbi | 8 + .../models/completion_create_params.rbi | 107 ++++++++ rbi/lib/openai/models/completion_usage.rbi | 18 ++ rbi/lib/openai/models/compound_filter.rbi | 8 + .../models/create_embedding_response.rbi | 7 + rbi/lib/openai/models/embedding.rbi | 6 + .../openai/models/embedding_create_params.rbi | 37 +++ .../openai/models/file_chunking_strategy.rbi | 2 + .../models/file_chunking_strategy_param.rbi | 3 + rbi/lib/openai/models/file_create_params.rbi | 5 + rbi/lib/openai/models/file_list_params.rbi | 11 + rbi/lib/openai/models/file_object.rbi | 
19 ++ rbi/lib/openai/models/file_purpose.rbi | 4 + .../models/fine_tuning/fine_tuning_job.rbi | 109 ++++++++ .../fine_tuning/fine_tuning_job_event.rbi | 10 + .../fine_tuning_job_wandb_integration.rbi | 13 + ...ne_tuning_job_wandb_integration_object.rbi | 5 + .../models/fine_tuning/job_create_params.rbi | 136 ++++++++++ .../fine_tuning/job_list_events_params.rbi | 2 + .../models/fine_tuning/job_list_params.rbi | 4 + .../jobs/checkpoint_list_params.rbi | 2 + .../jobs/fine_tuning_job_checkpoint.rbi | 10 + rbi/lib/openai/models/function_definition.rbi | 16 ++ rbi/lib/openai/models/image.rbi | 6 + .../models/image_create_variation_params.rbi | 22 ++ rbi/lib/openai/models/image_edit_params.rbi | 26 ++ .../openai/models/image_generate_params.rbi | 36 +++ rbi/lib/openai/models/model.rbi | 5 + rbi/lib/openai/models/moderation.rbi | 63 +++++ .../models/moderation_create_params.rbi | 14 ++ .../models/moderation_create_response.rbi | 4 + .../models/moderation_image_url_input.rbi | 5 + .../models/moderation_multi_modal_input.rbi | 2 + .../openai/models/moderation_text_input.rbi | 3 + .../other_file_chunking_strategy_object.rbi | 4 + rbi/lib/openai/models/reasoning.rbi | 20 ++ rbi/lib/openai/models/reasoning_effort.rbi | 6 + .../models/response_format_json_object.rbi | 4 + .../models/response_format_json_schema.rbi | 17 ++ .../openai/models/response_format_text.rbi | 2 + .../openai/models/responses/computer_tool.rbi | 7 + .../models/responses/easy_input_message.rbi | 16 ++ .../models/responses/file_search_tool.rbi | 17 ++ .../openai/models/responses/function_tool.rbi | 9 + .../responses/input_item_list_params.rbi | 12 + rbi/lib/openai/models/responses/response.rbi | 105 ++++++++ .../responses/response_audio_delta_event.rbi | 3 + .../responses/response_audio_done_event.rbi | 2 + .../response_audio_transcript_delta_event.rbi | 3 + .../response_audio_transcript_done_event.rbi | 2 + ...code_interpreter_call_code_delta_event.rbi | 4 + ..._code_interpreter_call_code_done_event.rbi | 4 + ..._code_interpreter_call_completed_event.rbi | 4 + ...ode_interpreter_call_in_progress_event.rbi | 4 + ...de_interpreter_call_interpreting_event.rbi | 4 + .../response_code_interpreter_tool_call.rbi | 16 ++ .../responses/response_completed_event.rbi | 3 + .../responses/response_computer_tool_call.rbi | 75 ++++++ .../models/responses/response_content.rbi | 2 + .../response_content_part_added_event.rbi | 8 + .../response_content_part_done_event.rbi | 8 + .../responses/response_create_params.rbi | 114 +++++++++ .../responses/response_created_event.rbi | 3 + .../models/responses/response_error.rbi | 4 + .../models/responses/response_error_event.rbi | 5 + .../responses/response_failed_event.rbi | 3 + ...ponse_file_search_call_completed_event.rbi | 4 + ...nse_file_search_call_in_progress_event.rbi | 4 + ...ponse_file_search_call_searching_event.rbi | 4 + .../response_file_search_tool_call.rbi | 21 ++ .../responses/response_format_text_config.rbi | 14 ++ ...esponse_format_text_json_schema_config.rbi | 15 ++ ...se_function_call_arguments_delta_event.rbi | 5 + ...nse_function_call_arguments_done_event.rbi | 4 + .../responses/response_function_tool_call.rbi | 12 + .../response_function_web_search.rbi | 7 + .../responses/response_in_progress_event.rbi | 3 + .../models/responses/response_includable.rbi | 8 + .../responses/response_incomplete_event.rbi | 3 + .../models/responses/response_input_audio.rbi | 5 + .../responses/response_input_content.rbi | 2 + .../models/responses/response_input_file.rbi | 5 + 
.../models/responses/response_input_image.rbi | 10 + .../models/responses/response_input_item.rbi | 52 ++++ .../models/responses/response_input_text.rbi | 3 + .../models/responses/response_item_list.rbi | 46 ++++ .../responses/response_output_audio.rbi | 4 + .../models/responses/response_output_item.rbi | 2 + .../response_output_item_added_event.rbi | 4 + .../response_output_item_done_event.rbi | 4 + .../responses/response_output_message.rbi | 11 + .../responses/response_output_refusal.rbi | 3 + .../models/responses/response_output_text.rbi | 20 ++ .../responses/response_reasoning_item.rbi | 11 + .../response_refusal_delta_event.rbi | 6 + .../responses/response_refusal_done_event.rbi | 6 + .../responses/response_retrieve_params.rbi | 2 + .../models/responses/response_status.rbi | 2 + .../responses/response_stream_event.rbi | 2 + .../response_text_annotation_delta_event.rbi | 23 ++ .../models/responses/response_text_config.rbi | 18 ++ .../responses/response_text_delta_event.rbi | 6 + .../responses/response_text_done_event.rbi | 6 + .../models/responses/response_usage.rbi | 8 + ...sponse_web_search_call_completed_event.rbi | 4 + ...onse_web_search_call_in_progress_event.rbi | 4 + ...sponse_web_search_call_searching_event.rbi | 4 + rbi/lib/openai/models/responses/tool.rbi | 4 + .../models/responses/tool_choice_function.rbi | 3 + .../models/responses/tool_choice_options.rbi | 8 + .../models/responses/tool_choice_types.rbi | 18 ++ .../models/responses/web_search_tool.rbi | 22 ++ .../models/static_file_chunking_strategy.rbi | 5 + .../static_file_chunking_strategy_object.rbi | 1 + ...ic_file_chunking_strategy_object_param.rbi | 2 + rbi/lib/openai/models/upload.rbi | 13 + .../openai/models/upload_complete_params.rbi | 3 + .../openai/models/upload_create_params.rbi | 10 + .../models/uploads/part_create_params.rbi | 1 + rbi/lib/openai/models/uploads/upload_part.rbi | 5 + rbi/lib/openai/models/vector_store.rbi | 31 +++ .../models/vector_store_create_params.rbi | 17 ++ .../models/vector_store_list_params.rbi | 14 ++ .../models/vector_store_search_params.rbi | 11 + .../models/vector_store_search_response.rbi | 13 + .../models/vector_store_update_params.rbi | 12 + .../file_batch_create_params.rbi | 11 + .../file_batch_list_files_params.rbi | 16 ++ .../vector_stores/file_content_response.rbi | 2 + .../vector_stores/file_create_params.rbi | 11 + .../models/vector_stores/file_list_params.rbi | 16 ++ .../vector_stores/file_update_params.rbi | 6 + .../vector_stores/vector_store_file.rbi | 30 +++ .../vector_stores/vector_store_file_batch.rbi | 18 ++ rbi/lib/openai/pooled_net_requester.rbi | 6 + rbi/lib/openai/request_options.rbi | 21 ++ rbi/lib/openai/resources/audio/speech.rbi | 1 + .../openai/resources/audio/transcriptions.rbi | 1 + .../openai/resources/audio/translations.rbi | 1 + rbi/lib/openai/resources/batches.rbi | 6 + rbi/lib/openai/resources/beta/assistants.rbi | 5 + rbi/lib/openai/resources/beta/threads.rbi | 6 + .../resources/beta/threads/messages.rbi | 5 + .../openai/resources/beta/threads/runs.rbi | 14 ++ .../resources/beta/threads/runs/steps.rbi | 2 + rbi/lib/openai/resources/chat/completions.rbi | 43 ++++ .../resources/chat/completions/messages.rbi | 2 + rbi/lib/openai/resources/completions.rbi | 2 + rbi/lib/openai/resources/embeddings.rbi | 1 + rbi/lib/openai/resources/files.rbi | 25 ++ rbi/lib/openai/resources/fine_tuning/jobs.rbi | 13 + .../fine_tuning/jobs/checkpoints.rbi | 1 + rbi/lib/openai/resources/images.rbi | 3 + rbi/lib/openai/resources/models.rbi | 6 + 
rbi/lib/openai/resources/moderations.rbi | 2 + rbi/lib/openai/resources/responses.rbi | 24 ++ .../resources/responses/input_items.rbi | 1 + rbi/lib/openai/resources/uploads.rbi | 33 +++ rbi/lib/openai/resources/uploads/parts.rbi | 11 + rbi/lib/openai/resources/vector_stores.rbi | 7 + .../resources/vector_stores/file_batches.rbi | 5 + .../openai/resources/vector_stores/files.rbi | 11 + rbi/lib/openai/stream.rbi | 1 + rbi/lib/openai/util.rbi | 41 +++ sig/openai/base_client.rbs | 2 +- test/openai/client_test.rb | 5 - 375 files changed, 4744 insertions(+), 444 deletions(-) diff --git a/.yardopts b/.yardopts index 29c933bc..c7c3301d 100644 --- a/.yardopts +++ b/.yardopts @@ -1 +1,3 @@ --markup markdown +--exclude /rbi +--exclude /sig diff --git a/lib/openai/base_client.rb b/lib/openai/base_client.rb index 9707f135..5c609ac7 100644 --- a/lib/openai/base_client.rb +++ b/lib/openai/base_client.rb @@ -1,10 +1,9 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @abstract - # class BaseClient # from whatwg fetch spec MAX_REDIRECTS = 20 @@ -21,12 +20,11 @@ class BaseClient # rubocop:enable Style/MutableConstant class << self - # @private + # @api private # # @param req [Hash{Symbol=>Object}] # # @raise [ArgumentError] - # def validate!(req) keys = [:method, :path, :query, :headers, :body, :unwrap, :page, :stream, :model, :options] case req @@ -41,13 +39,12 @@ def validate!(req) end end - # @private + # @api private # # @param status [Integer] # @param headers [Hash{String=>String}, Net::HTTPHeader] # # @return [Boolean] - # def should_retry?(status, headers:) coerced = OpenAI::Util.coerce_boolean(headers["x-should-retry"]) case [coerced, status] @@ -65,7 +62,7 @@ def should_retry?(status, headers:) end end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . # @@ -86,7 +83,6 @@ def should_retry?(status, headers:) # @param response_headers [Hash{String=>String}, Net::HTTPHeader] # # @return [Hash{Symbol=>Object}] - # def follow_redirect(request, status:, response_headers:) method, url, headers = request.fetch_values(:method, :url, :headers) location = @@ -130,12 +126,11 @@ def follow_redirect(request, status:, response_headers:) end end - # @private - # + # @api private # @return [OpenAI::PooledNetRequester] attr_accessor :requester - # @private + # @api private # # @param base_url [String] # @param timeout [Float] @@ -144,7 +139,6 @@ def follow_redirect(request, status:, response_headers:) # @param max_retry_delay [Float] # @param headers [Hash{String=>String, Integer, Array, nil}] # @param idempotency_header [String, nil] - # def initialize( base_url:, timeout: 0.0, @@ -171,19 +165,17 @@ def initialize( @max_retry_delay = max_retry_delay end - # @private + # @api private # # @return [Hash{String=>String}] - # private def auth_headers = {} - # @private + # @api private # # @return [String] - # private def generate_idempotency_key = "stainless-ruby-retry-#{SecureRandom.uuid}" - # @private + # @api private # # @param req [Hash{Symbol=>Object}] . 
# @@ -220,7 +212,6 @@ def initialize( # @option opts [Float, nil] :timeout # # @return [Hash{Symbol=>Object}] - # private def build_request(req, opts) method, uninterpolated_path = req.fetch_values(:method, :path) @@ -271,13 +262,12 @@ def initialize( } end - # @private + # @api private # # @param headers [Hash{String=>String}] # @param retry_count [Integer] # # @return [Float] - # private def retry_delay(headers, retry_count:) # Non-standard extension span = Float(headers["retry-after-ms"], exception: false)&.then { _1 / 1000 } @@ -298,7 +288,7 @@ def initialize( (@initial_retry_delay * scale * jitter).clamp(0, @max_retry_delay) end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . # @@ -322,7 +312,6 @@ def initialize( # # @raise [OpenAI::APIError] # @return [Array(Integer, Net::HTTPResponse, Enumerable)] - # private def send_request(request, redirect_count:, retry_count:, send_retry_header:) url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout) input = {**request.except(:timeout), deadline: OpenAI::Util.monotonic_secs + timeout} @@ -424,7 +413,6 @@ def initialize( # # @raise [OpenAI::APIError] # @return [Object] - # def request(req) self.class.validate!(req) model = req.fetch(:model) { OpenAI::Unknown } @@ -455,7 +443,6 @@ def request(req) end # @return [String] - # def inspect # rubocop:disable Layout/LineLength base_url = OpenAI::Util.unparse_uri(@base_url) diff --git a/lib/openai/base_model.rb b/lib/openai/base_model.rb index 798e49ca..23f83864 100644 --- a/lib/openai/base_model.rb +++ b/lib/openai/base_model.rb @@ -1,41 +1,37 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @abstract - # module Converter # rubocop:disable Lint/UnusedMethodArgument - # @private + # @api private # # @param value [Object] # # @return [Object] - # def coerce(value) = value - # @private + # @api private # # @param value [Object] # # @return [Object] - # def dump(value) = value - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) = (raise NotImplementedError) # rubocop:enable Lint/UnusedMethodArgument class << self - # @private + # @api private # # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] . # @@ -48,7 +44,6 @@ class << self # @option spec [Boolean] :"nil?" # # @return [Proc] - # def type_info(spec) case spec in Hash @@ -64,7 +59,7 @@ def type_info(spec) end end - # @private + # @api private # # Based on `target`, transform `value` into `target`, to the extent possible: # @@ -77,7 +72,6 @@ def type_info(spec) # @param value [Object] # # @return [Object] - # def coerce(target, value) case target in OpenAI::Converter @@ -111,13 +105,12 @@ def coerce(target, value) end end - # @private + # @api private # # @param target [OpenAI::Converter, Class] # @param value [Object] # # @return [Object] - # def dump(target, value) case target in OpenAI::Converter @@ -127,7 +120,7 @@ def dump(target, value) end end - # @private + # @api private # # The underlying algorithm for computing maximal compatibility is subject to # future improvements. 
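To make the `Converter` entry points documented above concrete, here is a minimal sketch of a coerce/dump round trip; the choice of `OpenAI::Models::Batch` and the field values are illustrative assumptions, not taken from this patch:

```ruby
require "openai"

# Loosely typed wire data, as it would arrive in a response body.
payload = {id: "batch_abc", status: "completed"}

# coerce: based on the target, transform the value into that target,
# to the extent possible (internal API).
batch = OpenAI::Converter.coerce(OpenAI::Models::Batch, payload)

# dump: the inverse direction, back toward request-ready primitives.
body = OpenAI::Converter.dump(OpenAI::Models::Batch, batch)
```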
@@ -142,7 +135,6 @@ def dump(target, value) # @param value [Object] # # @return [Object] - # def try_strict_coerce(target, value) case target in OpenAI::Converter @@ -182,7 +174,7 @@ def try_strict_coerce(target, value) end end - # @private + # @api private # # @abstract # @@ -197,40 +189,35 @@ class Unknown # @param other [Object] # # @return [Boolean] - # def self.===(other) = true # @param other [Object] # # @return [Boolean] - # def self.==(other) = other.is_a?(Class) && other <= OpenAI::Unknown class << self # @!parse - # # @private + # # @api private # # # # @param value [Object] # # # # @return [Object] - # # # def coerce(value) = super # @!parse - # # @private + # # @api private # # # # @param value [Object] # # # # @return [Object] - # # # def dump(value) = super - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) # prevent unknown variant from being chosen during the first coercion pass [false, true, 0] @@ -240,7 +227,7 @@ def try_strict_coerce(value) # rubocop:enable Lint/UnusedMethodArgument end - # @private + # @api private # # @abstract # @@ -253,40 +240,35 @@ class BooleanModel # @param other [Object] # # @return [Boolean] - # def self.===(other) = other == true || other == false # @param other [Object] # # @return [Boolean] - # def self.==(other) = other.is_a?(Class) && other <= OpenAI::BooleanModel class << self # @!parse - # # @private + # # @api private # # # # @param value [Boolean, Object] # # # # @return [Boolean, Object] - # # # def coerce(value) = super # @!parse - # # @private + # # @api private # # # # @param value [Boolean, Object] # # # # @return [Boolean, Object] - # # # def dump(value) = super - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) case value in true | false @@ -298,7 +280,7 @@ def try_strict_coerce(value) end end - # @private + # @api private # # @abstract # @@ -348,13 +330,11 @@ class << self # All of the valid Symbol values for this enum. # # @return [Array] - # def values = (@values ||= constants.map { const_get(_1) }) - # @private + # @api private # # Guard against thread safety issues by instantiating `@values`. - # private def finalize! = values end @@ -363,24 +343,21 @@ def values = (@values ||= constants.map { const_get(_1) }) # @param other [Object] # # @return [Boolean] - # def self.===(other) = values.include?(other) # @param other [Object] # # @return [Boolean] - # def self.==(other) other.is_a?(Class) && other <= OpenAI::Enum && other.values.to_set == values.to_set end class << self - # @private + # @api private # # @param value [String, Symbol, Object] # # @return [Symbol, Object] - # def coerce(value) case value in Symbol | String if values.include?(val = value.to_sym) @@ -391,20 +368,18 @@ def coerce(value) end # @!parse - # # @private + # # @api private # # # # @param value [Symbol, Object] # # # # @return [Symbol, Object] - # # # def dump(value) = super - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) return [true, value, 1] if values.include?(value) @@ -423,7 +398,7 @@ def try_strict_coerce(value) end end - # @private + # @api private # # @abstract # @@ -461,28 +436,25 @@ class Union extend OpenAI::Converter class << self - # @private + # @api private # # All of the specified variant info for this union. 
# # @return [Array] - # private def known_variants = (@known_variants ||= []) - # @private + # @api private # # All of the specified variants for this union. # # @return [Array] - # protected def variants @known_variants.map { |key, variant_fn| [key, variant_fn.call] } end - # @private + # @api private # # @param property [Symbol] - # private def discriminator(property) case property in Symbol @@ -490,7 +462,7 @@ class << self end end - # @private + # @api private # # @param key [Symbol, Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] # @@ -503,7 +475,6 @@ class << self # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # private def variant(key, spec = nil) variant_info = case key @@ -516,12 +487,11 @@ class << self known_variants << variant_info end - # @private + # @api private # # @param value [Object] # # @return [OpenAI::Converter, Class, nil] - # private def resolve_variant(value) case [@discriminator, value] in [_, OpenAI::BaseModel] @@ -551,7 +521,6 @@ class << self # @param other [Object] # # @return [Boolean] - # def self.===(other) known_variants.any? do |_, variant_fn| variant_fn.call === other @@ -561,18 +530,16 @@ def self.===(other) # @param other [Object] # # @return [Boolean] - # def self.==(other) other.is_a?(Class) && other <= OpenAI::Union && other.variants == variants end class << self - # @private + # @api private # # @param value [Object] # # @return [Object] - # def coerce(value) if (variant = resolve_variant(value)) return OpenAI::Converter.coerce(variant, value) @@ -597,12 +564,11 @@ def coerce(value) variant.nil? ? value : OpenAI::Converter.coerce(variant, value) end - # @private + # @api private # # @param value [Object] # # @return [Object] - # def dump(value) if (variant = resolve_variant(value)) return OpenAI::Converter.dump(variant, value) @@ -617,12 +583,11 @@ def dump(value) value end - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) # TODO(ruby) this will result in super linear decoding behaviour for nested unions # follow up with a decoding context that captures current strictness levels @@ -655,7 +620,7 @@ def try_strict_coerce(value) # rubocop:enable Style/HashEachMethods end - # @private + # @api private # # @abstract # @@ -670,7 +635,6 @@ def self.[](...) = new(...) 
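The declaration side of this union machinery shows up repeatedly in the model diffs further down (e.g. `assistant_tool.rb`); a sketch of the pattern, where the variant keys are an assumption rather than copied from this patch:

```ruby
# `discriminator` names the property consulted when resolving a variant;
# each `variant` registers a lazily evaluated target type.
class AssistantTool < OpenAI::Union
  discriminator :type

  variant :code_interpreter, -> { OpenAI::Models::Beta::CodeInterpreterTool }
  variant :file_search, -> { OpenAI::Models::Beta::FileSearchTool }
  variant :function, -> { OpenAI::Models::Beta::FunctionTool }
end
```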
# @param other [Object] # # @return [Boolean] - # def ===(other) type = item_type case other @@ -686,15 +650,13 @@ def ===(other) # @param other [Object] # # @return [Boolean] - # def ==(other) = other.is_a?(OpenAI::ArrayOf) && other.item_type == item_type - # @private + # @api private # # @param value [Enumerable, Object] # # @return [Array, Object] - # def coerce(value) type = item_type case value @@ -705,12 +667,11 @@ def coerce(value) end end - # @private + # @api private # # @param value [Enumerable, Object] # # @return [Array, Object] - # def dump(value) type = item_type case value @@ -721,12 +682,11 @@ def dump(value) end end - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) case value in Array @@ -760,13 +720,12 @@ def try_strict_coerce(value) end end - # @private + # @api private # # @return [OpenAI::Converter, Class] - # protected def item_type = @item_type_fn.call - # @private + # @api private # # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] # @@ -779,13 +738,12 @@ def try_strict_coerce(value) # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # def initialize(type_info, spec = {}) @item_type_fn = OpenAI::Converter.type_info(type_info || spec) end end - # @private + # @api private # # @abstract # @@ -800,7 +758,6 @@ def self.[](...) = new(...) # @param other [Object] # # @return [Boolean] - # def ===(other) type = item_type case other @@ -821,15 +778,13 @@ def ===(other) # @param other [Object] # # @return [Boolean] - # def ==(other) = other.is_a?(OpenAI::HashOf) && other.item_type == item_type - # @private + # @api private # # @param value [Hash{Object=>Object}, Object] # # @return [Hash{Symbol=>Object}, Object] - # def coerce(value) type = item_type case value @@ -843,12 +798,11 @@ def coerce(value) end end - # @private + # @api private # # @param value [Hash{Object=>Object}, Object] # # @return [Hash{Symbol=>Object}, Object] - # def dump(value) type = item_type case value @@ -861,12 +815,11 @@ def dump(value) end end - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) case value in Hash @@ -900,13 +853,12 @@ def try_strict_coerce(value) end end - # @private + # @api private # # @return [OpenAI::Converter, Class] - # protected def item_type = @item_type_fn.call - # @private + # @api private # # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Converter, Class] # @@ -919,13 +871,12 @@ def try_strict_coerce(value) # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # def initialize(type_info, spec = {}) @item_type_fn = OpenAI::Converter.type_info(type_info || spec) end end - # @private + # @api private # # @abstract # @@ -942,32 +893,29 @@ class BaseModel extend OpenAI::Converter class << self - # @private + # @api private # # Assumes superclass fields are totally defined before fields are accessed / # defined on subclasses. # # @return [Hash{Symbol=>Hash{Symbol=>Object}}] - # def known_fields @known_fields ||= (self < OpenAI::BaseModel ? 
superclass.known_fields.dup : {}) end # @return [Hash{Symbol=>Hash{Symbol=>Object}}] - # def fields known_fields.transform_values do |field| {**field.except(:type_fn), type: field.fetch(:type_fn).call} end end - # @private + # @api private # # @return [Hash{Symbol=>Proc}] - # def defaults = (@defaults ||= {}) - # @private + # @api private # # @param name_sym [Symbol] # @@ -984,7 +932,6 @@ def defaults = (@defaults ||= {}) # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # private def add_field(name_sym, required:, type_info:, spec:) type_fn, info = case type_info @@ -1023,7 +970,7 @@ def defaults = (@defaults ||= {}) end end - # @private + # @api private # # @param name_sym [Symbol] # @@ -1038,12 +985,11 @@ def defaults = (@defaults ||= {}) # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # def required(name_sym, type_info, spec = {}) add_field(name_sym, required: true, type_info: type_info, spec: spec) end - # @private + # @api private # # @param name_sym [Symbol] # @@ -1058,18 +1004,16 @@ def required(name_sym, type_info, spec = {}) # @option spec [Proc] :union # # @option spec [Boolean] :"nil?" - # def optional(name_sym, type_info, spec = {}) add_field(name_sym, required: false, type_info: type_info, spec: spec) end - # @private + # @api private # # `request_only` attributes not excluded from `.#coerce` when receiving responses # even if well behaved servers should not send them # # @param blk [Proc] - # private def request_only(&blk) @mode = :dump blk.call @@ -1077,12 +1021,11 @@ def optional(name_sym, type_info, spec = {}) @mode = nil end - # @private + # @api private # # `response_only` attributes are omitted from `.#dump` when making requests # # @param blk [Proc] - # private def response_only(&blk) @mode = :coerce blk.call @@ -1094,7 +1037,6 @@ def optional(name_sym, type_info, spec = {}) # @param other [Object] # # @return [Boolean] - # def ==(other) case other in OpenAI::BaseModel @@ -1105,12 +1047,11 @@ def ==(other) end class << self - # @private + # @api private # # @param value [OpenAI::BaseModel, Hash{Object=>Object}, Object] # # @return [OpenAI::BaseModel, Object] - # def coerce(value) case OpenAI::Util.coerce_hash(value) in Hash => coerced @@ -1120,12 +1061,11 @@ def coerce(value) end end - # @private + # @api private # # @param value [OpenAI::BaseModel, Object] # # @return [Hash{Object=>Object}, Object] - # def dump(value) unless (coerced = OpenAI::Util.coerce_hash(value)).is_a?(Hash) return value @@ -1157,12 +1097,11 @@ def dump(value) values end - # @private + # @api private # # @param value [Object] # # @return [Array(true, Object, nil), Array(false, Boolean, Integer)] - # def try_strict_coerce(value) case value in Hash | OpenAI::BaseModel @@ -1220,7 +1159,6 @@ def try_strict_coerce(value) # @param key [Symbol] # # @return [Object, nil] - # def [](key) unless key.instance_of?(Symbol) raise ArgumentError.new("Expected symbol key for lookup, got #{key.inspect}") @@ -1239,7 +1177,6 @@ def [](key) # should not be mutated. # # @return [Hash{Symbol=>Object}] - # def to_h = @data alias_method :to_hash, :to_h @@ -1247,7 +1184,6 @@ def to_h = @data # @param keys [Array, nil] # # @return [Hash{Symbol=>Object}] - # def deconstruct_keys(keys) (keys || self.class.known_fields.keys).filter_map do |k| unless self.class.known_fields.key?(k) @@ -1262,7 +1198,6 @@ def deconstruct_keys(keys) # Create a new instance of a model. 
# # @param data [Hash{Symbol=>Object}, OpenAI::BaseModel] - # def initialize(data = {}) case OpenAI::Util.coerce_hash(data) in Hash => coerced @@ -1273,11 +1208,9 @@ def initialize(data = {}) end # @return [String] - # def to_s = @data.to_s # @return [String] - # def inspect "#<#{self.class.name}:0x#{object_id.to_s(16)} #{deconstruct_keys(nil).map do |k, v| "#{k}=#{v.inspect}" diff --git a/lib/openai/base_page.rb b/lib/openai/base_page.rb index 9f315c7b..b8185c65 100644 --- a/lib/openai/base_page.rb +++ b/lib/openai/base_page.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @abstract # @@ -27,33 +27,28 @@ module OpenAI # ``` module BasePage # @return [Boolean] - # def next_page? = (raise NotImplementedError) # @raise [OpenAI::APIError] # @return [OpenAI::BasePage] - # def next_page = (raise NotImplementedError) # @param blk [Proc] # # @return [void] - # def auto_paging_each(&) = (raise NotImplementedError) # @return [Enumerable] - # def to_enum = super(:auto_paging_each) alias_method :enum_for, :to_enum - # @private + # @api private # # @param client [OpenAI::BaseClient] # @param req [Hash{Symbol=>Object}] # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param page_data [Object] - # def initialize(client:, req:, headers:, page_data:) @client = client @req = req diff --git a/lib/openai/base_stream.rb b/lib/openai/base_stream.rb index c2beb4b9..7151b3f7 100644 --- a/lib/openai/base_stream.rb +++ b/lib/openai/base_stream.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @example # ```ruby @@ -18,19 +18,16 @@ module OpenAI # ``` module BaseStream # @return [void] - # def close = OpenAI::Util.close_fused!(@iterator) - # @private + # @api private # # @return [Enumerable] - # private def iterator = (raise NotImplementedError) # @param blk [Proc] # # @return [void] - # def for_each(&) unless block_given? raise ArgumentError.new("A block must be given to ##{__method__}") @@ -39,19 +36,17 @@ def for_each(&) end # @return [Enumerable] - # def to_enum = @iterator alias_method :enum_for, :to_enum - # @private + # @api private # # @param model [Class, OpenAI::Converter] # @param url [URI::Generic] # @param status [Integer] # @param response [Net::HTTPResponse] # @param messages [Enumerable] - # def initialize(model:, url:, status:, response:, messages:) @model = model @url = url diff --git a/lib/openai/client.rb b/lib/openai/client.rb index 7160cc26..126da608 100644 --- a/lib/openai/client.rb +++ b/lib/openai/client.rb @@ -66,10 +66,9 @@ class Client < OpenAI::BaseClient # @return [OpenAI::Resources::Responses] attr_reader :responses - # @private + # @api private # # @return [Hash{String=>String}] - # private def auth_headers return {} if @api_key.nil? @@ -93,7 +92,6 @@ class Client < OpenAI::BaseClient # @param initial_retry_delay [Float] # # @param max_retry_delay [Float] - # def initialize( base_url: nil, api_key: ENV["OPENAI_API_KEY"], diff --git a/lib/openai/cursor_page.rb b/lib/openai/cursor_page.rb index 0e74ea9e..7773eb35 100644 --- a/lib/openai/cursor_page.rb +++ b/lib/openai/cursor_page.rb @@ -30,13 +30,12 @@ class CursorPage # @return [Boolean] attr_accessor :has_more - # @private + # @api private # # @param client [OpenAI::BaseClient] # @param req [Hash{Symbol=>Object}] # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param page_data [Hash{Symbol=>Object}] - # def initialize(client:, req:, headers:, page_data:) super model = req.fetch(:model) @@ -61,7 +60,6 @@ def next_page? 
# @raise [OpenAI::HTTP::Error] # @return [OpenAI::CursorPage] - # def next_page unless next_page? raise RuntimeError.new("No more pages available. Please check #next_page? before calling ##{__method__}") @@ -72,7 +70,6 @@ def next_page end # @param blk [Proc] - # def auto_paging_each(&blk) unless block_given? raise ArgumentError.new("A block must be given to ##{__method__}") @@ -86,7 +83,6 @@ def auto_paging_each(&blk) end # @return [String] - # def inspect "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} has_more=#{has_more.inspect}>" end diff --git a/lib/openai/errors.rb b/lib/openai/errors.rb index 40faaaae..90cebc97 100644 --- a/lib/openai/errors.rb +++ b/lib/openai/errors.rb @@ -29,7 +29,7 @@ class APIError < OpenAI::Error # @return [String, nil] attr_reader :type - # @private + # @api private # # @param url [URI::Generic] # @param status [Integer, nil] @@ -37,7 +37,6 @@ class APIError < OpenAI::Error # @param request [nil] # @param response [nil] # @param message [String, nil] - # def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil) @url = url @status = status @@ -69,7 +68,7 @@ class APIConnectionError < OpenAI::APIError # # @return [nil] # attr_reader :type - # @private + # @api private # # @param url [URI::Generic] # @param status [nil] @@ -77,7 +76,6 @@ class APIConnectionError < OpenAI::APIError # @param request [nil] # @param response [nil] # @param message [String, nil] - # def initialize( url:, status: nil, @@ -91,7 +89,7 @@ def initialize( end class APITimeoutError < OpenAI::APIConnectionError - # @private + # @api private # # @param url [URI::Generic] # @param status [nil] @@ -99,7 +97,6 @@ class APITimeoutError < OpenAI::APIConnectionError # @param request [nil] # @param response [nil] # @param message [String, nil] - # def initialize( url:, status: nil, @@ -113,7 +110,7 @@ def initialize( end class APIStatusError < OpenAI::APIError - # @private + # @api private # # @param url [URI::Generic] # @param status [Integer] @@ -123,7 +120,6 @@ class APIStatusError < OpenAI::APIError # @param message [String, nil] # # @return [OpenAI::APIStatusError] - # def self.for(url:, status:, body:, request:, response:, message: nil) kwargs = {url: url, status: status, body: body, request: request, response: response, message: message} @@ -165,7 +161,7 @@ def self.for(url:, status:, body:, request:, response:, message: nil) # # @return [String, nil] # attr_reader :type - # @private + # @api private # # @param url [URI::Generic] # @param status [Integer] @@ -173,7 +169,6 @@ def self.for(url:, status:, body:, request:, response:, message: nil) # @param request [nil] # @param response [nil] # @param message [String, nil] - # def initialize(url:, status:, body:, request:, response:, message: nil) message ||= OpenAI::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} } @code = OpenAI::Converter.coerce(String, OpenAI::Util.dig(body, :code)) diff --git a/lib/openai/extern.rb b/lib/openai/extern.rb index 3faad4c1..c8e115d3 100644 --- a/lib/openai/extern.rb +++ b/lib/openai/extern.rb @@ -1,10 +1,9 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @abstract - # module Extern end end diff --git a/lib/openai/models/audio/speech_model.rb b/lib/openai/models/audio/speech_model.rb index 84765c9f..96744e0c 100644 --- a/lib/openai/models/audio/speech_model.rb +++ b/lib/openai/models/audio/speech_model.rb @@ -4,7 +4,6 @@ module OpenAI module Models module Audio # @abstract - # class SpeechModel < OpenAI::Enum 
TTS_1 = :"tts-1" TTS_1_HD = :"tts-1-hd" diff --git a/lib/openai/models/audio/transcription_create_params.rb b/lib/openai/models/audio/transcription_create_params.rb index d0c79556..9be2124c 100644 --- a/lib/openai/models/audio/transcription_create_params.rb +++ b/lib/openai/models/audio/transcription_create_params.rb @@ -125,7 +125,6 @@ class Model < OpenAI::Union end # @abstract - # class TimestampGranularity < OpenAI::Enum WORD = :word SEGMENT = :segment diff --git a/lib/openai/models/audio/translation_create_response.rb b/lib/openai/models/audio/translation_create_response.rb index 278a37e5..25a9c3e1 100644 --- a/lib/openai/models/audio/translation_create_response.rb +++ b/lib/openai/models/audio/translation_create_response.rb @@ -4,7 +4,6 @@ module OpenAI module Models module Audio # @abstract - # class TranslationCreateResponse < OpenAI::Union variant -> { OpenAI::Models::Audio::Translation } diff --git a/lib/openai/models/audio_model.rb b/lib/openai/models/audio_model.rb index 1043030f..81db712e 100644 --- a/lib/openai/models/audio_model.rb +++ b/lib/openai/models/audio_model.rb @@ -3,7 +3,6 @@ module OpenAI module Models # @abstract - # class AudioModel < OpenAI::Enum WHISPER_1 = :"whisper-1" diff --git a/lib/openai/models/beta/assistant_tool.rb b/lib/openai/models/beta/assistant_tool.rb index 7cd4e9b8..2b17c0d2 100644 --- a/lib/openai/models/beta/assistant_tool.rb +++ b/lib/openai/models/beta/assistant_tool.rb @@ -4,7 +4,6 @@ module OpenAI module Models module Beta # @abstract - # class AssistantTool < OpenAI::Union discriminator :type diff --git a/lib/openai/models/beta/thread_create_and_run_params.rb b/lib/openai/models/beta/thread_create_and_run_params.rb index 3cf817fa..bb147096 100644 --- a/lib/openai/models/beta/thread_create_and_run_params.rb +++ b/lib/openai/models/beta/thread_create_and_run_params.rb @@ -376,7 +376,6 @@ class Attachment < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Tool < OpenAI::Union discriminator :type @@ -701,7 +700,6 @@ class FileSearch < OpenAI::BaseModel end # @abstract - # class Tool < OpenAI::Union variant -> { OpenAI::Models::Beta::CodeInterpreterTool } diff --git a/lib/openai/models/beta/thread_create_params.rb b/lib/openai/models/beta/thread_create_params.rb index 46dfaee7..fc3e6299 100644 --- a/lib/openai/models/beta/thread_create_params.rb +++ b/lib/openai/models/beta/thread_create_params.rb @@ -155,7 +155,6 @@ class Attachment < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Tool < OpenAI::Union discriminator :type diff --git a/lib/openai/models/beta/threads/message.rb b/lib/openai/models/beta/threads/message.rb index b434fe46..71c118cd 100644 --- a/lib/openai/models/beta/threads/message.rb +++ b/lib/openai/models/beta/threads/message.rb @@ -173,7 +173,6 @@ class Attachment < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Tool < OpenAI::Union variant -> { OpenAI::Models::Beta::CodeInterpreterTool } diff --git a/lib/openai/models/beta/threads/message_create_params.rb b/lib/openai/models/beta/threads/message_create_params.rb index 544cf794..e89ac3e5 100644 --- a/lib/openai/models/beta/threads/message_create_params.rb +++ b/lib/openai/models/beta/threads/message_create_params.rb @@ -115,7 +115,6 @@ class Attachment < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Tool < OpenAI::Union discriminator :type diff --git 
a/lib/openai/models/beta/threads/run_create_params.rb b/lib/openai/models/beta/threads/run_create_params.rb index c655fda0..57a11b7c 100644 --- a/lib/openai/models/beta/threads/run_create_params.rb +++ b/lib/openai/models/beta/threads/run_create_params.rb @@ -344,7 +344,6 @@ class Attachment < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Tool < OpenAI::Union discriminator :type diff --git a/lib/openai/models/beta/threads/runs/run_step_include.rb b/lib/openai/models/beta/threads/runs/run_step_include.rb index 1d4c531e..ae9413a8 100644 --- a/lib/openai/models/beta/threads/runs/run_step_include.rb +++ b/lib/openai/models/beta/threads/runs/run_step_include.rb @@ -6,7 +6,6 @@ module Beta module Threads module Runs # @abstract - # class RunStepInclude < OpenAI::Enum STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT = :"step_details.tool_calls[*].file_search.results[*].content" diff --git a/lib/openai/models/chat/chat_completion_assistant_message_param.rb b/lib/openai/models/chat/chat_completion_assistant_message_param.rb index 7dcbccab..e3682e55 100644 --- a/lib/openai/models/chat/chat_completion_assistant_message_param.rb +++ b/lib/openai/models/chat/chat_completion_assistant_message_param.rb @@ -134,7 +134,6 @@ class ArrayOfContentPart < OpenAI::Union end # @deprecated - # class FunctionCall < OpenAI::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON diff --git a/lib/openai/models/chat/chat_completion_chunk.rb b/lib/openai/models/chat/chat_completion_chunk.rb index 8d0ec4c1..0c8a0cea 100644 --- a/lib/openai/models/chat/chat_completion_chunk.rb +++ b/lib/openai/models/chat/chat_completion_chunk.rb @@ -194,7 +194,6 @@ class Delta < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @deprecated - # class FunctionCall < OpenAI::BaseModel # @!attribute [r] arguments # The arguments to call the function with, as generated by the model in JSON diff --git a/lib/openai/models/chat/chat_completion_function_message_param.rb b/lib/openai/models/chat/chat_completion_function_message_param.rb index 4ce20d75..1da70875 100644 --- a/lib/openai/models/chat/chat_completion_function_message_param.rb +++ b/lib/openai/models/chat/chat_completion_function_message_param.rb @@ -4,7 +4,6 @@ module OpenAI module Models module Chat # @deprecated - # class ChatCompletionFunctionMessageParam < OpenAI::BaseModel # @!attribute content # The contents of the function message. 
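Many of the hunks above touch `OpenAI::Enum` subclasses in the same mechanical way; as a reminder of the semantics documented in `base_model.rb`, a sketch using the `SpeechModel` enum from this patch (the commented return values assume the documented `values`/`===`/`coerce` behavior):

```ruby
# Constants double as the allowed Symbol values.
class SpeechModel < OpenAI::Enum
  TTS_1 = :"tts-1"
  TTS_1_HD = :"tts-1-hd"
end

SpeechModel.values                              # => [:"tts-1", :"tts-1-hd"]
SpeechModel === :"tts-1"                        # => true
OpenAI::Converter.coerce(SpeechModel, "tts-1")  # => :"tts-1"
```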
diff --git a/lib/openai/models/chat/chat_completion_message.rb b/lib/openai/models/chat/chat_completion_message.rb index 96223d57..228616ad 100644 --- a/lib/openai/models/chat/chat_completion_message.rb +++ b/lib/openai/models/chat/chat_completion_message.rb @@ -151,7 +151,6 @@ class URLCitation < OpenAI::BaseModel end # @deprecated - # class FunctionCall < OpenAI::BaseModel # @!attribute arguments # The arguments to call the function with, as generated by the model in JSON diff --git a/lib/openai/models/chat/chat_completion_modality.rb b/lib/openai/models/chat/chat_completion_modality.rb index e7558545..725b907d 100644 --- a/lib/openai/models/chat/chat_completion_modality.rb +++ b/lib/openai/models/chat/chat_completion_modality.rb @@ -4,7 +4,6 @@ module OpenAI module Models module Chat # @abstract - # class ChatCompletionModality < OpenAI::Enum TEXT = :text AUDIO = :audio diff --git a/lib/openai/models/chat/completion_create_params.rb b/lib/openai/models/chat/completion_create_params.rb index b77143a8..4426c2a5 100644 --- a/lib/openai/models/chat/completion_create_params.rb +++ b/lib/openai/models/chat/completion_create_params.rb @@ -473,7 +473,6 @@ class FunctionCallMode < OpenAI::Enum end # @deprecated - # class Function < OpenAI::BaseModel # @!attribute name # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain @@ -520,7 +519,6 @@ class Function < OpenAI::BaseModel end # @abstract - # class Modality < OpenAI::Enum TEXT = :text AUDIO = :audio diff --git a/lib/openai/models/chat_model.rb b/lib/openai/models/chat_model.rb index 7b512490..29b0a851 100644 --- a/lib/openai/models/chat_model.rb +++ b/lib/openai/models/chat_model.rb @@ -3,7 +3,6 @@ module OpenAI module Models # @abstract - # class ChatModel < OpenAI::Enum O3_MINI = :"o3-mini" O3_MINI_2025_01_31 = :"o3-mini-2025-01-31" diff --git a/lib/openai/models/embedding_model.rb b/lib/openai/models/embedding_model.rb index ae14fe32..65247fdf 100644 --- a/lib/openai/models/embedding_model.rb +++ b/lib/openai/models/embedding_model.rb @@ -3,7 +3,6 @@ module OpenAI module Models # @abstract - # class EmbeddingModel < OpenAI::Enum TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002" TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small" diff --git a/lib/openai/models/fine_tuning/job_create_params.rb b/lib/openai/models/fine_tuning/job_create_params.rb index 926e7f82..988c7703 100644 --- a/lib/openai/models/fine_tuning/job_create_params.rb +++ b/lib/openai/models/fine_tuning/job_create_params.rb @@ -168,7 +168,6 @@ class Preset < OpenAI::Enum end # @deprecated - # class Hyperparameters < OpenAI::BaseModel # @!attribute [r] batch_size # Number of examples in each batch. 
A larger batch size means that model diff --git a/lib/openai/models/image_model.rb b/lib/openai/models/image_model.rb index c9c62780..e49e6699 100644 --- a/lib/openai/models/image_model.rb +++ b/lib/openai/models/image_model.rb @@ -3,7 +3,6 @@ module OpenAI module Models # @abstract - # class ImageModel < OpenAI::Enum DALL_E_2 = :"dall-e-2" DALL_E_3 = :"dall-e-3" diff --git a/lib/openai/models/moderation.rb b/lib/openai/models/moderation.rb index 98a25176..0f3c5a90 100644 --- a/lib/openai/models/moderation.rb +++ b/lib/openai/models/moderation.rb @@ -310,7 +310,6 @@ class CategoryAppliedInputTypes < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Harassment < OpenAI::Enum TEXT = :text @@ -318,7 +317,6 @@ class Harassment < OpenAI::Enum end # @abstract - # class HarassmentThreatening < OpenAI::Enum TEXT = :text @@ -326,7 +324,6 @@ class HarassmentThreatening < OpenAI::Enum end # @abstract - # class Hate < OpenAI::Enum TEXT = :text @@ -334,7 +331,6 @@ class Hate < OpenAI::Enum end # @abstract - # class HateThreatening < OpenAI::Enum TEXT = :text @@ -342,7 +338,6 @@ class HateThreatening < OpenAI::Enum end # @abstract - # class Illicit < OpenAI::Enum TEXT = :text @@ -350,7 +345,6 @@ class Illicit < OpenAI::Enum end # @abstract - # class IllicitViolent < OpenAI::Enum TEXT = :text @@ -358,7 +352,6 @@ class IllicitViolent < OpenAI::Enum end # @abstract - # class SelfHarm < OpenAI::Enum TEXT = :text IMAGE = :image @@ -367,7 +360,6 @@ class SelfHarm < OpenAI::Enum end # @abstract - # class SelfHarmInstruction < OpenAI::Enum TEXT = :text IMAGE = :image @@ -376,7 +368,6 @@ class SelfHarmInstruction < OpenAI::Enum end # @abstract - # class SelfHarmIntent < OpenAI::Enum TEXT = :text IMAGE = :image @@ -385,7 +376,6 @@ class SelfHarmIntent < OpenAI::Enum end # @abstract - # class Sexual < OpenAI::Enum TEXT = :text IMAGE = :image @@ -394,7 +384,6 @@ class Sexual < OpenAI::Enum end # @abstract - # class SexualMinor < OpenAI::Enum TEXT = :text @@ -402,7 +391,6 @@ class SexualMinor < OpenAI::Enum end # @abstract - # class Violence < OpenAI::Enum TEXT = :text IMAGE = :image @@ -411,7 +399,6 @@ class Violence < OpenAI::Enum end # @abstract - # class ViolenceGraphic < OpenAI::Enum TEXT = :text IMAGE = :image diff --git a/lib/openai/models/moderation_model.rb b/lib/openai/models/moderation_model.rb index 2abe4a13..4089ad86 100644 --- a/lib/openai/models/moderation_model.rb +++ b/lib/openai/models/moderation_model.rb @@ -3,7 +3,6 @@ module OpenAI module Models # @abstract - # class ModerationModel < OpenAI::Enum OMNI_MODERATION_LATEST = :"omni-moderation-latest" OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26" diff --git a/lib/openai/models/responses/response_file_search_tool_call.rb b/lib/openai/models/responses/response_file_search_tool_call.rb index df500ca6..eb1bd637 100644 --- a/lib/openai/models/responses/response_file_search_tool_call.rb +++ b/lib/openai/models/responses/response_file_search_tool_call.rb @@ -131,7 +131,6 @@ class Result < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class Attribute < OpenAI::Union variant String diff --git a/lib/openai/models/vector_store_search_params.rb b/lib/openai/models/vector_store_search_params.rb index ff451753..268a7e7d 100644 --- a/lib/openai/models/vector_store_search_params.rb +++ b/lib/openai/models/vector_store_search_params.rb @@ -128,7 +128,6 @@ class RankingOptions < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - # class 
Ranker < OpenAI::Enum AUTO = :auto DEFAULT_2024_11_15 = :"default-2024-11-15" diff --git a/lib/openai/models/vector_store_search_response.rb b/lib/openai/models/vector_store_search_response.rb index aa09a30a..bc92a2f8 100644 --- a/lib/openai/models/vector_store_search_response.rb +++ b/lib/openai/models/vector_store_search_response.rb @@ -51,7 +51,6 @@ class VectorStoreSearchResponse < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - class Attribute < OpenAI::Union variant String diff --git a/lib/openai/models/vector_stores/file_batch_create_params.rb b/lib/openai/models/vector_stores/file_batch_create_params.rb index f5859e61..fbe67069 100644 --- a/lib/openai/models/vector_stores/file_batch_create_params.rb +++ b/lib/openai/models/vector_stores/file_batch_create_params.rb @@ -50,7 +50,6 @@ class FileBatchCreateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - class Attribute < OpenAI::Union variant String diff --git a/lib/openai/models/vector_stores/file_create_params.rb b/lib/openai/models/vector_stores/file_create_params.rb index c91648aa..a303415b 100644 --- a/lib/openai/models/vector_stores/file_create_params.rb +++ b/lib/openai/models/vector_stores/file_create_params.rb @@ -50,7 +50,6 @@ class FileCreateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - class Attribute < OpenAI::Union variant String diff --git a/lib/openai/models/vector_stores/file_update_params.rb b/lib/openai/models/vector_stores/file_update_params.rb index 047e821f..5b0b4e3d 100644 --- a/lib/openai/models/vector_stores/file_update_params.rb +++ b/lib/openai/models/vector_stores/file_update_params.rb @@ -35,7 +35,6 @@ class FileUpdateParams < OpenAI::BaseModel # def initialize: (Hash | OpenAI::BaseModel) -> void # @abstract - class Attribute < OpenAI::Union variant String diff --git a/lib/openai/models/vector_stores/vector_store_file.rb b/lib/openai/models/vector_stores/vector_store_file.rb index 27d1234a..15841d27 100644 --- a/lib/openai/models/vector_stores/vector_store_file.rb +++ b/lib/openai/models/vector_stores/vector_store_file.rb @@ -156,7 +156,6 @@ class Status < OpenAI::Enum end # @abstract - class Attribute < OpenAI::Union variant String diff --git a/lib/openai/page.rb b/lib/openai/page.rb index fa3bd198..0d0866e7 100644 --- a/lib/openai/page.rb +++ b/lib/openai/page.rb @@ -30,13 +30,12 @@ class Page # @return [String] attr_accessor :object - # @private + # @api private # # @param client [OpenAI::BaseClient] # @param req [Hash{Symbol=>Object}] # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param page_data [Array] - def initialize(client:, req:, headers:, page_data:) super model = req.fetch(:model) @@ -61,13 +60,11 @@ def next_page? # @raise [OpenAI::HTTP::Error] # @return [OpenAI::Page] - def next_page raise RuntimeError.new("No more pages available.") end # @param blk [Proc] - def auto_paging_each(&blk) unless block_given?
raise ArgumentError.new("A block must be given to ##{__method__}") @@ -81,7 +78,6 @@ def auto_paging_each(&blk) end # @return [String] - # def inspect "#<#{self.class}:0x#{object_id.to_s(16)} data=#{data.inspect} object=#{object.inspect}>" end diff --git a/lib/openai/pooled_net_requester.rb b/lib/openai/pooled_net_requester.rb index d1a15ffe..0e16cc88 100644 --- a/lib/openai/pooled_net_requester.rb +++ b/lib/openai/pooled_net_requester.rb @@ -1,16 +1,14 @@ # frozen_string_literal: true module OpenAI - # @private - # + # @api private class PooledNetRequester class << self - # @private + # @api private # # @param url [URI::Generic] # # @return [Net::HTTP] - # def connect(url) port = case [url.port, url.scheme] @@ -28,17 +26,16 @@ def connect(url) end end - # @private + # @api private # # @param conn [Net::HTTP] # @param deadline [Float] - # def calibrate_socket_timeout(conn, deadline) timeout = deadline - OpenAI::Util.monotonic_secs conn.open_timeout = conn.read_timeout = conn.write_timeout = conn.continue_timeout = timeout end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . # @@ -51,7 +48,6 @@ def calibrate_socket_timeout(conn, deadline) # @param blk [Proc] # # @return [Net::HTTPGenericRequest] - # def build_request(request, &) method, url, headers, body = request.fetch_values(:method, :url, :headers, :body) req = Net::HTTPGenericRequest.new( @@ -80,11 +76,10 @@ def build_request(request, &) end end - # @private + # @api private # # @param url [URI::Generic] # @param blk [Proc] - # private def with_pool(url, &) origin = OpenAI::Util.uri_origin(url) pool = @@ -97,7 +92,7 @@ def build_request(request, &) pool.with(&) end - # @private + # @api private # # @param request [Hash{Symbol=>Object}] . # @@ -112,7 +107,6 @@ def build_request(request, &) # @option request [Float] :deadline # # @return [Array(Net::HTTPResponse, Enumerable)] - # def execute(request) url, deadline = request.fetch_values(:url, :deadline) @@ -158,10 +152,9 @@ def execute(request) [response, (response.body = body)] end - # @private + # @api private # # @param size [Integer] - # def initialize(size: Etc.nprocessors) @mutex = Mutex.new @size = size diff --git a/lib/openai/request_options.rb b/lib/openai/request_options.rb index befdf96f..405cf3c3 100644 --- a/lib/openai/request_options.rb +++ b/lib/openai/request_options.rb @@ -1,10 +1,9 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @abstract - # module RequestParameters # @!parse # # Options to specify HTTP behaviour for this request. @@ -12,7 +11,6 @@ module RequestParameters # attr_accessor :request_options # @param mod [Module] - # def self.included(mod) return unless mod <= OpenAI::BaseModel @@ -20,15 +18,13 @@ def self.included(mod) mod.optional(:request_options, OpenAI::RequestOptions) end - # @private - # + # @api private module Converter - # @private + # @api private # # @param params [Object] # # @return [Array(Object, Hash{Symbol=>Object})] - # def dump_request(params) case (dumped = dump(params)) in Hash @@ -46,12 +42,11 @@ def dump_request(params) # When making a request, you can pass an actual {RequestOptions} instance, or # simply pass a Hash with symbol keys matching the attributes on this class. 
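As an editorial illustration of the comment above: the two calling conventions it describes would look roughly like the sketch below. This is not part of the patch; the `timeout` key is shown as a plausible request option, not confirmed here, and the client construction assumes the SDK's standard `api_key` keyword.

```ruby
require "openai"

# A configured client; api_key is the standard constructor keyword for this SDK.
client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# Per-request options passed as a plain Hash with symbol keys...
client.models.list(request_options: {timeout: 30})

# ...or as an explicit RequestOptions instance; both forms are accepted.
client.models.list(request_options: OpenAI::RequestOptions.new(timeout: 30))
```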
class RequestOptions < OpenAI::BaseModel - # @private + # @api private # # @param opts [OpenAI::RequestOptions, Hash{Symbol=>Object}] # # @raise [ArgumentError] - # def self.validate!(opts) case opts in OpenAI::RequestOptions | Hash diff --git a/lib/openai/resources/audio.rb b/lib/openai/resources/audio.rb index db698a4b..e82c41f4 100644 --- a/lib/openai/resources/audio.rb +++ b/lib/openai/resources/audio.rb @@ -13,7 +13,6 @@ class Audio attr_reader :speech # @param client [OpenAI::Client] - # def initialize(client:) @client = client @transcriptions = OpenAI::Resources::Audio::Transcriptions.new(client: client) diff --git a/lib/openai/resources/audio/speech.rb b/lib/openai/resources/audio/speech.rb index dce52041..9c5d8284 100644 --- a/lib/openai/resources/audio/speech.rb +++ b/lib/openai/resources/audio/speech.rb @@ -27,7 +27,6 @@ class Speech # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [Object] - # def create(params) parsed, options = OpenAI::Models::Audio::SpeechCreateParams.dump_request(params) @client.request( @@ -41,7 +40,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/transcriptions.rb b/lib/openai/resources/audio/transcriptions.rb index 4c158734..9e291700 100644 --- a/lib/openai/resources/audio/transcriptions.rb +++ b/lib/openai/resources/audio/transcriptions.rb @@ -41,7 +41,6 @@ class Transcriptions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose] - # def create(params) parsed, options = OpenAI::Models::Audio::TranscriptionCreateParams.dump_request(params) @client.request( @@ -55,7 +54,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/audio/translations.rb b/lib/openai/resources/audio/translations.rb index 4e1431c1..c1de4f8e 100644 --- a/lib/openai/resources/audio/translations.rb +++ b/lib/openai/resources/audio/translations.rb @@ -31,7 +31,6 @@ class Translations # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose] - # def create(params) parsed, options = OpenAI::Models::Audio::TranslationCreateParams.dump_request(params) @client.request( @@ -45,7 +44,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/batches.rb b/lib/openai/resources/batches.rb index 751d6548..8f0799eb 100644 --- a/lib/openai/resources/batches.rb +++ b/lib/openai/resources/batches.rb @@ -35,7 +35,6 @@ class Batches # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def create(params) parsed, options = OpenAI::Models::BatchCreateParams.dump_request(params) @client.request( @@ -56,7 +55,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def retrieve(batch_id, params = {}) @client.request( method: :get, @@ -81,7 +79,6 @@ def retrieve(batch_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = 
OpenAI::Models::BatchListParams.dump_request(params) @client.request( @@ -105,7 +102,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Batch] - # def cancel(batch_id, params = {}) @client.request( method: :post, @@ -116,7 +112,6 @@ def cancel(batch_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta.rb b/lib/openai/resources/beta.rb index 4eee364b..c1b7273a 100644 --- a/lib/openai/resources/beta.rb +++ b/lib/openai/resources/beta.rb @@ -10,7 +10,6 @@ class Beta attr_reader :threads # @param client [OpenAI::Client] - # def initialize(client:) @client = client @assistants = OpenAI::Resources::Beta::Assistants.new(client: client) diff --git a/lib/openai/resources/beta/assistants.rb b/lib/openai/resources/beta/assistants.rb index 45d0010d..5911fe3f 100644 --- a/lib/openai/resources/beta/assistants.rb +++ b/lib/openai/resources/beta/assistants.rb @@ -78,7 +78,6 @@ class Assistants # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def create(params) parsed, options = OpenAI::Models::Beta::AssistantCreateParams.dump_request(params) @client.request( @@ -99,7 +98,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def retrieve(assistant_id, params = {}) @client.request( method: :get, @@ -185,7 +183,6 @@ def retrieve(assistant_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Assistant] - # def update(assistant_id, params = {}) parsed, options = OpenAI::Models::Beta::AssistantUpdateParams.dump_request(params) @client.request( @@ -220,7 +217,6 @@ def update(assistant_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::Beta::AssistantListParams.dump_request(params) @client.request( @@ -242,7 +238,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::AssistantDeleted] - # def delete(assistant_id, params = {}) @client.request( method: :delete, @@ -253,7 +248,6 @@ def delete(assistant_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads.rb b/lib/openai/resources/beta/threads.rb index b7e58a7c..d8f2e660 100644 --- a/lib/openai/resources/beta/threads.rb +++ b/lib/openai/resources/beta/threads.rb @@ -32,7 +32,6 @@ class Threads # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def create(params = {}) parsed, options = OpenAI::Models::Beta::ThreadCreateParams.dump_request(params) @client.request( @@ -53,7 +52,6 @@ def create(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def retrieve(thread_id, params = {}) @client.request( method: :get, @@ -84,7 +82,6 @@ def retrieve(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Thread] - # def update(thread_id, 
params = {}) parsed, options = OpenAI::Models::Beta::ThreadUpdateParams.dump_request(params) @client.request( @@ -105,7 +102,6 @@ def update(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::ThreadDeleted] - # def delete(thread_id, params = {}) @client.request( method: :delete, @@ -210,7 +206,6 @@ def delete(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def create_and_run(params) parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) parsed.delete(:stream) @@ -318,7 +313,6 @@ def create_and_run(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_and_run_streaming(params) parsed, options = OpenAI::Models::Beta::ThreadCreateAndRunParams.dump_request(params) parsed.store(:stream, true) @@ -334,7 +328,6 @@ def create_and_run_streaming(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @runs = OpenAI::Resources::Beta::Threads::Runs.new(client: client) diff --git a/lib/openai/resources/beta/threads/messages.rb b/lib/openai/resources/beta/threads/messages.rb index 3ea2f318..4e5141de 100644 --- a/lib/openai/resources/beta/threads/messages.rb +++ b/lib/openai/resources/beta/threads/messages.rb @@ -33,7 +33,6 @@ class Messages # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def create(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageCreateParams.dump_request(params) @client.request( @@ -57,7 +56,6 @@ def create(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def retrieve(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -89,7 +87,6 @@ def retrieve(message_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Message] - # def update(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageUpdateParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -132,7 +129,6 @@ def update(message_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(thread_id, params = {}) parsed, options = OpenAI::Models::Beta::Threads::MessageListParams.dump_request(params) @client.request( @@ -156,7 +152,6 @@ def list(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::MessageDeleted] - # def delete(message_id, params) parsed, options = OpenAI::Models::Beta::Threads::MessageDeleteParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -171,7 +166,6 @@ def delete(message_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/beta/threads/runs.rb b/lib/openai/resources/beta/threads/runs.rb index 34f408d7..2f3b3b12 100644 --- a/lib/openai/resources/beta/threads/runs.rb +++ b/lib/openai/resources/beta/threads/runs.rb @@ -123,7 +123,6 
@@ class Runs # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def create(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) parsed.delete(:stream) @@ -253,7 +252,6 @@ def create(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(thread_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCreateParams.dump_request(params) parsed.store(:stream, true) @@ -282,7 +280,6 @@ def create_streaming(thread_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def retrieve(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -315,7 +312,6 @@ def retrieve(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def update(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunUpdateParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -355,7 +351,6 @@ def update(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(thread_id, params = {}) parsed, options = OpenAI::Models::Beta::Threads::RunListParams.dump_request(params) @client.request( @@ -379,7 +374,6 @@ def list(thread_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def cancel(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunCancelParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -411,7 +405,6 @@ def cancel(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Run] - # def submit_tool_outputs(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) parsed.delete(:stream) @@ -445,7 +438,6 @@ def submit_tool_outputs(run_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def submit_tool_outputs_streaming(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params) parsed.store(:stream, true) @@ -464,7 +456,6 @@ def submit_tool_outputs_streaming(run_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @steps = OpenAI::Resources::Beta::Threads::Runs::Steps.new(client: client) diff --git a/lib/openai/resources/beta/threads/runs/steps.rb b/lib/openai/resources/beta/threads/runs/steps.rb index e44fb9a5..d5c4ddb3 100644 --- a/lib/openai/resources/beta/threads/runs/steps.rb +++ b/lib/openai/resources/beta/threads/runs/steps.rb @@ -28,7 +28,6 @@ class Steps # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Beta::Threads::Runs::RunStep] - # def retrieve(step_id, params) parsed, options = OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -82,7 +81,6 @@ def retrieve(step_id, params) # @option params 
[OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(run_id, params) parsed, options = OpenAI::Models::Beta::Threads::Runs::StepListParams.dump_request(params) thread_id = parsed.delete(:thread_id) do @@ -99,7 +97,6 @@ def list(run_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/chat.rb b/lib/openai/resources/chat.rb index 8945e202..d5bf1e2e 100644 --- a/lib/openai/resources/chat.rb +++ b/lib/openai/resources/chat.rb @@ -7,7 +7,6 @@ class Chat attr_reader :completions # @param client [OpenAI::Client] - # def initialize(client:) @client = client @completions = OpenAI::Resources::Chat::Completions.new(client: client) diff --git a/lib/openai/resources/chat/completions.rb b/lib/openai/resources/chat/completions.rb index 7be5f0ae..94e76ede 100644 --- a/lib/openai/resources/chat/completions.rb +++ b/lib/openai/resources/chat/completions.rb @@ -213,7 +213,6 @@ class Completions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def create(params) parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params) parsed.delete(:stream) @@ -432,7 +431,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::Chat::CompletionCreateParams.dump_request(params) parsed.store(:stream, true) @@ -457,7 +455,6 @@ def create_streaming(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def retrieve(completion_id, params = {}) @client.request( method: :get, @@ -485,7 +482,6 @@ def retrieve(completion_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletion] - # def update(completion_id, params) parsed, options = OpenAI::Models::Chat::CompletionUpdateParams.dump_request(params) @client.request( @@ -518,7 +514,6 @@ def update(completion_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::Chat::CompletionListParams.dump_request(params) @client.request( @@ -541,7 +536,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Chat::ChatCompletionDeleted] - # def delete(completion_id, params = {}) @client.request( method: :delete, @@ -552,7 +546,6 @@ def delete(completion_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @messages = OpenAI::Resources::Chat::Completions::Messages.new(client: client) diff --git a/lib/openai/resources/chat/completions/messages.rb b/lib/openai/resources/chat/completions/messages.rb index 3bc5880d..decc122d 100644 --- a/lib/openai/resources/chat/completions/messages.rb +++ b/lib/openai/resources/chat/completions/messages.rb @@ -22,7 +22,6 @@ class Messages # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(completion_id, params = {}) parsed, options = OpenAI::Models::Chat::Completions::MessageListParams.dump_request(params) @client.request( @@ -36,7 +35,6 @@ 
def list(completion_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/completions.rb b/lib/openai/resources/completions.rb index 1a6dcb98..a16f4bcf 100644 --- a/lib/openai/resources/completions.rb +++ b/lib/openai/resources/completions.rb @@ -113,7 +113,6 @@ class Completions # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Completion] - # def create(params) parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params) parsed.delete(:stream) @@ -236,7 +235,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::CompletionCreateParams.dump_request(params) parsed.store(:stream, true) @@ -252,7 +250,6 @@ def create_streaming(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/embeddings.rb b/lib/openai/resources/embeddings.rb index b5e6eda1..b70c1ef5 100644 --- a/lib/openai/resources/embeddings.rb +++ b/lib/openai/resources/embeddings.rb @@ -35,7 +35,6 @@ class Embeddings # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::CreateEmbeddingResponse] - # def create(params) parsed, options = OpenAI::Models::EmbeddingCreateParams.dump_request(params) @client.request( @@ -48,7 +47,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/files.rb b/lib/openai/resources/files.rb index 2164c02b..191387e3 100644 --- a/lib/openai/resources/files.rb +++ b/lib/openai/resources/files.rb @@ -37,7 +37,6 @@ class Files # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileObject] - # def create(params) parsed, options = OpenAI::Models::FileCreateParams.dump_request(params) @client.request( @@ -59,7 +58,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileObject] - # def retrieve(file_id, params = {}) @client.request( method: :get, @@ -89,7 +87,6 @@ def retrieve(file_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::FileListParams.dump_request(params) @client.request( @@ -111,7 +108,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FileDeleted] - # def delete(file_id, params = {}) @client.request( method: :delete, @@ -130,7 +126,6 @@ def delete(file_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [Object] - # def content(file_id, params = {}) @client.request( method: :get, @@ -142,7 +137,6 @@ def content(file_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/fine_tuning.rb b/lib/openai/resources/fine_tuning.rb index e7a161e4..61663e79 100644 --- a/lib/openai/resources/fine_tuning.rb +++ b/lib/openai/resources/fine_tuning.rb @@ -7,7 +7,6 @@ class FineTuning attr_reader :jobs # @param client [OpenAI::Client] - # def initialize(client:) 
@client = client @jobs = OpenAI::Resources::FineTuning::Jobs.new(client: client) diff --git a/lib/openai/resources/fine_tuning/jobs.rb b/lib/openai/resources/fine_tuning/jobs.rb index bcfa238e..177d978b 100644 --- a/lib/openai/resources/fine_tuning/jobs.rb +++ b/lib/openai/resources/fine_tuning/jobs.rb @@ -78,7 +78,6 @@ class Jobs # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def create(params) parsed, options = OpenAI::Models::FineTuning::JobCreateParams.dump_request(params) @client.request( @@ -101,7 +100,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def retrieve(fine_tuning_job_id, params = {}) @client.request( method: :get, @@ -125,7 +123,6 @@ def retrieve(fine_tuning_job_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::FineTuning::JobListParams.dump_request(params) @client.request( @@ -147,7 +144,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::FineTuning::FineTuningJob] - # def cancel(fine_tuning_job_id, params = {}) @client.request( method: :post, @@ -170,7 +166,6 @@ def cancel(fine_tuning_job_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list_events(fine_tuning_job_id, params = {}) parsed, options = OpenAI::Models::FineTuning::JobListEventsParams.dump_request(params) @client.request( @@ -184,7 +179,6 @@ def list_events(fine_tuning_job_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @checkpoints = OpenAI::Resources::FineTuning::Jobs::Checkpoints.new(client: client) diff --git a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb index e9f2d303..cb4b3c18 100644 --- a/lib/openai/resources/fine_tuning/jobs/checkpoints.rb +++ b/lib/openai/resources/fine_tuning/jobs/checkpoints.rb @@ -18,7 +18,6 @@ class Checkpoints # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(fine_tuning_job_id, params = {}) parsed, options = OpenAI::Models::FineTuning::Jobs::CheckpointListParams.dump_request(params) @client.request( @@ -32,7 +31,6 @@ def list(fine_tuning_job_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/images.rb b/lib/openai/resources/images.rb index 8cc42136..48b2ac87 100644 --- a/lib/openai/resources/images.rb +++ b/lib/openai/resources/images.rb @@ -30,7 +30,6 @@ class Images # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def create_variation(params) parsed, options = OpenAI::Models::ImageCreateVariationParams.dump_request(params) @client.request( @@ -76,7 +75,6 @@ def create_variation(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def edit(params) parsed, options = OpenAI::Models::ImageEditParams.dump_request(params) @client.request( @@ -125,7 +123,6 @@ def edit(params) # @option params 
[OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ImagesResponse] - # def generate(params) parsed, options = OpenAI::Models::ImageGenerateParams.dump_request(params) @client.request( @@ -138,7 +135,6 @@ def generate(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/models.rb b/lib/openai/resources/models.rb index 9c086955..cff4bf06 100644 --- a/lib/openai/resources/models.rb +++ b/lib/openai/resources/models.rb @@ -13,7 +13,6 @@ class Models # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Model] - # def retrieve(model, params = {}) @client.request( method: :get, @@ -31,7 +30,6 @@ def retrieve(model, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def list(params = {}) @client.request( method: :get, @@ -52,7 +50,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ModelDeleted] - # def delete(model, params = {}) @client.request( method: :delete, @@ -63,7 +60,6 @@ def delete(model, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/moderations.rb b/lib/openai/resources/moderations.rb index f1375d48..85d8cd8f 100644 --- a/lib/openai/resources/moderations.rb +++ b/lib/openai/resources/moderations.rb @@ -19,7 +19,6 @@ class Moderations # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::ModerationCreateResponse] - # def create(params) parsed, options = OpenAI::Models::ModerationCreateParams.dump_request(params) @client.request( @@ -32,7 +31,6 @@ def create(params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/responses.rb b/lib/openai/resources/responses.rb index 3b4c2c2d..fdbe46bc 100644 --- a/lib/openai/resources/responses.rb +++ b/lib/openai/resources/responses.rb @@ -127,7 +127,6 @@ class Responses # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Responses::Response] - # def create(params) parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) parsed.delete(:stream) @@ -261,7 +260,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Stream] - # def create_streaming(params) parsed, options = OpenAI::Models::Responses::ResponseCreateParams.dump_request(params) parsed.store(:stream, true) @@ -288,7 +286,6 @@ def create_streaming(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Responses::Response] - # def retrieve(response_id, params = {}) parsed, options = OpenAI::Models::Responses::ResponseRetrieveParams.dump_request(params) @client.request( @@ -309,7 +306,6 @@ def retrieve(response_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [nil] - # def delete(response_id, params = {}) @client.request( method: :delete, @@ -320,7 +316,6 @@ def delete(response_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @input_items = 
OpenAI::Resources::Responses::InputItems.new(client: client) diff --git a/lib/openai/resources/responses/input_items.rb b/lib/openai/resources/responses/input_items.rb index 87ea88ab..1fe57b71 100644 --- a/lib/openai/resources/responses/input_items.rb +++ b/lib/openai/resources/responses/input_items.rb @@ -25,7 +25,6 @@ class InputItems # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(response_id, params = {}) parsed, options = OpenAI::Models::Responses::InputItemListParams.dump_request(params) @client.request( @@ -39,7 +38,6 @@ def list(response_id, params = {}) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/uploads.rb b/lib/openai/resources/uploads.rb index e7a40175..f72e4255 100644 --- a/lib/openai/resources/uploads.rb +++ b/lib/openai/resources/uploads.rb @@ -45,7 +45,6 @@ class Uploads # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def create(params) parsed, options = OpenAI::Models::UploadCreateParams.dump_request(params) @client.request( @@ -66,7 +65,6 @@ def create(params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def cancel(upload_id, params = {}) @client.request( method: :post, @@ -102,7 +100,6 @@ def cancel(upload_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Upload] - # def complete(upload_id, params) parsed, options = OpenAI::Models::UploadCompleteParams.dump_request(params) @client.request( @@ -115,7 +112,6 @@ def complete(upload_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @parts = OpenAI::Resources::Uploads::Parts.new(client: client) diff --git a/lib/openai/resources/uploads/parts.rb b/lib/openai/resources/uploads/parts.rb index c6f90707..b90eef15 100644 --- a/lib/openai/resources/uploads/parts.rb +++ b/lib/openai/resources/uploads/parts.rb @@ -25,7 +25,6 @@ class Parts # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::Uploads::UploadPart] - # def create(upload_id, params) parsed, options = OpenAI::Models::Uploads::PartCreateParams.dump_request(params) @client.request( @@ -39,7 +38,6 @@ def create(upload_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores.rb b/lib/openai/resources/vector_stores.rb index 18803daa..e60a1592 100644 --- a/lib/openai/resources/vector_stores.rb +++ b/lib/openai/resources/vector_stores.rb @@ -34,7 +34,6 @@ class VectorStores # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def create(params = {}) parsed, options = OpenAI::Models::VectorStoreCreateParams.dump_request(params) @client.request( @@ -55,7 +54,6 @@ def create(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def retrieve(vector_store_id, params = {}) @client.request( method: :get, @@ -85,7 +83,6 @@ def retrieve(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStore] - # def update(vector_store_id, params = {}) 
parsed, options = OpenAI::Models::VectorStoreUpdateParams.dump_request(params) @client.request( @@ -120,7 +117,6 @@ def update(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(params = {}) parsed, options = OpenAI::Models::VectorStoreListParams.dump_request(params) @client.request( @@ -142,7 +138,6 @@ def list(params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStoreDeleted] - # def delete(vector_store_id, params = {}) @client.request( method: :delete, @@ -173,7 +168,6 @@ def delete(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def search(vector_store_id, params) parsed, options = OpenAI::Models::VectorStoreSearchParams.dump_request(params) @client.request( @@ -187,7 +181,6 @@ def search(vector_store_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client @files = OpenAI::Resources::VectorStores::Files.new(client: client) diff --git a/lib/openai/resources/vector_stores/file_batches.rb b/lib/openai/resources/vector_stores/file_batches.rb index 3c484618..7893fd17 100644 --- a/lib/openai/resources/vector_stores/file_batches.rb +++ b/lib/openai/resources/vector_stores/file_batches.rb @@ -26,7 +26,6 @@ class FileBatches # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def create(vector_store_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchCreateParams.dump_request(params) @client.request( @@ -49,7 +48,6 @@ def create(vector_store_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def retrieve(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchRetrieveParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -75,7 +73,6 @@ def retrieve(batch_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch] - # def cancel(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchCancelParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -119,7 +116,6 @@ def cancel(batch_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list_files(batch_id, params) parsed, options = OpenAI::Models::VectorStores::FileBatchListFilesParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -136,7 +132,6 @@ def list_files(batch_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/resources/vector_stores/files.rb b/lib/openai/resources/vector_stores/files.rb index 23eafaa3..0c9ae2a5 100644 --- a/lib/openai/resources/vector_stores/files.rb +++ b/lib/openai/resources/vector_stores/files.rb @@ -28,7 +28,6 @@ class Files # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def create(vector_store_id, params) parsed, options = 
OpenAI::Models::VectorStores::FileCreateParams.dump_request(params) @client.request( @@ -51,7 +50,6 @@ def create(vector_store_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def retrieve(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileRetrieveParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -82,7 +80,6 @@ def retrieve(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFile] - # def update(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileUpdateParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -124,7 +121,6 @@ def update(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::CursorPage] - # def list(vector_store_id, params = {}) parsed, options = OpenAI::Models::VectorStores::FileListParams.dump_request(params) @client.request( @@ -151,7 +147,6 @@ def list(vector_store_id, params = {}) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Models::VectorStores::VectorStoreFileDeleted] - # def delete(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileDeleteParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -176,7 +171,6 @@ def delete(file_id, params) # @option params [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] :request_options # # @return [OpenAI::Page] - # def content(file_id, params) parsed, options = OpenAI::Models::VectorStores::FileContentParams.dump_request(params) vector_store_id = parsed.delete(:vector_store_id) do @@ -192,7 +186,6 @@ def content(file_id, params) end # @param client [OpenAI::Client] - # def initialize(client:) @client = client end diff --git a/lib/openai/stream.rb b/lib/openai/stream.rb index f9319992..02dea5de 100644 --- a/lib/openai/stream.rb +++ b/lib/openai/stream.rb @@ -1,7 +1,7 @@ # frozen_string_literal: true module OpenAI - # @private + # @api private # # @example # ```ruby @@ -19,10 +19,9 @@ module OpenAI class Stream include OpenAI::BaseStream - # @private + # @api private # # @return [Enumerable] - # private def iterator # rubocop:disable Metrics/BlockLength @iterator ||= OpenAI::Util.chain_fused(@messages) do |y| diff --git a/lib/openai/util.rb b/lib/openai/util.rb index 2546262b..2f4fecde 100644 --- a/lib/openai/util.rb +++ b/lib/openai/util.rb @@ -3,20 +3,17 @@ module OpenAI # rubocop:disable Metrics/ModuleLength - # @private - # + # @api private module Util - # @private + # @api private # # @return [Float] - # def self.monotonic_secs = Process.clock_gettime(Process::CLOCK_MONOTONIC) class << self - # @private + # @api private # # @return [String] - # def arch case (arch = RbConfig::CONFIG["arch"])&.downcase in nil @@ -32,10 +29,9 @@ def arch end end - # @private + # @api private # # @return [String] - # def os case (host = RbConfig::CONFIG["host_os"])&.downcase in nil @@ -57,12 +53,11 @@ def os end class << self - # @private + # @api private # # @param input [Object] # # @return [Boolean, Object] - # def primitive?(input) case input in true | false | Integer | Float | Symbol | String @@ -72,12 +67,11 @@ def primitive?(input) end end - # @private + # @api private # # @param input [Object] # # @return [Boolean, Object] - # def coerce_boolean(input) case 
input.is_a?(String) ? input.downcase : input in Numeric @@ -91,13 +85,12 @@ def coerce_boolean(input) end end - # @private + # @api private # # @param input [Object] # # @raise [ArgumentError] # @return [Boolean, nil] - # def coerce_boolean!(input) case coerce_boolean(input) in true | false | nil => coerced @@ -107,12 +100,11 @@ def coerce_boolean!(input) end end - # @private + # @api private # # @param input [Object] # # @return [Integer, Object] - # def coerce_integer(input) case input in true @@ -124,12 +116,11 @@ def coerce_integer(input) end end - # @private + # @api private # # @param input [Object] # # @return [Float, Object] - # def coerce_float(input) case input in true @@ -141,12 +132,11 @@ def coerce_float(input) end end - # @private + # @api private # # @param input [Object] # # @return [Hash{Object=>Object}, Object] - # def coerce_hash(input) case input in NilClass | Array | Set | Enumerator @@ -165,14 +155,13 @@ def coerce_hash(input) OMIT = Object.new.freeze class << self - # @private + # @api private # # @param lhs [Object] # @param rhs [Object] # @param concat [Boolean] # # @return [Object] - # private def deep_merge_lr(lhs, rhs, concat: false) case [lhs, rhs, concat] in [Hash, Hash, _] @@ -191,7 +180,7 @@ class << self end end - # @private + # @api private # # Recursively merge one hash with another. If the values at a given key are not # both hashes, just take the new value. @@ -203,7 +192,6 @@ class << self # @param concat [Boolean] whether to merge sequences by concatenation. # # @return [Object] - # def deep_merge(*values, sentinel: nil, concat: false) case values in [value, *values] @@ -215,7 +203,7 @@ def deep_merge(*values, sentinel: nil, concat: false) end end - # @private + # @api private # # @param data [Hash{Symbol=>Object}, Array, Object] # @param pick [Symbol, Integer, Array, nil] @@ -223,7 +211,6 @@ def deep_merge(*values, sentinel: nil, concat: false) # @param blk [Proc, nil] # # @return [Object, nil] - # def dig(data, pick, sentinel = nil, &blk) case [data, pick, blk] in [_, nil, nil] @@ -248,22 +235,20 @@ def dig(data, pick, sentinel = nil, &blk) end class << self - # @private + # @api private # # @param uri [URI::Generic] # # @return [String] - # def uri_origin(uri) "#{uri.scheme}://#{uri.host}#{uri.port == uri.default_port ? '' : ":#{uri.port}"}" end - # @private + # @api private # # @param path [String, Array] # # @return [String] - # def interpolate_path(path) case path in String @@ -278,40 +263,37 @@ def interpolate_path(path) end class << self - # @private + # @api private # # @param query [String, nil] # # @return [Hash{String=>Array}] - # def decode_query(query) CGI.parse(query.to_s) end - # @private + # @api private # # @param query [Hash{String=>Array, String, nil}, nil] # # @return [String, nil] - # def encode_query(query) query.to_h.empty? ? nil : URI.encode_www_form(query) end end class << self - # @private + # @api private # # @param url [URI::Generic, String] # # @return [Hash{Symbol=>String, Integer, nil}] - # def parse_uri(url) parsed = URI::Generic.component.zip(URI.split(url)).to_h {**parsed, query: decode_query(parsed.fetch(:query))} end - # @private + # @api private # # @param parsed [Hash{Symbol=>String, Integer, nil}] . # @@ -326,12 +308,11 @@ def parse_uri(url) # @option parsed [Hash{String=>Array}] :query # # @return [URI::Generic] - # def unparse_uri(parsed) URI::Generic.build(**parsed, query: encode_query(parsed.fetch(:query))) end - # @private + # @api private # # @param lhs [Hash{Symbol=>String, Integer, nil}] . 
# @@ -358,7 +339,6 @@ def unparse_uri(parsed) # @option rhs [Hash{String=>Array}] :query # # @return [URI::Generic] - # def join_parsed_uri(lhs, rhs) base_path, base_query = lhs.fetch_values(:path, :query) slashed = base_path.end_with?("/") ? base_path : "#{base_path}/" @@ -380,12 +360,11 @@ def join_parsed_uri(lhs, rhs) end class << self - # @private + # @api private # # @param headers [Hash{String=>String, Integer, Array, nil}] # # @return [Hash{String=>String}] - # def normalized_headers(*headers) {}.merge(*headers.compact).to_h do |key, val| case val @@ -399,16 +378,15 @@ def normalized_headers(*headers) end end - # @private + # @api private # # An adapter that satisfies the IO interface required by `::IO.copy_stream` class ReadIOAdapter - # @private + # @api private # # @param max_len [Integer, nil] # # @return [String] - # private def read_enum(max_len) case max_len in nil @@ -422,13 +400,12 @@ class ReadIOAdapter @buf.slice!(0..) end - # @private + # @api private # # @param max_len [Integer, nil] # @param out_string [String, nil] # # @return [String, nil] - # def read(max_len = nil, out_string = nil) case @stream in nil @@ -447,11 +424,10 @@ def read(max_len = nil, out_string = nil) .tap(&@blk) end - # @private + # @api private # # @param stream [String, IO, StringIO, Enumerable] # @param blk [Proc] - # def initialize(stream, &blk) @stream = stream.is_a?(String) ? StringIO.new(stream) : stream @buf = String.new.b @@ -463,7 +439,6 @@ class << self # @param blk [Proc] # # @return [Enumerable] - # def string_io(&blk) Enumerator.new do |y| y.define_singleton_method(:write) do @@ -477,13 +452,12 @@ def string_io(&blk) end class << self - # @private + # @api private # # @param y [Enumerator::Yielder] # @param boundary [String] # @param key [Symbol, String] # @param val [Object] - # private def encode_multipart_formdata(y, boundary:, key:, val:) y << "--#{boundary}\r\n" y << "Content-Disposition: form-data" @@ -516,12 +490,11 @@ class << self y << "\r\n" end - # @private + # @api private # # @param body [Object] # # @return [Array(String, Enumerable)] - # private def encode_multipart_streaming(body) boundary = SecureRandom.urlsafe_base64(60) @@ -547,13 +520,12 @@ class << self [boundary, strio] end - # @private + # @api private # # @param headers [Hash{String=>String}] # @param body [Object] # # @return [Object] - # def encode_content(headers, body) content_type = headers["content-type"] case [content_type, body] @@ -572,7 +544,7 @@ def encode_content(headers, body) end end - # @private + # @api private # # @param headers [Hash{String=>String}, Net::HTTPHeader] # @param stream [Enumerable] @@ -580,7 +552,6 @@ def encode_content(headers, body) # # @raise [JSON::ParserError] # @return [Object] - # def decode_content(headers, stream:, suppress_error: false) case headers["content-type"] in %r{^application/(?:vnd\.api\+)?json} @@ -609,7 +580,7 @@ def decode_content(headers, stream:, suppress_error: false) end class << self - # @private + # @api private # # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html # @@ -618,7 +589,6 @@ class << self # @param close [Proc] # # @return [Enumerable] - # def fused_enum(enum, external: false, &close) fused = false iter = Enumerator.new do |y| @@ -642,10 +612,9 @@ def fused_enum(enum, external: false, &close) iter end - # @private + # @api private # # @param enum [Enumerable, nil] - # def close_fused!(enum) return unless enum.is_a?(Enumerator) @@ -654,11 +623,10 @@ def close_fused!(enum) # rubocop:enable Lint/UnreachableLoop end - # @private + # @api 
private # # @param enum [Enumerable, nil] # @param blk [Proc] - # def chain_fused(enum, &blk) iter = Enumerator.new { blk.call(_1) } fused_enum(iter) { close_fused!(enum) } @@ -666,12 +634,11 @@ def chain_fused(enum, &blk) end class << self - # @private + # @api private # # @param enum [Enumerable] # # @return [Enumerable] - # def decode_lines(enum) re = /(\r\n|\r|\n)/ buffer = String.new.b @@ -701,14 +668,13 @@ def decode_lines(enum) end end - # @private + # @api private # # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream # # @param lines [Enumerable] # # @return [Hash{Symbol=>Object}] - # def decode_sse(lines) # rubocop:disable Metrics/BlockLength chain_fused(lines) do |y| diff --git a/rbi/lib/openai/base_client.rbi b/rbi/lib/openai/base_client.rbi index fd80c3c5..85abd5cb 100644 --- a/rbi/lib/openai/base_client.rbi +++ b/rbi/lib/openai/base_client.rbi @@ -44,16 +44,19 @@ module OpenAI PLATFORM_HEADERS = T::Hash[String, String] class << self + # @api private sig { params(req: OpenAI::BaseClient::RequestComponentsShape).void } def validate!(req) end + # @api private sig do params(status: Integer, headers: T.any(T::Hash[String, String], Net::HTTPHeader)).returns(T::Boolean) end def should_retry?(status, headers:) end + # @api private sig do params( request: OpenAI::BaseClient::RequestInputShape, @@ -74,6 +77,7 @@ module OpenAI def requester=(_) end + # @api private sig do params( base_url: String, @@ -98,14 +102,17 @@ module OpenAI ) end + # @api private sig { overridable.returns(T::Hash[String, String]) } private def auth_headers end + # @api private sig { returns(String) } private def generate_idempotency_key end + # @api private sig do overridable .params(req: OpenAI::BaseClient::RequestComponentsShape, opts: T::Hash[Symbol, T.anything]) @@ -114,10 +121,12 @@ module OpenAI private def build_request(req, opts) end + # @api private sig { params(headers: T::Hash[String, String], retry_count: Integer).returns(Float) } private def retry_delay(headers, retry_count:) end + # @api private sig do params( request: OpenAI::BaseClient::RequestInputShape, @@ -130,6 +139,8 @@ module OpenAI private def send_request(request, redirect_count:, retry_count:, send_retry_header:) end + # Execute the request specified by `req`. This is the method that all resource + # methods call into. sig do params( method: Symbol, diff --git a/rbi/lib/openai/base_model.rbi b/rbi/lib/openai/base_model.rbi index f64d87e2..b0ed4c49 100644 --- a/rbi/lib/openai/base_model.rbi +++ b/rbi/lib/openai/base_model.rbi @@ -1,19 +1,23 @@ # typed: strong module OpenAI + # @api private module Converter abstract! Input = T.type_alias { T.any(OpenAI::Converter, T::Class[T.anything]) } + # @api private sig { overridable.params(value: T.anything).returns(T.anything) } def coerce(value) end + # @api private sig { overridable.params(value: T.anything).returns(T.anything) } def dump(value) end + # @api private sig do overridable .params(value: T.anything) @@ -23,6 +27,7 @@ module OpenAI end class << self + # @api private sig do params( spec: T.any( @@ -40,20 +45,40 @@ module OpenAI def self.type_info(spec) end + # @api private + # + # Based on `target`, transform `value` into `target`, to the extent possible: + # + # 1. if the given `value` conforms to `target` already, return the given `value` + # 2. if it's possible and safe to convert the given `value` to `target`, then the + # converted value + # 3. 
otherwise, return the given `value` unaltered sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.coerce(target, value) end + # @api private sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.dump(target, value) end + # @api private + # + # The underlying algorithm for computing maximal compatibility is subject to + # future improvements. + # + # Similar to `#.coerce`, used to determine the best union variant to decode into. + # + # 1. determine if strict-ish coercion is possible + # 2. return either the result of a successful coercion, or whether loose coercion is possible + # 3. return a score from a recursively tallied count of fields that can be coerced sig { params(target: OpenAI::Converter::Input, value: T.anything).returns(T.anything) } def self.try_strict_coerce(target, value) end end end + # When we don't know what to expect for the value. class Unknown abstract! @@ -68,14 +93,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.anything).returns(T.anything) } def coerce(value) end + # @api private sig { override.params(value: T.anything).returns(T.anything) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -86,6 +114,7 @@ module OpenAI end end + # Ruby has no Boolean class; this is something for models to refer to. class BooleanModel abstract! @@ -100,14 +129,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } def coerce(value) end + # @api private sig { override.params(value: T.any(T::Boolean, T.anything)).returns(T.any(T::Boolean, T.anything)) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -118,16 +150,30 @@ module OpenAI end end + # A value from among a specified list of options. OpenAPI enum values map to Ruby + # values in the SDK as follows: + # + # 1. boolean => true | false + # 2. integer => Integer + # 3. float => Float + # 4. string => Symbol + # + # We can therefore convert string values to Symbols, but can't convert other + # values safely. class Enum abstract! extend OpenAI::Converter class << self + # All of the valid Symbol values for this enum. sig { overridable.returns(T::Array[T.any(NilClass, T::Boolean, Integer, Float, Symbol)]) } def values end + # @api private + # + # Guard against thread safety issues by instantiating `@values`. sig { void } private def finalize! end @@ -142,14 +188,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.any(String, Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } def coerce(value) end + # @api private sig { override.params(value: T.any(Symbol, T.anything)).returns(T.any(Symbol, T.anything)) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -166,18 +215,26 @@ module OpenAI extend OpenAI::Converter class << self + # @api private + # + # All of the specified variant info for this union. sig { returns(T::Array[[T.nilable(Symbol), Proc]]) } private def known_variants end + # @api private + # + # All of the specified variants for this union.
sig { overridable.returns(T::Array[[T.nilable(Symbol), T.anything]]) } protected def variants end + # @api private sig { params(property: Symbol).void } private def discriminator(property) end + # @api private sig do params( key: T.any( @@ -197,6 +254,7 @@ module OpenAI private def variant(key, spec = nil) end + # @api private sig { params(value: T.anything).returns(T.nilable(OpenAI::Converter::Input)) } private def resolve_variant(value) end @@ -211,14 +269,17 @@ module OpenAI end class << self + # @api private sig { override.params(value: T.anything).returns(T.anything) } def coerce(value) end + # @api private sig { override.params(value: T.anything).returns(T.anything) } def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -229,6 +290,7 @@ module OpenAI end end + # Array of items of a given type. class ArrayOf abstract! @@ -242,6 +304,7 @@ module OpenAI def ==(other) end + # @api private sig do override .params(value: T.any(T::Enumerable[T.anything], T.anything)) @@ -250,6 +313,7 @@ module OpenAI def coerce(value) end + # @api private sig do override .params(value: T.any(T::Enumerable[T.anything], T.anything)) @@ -258,6 +322,7 @@ module OpenAI def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -266,10 +331,12 @@ module OpenAI def try_strict_coerce(value) end + # @api private sig { returns(OpenAI::Converter::Input) } protected def item_type end + # @api private sig do params( type_info: T.any( @@ -285,6 +352,7 @@ module OpenAI end end + # Hash of items of a given type. class HashOf abstract! @@ -298,6 +366,7 @@ module OpenAI def ==(other) end + # @api private sig do override .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) @@ -306,6 +375,7 @@ module OpenAI def coerce(value) end + # @api private sig do override .params(value: T.any(T::Hash[T.anything, T.anything], T.anything)) @@ -314,6 +384,7 @@ module OpenAI def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -322,10 +393,12 @@ module OpenAI def try_strict_coerce(value) end + # @api private sig { returns(OpenAI::Converter::Input) } protected def item_type end + # @api private sig do params( type_info: T.any( @@ -349,6 +422,10 @@ module OpenAI KnownFieldShape = T.type_alias { {mode: T.nilable(Symbol), required: T::Boolean} } class << self + # @api private + # + # Assumes superclass fields are totally defined before fields are accessed / + # defined on subclasses. 
sig do returns( T::Hash[Symbol, @@ -367,10 +444,12 @@ module OpenAI def fields end + # @api private sig { returns(T::Hash[Symbol, T.proc.returns(T::Class[T.anything])]) } def defaults end + # @api private sig do params( name_sym: Symbol, @@ -393,6 +472,7 @@ module OpenAI private def add_field(name_sym, required:, type_info:, spec:) end + # @api private sig do params( name_sym: Symbol, @@ -408,6 +488,7 @@ module OpenAI def required(name_sym, type_info, spec = {}) end + # @api private sig do params( name_sym: Symbol, @@ -423,10 +504,17 @@ module OpenAI def optional(name_sym, type_info, spec = {}) end + # @api private + # + # `request_only` attributes are not excluded from `#coerce` when receiving + # responses, even though well-behaved servers should not send them sig { params(blk: T.proc.void).void } private def request_only(&blk) end + # @api private + # + # `response_only` attributes are omitted from `#dump` when making requests sig { params(blk: T.proc.void).void } private def response_only(&blk) end @@ -437,6 +525,7 @@ module OpenAI end class << self + # @api private sig do override .params(value: T.any(OpenAI::BaseModel, T::Hash[T.anything, T.anything], T.anything)) @@ -445,6 +534,7 @@ module OpenAI def coerce(value) end + # @api private sig do override .params(value: T.any(T.attached_class, T.anything)) @@ -453,6 +543,7 @@ module OpenAI def dump(value) end + # @api private sig do override .params(value: T.anything) @@ -462,10 +553,24 @@ module OpenAI def try_strict_coerce(value) end end + # Returns the raw value associated with the given key, if found. Otherwise, nil is + # returned. + # + # It is valid to look up keys that are not in the API spec, for example to access + # undocumented features. This method does not parse response data into + # higher-level types. Lookup by anything other than a Symbol raises an ArgumentError. sig { params(key: Symbol).returns(T.nilable(T.anything)) } def [](key) end + # Returns a Hash of the data underlying this object. O(1) + # + # Keys are Symbols and values are the raw values from the response. The return + # value indicates which values were ever set on the object, i.e. a key will be + # present in this hash if it was ever set, even if the value was nil. + # + # This method is not recursive. The returned value is shared by the object, so it + # should not be mutated. sig { overridable.returns(T::Hash[Symbol, T.anything]) } def to_h end @@ -476,6 +581,7 @@ module OpenAI def deconstruct_keys(keys) end + # Create a new instance of a model. sig { params(data: T.any(T::Hash[Symbol, T.anything], T.self_type)).returns(T.attached_class) } def self.new(data = {}) end diff --git a/rbi/lib/openai/base_page.rbi b/rbi/lib/openai/base_page.rbi index b4a17615..bf6ab11f 100644 --- a/rbi/lib/openai/base_page.rbi +++ b/rbi/lib/openai/base_page.rbi @@ -1,6 +1,7 @@ # typed: strong module OpenAI + # @api private module BasePage abstract!
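+
+    # A usage sketch (editorial note, not generated documentation): the concrete
+    # page classes built on `BasePage` give callers cursor pagination. This assumes
+    # the SDK's usual `auto_paging_each` helper and a `client.batches.list` resource
+    # method; both names are assumptions here, not guarantees from this diff:
+    #
+    # ```
+    # page = client.batches.list(limit: 20)
+    # page.auto_paging_each do |batch|
+    #   puts(batch.id) # later pages are fetched lazily as iteration proceeds
+    # end
+    # ```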
@@ -24,6 +25,7 @@ module OpenAI alias_method :enum_for, :to_enum + # @api private sig do params( client: OpenAI::BaseClient, diff --git a/rbi/lib/openai/base_stream.rbi b/rbi/lib/openai/base_stream.rbi index 8b829bd1..79324aa7 100644 --- a/rbi/lib/openai/base_stream.rbi +++ b/rbi/lib/openai/base_stream.rbi @@ -1,6 +1,7 @@ # typed: strong module OpenAI + # @api private module BaseStream Message = type_member(:in) Elem = type_member(:out) @@ -9,6 +10,7 @@ module OpenAI def close end + # @api private sig { overridable.returns(T::Enumerable[Elem]) } private def iterator end @@ -23,6 +25,7 @@ module OpenAI alias_method :enum_for, :to_enum + # @api private sig do params( model: T.any(T::Class[T.anything], OpenAI::Converter), diff --git a/rbi/lib/openai/client.rbi b/rbi/lib/openai/client.rbi index ba6253be..4b8256ce 100644 --- a/rbi/lib/openai/client.rbi +++ b/rbi/lib/openai/client.rbi @@ -78,10 +78,12 @@ module OpenAI def responses end + # @api private sig { override.returns(T::Hash[String, String]) } private def auth_headers end + # Creates and returns a new client for interacting with the API. sig do params( base_url: T.nilable(String), diff --git a/rbi/lib/openai/errors.rbi b/rbi/lib/openai/errors.rbi index 69c98916..1408d718 100644 --- a/rbi/lib/openai/errors.rbi +++ b/rbi/lib/openai/errors.rbi @@ -35,6 +35,7 @@ module OpenAI def type end + # @api private sig do params( url: URI::Generic, @@ -71,6 +72,7 @@ module OpenAI def type end + # @api private sig do params( url: URI::Generic, @@ -87,6 +89,7 @@ module OpenAI end class APITimeoutError < OpenAI::APIConnectionError + # @api private sig do params( url: URI::Generic, @@ -103,6 +106,7 @@ module OpenAI end class APIStatusError < OpenAI::APIError + # @api private sig do params( url: URI::Generic, @@ -133,6 +137,7 @@ module OpenAI def type end + # @api private sig do params( url: URI::Generic, diff --git a/rbi/lib/openai/extern.rbi b/rbi/lib/openai/extern.rbi index ca7768e3..b47bd767 100644 --- a/rbi/lib/openai/extern.rbi +++ b/rbi/lib/openai/extern.rbi @@ -1,6 +1,7 @@ # typed: strong module OpenAI + # @api private module Extern abstract! end diff --git a/rbi/lib/openai/models/audio/speech_create_params.rbi b/rbi/lib/openai/models/audio/speech_create_params.rbi index db0763fb..feb3ae70 100644 --- a/rbi/lib/openai/models/audio/speech_create_params.rbi +++ b/rbi/lib/openai/models/audio/speech_create_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The text to generate audio for. The maximum length is 4096 characters. sig { returns(String) } def input end @@ -15,6 +16,8 @@ module OpenAI def input=(_) end + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +26,10 @@ module OpenAI def model=(_) end + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). sig { returns(Symbol) } def voice end @@ -31,6 +38,8 @@ module OpenAI def voice=(_) end + # The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -39,6 +48,8 @@ module OpenAI def response_format=(_) end + # The speed of the generated audio.
Select a value from `0.25` to `4.0`. `1.0` is + # the default. sig { returns(T.nilable(Float)) } def speed end @@ -77,16 +88,23 @@ module OpenAI def to_hash end + # One of the available [TTS models](https://platform.openai.com/docs/models#tts): + # `tts-1` or `tts-1-hd` class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + # `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + # voices are available in the + # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). class Voice < OpenAI::Enum abstract! @@ -107,6 +125,8 @@ module OpenAI end end + # The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, + # `wav`, and `pcm`. class ResponseFormat < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/audio/transcription.rbi b/rbi/lib/openai/models/audio/transcription.rbi index c314036c..bc8940ae 100644 --- a/rbi/lib/openai/models/audio/transcription.rbi +++ b/rbi/lib/openai/models/audio/transcription.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class Transcription < OpenAI::BaseModel + # The transcribed text. sig { returns(String) } def text end @@ -12,6 +13,8 @@ module OpenAI def text=(_) end + # Represents a transcription response returned by the model, based on the provided + # input. sig { params(text: String).returns(T.attached_class) } def self.new(text:) end diff --git a/rbi/lib/openai/models/audio/transcription_create_params.rbi b/rbi/lib/openai/models/audio/transcription_create_params.rbi index 324c2060..cc5e0e40 100644 --- a/rbi/lib/openai/models/audio/transcription_create_params.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The audio file object (not file name) to transcribe, in one of these formats: + # flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } def file end @@ -15,6 +17,8 @@ module OpenAI def file=(_) end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +27,9 @@ module OpenAI def model=(_) end + # The language of the input audio. Supplying the input language in + # [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) + # format will improve accuracy and latency. sig { returns(T.nilable(String)) } def language end @@ -31,6 +38,10 @@ module OpenAI def language=(_) end + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should match the audio language. sig { returns(T.nilable(String)) } def prompt end @@ -39,6 +50,8 @@ module OpenAI def prompt=(_) end + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -47,6 +60,11 @@ module OpenAI def response_format=(_) end + # The sampling temperature, between 0 and 1. Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic.
If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } def temperature end @@ -55,6 +73,11 @@ module OpenAI def temperature=(_) end + # The timestamp granularities to populate for this transcription. + # `response_format` must be set to `verbose_json` to use timestamp granularities. + # Either or both of these options are supported: `word` or `segment`. Note: There + # is no additional latency for segment timestamps, but generating word timestamps + # incurs additional latency. sig { returns(T.nilable(T::Array[Symbol])) } def timestamp_granularities end @@ -106,10 +129,13 @@ module OpenAI def to_hash end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end diff --git a/rbi/lib/openai/models/audio/transcription_create_response.rbi b/rbi/lib/openai/models/audio/transcription_create_response.rbi index ba16b2e7..49982089 100644 --- a/rbi/lib/openai/models/audio/transcription_create_response.rbi +++ b/rbi/lib/openai/models/audio/transcription_create_response.rbi @@ -3,10 +3,13 @@ module OpenAI module Models module Audio + # Represents a transcription response returned by the model, based on the provided + # input. class TranscriptionCreateResponse < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/audio/transcription_segment.rbi b/rbi/lib/openai/models/audio/transcription_segment.rbi index 2e1f1c65..92a534a2 100644 --- a/rbi/lib/openai/models/audio/transcription_segment.rbi +++ b/rbi/lib/openai/models/audio/transcription_segment.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionSegment < OpenAI::BaseModel + # Unique identifier of the segment. sig { returns(Integer) } def id end @@ -12,6 +13,8 @@ module OpenAI def id=(_) end + # Average logprob of the segment. If the value is lower than -1, consider the + # logprobs failed. sig { returns(Float) } def avg_logprob end @@ -20,6 +23,8 @@ module OpenAI def avg_logprob=(_) end + # Compression ratio of the segment. If the value is greater than 2.4, consider the + # compression failed. sig { returns(Float) } def compression_ratio end @@ -28,6 +33,7 @@ module OpenAI def compression_ratio=(_) end + # End time of the segment in seconds. sig { returns(Float) } def end_ end @@ -36,6 +42,8 @@ module OpenAI def end_=(_) end + # Probability of no speech in the segment. If the value is higher than 1.0 and the + # `avg_logprob` is below -1, consider this segment silent. sig { returns(Float) } def no_speech_prob end @@ -44,6 +52,7 @@ module OpenAI def no_speech_prob=(_) end + # Seek offset of the segment. sig { returns(Integer) } def seek end @@ -52,6 +61,7 @@ module OpenAI def seek=(_) end + # Start time of the segment in seconds. sig { returns(Float) } def start end @@ -60,6 +70,7 @@ module OpenAI def start=(_) end + # Temperature parameter used for generating the segment. sig { returns(Float) } def temperature end @@ -68,6 +79,7 @@ module OpenAI def temperature=(_) end + # Text content of the segment. sig { returns(String) } def text end @@ -76,6 +88,7 @@ module OpenAI def text=(_) end + # Array of token IDs for the text content.
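+        #
+        # Editorial sketch: one request shape that yields segments like this one.
+        # The resource path `client.audio.transcriptions.create` matches the
+        # resource files listed in this patch but is an assumption here:
+        #
+        # ```
+        # transcription = client.audio.transcriptions.create(
+        #   file: File.open("speech.mp3", "rb"),
+        #   model: "whisper-1",
+        #   response_format: :verbose_json,
+        #   timestamp_granularities: [:segment]
+        # )
+        # transcription.segments&.each { |s| puts("#{s.start}..#{s.end_}: #{s.text}") }
+        # ```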
sig { returns(T::Array[Integer]) } def tokens end diff --git a/rbi/lib/openai/models/audio/transcription_verbose.rbi b/rbi/lib/openai/models/audio/transcription_verbose.rbi index 7dbf05e8..55eb1b23 100644 --- a/rbi/lib/openai/models/audio/transcription_verbose.rbi +++ b/rbi/lib/openai/models/audio/transcription_verbose.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionVerbose < OpenAI::BaseModel + # The duration of the input audio. sig { returns(Float) } def duration end @@ -12,6 +13,7 @@ module OpenAI def duration=(_) end + # The language of the input audio. sig { returns(String) } def language end @@ -20,6 +22,7 @@ module OpenAI def language=(_) end + # The transcribed text. sig { returns(String) } def text end @@ -28,6 +31,7 @@ module OpenAI def text=(_) end + # Segments of the transcribed text and their corresponding details. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } def segments end @@ -39,6 +43,7 @@ module OpenAI def segments=(_) end + # Extracted words and their corresponding timestamps. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionWord])) } def words end @@ -50,6 +55,8 @@ module OpenAI def words=(_) end + # Represents a verbose JSON transcription response returned by the model, based on + # the provided input. sig do params( duration: Float, diff --git a/rbi/lib/openai/models/audio/transcription_word.rbi b/rbi/lib/openai/models/audio/transcription_word.rbi index 8a81058f..a3be4b46 100644 --- a/rbi/lib/openai/models/audio/transcription_word.rbi +++ b/rbi/lib/openai/models/audio/transcription_word.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranscriptionWord < OpenAI::BaseModel + # End time of the word in seconds. sig { returns(Float) } def end_ end @@ -12,6 +13,7 @@ module OpenAI def end_=(_) end + # Start time of the word in seconds. sig { returns(Float) } def start end @@ -20,6 +22,7 @@ module OpenAI def start=(_) end + # The text content of the word. sig { returns(String) } def word end diff --git a/rbi/lib/openai/models/audio/translation_create_params.rbi b/rbi/lib/openai/models/audio/translation_create_params.rbi index c8175a8d..e1a51573 100644 --- a/rbi/lib/openai/models/audio/translation_create_params.rbi +++ b/rbi/lib/openai/models/audio/translation_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The audio file object (not file name) to translate, in one of these formats: flac, + # mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. sig { returns(T.any(IO, StringIO)) } def file end @@ -15,6 +17,8 @@ module OpenAI def file=(_) end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. sig { returns(T.any(String, Symbol)) } def model end @@ -23,6 +27,10 @@ module OpenAI def model=(_) end + # An optional text to guide the model's style or continue a previous audio + # segment. The + # [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + # should be in English. sig { returns(T.nilable(String)) } def prompt end @@ -31,6 +39,8 @@ module OpenAI def prompt=(_) end + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. sig { returns(T.nilable(Symbol)) } def response_format end @@ -39,6 +49,11 @@ module OpenAI def response_format=(_) end + # The sampling temperature, between 0 and 1.
Higher values like 0.8 will make the + # output more random, while lower values like 0.2 will make it more focused and + # deterministic. If set to 0, the model will use + # [log probability](https://en.wikipedia.org/wiki/Log_probability) to + # automatically increase the temperature until certain thresholds are hit. sig { returns(T.nilable(Float)) } def temperature end @@ -77,10 +92,13 @@ module OpenAI def to_hash end + # ID of the model to use. Only `whisper-1` (which is powered by our open source + # Whisper V2 model) is currently available. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end diff --git a/rbi/lib/openai/models/audio/translation_create_response.rbi b/rbi/lib/openai/models/audio/translation_create_response.rbi index 9a25186f..79f531fc 100644 --- a/rbi/lib/openai/models/audio/translation_create_response.rbi +++ b/rbi/lib/openai/models/audio/translation_create_response.rbi @@ -7,6 +7,7 @@ module OpenAI abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/audio/translation_verbose.rbi b/rbi/lib/openai/models/audio/translation_verbose.rbi index 566f2db2..bceb7944 100644 --- a/rbi/lib/openai/models/audio/translation_verbose.rbi +++ b/rbi/lib/openai/models/audio/translation_verbose.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Audio class TranslationVerbose < OpenAI::BaseModel + # The duration of the input audio. sig { returns(Float) } def duration end @@ -12,6 +13,7 @@ module OpenAI def duration=(_) end + # The language of the output translation (always `english`). sig { returns(String) } def language end @@ -20,6 +22,7 @@ module OpenAI def language=(_) end + # The translated text. sig { returns(String) } def text end @@ -28,6 +31,7 @@ module OpenAI def text=(_) end + # Segments of the translated text and their corresponding details. sig { returns(T.nilable(T::Array[OpenAI::Models::Audio::TranscriptionSegment])) } def segments end diff --git a/rbi/lib/openai/models/audio_response_format.rbi b/rbi/lib/openai/models/audio_response_format.rbi index 087af985..2acd496e 100644 --- a/rbi/lib/openai/models/audio_response_format.rbi +++ b/rbi/lib/openai/models/audio_response_format.rbi @@ -2,6 +2,8 @@ module OpenAI module Models + # The format of the output, in one of these options: `json`, `text`, `srt`, + # `verbose_json`, or `vtt`. class AudioResponseFormat < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi index 5d106690..b1d97ad3 100644 --- a/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/auto_file_chunking_strategy_param.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class AutoFileChunkingStrategyParam < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -11,6 +12,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end diff --git a/rbi/lib/openai/models/batch.rbi b/rbi/lib/openai/models/batch.rbi index cec4b9fb..95ad26fe 100644 --- a/rbi/lib/openai/models/batch.rbi +++ b/rbi/lib/openai/models/batch.rbi @@ -11,6 +11,7 @@ module OpenAI def id=(_) end + # The time frame within which the batch should be processed. 
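+      #
+      # Editorial sketch: fetching a batch and reading the fields documented in
+      # this model. The `client.batches.retrieve` method name is assumed from the
+      # resource files in this patch:
+      #
+      # ```
+      # batch = client.batches.retrieve("batch_abc123")
+      # puts(batch.status)            # => :completed (see the Status enum below)
+      # puts(batch.completion_window) # => "24h"
+      # ```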
sig { returns(String) } def completion_window end @@ -19,6 +20,7 @@ module OpenAI def completion_window=(_) end + # The Unix timestamp (in seconds) for when the batch was created. sig { returns(Integer) } def created_at end @@ -27,6 +29,7 @@ module OpenAI def created_at=(_) end + # The OpenAI API endpoint used by the batch. sig { returns(String) } def endpoint end @@ -35,6 +38,7 @@ module OpenAI def endpoint=(_) end + # The ID of the input file for the batch. sig { returns(String) } def input_file_id end @@ -43,6 +47,7 @@ module OpenAI def input_file_id=(_) end + # The object type, which is always `batch`. sig { returns(Symbol) } def object end @@ -51,6 +56,7 @@ module OpenAI def object=(_) end + # The current status of the batch. sig { returns(Symbol) } def status end @@ -59,6 +65,7 @@ module OpenAI def status=(_) end + # The Unix timestamp (in seconds) for when the batch was cancelled. sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -67,6 +74,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the batch started cancelling. sig { returns(T.nilable(Integer)) } def cancelling_at end @@ -75,6 +83,7 @@ module OpenAI def cancelling_at=(_) end + # The Unix timestamp (in seconds) for when the batch was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -83,6 +92,7 @@ module OpenAI def completed_at=(_) end + # The ID of the file containing the outputs of requests with errors. sig { returns(T.nilable(String)) } def error_file_id end @@ -99,6 +109,7 @@ module OpenAI def errors=(_) end + # The Unix timestamp (in seconds) for when the batch expired. sig { returns(T.nilable(Integer)) } def expired_at end @@ -107,6 +118,7 @@ module OpenAI def expired_at=(_) end + # The Unix timestamp (in seconds) for when the batch will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -115,6 +127,7 @@ module OpenAI def expires_at=(_) end + # The Unix timestamp (in seconds) for when the batch failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -123,6 +136,7 @@ module OpenAI def failed_at=(_) end + # The Unix timestamp (in seconds) for when the batch started finalizing. sig { returns(T.nilable(Integer)) } def finalizing_at end @@ -131,6 +145,7 @@ module OpenAI def finalizing_at=(_) end + # The Unix timestamp (in seconds) for when the batch started processing. sig { returns(T.nilable(Integer)) } def in_progress_at end @@ -139,6 +154,12 @@ module OpenAI def in_progress_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -147,6 +168,7 @@ module OpenAI def metadata=(_) end + # The ID of the file containing the outputs of successfully executed requests. sig { returns(T.nilable(String)) } def output_file_id end @@ -155,6 +177,7 @@ module OpenAI def output_file_id=(_) end + # The request counts for different statuses within the batch. sig { returns(T.nilable(OpenAI::Models::BatchRequestCounts)) } def request_counts end @@ -242,6 +265,7 @@ module OpenAI def to_hash end + # The current status of the batch. class Status < OpenAI::Enum abstract! @@ -270,6 +294,7 @@ module OpenAI def data=(_) end + # The object type, which is always `list`. 
sig { returns(T.nilable(String)) } def object end diff --git a/rbi/lib/openai/models/batch_create_params.rbi b/rbi/lib/openai/models/batch_create_params.rbi index ff5efeab..6588dc92 100644 --- a/rbi/lib/openai/models/batch_create_params.rbi +++ b/rbi/lib/openai/models/batch_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. sig { returns(Symbol) } def completion_window end @@ -14,6 +16,10 @@ module OpenAI def completion_window=(_) end + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. sig { returns(Symbol) } def endpoint end @@ -22,6 +28,15 @@ module OpenAI def endpoint=(_) end + # The ID of an uploaded file that contains requests for the new batch. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your input file must be formatted as a + # [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), + # and must be uploaded with the purpose `batch`. The file can contain up to 50,000 + # requests, and can be up to 200 MB in size. sig { returns(String) } def input_file_id end @@ -30,6 +45,12 @@ module OpenAI def input_file_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -66,6 +87,8 @@ module OpenAI def to_hash end + # The time frame within which the batch should be processed. Currently only `24h` + # is supported. class CompletionWindow < OpenAI::Enum abstract! @@ -78,6 +101,10 @@ module OpenAI end end + # The endpoint to be used for all requests in the batch. Currently + # `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + # Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 + # embedding inputs across all requests in the batch. class Endpoint < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/batch_error.rbi b/rbi/lib/openai/models/batch_error.rbi index b0742afb..ded1e3bb 100644 --- a/rbi/lib/openai/models/batch_error.rbi +++ b/rbi/lib/openai/models/batch_error.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class BatchError < OpenAI::BaseModel + # An error code identifying the error type. sig { returns(T.nilable(String)) } def code end @@ -11,6 +12,7 @@ module OpenAI def code=(_) end + # The line number of the input file where the error occurred, if applicable. sig { returns(T.nilable(Integer)) } def line end @@ -19,6 +21,7 @@ module OpenAI def line=(_) end + # A human-readable message providing more details about the error. sig { returns(T.nilable(String)) } def message end @@ -27,6 +30,7 @@ module OpenAI def message=(_) end + # The name of the parameter that caused the error, if applicable. 
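+      #
+      # Editorial sketch: a create call using the params documented in
+      # batch_create_params above. The `client.batches.create` method name is
+      # assumed; the symbol literals follow the enums documented there:
+      #
+      # ```
+      # client.batches.create(
+      #   completion_window: :"24h",
+      #   endpoint: :"/v1/chat/completions",
+      #   input_file_id: "file-abc123" # a JSONL file uploaded with purpose `batch`
+      # )
+      # ```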
sig { returns(T.nilable(String)) } def param end diff --git a/rbi/lib/openai/models/batch_list_params.rbi b/rbi/lib/openai/models/batch_list_params.rbi index 5d04cdfd..8a8368b8 100644 --- a/rbi/lib/openai/models/batch_list_params.rbi +++ b/rbi/lib/openai/models/batch_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,8 @@ module OpenAI def after=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/batch_request_counts.rbi b/rbi/lib/openai/models/batch_request_counts.rbi index 59b86c64..df746c81 100644 --- a/rbi/lib/openai/models/batch_request_counts.rbi +++ b/rbi/lib/openai/models/batch_request_counts.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class BatchRequestCounts < OpenAI::BaseModel + # Number of requests that have been completed successfully. sig { returns(Integer) } def completed end @@ -11,6 +12,7 @@ module OpenAI def completed=(_) end + # Number of requests that have failed. sig { returns(Integer) } def failed end @@ -19,6 +21,7 @@ module OpenAI def failed=(_) end + # Total number of requests in the batch. sig { returns(Integer) } def total end @@ -27,6 +30,7 @@ module OpenAI def total=(_) end + # The request counts for different statuses within the batch. sig { params(completed: Integer, failed: Integer, total: Integer).returns(T.attached_class) } def self.new(completed:, failed:, total:) end diff --git a/rbi/lib/openai/models/beta/assistant.rbi b/rbi/lib/openai/models/beta/assistant.rbi index c573af22..c6f6f83f 100644 --- a/rbi/lib/openai/models/beta/assistant.rbi +++ b/rbi/lib/openai/models/beta/assistant.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class Assistant < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the assistant was created. sig { returns(Integer) } def created_at end @@ -20,6 +22,7 @@ module OpenAI def created_at=(_) end + # The description of the assistant. The maximum length is 512 characters. sig { returns(T.nilable(String)) } def description end @@ -28,6 +31,8 @@ module OpenAI def description=(_) end + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. sig { returns(T.nilable(String)) } def instructions end @@ -36,6 +41,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -44,6 +55,11 @@ module OpenAI def metadata=(_) end + # ID of the model to use. 
You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(String) } def model end @@ -52,6 +68,7 @@ module OpenAI def model=(_) end + # The name of the assistant. The maximum length is 256 characters. sig { returns(T.nilable(String)) } def name end @@ -60,6 +77,7 @@ module OpenAI def name=(_) end + # The object type, which is always `assistant`. sig { returns(Symbol) } def object end @@ -68,6 +86,9 @@ module OpenAI def object=(_) end + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ @@ -105,6 +126,26 @@ module OpenAI def tools=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -145,6 +186,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -153,6 +197,10 @@ module OpenAI def temperature=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::Assistant::ToolResources)) } def tool_resources end @@ -164,6 +212,11 @@ module OpenAI def tool_resources=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -172,6 +225,7 @@ module OpenAI def top_p=(_) end + # Represents an `assistant` that can call the model and use tools.
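+        #
+        # Editorial sketch: constructing one of these via the API. The resource
+        # path `client.beta.assistants.create` is assumed from this patch's file
+        # list, and plain hashes are assumed to coerce into the typed models:
+        #
+        # ```
+        # assistant = client.beta.assistants.create(
+        #   model: "gpt-4o",
+        #   name: "Math Tutor",
+        #   instructions: "Answer math questions concisely.",
+        #   tools: [{type: :code_interpreter}]
+        # )
+        # puts(assistant.id)
+        # ```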
sig do params( id: String, @@ -279,6 +333,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, @@ -302,6 +360,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -320,6 +381,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/assistant_create_params.rbi b/rbi/lib/openai/models/beta/assistant_create_params.rbi index 9287af82..37fd1121 100644 --- a/rbi/lib/openai/models/beta/assistant_create_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_create_params.rbi @@ -7,6 +7,11 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, Symbol)) } def model end @@ -15,6 +20,7 @@ module OpenAI def model=(_) end + # The description of the assistant. The maximum length is 512 characters. sig { returns(T.nilable(String)) } def description end @@ -23,6 +29,8 @@ module OpenAI def description=(_) end + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. sig { returns(T.nilable(String)) } def instructions end @@ -31,6 +39,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -39,6 +53,7 @@ module OpenAI def metadata=(_) end + # The name of the assistant. The maximum length is 256 characters. sig { returns(T.nilable(String)) } def name end @@ -47,6 +62,12 @@ module OpenAI def name=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -55,6 +76,26 @@ module OpenAI def reasoning_effort=(_) end + # Specifies the format that the model must output.
Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -95,6 +136,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -103,6 +147,10 @@ module OpenAI def temperature=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::AssistantCreateParams::ToolResources)) } def tool_resources end @@ -114,6 +162,9 @@ module OpenAI def tool_resources=(_) end + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( @@ -153,6 +204,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -242,10 +298,16 @@ module OpenAI def to_hash end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end @@ -275,6 +337,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs.
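+        #
+        # Editorial sketch of the shape described above, as it might be passed in a
+        # create call (hash coercion into the typed models is assumed):
+        #
+        # ```
+        # tool_resources: {
+        #   code_interpreter: {file_ids: ["file-abc123"]},
+        #   file_search: {vector_store_ids: ["vs_abc123"]}
+        # }
+        # ```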
sig do params( code_interpreter: OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, @@ -298,6 +364,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -316,6 +385,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -324,6 +397,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this assistant. There can be a maximum of 1 + # vector store attached to the assistant. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]) @@ -362,6 +439,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. sig do returns( T.nilable( @@ -392,6 +471,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -400,6 +482,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -438,10 +526,13 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -450,6 +541,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -479,6 +572,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -510,6 +604,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -518,6 +615,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. 
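+              #
+              # Editorial sketch: the values documented above, spelled out as a
+              # `static` chunking strategy inside a vector store helper (hash
+              # coercion into the typed models is assumed):
+              #
+              # ```
+              # vector_stores: [{
+              #   file_ids: ["file-abc123"],
+              #   chunking_strategy: {
+              #     type: :static,
+              #     static: {max_chunk_size_tokens: 800, chunk_overlap_tokens: 400}
+              #   }
+              # }]
+              # ```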
sig { returns(Integer) } def max_chunk_size_tokens end @@ -542,6 +641,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/assistant_list_params.rbi b/rbi/lib/openai/models/beta/assistant_list_params.rbi index 8653e3c0..1e331b4a 100644 --- a/rbi/lib/openai/models/beta/assistant_list_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_list_params.rbi @@ -7,6 +7,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -15,6 +19,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -23,6 +31,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -31,6 +41,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -67,6 +79,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi index 1e789450..c79726bd 100644 --- a/rbi/lib/openai/models/beta/assistant_response_format_option.rbi +++ b/rbi/lib/openai/models/beta/assistant_response_format_option.rbi @@ -3,10 +3,31 @@ module OpenAI module Models module Beta + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. 
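+      #
+      # Editorial sketch: the Structured Outputs setting described above, written
+      # as a plain hash (field spelling follows the comment; hash coercion is
+      # assumed, and the schema contents are illustrative only):
+      #
+      # ```
+      # response_format: {
+      #   type: :json_schema,
+      #   json_schema: {
+      #     name: "math_reply",
+      #     schema: {
+      #       type: "object",
+      #       properties: {answer: {type: "string"}},
+      #       required: ["answer"]
+      #     }
+      #   }
+      # }
+      # ```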
class AssistantResponseFormatOption < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/assistant_stream_event.rbi b/rbi/lib/openai/models/beta/assistant_stream_event.rbi index 7ac9987b..4684ef7c 100644 --- a/rbi/lib/openai/models/beta/assistant_stream_event.rbi +++ b/rbi/lib/openai/models/beta/assistant_stream_event.rbi @@ -3,10 +3,32 @@ module OpenAI module Models module Beta + # Represents an event emitted when streaming a Run. + # + # Each event in a server-sent events stream has an `event` and `data` property: + # + # ``` + # event: thread.created + # data: {"id": "thread_123", "object": "thread", ...} + # ``` + # + # We emit events whenever a new object is created, transitions to a new state, or + # is being streamed in parts (deltas). For example, we emit `thread.run.created` + # when a new run is created, `thread.run.completed` when a run completes, and so + # on. When an Assistant chooses to create a message during a run, we emit a + # `thread.message.created` event, a `thread.message.in_progress` event, many + # `thread.message.delta` events, and finally a `thread.message.completed` event. + # + # We may add additional events over time, so we recommend handling unknown events + # gracefully in your code. See the + # [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview) + # to learn how to integrate the Assistants API with streaming. class AssistantStreamEvent < OpenAI::Union abstract! class ThreadCreated < OpenAI::BaseModel + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig { returns(OpenAI::Models::Beta::Thread) } def data end @@ -23,6 +45,7 @@ module OpenAI def event=(_) end + # Whether to enable input audio transcription. sig { returns(T.nilable(T::Boolean)) } def enabled end @@ -31,6 +54,9 @@ module OpenAI def enabled=(_) end + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } def self.new(data:, enabled: nil, event: :"thread.created") end @@ -41,6 +67,8 @@ module OpenAI end class ThreadRunCreated < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -57,6 +85,8 @@ module OpenAI def event=(_) end + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.created") end @@ -67,6 +97,8 @@ module OpenAI end class ThreadRunQueued < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -83,6 +115,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.queued") end @@ -93,6 +127,8 @@ module OpenAI end class ThreadRunInProgress < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads).
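+          #
+          # Editorial sketch: consuming this union from a stream. The exact
+          # streaming helper name is an assumption (something like
+          # `client.beta.threads.create_and_run_streaming`), and `handle_delta`
+          # is a hypothetical stand-in; dispatch on the `event` tag follows the
+          # documentation above:
+          #
+          # ```
+          # stream.each do |event|
+          #   case event.event
+          #   when :"thread.run.created" then puts("run #{event.data.id} started")
+          #   when :"thread.message.delta" then handle_delta(event.data.delta)
+          #   when :error then warn(event.data.message)
+          #   end # unknown event types fall through and are ignored gracefully
+          # end
+          # ```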
sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -109,6 +145,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.in_progress") end @@ -119,6 +157,8 @@ module OpenAI end class ThreadRunRequiresAction < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -135,6 +175,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.requires_action") end @@ -145,6 +187,8 @@ module OpenAI end class ThreadRunCompleted < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -161,6 +205,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.completed") end @@ -171,6 +217,8 @@ module OpenAI end class ThreadRunIncomplete < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -187,6 +235,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.incomplete") end @@ -197,6 +247,8 @@ module OpenAI end class ThreadRunFailed < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -213,6 +265,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.failed") end @@ -223,6 +277,8 @@ module OpenAI end class ThreadRunCancelling < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -239,6 +295,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelling") end @@ -249,6 +307,8 @@ module OpenAI end class ThreadRunCancelled < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). 
sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -265,6 +325,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelled") end @@ -275,6 +337,8 @@ module OpenAI end class ThreadRunExpired < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -291,6 +355,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.expired") end @@ -301,6 +367,7 @@ module OpenAI end class ThreadRunStepCreated < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -320,6 +387,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.created") end @@ -330,6 +400,7 @@ module OpenAI end class ThreadRunStepInProgress < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -349,6 +420,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.in_progress") end @@ -359,6 +433,8 @@ module OpenAI end class ThreadRunStepDelta < OpenAI::BaseModel + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } def data end @@ -378,6 +454,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. sig do params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) .returns(T.attached_class) @@ -391,6 +470,7 @@ module OpenAI end class ThreadRunStepCompleted < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -410,6 +490,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.completed") end @@ -420,6 +503,7 @@ module OpenAI end class ThreadRunStepFailed < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -439,6 +523,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. 
sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.failed") end @@ -449,6 +536,7 @@ module OpenAI end class ThreadRunStepCancelled < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -468,6 +556,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.cancelled") end @@ -478,6 +569,7 @@ module OpenAI end class ThreadRunStepExpired < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -497,6 +589,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.expired") end @@ -507,6 +602,8 @@ module OpenAI end class ThreadMessageCreated < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -523,6 +620,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.created") end @@ -533,6 +633,8 @@ module OpenAI end class ThreadMessageInProgress < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -549,6 +651,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.in_progress") end @@ -559,6 +664,8 @@ module OpenAI end class ThreadMessageDelta < OpenAI::BaseModel + # Represents a message delta i.e. any changed fields on a message during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } def data end @@ -578,6 +685,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.delta") end @@ -588,6 +698,8 @@ module OpenAI end class ThreadMessageCompleted < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -604,6 +716,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. 
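# Illustrative sketch (not part of the generated patch): dispatching on the
# assistant stream event classes documented above. The `events` enumerable and
# the `handle_*` helpers are hypothetical placeholders; only the event classes
# and their `data` payloads come from these definitions.
events.each do |event|
  case event
  when OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta
    # `data` is a RunStepDeltaEvent carrying only the changed fields.
    handle_step_delta(event.data)
  when OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta
    # `data` is a MessageDeltaEvent; append the partial content somewhere.
    handle_message_delta(event.data)
  when OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed
    warn "run step #{event.data.id} failed"
  end
end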
sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.completed") end @@ -614,6 +729,8 @@ module OpenAI end class ThreadMessageIncomplete < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -630,6 +747,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.incomplete") end @@ -656,6 +776,9 @@ module OpenAI def event=(_) end + # Occurs when an + # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. + # This can happen due to an internal server error or a timeout. sig { params(data: OpenAI::Models::ErrorObject, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :error) end @@ -666,6 +789,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/assistant_tool.rbi b/rbi/lib/openai/models/beta/assistant_tool.rbi index 18612436..520704fc 100644 --- a/rbi/lib/openai/models/beta/assistant_tool.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool.rbi @@ -7,6 +7,7 @@ module OpenAI abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi index 7d82ebcf..1cec31b5 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class AssistantToolChoice < OpenAI::BaseModel + # The type of the tool. If type is `function`, the function name must be set. sig { returns(Symbol) } def type end @@ -23,6 +24,8 @@ module OpenAI def function=(_) end + # Specifies a tool the model should use. Use to force the model to call a specific + # tool. sig do params(type: Symbol, function: OpenAI::Models::Beta::AssistantToolChoiceFunction).returns(T.attached_class) end @@ -33,6 +36,7 @@ module OpenAI def to_hash end + # The type of the tool. If type is `function`, the function name must be set. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi index be6c8d7f..6e594ce1 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice_function.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class AssistantToolChoiceFunction < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end diff --git a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi index 6ae486e1..38c43dba 100644 --- a/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi +++ b/rbi/lib/openai/models/beta/assistant_tool_choice_option.rbi @@ -3,9 +3,20 @@ module OpenAI module Models module Beta + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message.
`auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. class AssistantToolChoiceOption < OpenAI::Union abstract! + # `none` means the model will not call any tools and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools before + # responding to the user. class Auto < OpenAI::Enum abstract! @@ -21,6 +32,7 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Beta::AssistantToolChoice]]) } private def variants end diff --git a/rbi/lib/openai/models/beta/assistant_update_params.rbi b/rbi/lib/openai/models/beta/assistant_update_params.rbi index 0c75a7fb..446f263d 100644 --- a/rbi/lib/openai/models/beta/assistant_update_params.rbi +++ b/rbi/lib/openai/models/beta/assistant_update_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The description of the assistant. The maximum length is 512 characters. sig { returns(T.nilable(String)) } def description end @@ -15,6 +16,8 @@ module OpenAI def description=(_) end + # The system instructions that the assistant uses. The maximum length is 256,000 + # characters. sig { returns(T.nilable(String)) } def instructions end @@ -23,6 +26,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -31,6 +40,11 @@ module OpenAI def metadata=(_) end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -39,6 +53,7 @@ module OpenAI def model=(_) end + # The name of the assistant. The maximum length is 256 characters. sig { returns(T.nilable(String)) } def name end @@ -47,6 +62,12 @@ module OpenAI def name=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -55,6 +76,26 @@ module OpenAI def reasoning_effort=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -95,6 +136,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -103,6 +147,10 @@ module OpenAI def temperature=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::AssistantUpdateParams::ToolResources)) } def tool_resources end @@ -114,6 +162,9 @@ module OpenAI def tool_resources=(_) end + # A list of tools enabled on the assistant. There can be a maximum of 128 tools per + # assistant. Tools can be of types `code_interpreter`, `file_search`, or + # `function`. sig do returns( T.nilable( @@ -153,6 +204,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -242,9 +298,19 @@ module OpenAI def to_hash end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class AssistantSupportedModels < OpenAI::Enum abstract! @@ -287,6 +353,7 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end @@ -316,6 +383,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs.
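# Illustrative sketch (not part of the generated patch): the update params
# documented above in use. The `client.beta.assistants.update` call mirrors a
# resource file touched by this patch, but its exact method shape, the env var
# name, and the assistant ID are assumptions.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
client.beta.assistants.update(
  "asst_123",                              # placeholder assistant ID
  name: "Support bot",                     # at most 256 characters
  temperature: 0.2,                        # alter this *or* top_p, not both
  response_format: { type: :json_object }, # JSON mode: also ask for JSON in a message
  metadata: { "team" => "support" }        # 64-char keys, 512-char values
)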
sig do params( code_interpreter: OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, @@ -339,6 +410,10 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # Overrides the list of + # [file](https://platform.openai.com/docs/api-reference/files) IDs made available + # to the `code_interpreter` tool. There can be a maximum of 20 files associated + # with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -357,6 +432,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # Overrides the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi b/rbi/lib/openai/models/beta/code_interpreter_tool.rbi index a560387e..ba0e9924 100644 --- a/rbi/lib/openai/models/beta/code_interpreter_tool.rbi +++ b/rbi/lib/openai/models/beta/code_interpreter_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class CodeInterpreterTool < OpenAI::BaseModel + # The type of tool being defined: `code_interpreter` sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/beta/file_search_tool.rbi b/rbi/lib/openai/models/beta/file_search_tool.rbi index a7d4bbaf..bf120b56 100644 --- a/rbi/lib/openai/models/beta/file_search_tool.rbi +++ b/rbi/lib/openai/models/beta/file_search_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class FileSearchTool < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Overrides for the file search tool. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch)) } def file_search end @@ -35,6 +37,14 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The maximum number of results the file search tool should output. The default is + # 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between + # 1 and 50 inclusive. + # + # Note that the file search tool may output fewer than `max_num_results` results. + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -43,6 +53,12 @@ module OpenAI def max_num_results=(_) end + # The ranking options for the file search. If not specified, the file search tool + # will use the `auto` ranker and a score_threshold of 0. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions)) } def ranking_options end @@ -54,6 +70,7 @@ module OpenAI def ranking_options=(_) end + # Overrides for the file search tool. sig do params( max_num_results: Integer, @@ -74,6 +91,8 @@ module OpenAI end class RankingOptions < OpenAI::BaseModel + # The score threshold for the file search. All values must be a floating point + # number between 0 and 1. sig { returns(Float) } def score_threshold end @@ -82,6 +101,8 @@ module OpenAI def score_threshold=(_) end + # The ranker to use for the file search. If not specified, will use the `auto` + # ranker.
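# Illustrative sketch (not part of the generated patch): building the file
# search override documented above. Values are arbitrary examples, and passing
# nested hashes to `.new` assumes the base model coerces them.
tool = OpenAI::Models::Beta::FileSearchTool.new(
  file_search: {
    max_num_results: 10,    # must be between 1 and 50 inclusive
    ranking_options: {
      score_threshold: 0.5, # floats between 0 and 1
      ranker: :auto         # omit to fall back to the `auto` ranker
    }
  }
)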
sig { returns(T.nilable(Symbol)) } def ranker end @@ -90,6 +111,12 @@ module OpenAI def ranker=(_) end + # The ranking options for the file search. If not specified, the file search tool + # will use the `auto` ranker and a score_threshold of 0. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { params(score_threshold: Float, ranker: Symbol).returns(T.attached_class) } def self.new(score_threshold:, ranker: nil) end @@ -98,6 +125,8 @@ module OpenAI def to_hash end + # The ranker to use for the file search. If not specified, will use the `auto` + # ranker. class Ranker < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/function_tool.rbi b/rbi/lib/openai/models/beta/function_tool.rbi index 0c7a758f..645a4c1c 100644 --- a/rbi/lib/openai/models/beta/function_tool.rbi +++ b/rbi/lib/openai/models/beta/function_tool.rbi @@ -12,6 +12,7 @@ module OpenAI def function=(_) end + # The type of tool being defined: `function` sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/beta/message_stream_event.rbi b/rbi/lib/openai/models/beta/message_stream_event.rbi index 135308ce..c00a5caa 100644 --- a/rbi/lib/openai/models/beta/message_stream_event.rbi +++ b/rbi/lib/openai/models/beta/message_stream_event.rbi @@ -3,10 +3,15 @@ module OpenAI module Models module Beta + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. class MessageStreamEvent < OpenAI::Union abstract! class ThreadMessageCreated < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -23,6 +28,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # created. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.created") end @@ -33,6 +41,8 @@ module OpenAI end class ThreadMessageInProgress < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -49,6 +59,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) moves + # to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.in_progress") end @@ -59,6 +72,8 @@ module OpenAI end class ThreadMessageDelta < OpenAI::BaseModel + # Represents a message delta i.e. any changed fields on a message during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::MessageDeltaEvent) } def data end @@ -78,6 +93,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [Message](https://platform.openai.com/docs/api-reference/messages/object) are + # being streamed. sig { params(data: OpenAI::Models::Beta::Threads::MessageDeltaEvent, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.delta") end @@ -88,6 +106,8 @@ module OpenAI end class ThreadMessageCompleted < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads).
sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -104,6 +124,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) is + # completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.completed") end @@ -114,6 +137,8 @@ module OpenAI end class ThreadMessageIncomplete < OpenAI::BaseModel + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Message) } def data end @@ -130,6 +155,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [message](https://platform.openai.com/docs/api-reference/messages/object) ends + # before it is completed. sig { params(data: OpenAI::Models::Beta::Threads::Message, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.message.incomplete") end @@ -140,6 +168,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/run_step_stream_event.rbi b/rbi/lib/openai/models/beta/run_step_stream_event.rbi index 1adb063a..b2ebe6e2 100644 --- a/rbi/lib/openai/models/beta/run_step_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_step_stream_event.rbi @@ -3,10 +3,14 @@ module OpenAI module Models module Beta + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. class RunStepStreamEvent < OpenAI::Union abstract! class ThreadRunStepCreated < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -26,6 +30,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is created. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.created") end @@ -36,6 +43,7 @@ module OpenAI end class ThreadRunStepInProgress < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -55,6 +63,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # moves to an `in_progress` state. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.in_progress") end @@ -65,6 +76,8 @@ module OpenAI end class ThreadRunStepDelta < OpenAI::BaseModel + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent) } def data end @@ -84,6 +97,9 @@ module OpenAI def event=(_) end + # Occurs when parts of a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # are being streamed. sig do params(data: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent, event: Symbol) .returns(T.attached_class) @@ -97,6 +113,7 @@ module OpenAI end class ThreadRunStepCompleted < OpenAI::BaseModel + # Represents a step in execution of a run. 
sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -116,6 +133,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.completed") end @@ -126,6 +146,7 @@ module OpenAI end class ThreadRunStepFailed < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -145,6 +166,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.failed") end @@ -155,6 +179,7 @@ module OpenAI end class ThreadRunStepCancelled < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -174,6 +199,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.cancelled") end @@ -184,6 +212,7 @@ module OpenAI end class ThreadRunStepExpired < OpenAI::BaseModel + # Represents a step in execution of a run. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStep) } def data end @@ -203,6 +232,9 @@ module OpenAI def event=(_) end + # Occurs when a + # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Runs::RunStep, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.step.expired") end @@ -213,6 +245,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/run_stream_event.rbi b/rbi/lib/openai/models/beta/run_stream_event.rbi index 47d3fd71..aff962cb 100644 --- a/rbi/lib/openai/models/beta/run_stream_event.rbi +++ b/rbi/lib/openai/models/beta/run_stream_event.rbi @@ -3,10 +3,14 @@ module OpenAI module Models module Beta + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. class RunStreamEvent < OpenAI::Union abstract! class ThreadRunCreated < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -23,6 +27,8 @@ module OpenAI def event=(_) end + # Occurs when a new + # [run](https://platform.openai.com/docs/api-reference/runs/object) is created. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.created") end @@ -33,6 +39,8 @@ module OpenAI end class ThreadRunQueued < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -49,6 +57,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `queued` status. 
sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.queued") end @@ -59,6 +69,8 @@ module OpenAI end class ThreadRunInProgress < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -75,6 +87,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to an `in_progress` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.in_progress") end @@ -85,6 +99,8 @@ module OpenAI end class ThreadRunRequiresAction < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -101,6 +117,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `requires_action` status. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.requires_action") end @@ -111,6 +129,8 @@ module OpenAI end class ThreadRunCompleted < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -127,6 +147,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is completed. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.completed") end @@ -137,6 +159,8 @@ module OpenAI end class ThreadRunIncomplete < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -153,6 +177,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # ends with status `incomplete`. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.incomplete") end @@ -163,6 +189,8 @@ module OpenAI end class ThreadRunFailed < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -179,6 +207,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # fails. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.failed") end @@ -189,6 +219,8 @@ module OpenAI end class ThreadRunCancelling < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -205,6 +237,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # moves to a `cancelling` status. 
sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelling") end @@ -215,6 +249,8 @@ module OpenAI end class ThreadRunCancelled < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -231,6 +267,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # is cancelled. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.cancelled") end @@ -241,6 +279,8 @@ module OpenAI end class ThreadRunExpired < OpenAI::BaseModel + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig { returns(OpenAI::Models::Beta::Threads::Run) } def data end @@ -257,6 +297,8 @@ module OpenAI def event=(_) end + # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) + # expires. sig { params(data: OpenAI::Models::Beta::Threads::Run, event: Symbol).returns(T.attached_class) } def self.new(data:, event: :"thread.run.expired") end @@ -267,6 +309,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/thread.rbi b/rbi/lib/openai/models/beta/thread.rbi index f7263e57..799f589a 100644 --- a/rbi/lib/openai/models/beta/thread.rbi +++ b/rbi/lib/openai/models/beta/thread.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Beta class Thread < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the thread was created. sig { returns(Integer) } def created_at end @@ -20,6 +22,12 @@ module OpenAI def created_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -28,6 +36,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread`. sig { returns(Symbol) } def object end @@ -36,6 +45,10 @@ module OpenAI def object=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::Thread::ToolResources)) } def tool_resources end @@ -47,6 +60,8 @@ module OpenAI def tool_resources=(_) end + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig do params( id: String, @@ -98,6 +113,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. 
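# Illustrative sketch (not part of the generated patch): the `tool_resources`
# shape described above, written as a plain hash with placeholder IDs.
tool_resources = {
  code_interpreter: { file_ids: ["file_abc"] }, # at most 20 files
  file_search: { vector_store_ids: ["vs_abc"] } # at most 1 vector store
}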
sig do params( code_interpreter: OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, @@ -121,6 +140,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -139,6 +161,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi index d0dd117f..c6f68453 100644 --- a/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_and_run_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } def assistant_id end @@ -15,6 +18,8 @@ module OpenAI def assistant_id=(_) end + # Override the default system message of the assistant. This is useful for + # modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } def instructions end @@ -23,6 +28,11 @@ module OpenAI def instructions=(_) end + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -31,6 +41,11 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -39,6 +54,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -47,6 +68,10 @@ module OpenAI def metadata=(_) end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. 
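# Illustrative sketch (not part of the generated patch): a create-and-run
# request using the params documented above. The
# `client.beta.threads.create_and_run` name matches a resource file in this
# patch, but its exact signature and the IDs are assumptions.
run = client.beta.threads.create_and_run(
  assistant_id: "asst_123",     # placeholder ID
  model: "gpt-4o",              # overrides the assistant's model for this run only
  max_completion_tokens: 1_000, # run ends `incomplete` if the budget is exceeded
  max_prompt_tokens: 10_000
)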
sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -55,6 +80,9 @@ module OpenAI def model=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -63,6 +91,26 @@ module OpenAI def parallel_tool_calls=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -103,6 +151,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -111,6 +162,8 @@ module OpenAI def temperature=(_) end + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread)) } def thread end @@ -122,6 +175,13 @@ module OpenAI def thread=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -133,6 +193,10 @@ module OpenAI def tool_choice=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources)) } def tool_resources end @@ -144,6 +208,8 @@ module OpenAI def tool_resources=(_) end + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. 
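# Illustrative sketch (not part of the generated patch): the `tool_choice`
# values described above, from least to most constrained.
tool_choice = :none      # never call tools; just generate a message
tool_choice = :auto      # the default: the model decides
tool_choice = :required  # must call at least one tool before responding
tool_choice = { type: :function, function: { name: "my_function" } } # force one tool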
sig do returns( T.nilable( @@ -187,6 +253,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -195,6 +266,8 @@ module OpenAI def top_p=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy)) } def truncation_strategy end @@ -303,10 +376,15 @@ module OpenAI def to_hash end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end class Thread < OpenAI::BaseModel + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message])) } def messages end @@ -325,6 +405,12 @@ module OpenAI def messages=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -333,6 +419,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources)) } def tool_resources end @@ -344,6 +434,8 @@ module OpenAI def tool_resources=(_) end + # Options to create a new thread. If no thread is provided when running a request, + # an empty thread will be created. sig do params( messages: T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message], @@ -369,6 +461,7 @@ module OpenAI end class Message < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -415,6 +508,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -423,6 +522,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to.
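# Illustrative sketch (not part of the generated patch): one entry for the
# `messages` array documented above, combining `role`, `content`, and
# `attachments`; the file ID is a placeholder.
message = {
  role: :user,
  content: "Summarize the attached report.",
  attachments: [
    { file_id: "file_abc", tools: [{ type: :file_search }] }
  ]
}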
sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment])) } def attachments end @@ -436,6 +536,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -488,6 +594,7 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! @@ -502,6 +609,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -525,6 +633,12 @@ module OpenAI end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! @@ -539,6 +653,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -547,6 +662,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -619,6 +735,7 @@ module OpenAI abstract! class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -637,6 +754,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -673,6 +791,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, @@ -696,6 +818,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -714,6 +839,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -722,6 +851,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable( @@ -766,6 +899,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
sig do returns( T.nilable( @@ -796,6 +931,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -804,6 +942,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -842,10 +986,13 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -854,6 +1001,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -883,6 +1032,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -914,6 +1064,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -922,6 +1075,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end @@ -948,6 +1103,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -986,6 +1142,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are used by the assistant's tools. The resources are + # specific to the type of tool. For example, the `code_interpreter` tool requires + # a list of file IDs, while the `file_search` tool requires a list of vector store + # IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, @@ -1009,6 +1169,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -1027,6 +1190,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this assistant. There can be a maximum of 1 vector store attached to + # the assistant. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -1049,6 +1216,7 @@ module OpenAI abstract! class << self + # @api private sig do override .returns( @@ -1061,6 +1229,10 @@ module OpenAI end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`. 
If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -1069,6 +1241,8 @@ module OpenAI def type=(_) end + # The number of most recent messages from the thread when constructing the context + # for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -1077,6 +1251,8 @@ module OpenAI def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -1085,6 +1261,10 @@ module OpenAI def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/thread_create_params.rbi b/rbi/lib/openai/models/beta/thread_create_params.rbi index 627808ef..670f30b9 100644 --- a/rbi/lib/openai/models/beta/thread_create_params.rbi +++ b/rbi/lib/openai/models/beta/thread_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + # start the thread with. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message])) } def messages end @@ -18,6 +20,12 @@ module OpenAI def messages=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -26,6 +34,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadCreateParams::ToolResources)) } def tool_resources end @@ -64,6 +76,7 @@ module OpenAI end class Message < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -110,6 +123,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -118,6 +137,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to.
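# Illustrative sketch (not part of the generated patch): the two truncation
# strategies documented above.
truncation_strategy = { type: :auto }                             # drop middle messages to fit the context window
truncation_strategy = { type: :last_messages, last_messages: 10 } # keep only the 10 most recent messages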
sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment])) } def attachments end @@ -129,6 +149,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -181,6 +207,7 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! @@ -195,6 +222,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -218,6 +246,12 @@ module OpenAI end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! @@ -232,6 +266,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -240,6 +275,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -312,6 +348,7 @@ module OpenAI abstract! class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -330,6 +367,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -366,6 +404,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, @@ -389,6 +431,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -407,6 +452,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end @@ -415,6 +464,10 @@ module OpenAI def vector_store_ids=(_) end + # A helper to create a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # with file_ids and attach it to this thread. There can be a maximum of 1 vector + # store attached to the thread. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]) @@ -453,6 +506,8 @@ module OpenAI end class VectorStore < OpenAI::BaseModel + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. 
sig do returns( T.nilable( @@ -483,6 +538,9 @@ module OpenAI def chunking_strategy=(_) end + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + # add to the vector store. There can be a maximum of 10000 files in a vector + # store. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -491,6 +549,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -529,10 +593,13 @@ module OpenAI def to_hash end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. class ChunkingStrategy < OpenAI::Union abstract! class Auto < OpenAI::BaseModel + # Always `auto`. sig { returns(Symbol) } def type end @@ -541,6 +608,8 @@ module OpenAI def type=(_) end + # The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + # `800` and `chunk_overlap_tokens` of `400`. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :auto) end @@ -570,6 +639,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -601,6 +671,9 @@ module OpenAI end class Static < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -609,6 +682,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end @@ -633,6 +708,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/thread_stream_event.rbi b/rbi/lib/openai/models/beta/thread_stream_event.rbi index df43a6d2..b7db3495 100644 --- a/rbi/lib/openai/models/beta/thread_stream_event.rbi +++ b/rbi/lib/openai/models/beta/thread_stream_event.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Beta class ThreadStreamEvent < OpenAI::BaseModel + # Represents a thread that contains + # [messages](https://platform.openai.com/docs/api-reference/messages). sig { returns(OpenAI::Models::Beta::Thread) } def data end @@ -20,6 +22,7 @@ module OpenAI def event=(_) end + # Whether to enable input audio transcription. sig { returns(T.nilable(T::Boolean)) } def enabled end @@ -28,6 +31,9 @@ module OpenAI def enabled=(_) end + # Occurs when a new + # [thread](https://platform.openai.com/docs/api-reference/threads/object) is + # created. 
sig { params(data: OpenAI::Models::Beta::Thread, enabled: T::Boolean, event: Symbol).returns(T.attached_class) } def self.new(data:, enabled: nil, event: :"thread.created") end diff --git a/rbi/lib/openai/models/beta/thread_update_params.rbi b/rbi/lib/openai/models/beta/thread_update_params.rbi index f95411f9..2748a8cc 100644 --- a/rbi/lib/openai/models/beta/thread_update_params.rbi +++ b/rbi/lib/openai/models/beta/thread_update_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -15,6 +21,10 @@ module OpenAI def metadata=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig { returns(T.nilable(OpenAI::Models::Beta::ThreadUpdateParams::ToolResources)) } def tool_resources end @@ -73,6 +83,10 @@ module OpenAI def file_search=(_) end + # A set of resources that are made available to the assistant's tools in this + # thread. The resources are specific to the type of tool. For example, the + # `code_interpreter` tool requires a list of file IDs, while the `file_search` + # tool requires a list of vector store IDs. sig do params( code_interpreter: OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, @@ -96,6 +110,9 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + # available to the `code_interpreter` tool. There can be a maximum of 20 files + # associated with the tool. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -114,6 +131,10 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # attached to this thread. There can be a maximum of 1 vector store attached to + # the thread. sig { returns(T.nilable(T::Array[String])) } def vector_store_ids end diff --git a/rbi/lib/openai/models/beta/threads/annotation.rbi b/rbi/lib/openai/models/beta/threads/annotation.rbi index 6510f8df..9311bee4 100644 --- a/rbi/lib/openai/models/beta/threads/annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation.rbi @@ -4,10 +4,14 @@ module OpenAI module Models module Beta module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. class Annotation < OpenAI::Union abstract! 
class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi index 81699c4b..fad66836 100644 --- a/rbi/lib/openai/models/beta/threads/annotation_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/annotation_delta.rbi @@ -4,10 +4,14 @@ module OpenAI module Models module Beta module Threads + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. class AnnotationDelta < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi index 631db0a5..75c0a985 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_annotation.rbi @@ -32,6 +32,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(String) } def text end @@ -40,6 +41,7 @@ module OpenAI def text=(_) end + # Always `file_citation`. sig { returns(Symbol) } def type end @@ -48,6 +50,9 @@ module OpenAI def type=(_) end + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. sig do params( end_index: Integer, @@ -77,6 +82,7 @@ module OpenAI end class FileCitation < OpenAI::BaseModel + # The ID of the specific File the citation is from. sig { returns(String) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi index f2e75732..7031c979 100644 --- a/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_citation_delta_annotation.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class FileCitationDeltaAnnotation < OpenAI::BaseModel + # The index of the annotation in the text content part. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `file_citation`. sig { returns(Symbol) } def type end @@ -48,6 +50,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(T.nilable(String)) } def text end @@ -56,6 +59,9 @@ module OpenAI def text=(_) end + # A citation within the message that points to a specific quote from a specific + # File associated with the assistant or the message. Generated when the assistant + # uses the "file_search" tool to search files. sig do params( index: Integer, @@ -87,6 +93,7 @@ module OpenAI end class FileCitation < OpenAI::BaseModel + # The ID of the specific File the citation is from. sig { returns(T.nilable(String)) } def file_id end @@ -95,6 +102,7 @@ module OpenAI def file_id=(_) end + # The specific quote in the file. 
sig { returns(T.nilable(String)) } def quote end diff --git a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi index 0feca1b6..c5e902d9 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_annotation.rbi @@ -32,6 +32,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(String) } def text end @@ -40,6 +41,7 @@ module OpenAI def text=(_) end + # Always `file_path`. sig { returns(Symbol) } def type end @@ -48,6 +50,8 @@ module OpenAI def type=(_) end + # A URL for the file that's generated when the assistant uses the + # `code_interpreter` tool to generate a file. sig do params( end_index: Integer, @@ -77,6 +81,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file that was generated. sig { returns(String) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi index f12cbc1f..3da258f2 100644 --- a/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi +++ b/rbi/lib/openai/models/beta/threads/file_path_delta_annotation.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class FilePathDeltaAnnotation < OpenAI::BaseModel + # The index of the annotation in the text content part. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `file_path`. sig { returns(Symbol) } def type end @@ -48,6 +50,7 @@ module OpenAI def start_index=(_) end + # The text in the message content that needs to be replaced. sig { returns(T.nilable(String)) } def text end @@ -56,6 +59,8 @@ module OpenAI def text=(_) end + # A URL for the file that's generated when the assistant uses the + # `code_interpreter` tool to generate a file. sig do params( index: Integer, @@ -87,6 +92,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file that was generated. sig { returns(T.nilable(String)) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/image_file.rbi b/rbi/lib/openai/models/beta/threads/image_file.rbi index 5f75f9fc..dccaa783 100644 --- a/rbi/lib/openai/models/beta/threads/image_file.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file.rbi @@ -5,6 +5,9 @@ module OpenAI module Beta module Threads class ImageFile < OpenAI::BaseModel + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(String) } def file_id end @@ -13,6 +16,8 @@ module OpenAI def file_id=(_) end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens; you can opt in to high resolution using `high`. sig { returns(T.nilable(Symbol)) } def detail end @@ -29,6 +34,8 @@ module OpenAI def to_hash end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens; you can opt in to high resolution using `high`. class Detail < OpenAI::Enum abstract!
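The `ImageFile` docs above pair two requirements: the image must be uploaded with `purpose="vision"`, and the resulting File ID is then referenced from message content, with `detail` trading resolution against token cost. A minimal sketch of that flow, assuming this SDK's usual calling convention (positional thread ID plus keyword params); the thread ID and filename are placeholders:

    require "openai"
    require "pathname"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Upload with purpose "vision" so the file content can be displayed later,
    # as the ImageFile#file_id docs require.
    file = client.files.create(file: Pathname("chart.png"), purpose: "vision")

    # Reference the uploaded File in a message; `detail: :low` opts into the
    # cheaper, fewer-token rendering described by the Detail enum.
    client.beta.threads.messages.create(
      "thread_abc123", # placeholder thread ID
      role: :user,
      content: [{type: :image_file, image_file: {file_id: file.id, detail: :low}}]
    )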
diff --git a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi index ebd1e9c5..281b7cb7 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def image_file=(_) end + # Always `image_file`. sig { returns(Symbol) } def type end @@ -21,6 +22,8 @@ module OpenAI def type=(_) end + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. sig { params(image_file: OpenAI::Models::Beta::Threads::ImageFile, type: Symbol).returns(T.attached_class) } def self.new(image_file:, type: :image_file) end diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi index 2f6a3435..faa92642 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageFileDelta < OpenAI::BaseModel + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens; you can opt in to high resolution using `high`. sig { returns(T.nilable(Symbol)) } def detail end @@ -13,6 +15,9 @@ module OpenAI def detail=(_) end + # The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + # in the message content. Set `purpose="vision"` when uploading the File if you + # need to later display the file content. sig { returns(T.nilable(String)) } def file_id end @@ -29,6 +34,8 @@ module OpenAI def to_hash end + # Specifies the detail level of the image if specified by the user. `low` uses + # fewer tokens; you can opt in to high resolution using `high`. class Detail < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi index dd93fbb6..9dba68e1 100644 --- a/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_file_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class ImageFileDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `image_file`. sig { returns(Symbol) } def type end @@ -32,6 +34,8 @@ module OpenAI def image_file=(_) end + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. sig do params(index: Integer, image_file: OpenAI::Models::Beta::Threads::ImageFileDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/image_url.rbi b/rbi/lib/openai/models/beta/threads/image_url.rbi index 512367f1..7baa0ea2 100644 --- a/rbi/lib/openai/models/beta/threads/image_url.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageURL < OpenAI::BaseModel + # The external URL of the image; must be one of the supported image types: jpeg, jpg, png, + # gif, webp. sig { returns(String) } def url end @@ -13,6 +15,8 @@ module OpenAI def url=(_) end + # Specifies the detail level of the image. `low` uses fewer tokens; you can opt in + # to high resolution using `high`.
Default value is `auto`. sig { returns(T.nilable(Symbol)) } def detail end @@ -29,6 +33,8 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. `low` uses fewer tokens; you can opt in + # to high resolution using `high`. Default value is `auto`. class Detail < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi index 13a1daf7..a6c65020 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def image_url=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # References an image URL in the content of a message. sig { params(image_url: OpenAI::Models::Beta::Threads::ImageURL, type: Symbol).returns(T.attached_class) } def self.new(image_url:, type: :image_url) end diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi index b5ffce82..59edca5a 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_delta.rbi @@ -5,6 +5,8 @@ module OpenAI module Beta module Threads class ImageURLDelta < OpenAI::BaseModel + # Specifies the detail level of the image. `low` uses fewer tokens; you can opt in + # to high resolution using `high`. sig { returns(T.nilable(Symbol)) } def detail end @@ -13,6 +15,8 @@ module OpenAI def detail=(_) end + # The URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, + # webp. sig { returns(T.nilable(String)) } def url end @@ -29,6 +33,8 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. `low` uses fewer tokens; you can opt in + # to high resolution using `high`. class Detail < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi b/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi index 38635411..9f8e8803 100644 --- a/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/image_url_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class ImageURLDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `image_url`. sig { returns(Symbol) } def type end @@ -32,6 +34,7 @@ module OpenAI def image_url=(_) end + # References an image URL in the content of a message. sig do params(index: Integer, image_url: OpenAI::Models::Beta::Threads::ImageURLDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/message.rbi b/rbi/lib/openai/models/beta/threads/message.rbi index 3f40f84e..f27ae4ab 100644 --- a/rbi/lib/openai/models/beta/threads/message.rbi +++ b/rbi/lib/openai/models/beta/threads/message.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class Message < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,9 @@ module OpenAI def id=(_) end + # If applicable, the ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) that + # authored this message.
sig { returns(T.nilable(String)) } def assistant_id end @@ -21,6 +25,7 @@ module OpenAI def assistant_id=(_) end + # A list of files attached to the message, and the tools they were added to. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Message::Attachment])) } def attachments end @@ -32,6 +37,7 @@ module OpenAI def attachments=(_) end + # The Unix timestamp (in seconds) for when the message was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -40,6 +46,7 @@ module OpenAI def completed_at=(_) end + # The content of the message in an array of text and/or images. sig do returns( T::Array[ @@ -80,6 +87,7 @@ module OpenAI def content=(_) end + # The Unix timestamp (in seconds) for when the message was created. sig { returns(Integer) } def created_at end @@ -88,6 +96,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the message was marked as incomplete. sig { returns(T.nilable(Integer)) } def incomplete_at end @@ -96,6 +105,7 @@ module OpenAI def incomplete_at=(_) end + # On an incomplete message, details about why the message is incomplete. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Message::IncompleteDetails)) } def incomplete_details end @@ -107,6 +117,12 @@ module OpenAI def incomplete_details=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -115,6 +131,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread.message`. sig { returns(Symbol) } def object end @@ -123,6 +140,7 @@ module OpenAI def object=(_) end + # The entity that produced the message. One of `user` or `assistant`. sig { returns(Symbol) } def role end @@ -131,6 +149,9 @@ module OpenAI def role=(_) end + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + # associated with the creation of this message. Value is `null` when messages are + # created manually using the create message or create thread endpoints. sig { returns(T.nilable(String)) } def run_id end @@ -139,6 +160,8 @@ module OpenAI def run_id=(_) end + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. sig { returns(Symbol) } def status end @@ -147,6 +170,8 @@ module OpenAI def status=(_) end + # The [thread](https://platform.openai.com/docs/api-reference/threads) ID that + # this message belongs to. sig { returns(String) } def thread_id end @@ -155,6 +180,8 @@ module OpenAI def thread_id=(_) end + # Represents a message within a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -231,6 +258,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -239,6 +267,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -311,6 +340,7 @@ module OpenAI abstract!
class AssistantToolsFileSearchTypeOnly < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -329,6 +359,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -342,6 +373,7 @@ module OpenAI end class IncompleteDetails < OpenAI::BaseModel + # The reason the message is incomplete. sig { returns(Symbol) } def reason end @@ -350,6 +382,7 @@ module OpenAI def reason=(_) end + # On an incomplete message, details about why the message is incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason:) end @@ -358,6 +391,7 @@ module OpenAI def to_hash end + # The reason the message is incomplete. class Reason < OpenAI::Enum abstract! @@ -375,6 +409,7 @@ module OpenAI end end + # The entity that produced the message. One of `user` or `assistant`. class Role < OpenAI::Enum abstract! @@ -388,6 +423,8 @@ module OpenAI end end + # The status of the message, which can be either `in_progress`, `incomplete`, or + # `completed`. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/message_content.rbi b/rbi/lib/openai/models/beta/threads/message_content.rbi index b2972189..591b21e4 100644 --- a/rbi/lib/openai/models/beta/threads/message_content.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content.rbi @@ -4,10 +4,13 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContent < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi index fff1ee0b..a91d5b77 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_delta.rbi @@ -4,10 +4,13 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContentDelta < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi index 82903174..65c8d43f 100644 --- a/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi +++ b/rbi/lib/openai/models/beta/threads/message_content_part_param.rbi @@ -4,10 +4,13 @@ module OpenAI module Models module Beta module Threads + # References an image [File](https://platform.openai.com/docs/api-reference/files) + # in the content of a message. class MessageContentPartParam < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/message_create_params.rbi b/rbi/lib/openai/models/beta/threads/message_create_params.rbi index 4a1db9ed..8169f3b8 100644 --- a/rbi/lib/openai/models/beta/threads/message_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_create_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The text contents of the message. sig do returns( T.any( @@ -54,6 +55,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. 
Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -62,6 +69,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment])) } def attachments end @@ -73,6 +81,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -127,6 +141,7 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! @@ -141,6 +156,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -164,6 +180,12 @@ module OpenAI end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! @@ -178,6 +200,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -186,6 +209,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -258,6 +282,7 @@ module OpenAI abstract! class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -276,6 +301,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/message_delta.rbi b/rbi/lib/openai/models/beta/threads/message_delta.rbi index 4830ec92..f85da37a 100644 --- a/rbi/lib/openai/models/beta/threads/message_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/message_delta.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class MessageDelta < OpenAI::BaseModel + # The content of the message in an array of text and/or images. sig do returns( T.nilable( @@ -47,6 +48,7 @@ module OpenAI def content=(_) end + # The entity that produced the message. One of `user` or `assistant`. sig { returns(T.nilable(Symbol)) } def role end @@ -55,6 +57,7 @@ module OpenAI def role=(_) end + # The delta containing the fields that have changed on the Message. sig do params( content: T::Array[ @@ -91,6 +94,7 @@ module OpenAI def to_hash end + # The entity that produced the message. One of `user` or `assistant`. class Role < OpenAI::Enum abstract!
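Taken together, the `MessageCreateParams` docs above map directly onto message creation: `role` distinguishes genuine user input from assistant turns you insert yourself, and each attachment names a file plus the tools it should be added to. A minimal sketch, assuming the same positional-thread-ID calling convention as above; all IDs are placeholders:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # A user message whose attached file is routed to the `file_search` tool,
    # mirroring the Attachment#tools docs above.
    client.beta.threads.messages.create(
      "thread_abc123", # placeholder thread ID
      role: :user,
      content: "Summarize the attached report.",
      attachments: [{file_id: "file-abc123", tools: [{type: :file_search}]}]
    )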
diff --git a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi index 7908606f..5df8b179 100644 --- a/rbi/lib/openai/models/beta/threads/message_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/message_delta_event.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class MessageDeltaEvent < OpenAI::BaseModel + # The identifier of the message, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,7 @@ module OpenAI def id=(_) end + # The delta containing the fields that have changed on the Message. sig { returns(OpenAI::Models::Beta::Threads::MessageDelta) } def delta end @@ -23,6 +25,7 @@ module OpenAI def delta=(_) end + # The object type, which is always `thread.message.delta`. sig { returns(Symbol) } def object end @@ -31,6 +34,8 @@ module OpenAI def object=(_) end + # Represents a message delta, i.e. any changed fields on a message during + # streaming. sig do params(id: String, delta: OpenAI::Models::Beta::Threads::MessageDelta, object: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/message_list_params.rbi b/rbi/lib/openai/models/beta/threads/message_list_params.rbi index d2cb9550..2745c825 100644 --- a/rbi/lib/openai/models/beta/threads/message_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_list_params.rbi @@ -8,6 +8,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -16,6 +20,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -24,6 +32,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -32,6 +42,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -40,6 +52,7 @@ module OpenAI def order=(_) end + # Filter messages by the run ID that generated them. sig { returns(T.nilable(String)) } def run_id end @@ -78,6 +91,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/message_update_params.rbi b/rbi/lib/openai/models/beta/threads/message_update_params.rbi index 4fe12d9d..f2d3e73a 100644 --- a/rbi/lib/openai/models/beta/threads/message_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/message_update_params.rbi @@ -16,6 +16,12 @@ module OpenAI def thread_id=(_) end + # Set of 16 key-value pairs that can be attached to an object.
This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi index 2451c015..c2179549 100644 --- a/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/refusal_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def refusal=(_) end + # Always `refusal`. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # The refusal content generated by the assistant. sig { params(refusal: String, type: Symbol).returns(T.attached_class) } def self.new(refusal:, type: :refusal) end diff --git a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi b/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi index c6f9732d..87dd9957 100644 --- a/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/refusal_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class RefusalDeltaBlock < OpenAI::BaseModel + # The index of the refusal part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `refusal`. sig { returns(Symbol) } def type end @@ -29,6 +31,7 @@ module OpenAI def refusal=(_) end + # The refusal content that is part of a message. sig { params(index: Integer, refusal: String, type: Symbol).returns(T.attached_class) } def self.new(index:, refusal: nil, type: :refusal) end diff --git a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi index 2627ee3f..ef418354 100644 --- a/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/required_action_function_tool_call.rbi @@ -5,6 +5,10 @@ module OpenAI module Beta module Threads class RequiredActionFunctionToolCall < OpenAI::BaseModel + # The ID of the tool call. This ID must be referenced when you submit the tool + # outputs using the + # [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # endpoint. sig { returns(String) } def id end @@ -13,6 +17,7 @@ module OpenAI def id=(_) end + # The function definition. sig { returns(OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function) } def function end @@ -24,6 +29,8 @@ module OpenAI def function=(_) end + # The type of tool call the output is required for. For now, this is always + # `function`. sig { returns(Symbol) } def type end @@ -32,6 +39,7 @@ module OpenAI def type=(_) end + # Tool call objects sig do params( id: String, @@ -53,6 +61,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments that the model expects you to pass to the function. sig { returns(String) } def arguments end @@ -61,6 +70,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(String) } def name end @@ -69,6 +79,7 @@ module OpenAI def name=(_) end + # The function definition.
sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/beta/threads/run.rbi b/rbi/lib/openai/models/beta/threads/run.rbi index 61ddea55..6e74d579 100644 --- a/rbi/lib/openai/models/beta/threads/run.rbi +++ b/rbi/lib/openai/models/beta/threads/run.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class Run < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -13,6 +14,9 @@ module OpenAI def id=(_) end + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # execution of this run. sig { returns(String) } def assistant_id end @@ -21,6 +25,7 @@ module OpenAI def assistant_id=(_) end + # The Unix timestamp (in seconds) for when the run was cancelled. sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -29,6 +34,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the run was completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -37,6 +43,7 @@ module OpenAI def completed_at=(_) end + # The Unix timestamp (in seconds) for when the run was created. sig { returns(Integer) } def created_at end @@ -45,6 +52,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the run will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -53,6 +61,7 @@ module OpenAI def expires_at=(_) end + # The Unix timestamp (in seconds) for when the run failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -61,6 +70,8 @@ module OpenAI def failed_at=(_) end + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::IncompleteDetails)) } def incomplete_details end @@ -72,6 +83,9 @@ module OpenAI def incomplete_details=(_) end + # The instructions that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig { returns(String) } def instructions end @@ -80,6 +94,7 @@ module OpenAI def instructions=(_) end + # The last error associated with this run. Will be `null` if there are no errors. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::LastError)) } def last_error end @@ -91,6 +106,8 @@ module OpenAI def last_error=(_) end + # The maximum number of completion tokens specified to have been used over the + # course of the run. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -99,6 +116,8 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens specified to have been used over the course + # of the run. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -107,6 +126,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -115,6 +140,9 @@ module OpenAI def metadata=(_) end + # The model that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. 
sig { returns(String) } def model end @@ -123,6 +151,7 @@ module OpenAI def model=(_) end + # The object type, which is always `thread.run`. sig { returns(Symbol) } def object end @@ -131,6 +160,9 @@ module OpenAI def object=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T::Boolean) } def parallel_tool_calls end @@ -139,6 +171,8 @@ module OpenAI def parallel_tool_calls=(_) end + # Details on the action required to continue the run. Will be `null` if no action + # is required. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::RequiredAction)) } def required_action end @@ -150,6 +184,26 @@ module OpenAI def required_action=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -190,6 +244,7 @@ module OpenAI def response_format=(_) end + # The Unix timestamp (in seconds) for when the run was started. sig { returns(T.nilable(Integer)) } def started_at end @@ -198,6 +253,9 @@ module OpenAI def started_at=(_) end + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. sig { returns(Symbol) } def status end @@ -206,6 +264,8 @@ module OpenAI def status=(_) end + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was executed on as a part of this run. sig { returns(String) } def thread_id end @@ -214,6 +274,13 @@ module OpenAI def thread_id=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. 
sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -225,6 +292,9 @@ module OpenAI def tool_choice=(_) end + # The list of tools that the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) used for + # this run. sig do returns( T::Array[ @@ -262,6 +332,8 @@ module OpenAI def tools=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::TruncationStrategy)) } def truncation_strategy end @@ -273,6 +345,8 @@ module OpenAI def truncation_strategy=(_) end + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. `in_progress`, `queued`, etc.). sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Run::Usage)) } def usage end @@ -284,6 +358,7 @@ module OpenAI def usage=(_) end + # The sampling temperature used for this run. If not set, defaults to 1. sig { returns(T.nilable(Float)) } def temperature end @@ -292,6 +367,7 @@ module OpenAI def temperature=(_) end + # The nucleus sampling value used for this run. If not set, defaults to 1. sig { returns(T.nilable(Float)) } def top_p end @@ -300,6 +376,8 @@ module OpenAI def top_p=(_) end + # Represents an execution run on a + # [thread](https://platform.openai.com/docs/api-reference/threads). sig do params( id: String, @@ -427,6 +505,8 @@ module OpenAI end class IncompleteDetails < OpenAI::BaseModel + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. sig { returns(T.nilable(Symbol)) } def reason end @@ -435,6 +515,8 @@ module OpenAI def reason=(_) end + # Details on why the run is incomplete. Will be `null` if the run is not + # incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason: nil) end @@ -443,6 +525,8 @@ module OpenAI def to_hash end + # The reason why the run is incomplete. This will point to which specific token + # limit was reached over the course of the run. class Reason < OpenAI::Enum abstract! @@ -458,6 +542,7 @@ module OpenAI end class LastError < OpenAI::BaseModel + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. sig { returns(Symbol) } def code end @@ -466,6 +551,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -474,6 +560,7 @@ module OpenAI def message=(_) end + # The last error associated with this run. Will be `null` if there are no errors. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -482,6 +569,7 @@ module OpenAI def to_hash end + # One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. class Code < OpenAI::Enum abstract! @@ -498,6 +586,7 @@ module OpenAI end class RequiredAction < OpenAI::BaseModel + # Details on the tool outputs needed for this run to continue. sig { returns(OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs) } def submit_tool_outputs end @@ -509,6 +598,7 @@ module OpenAI def submit_tool_outputs=(_) end + # For now, this is always `submit_tool_outputs`. sig { returns(Symbol) } def type end @@ -517,6 +607,8 @@ module OpenAI def type=(_) end + # Details on the action required to continue the run. Will be `null` if no action + # is required.
sig do params( submit_tool_outputs: OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs, @@ -537,6 +629,7 @@ module OpenAI end class SubmitToolOutputs < OpenAI::BaseModel + # A list of the relevant tool calls. sig { returns(T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) } def tool_calls end @@ -548,6 +641,7 @@ module OpenAI def tool_calls=(_) end + # Details on the tool outputs needed for this run to continue. sig do params(tool_calls: T::Array[OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall]) .returns(T.attached_class) @@ -562,6 +656,10 @@ module OpenAI end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -570,6 +668,8 @@ module OpenAI def type=(_) end + # The number of most recent messages from the thread when constructing the context + # for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -578,6 +678,8 @@ module OpenAI def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -586,6 +688,10 @@ module OpenAI def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract! @@ -601,6 +707,7 @@ module OpenAI end class Usage < OpenAI::BaseModel + # Number of completion tokens used over the course of the run. sig { returns(Integer) } def completion_tokens end @@ -609,6 +716,7 @@ module OpenAI def completion_tokens=(_) end + # Number of prompt tokens used over the course of the run. sig { returns(Integer) } def prompt_tokens end @@ -617,6 +725,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -625,6 +734,8 @@ module OpenAI def total_tokens=(_) end + # Usage statistics related to the run. This value will be `null` if the run is not + # in a terminal state (i.e. `in_progress`, `queued`, etc.). sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/run_create_params.rbi b/rbi/lib/openai/models/beta/threads/run_create_params.rbi index 3c923be3..dbf8d42e 100644 --- a/rbi/lib/openai/models/beta/threads/run_create_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_create_params.rbi @@ -8,6 +8,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + # execute this run. sig { returns(String) } def assistant_id end @@ -16,6 +19,13 @@ module OpenAI def assistant_id=(_) end + # A list of additional fields to include in the response.
Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -24,6 +34,9 @@ module OpenAI def include=(_) end + # Appends additional instructions at the end of the instructions for the run. This + # is useful for modifying the behavior on a per-run basis without overriding other + # instructions. sig { returns(T.nilable(String)) } def additional_instructions end @@ -32,6 +45,7 @@ module OpenAI def additional_instructions=(_) end + # Adds additional messages to the thread before creating the run. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage])) } def additional_messages end @@ -43,6 +57,9 @@ module OpenAI def additional_messages=(_) end + # Overrides the + # [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + # of the assistant. This is useful for modifying the behavior on a per-run basis. sig { returns(T.nilable(String)) } def instructions end @@ -51,6 +68,11 @@ module OpenAI def instructions=(_) end + # The maximum number of completion tokens that may be used over the course of the + # run. The run will make a best effort to use only the number of completion tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # completion tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -59,6 +81,11 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of prompt tokens that may be used over the course of the run. + # The run will make a best effort to use only the number of prompt tokens + # specified, across multiple turns of the run. If the run exceeds the number of + # prompt tokens specified, the run will end with status `incomplete`. See + # `incomplete_details` for more info. sig { returns(T.nilable(Integer)) } def max_prompt_tokens end @@ -67,6 +94,12 @@ module OpenAI def max_prompt_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -75,6 +108,10 @@ module OpenAI def metadata=(_) end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -83,6 +120,9 @@ module OpenAI def model=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. 
sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -91,6 +131,12 @@ module OpenAI def parallel_tool_calls=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -99,6 +145,26 @@ module OpenAI def reasoning_effort=(_) end + # Specifies the format that the model must output. Compatible with + # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + # [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + # and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + # message the model generates is valid JSON. + # + # **Important:** when using JSON mode, you **must** also instruct the model to + # produce JSON yourself via a system or user message. Without this, the model may + # generate an unending stream of whitespace until the generation reaches the token + # limit, resulting in a long-running and seemingly "stuck" request. Also note that + # the message content may be partially cut off if `finish_reason="length"`, which + # indicates the generation exceeded `max_tokens` or the conversation exceeded the + # max context length. sig do returns( T.nilable( @@ -139,6 +205,9 @@ module OpenAI def response_format=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. sig { returns(T.nilable(Float)) } def temperature end @@ -147,6 +216,13 @@ module OpenAI def temperature=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tools and instead generates a message. `auto` is the default value + # and means the model can pick between generating a message or calling one or more + # tools. `required` means the model must call one or more tools before responding + # to the user. Specifying a particular tool like `{"type": "file_search"}` or + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Beta::AssistantToolChoice))) } def tool_choice end @@ -158,6 +234,8 @@ module OpenAI def tool_choice=(_) end + # Override the tools the assistant can use for this run. This is useful for + # modifying the behavior on a per-run basis. sig do returns( T.nilable( @@ -201,6 +279,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or temperature but not both. 
sig { returns(T.nilable(Float)) } def top_p end @@ -209,6 +292,8 @@ module OpenAI def top_p=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy)) } def truncation_strategy end @@ -324,6 +409,7 @@ module OpenAI end class AdditionalMessage < OpenAI::BaseModel + # The text contents of the message. sig do returns( T.any( @@ -370,6 +456,12 @@ module OpenAI def content=(_) end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. sig { returns(Symbol) } def role end @@ -378,6 +470,7 @@ module OpenAI def role=(_) end + # A list of files attached to the message, and the tools they should be added to. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]) @@ -397,6 +490,12 @@ module OpenAI def attachments=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -449,6 +548,7 @@ module OpenAI def to_hash end + # The text contents of the message. class Content < OpenAI::Union abstract! @@ -463,6 +563,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -486,6 +587,12 @@ module OpenAI end end + # The role of the entity that is creating the message. Allowed values include: + # + # - `user`: Indicates the message is sent by an actual user and should be used in + # most cases to represent user-generated messages. + # - `assistant`: Indicates the message is generated by the assistant. Use this + # value to insert messages from the assistant into the conversation. class Role < OpenAI::Enum abstract! @@ -500,6 +607,7 @@ module OpenAI end class Attachment < OpenAI::BaseModel + # The ID of the file to attach to the message. sig { returns(T.nilable(String)) } def file_id end @@ -508,6 +616,7 @@ module OpenAI def file_id=(_) end + # The tools to add this file to. sig do returns( T.nilable( @@ -580,6 +689,7 @@ module OpenAI abstract! class FileSearch < OpenAI::BaseModel + # The type of tool being defined: `file_search` sig { returns(Symbol) } def type end @@ -598,6 +708,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -611,10 +722,15 @@ module OpenAI end end + # The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + # be used to execute this run. If a value is provided here, it will override the + # model associated with the assistant. If not, the model associated with the + # assistant will be used. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end @@ -622,6 +738,10 @@ module OpenAI end class TruncationStrategy < OpenAI::BaseModel + # The truncation strategy to use for the thread. The default is `auto`.
If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. sig { returns(Symbol) } def type end @@ -630,6 +750,8 @@ module OpenAI def type=(_) end + # The number of most recent messages from the thread to include when + # constructing the context for the run. sig { returns(T.nilable(Integer)) } def last_messages end @@ -638,6 +760,8 @@ module OpenAI def last_messages=(_) end + # Controls for how a thread will be truncated prior to the run. Use this to + # control the initial context window of the run. sig { params(type: Symbol, last_messages: T.nilable(Integer)).returns(T.attached_class) } def self.new(type:, last_messages: nil) end @@ -646,6 +770,10 @@ module OpenAI def to_hash end + # The truncation strategy to use for the thread. The default is `auto`. If set to + # `last_messages`, the thread will be truncated to the n most recent messages in + # the thread. When set to `auto`, messages in the middle of the thread will be + # dropped to fit the context length of the model, `max_prompt_tokens`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/run_list_params.rbi b/rbi/lib/openai/models/beta/threads/run_list_params.rbi index cffb671d..ce7fa02c 100644 --- a/rbi/lib/openai/models/beta/threads/run_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_list_params.rbi @@ -8,6 +8,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -16,6 +20,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -24,6 +32,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -32,6 +42,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -68,6 +80,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/run_status.rbi b/rbi/lib/openai/models/beta/threads/run_status.rbi index e890d711..ea69e05c 100644 --- a/rbi/lib/openai/models/beta/threads/run_status.rbi +++ b/rbi/lib/openai/models/beta/threads/run_status.rbi @@ -4,6 +4,9 @@ module OpenAI module Models module Beta module Threads + # The status of the run, which can be either `queued`, `in_progress`, + # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, + # `incomplete`, or `expired`. class RunStatus < OpenAI::Enum abstract!
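+
+ # A hypothetical polling sketch against these status values (the client call,
+ # IDs, and accessor names are illustrative assumptions, not part of this patch):
+ #
+ #   run = client.beta.threads.runs.retrieve("run_123", thread_id: "thread_123")
+ #   terminal = %i[completed failed cancelled expired incomplete]
+ #   done = terminal.include?(run.status)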
diff --git a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi index 42c12d0a..0a39a132 100644 --- a/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rbi @@ -16,6 +16,7 @@ module OpenAI def thread_id=(_) end + # A list of tools for which the outputs are being submitted. sig { returns(T::Array[OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]) } def tool_outputs end @@ -52,6 +53,7 @@ module OpenAI end class ToolOutput < OpenAI::BaseModel + # The output of the tool call to be submitted to continue the run. sig { returns(T.nilable(String)) } def output end @@ -60,6 +62,8 @@ module OpenAI def output=(_) end + # The ID of the tool call in the `required_action` object within the run object + # the output is being submitted for. sig { returns(T.nilable(String)) } def tool_call_id end diff --git a/rbi/lib/openai/models/beta/threads/run_update_params.rbi b/rbi/lib/openai/models/beta/threads/run_update_params.rbi index 0ede0dab..d01aa52d 100644 --- a/rbi/lib/openai/models/beta/threads/run_update_params.rbi +++ b/rbi/lib/openai/models/beta/threads/run_update_params.rbi @@ -16,6 +16,12 @@ module OpenAI def thread_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi index 53f5040c..a6c26d19 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_logs.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterLogs < OpenAI::BaseModel + # The index of the output in the outputs array. sig { returns(Integer) } def index end @@ -14,6 +15,7 @@ module OpenAI def index=(_) end + # Always `logs`. sig { returns(Symbol) } def type end @@ -22,6 +24,7 @@ module OpenAI def type=(_) end + # The text output from the Code Interpreter tool call. sig { returns(T.nilable(String)) } def logs end @@ -30,6 +33,7 @@ module OpenAI def logs=(_) end + # Text output from the Code Interpreter tool call as part of a run step. sig { params(index: Integer, logs: String, type: Symbol).returns(T.attached_class) } def self.new(index:, logs: nil, type: :logs) end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi index e3a5d1f7..d598eb87 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_output_image.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterOutputImage < OpenAI::BaseModel + # The index of the output in the outputs array. sig { returns(Integer) } def index end @@ -14,6 +15,7 @@ module OpenAI def index=(_) end + # Always `image`. 
sig { returns(Symbol) } def type end @@ -54,6 +56,8 @@ module OpenAI end class Image < OpenAI::BaseModel + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. sig { returns(T.nilable(String)) } def file_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi index fb36dea2..373b9a5a 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterToolCall < OpenAI::BaseModel + # The ID of the tool call. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Code Interpreter tool call definition. sig { returns(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter) } def code_interpreter end @@ -25,6 +27,8 @@ module OpenAI def code_interpreter=(_) end + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. sig { returns(Symbol) } def type end @@ -33,6 +37,7 @@ module OpenAI def type=(_) end + # Details of the Code Interpreter tool call the run step was involved in. sig do params( id: String, @@ -58,6 +63,7 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # The input to the Code Interpreter tool call. sig { returns(String) } def input end @@ -66,6 +72,9 @@ module OpenAI def input=(_) end + # The outputs from the Code Interpreter tool call. Code Interpreter can output one + # or more items, including text (`logs`) or images (`image`). Each of these is + # represented by a different object type. sig do returns( T::Array[ @@ -100,6 +109,7 @@ module OpenAI def outputs=(_) end + # The Code Interpreter tool call definition. sig do params( input: String, @@ -132,10 +142,12 @@ module OpenAI def to_hash end + # Text output from the Code Interpreter tool call as part of a run step. class Output < OpenAI::Union abstract! class Logs < OpenAI::BaseModel + # The text output from the Code Interpreter tool call. sig { returns(String) } def logs end @@ -144,6 +156,7 @@ module OpenAI def logs=(_) end + # Always `logs`. sig { returns(Symbol) } def type end @@ -152,6 +165,7 @@ module OpenAI def type=(_) end + # Text output from the Code Interpreter tool call as part of a run step. sig { params(logs: String, type: Symbol).returns(T.attached_class) } def self.new(logs:, type: :logs) end @@ -181,6 +195,7 @@ module OpenAI def image=(_) end + # Always `image`. sig { returns(Symbol) } def type end @@ -212,6 +227,8 @@ module OpenAI end class Image < OpenAI::BaseModel + # The [file](https://platform.openai.com/docs/api-reference/files) ID of the + # image. sig { returns(String) } def file_id end @@ -231,6 +248,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi index 763d9012..e355df3e 100644 --- a/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class CodeInterpreterToolCallDelta < OpenAI::BaseModel + # The index of the tool call in the tool calls array.
sig { returns(Integer) } def index end @@ -14,6 +15,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `code_interpreter` for this + # type of tool call. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # The ID of the tool call. sig { returns(T.nilable(String)) } def id end @@ -30,6 +34,7 @@ module OpenAI def id=(_) end + # The Code Interpreter tool call definition. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter)) } def code_interpreter end @@ -41,6 +46,7 @@ module OpenAI def code_interpreter=(_) end + # Details of the Code Interpreter tool call the run step was involved in. sig do params( index: Integer, @@ -68,6 +74,7 @@ module OpenAI end class CodeInterpreter < OpenAI::BaseModel + # The input to the Code Interpreter tool call. sig { returns(T.nilable(String)) } def input end @@ -76,6 +83,9 @@ module OpenAI def input=(_) end + # The outputs from the Code Interpreter tool call. Code Interpreter can output one + # or more items, including text (`logs`) or images (`image`). Each of these is + # represented by a different object type. sig do returns( T.nilable( @@ -112,6 +122,7 @@ module OpenAI def outputs=(_) end + # The Code Interpreter tool call definition. sig do params( input: String, @@ -144,10 +155,12 @@ module OpenAI def to_hash end + # Text output from the Code Interpreter tool call as part of a run step. class Output < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi index 9aaccc68..af309515 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FileSearchToolCall < OpenAI::BaseModel + # The ID of the tool call object. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # For now, this is always going to be an empty object. sig { returns(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch) } def file_search end @@ -25,6 +27,8 @@ module OpenAI def file_search=(_) end + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -54,6 +58,7 @@ module OpenAI end class FileSearch < OpenAI::BaseModel + # The ranking options for the file search. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions)) } def ranking_options end @@ -65,6 +70,7 @@ module OpenAI def ranking_options=(_) end + # The results of the file search. sig { returns(T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result])) } def results end @@ -76,6 +82,7 @@ module OpenAI def results=(_) end + # For now, this is always going to be an empty object. sig do params( ranking_options: OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, @@ -99,6 +106,8 @@ module OpenAI end class RankingOptions < OpenAI::BaseModel + # The ranker to use for the file search. If not specified, the `auto` ranker + # will be used. sig { returns(Symbol) } def ranker end @@ -107,6 +116,8 @@ module OpenAI def ranker=(_) end + # The score threshold for the file search. Must be a floating point number + # between 0 and 1.
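+ #
+ # A minimal construction sketch for these ranking options, using the `self.new`
+ # shown below (the threshold value is an illustrative assumption):
+ #
+ #   OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions.new(
+ #     ranker: :auto,
+ #     score_threshold: 0.7
+ #   )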
sig { returns(Float) } def score_threshold end @@ -115,6 +126,7 @@ module OpenAI def score_threshold=(_) end + # The ranking options for the file search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker:, score_threshold:) end @@ -123,6 +135,8 @@ module OpenAI def to_hash end + # The ranker to use for the file search. If not specified, the `auto` ranker + # will be used. class Ranker < OpenAI::Enum abstract! @@ -138,6 +152,7 @@ module OpenAI end class Result < OpenAI::BaseModel + # The ID of the file that the result was found in. sig { returns(String) } def file_id end @@ -146,6 +161,7 @@ module OpenAI def file_id=(_) end + # The name of the file that the result was found in. sig { returns(String) } def file_name end @@ -154,6 +170,8 @@ module OpenAI def file_name=(_) end + # The score of the result. Must be a floating point number between 0 + # and 1. sig { returns(Float) } def score end @@ -162,6 +180,8 @@ module OpenAI def score=(_) end + # The content of the result that was found. The content is only included if + # requested via the include query parameter. sig do returns( T.nilable(T::Array[OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]) ) end def content end @@ -177,6 +197,7 @@ module OpenAI def content=(_) end + # A result instance of the file search. sig do params( file_id: String, @@ -204,6 +225,7 @@ module OpenAI end class Content < OpenAI::BaseModel + # The text content of the file. sig { returns(T.nilable(String)) } def text end @@ -212,6 +234,7 @@ module OpenAI def text=(_) end + # The type of the content. sig { returns(T.nilable(Symbol)) } def type end @@ -228,6 +251,7 @@ module OpenAI def to_hash end + # The type of the content. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi index 82d07489..4986a86f 100644 --- a/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/file_search_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FileSearchToolCallDelta < OpenAI::BaseModel + # For now, this is always going to be an empty object. sig { returns(T.anything) } def file_search end @@ -14,6 +15,7 @@ module OpenAI def file_search=(_) end + # The index of the tool call in the tool calls array. sig { returns(Integer) } def index end @@ -22,6 +24,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `file_search` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -30,6 +34,7 @@ module OpenAI def type=(_) end + # The ID of the tool call object. sig { returns(T.nilable(String)) } def id end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi index 4f13f258..2fbc10a5 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FunctionToolCall < OpenAI::BaseModel + # The ID of the tool call object. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The definition of the function that was called.
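+ #
+ # For example (illustrative only; `tool_call` is an assumed FunctionToolCall,
+ # and the arguments emitted by the model are not guaranteed to be valid JSON):
+ #
+ #   require "json"
+ #   args = JSON.parse(tool_call.function.arguments)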
sig { returns(OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function) } def function end @@ -25,6 +27,8 @@ module OpenAI def function=(_) end + # The type of tool call. This is always going to be `function` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -54,6 +58,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments passed to the function. sig { returns(String) } def arguments end @@ -62,6 +67,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(String) } def name end @@ -70,6 +76,9 @@ module OpenAI def name=(_) end + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. sig { returns(T.nilable(String)) } def output end @@ -78,6 +87,7 @@ module OpenAI def output=(_) end + # The definition of the function that was called. sig do params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi index d5acf0b1..c026f468 100644 --- a/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/function_tool_call_delta.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class FunctionToolCallDelta < OpenAI::BaseModel + # The index of the tool call in the tool calls array. sig { returns(Integer) } def index end @@ -14,6 +15,8 @@ module OpenAI def index=(_) end + # The type of tool call. This is always going to be `function` for this type of + # tool call. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # The ID of the tool call object. sig { returns(T.nilable(String)) } def id end @@ -30,6 +34,7 @@ module OpenAI def id=(_) end + # The definition of the function that was called. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function)) } def function end @@ -68,6 +73,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments passed to the function. sig { returns(T.nilable(String)) } def arguments end @@ -76,6 +82,7 @@ module OpenAI def arguments=(_) end + # The name of the function. sig { returns(T.nilable(String)) } def name end @@ -84,6 +91,9 @@ module OpenAI def name=(_) end + # The output of the function. This will be `null` if the outputs have not been + # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) + # yet. sig { returns(T.nilable(String)) } def output end @@ -92,6 +102,7 @@ module OpenAI def output=(_) end + # The definition of the function that was called. sig do params(arguments: String, name: String, output: T.nilable(String)).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi index 73222dd0..ba7932c9 100644 --- a/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/message_creation_step_details.rbi @@ -17,6 +17,7 @@ module OpenAI def message_creation=(_) end + # Always `message_creation`. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Details of the message creation by the run step. 
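+ #
+ # A construction sketch (the message ID is a placeholder, and the nested
+ # `MessageCreation.new(message_id:)` keyword constructor is assumed from the
+ # field below):
+ #
+ #   OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails.new(
+ #     message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation.new(
+ #       message_id: "msg_123"
+ #     )
+ #   )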
sig do params( message_creation: OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation, @@ -48,6 +50,7 @@ module OpenAI end class MessageCreation < OpenAI::BaseModel + # The ID of the message that was created by this run step. sig { returns(String) } def message_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi index 2f08a34f..557b42f9 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStep < OpenAI::BaseModel + # The identifier of the run step, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -16,6 +17,9 @@ module OpenAI def id=(_) end + # The ID of the + # [assistant](https://platform.openai.com/docs/api-reference/assistants) + # associated with the run step. sig { returns(String) } def assistant_id end @@ -24,6 +28,7 @@ module OpenAI def assistant_id=(_) end + # The Unix timestamp (in seconds) for when the run step was cancelled. sig { returns(T.nilable(Integer)) } def cancelled_at end @@ -32,6 +37,7 @@ module OpenAI def cancelled_at=(_) end + # The Unix timestamp (in seconds) for when the run step completed. sig { returns(T.nilable(Integer)) } def completed_at end @@ -40,6 +46,7 @@ module OpenAI def completed_at=(_) end + # The Unix timestamp (in seconds) for when the run step was created. sig { returns(Integer) } def created_at end @@ -48,6 +55,8 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the run step expired. A step is + # considered expired if the parent run is expired. sig { returns(T.nilable(Integer)) } def expired_at end @@ -56,6 +65,7 @@ module OpenAI def expired_at=(_) end + # The Unix timestamp (in seconds) for when the run step failed. sig { returns(T.nilable(Integer)) } def failed_at end @@ -64,6 +74,8 @@ module OpenAI def failed_at=(_) end + # The last error associated with this run step. Will be `null` if there are no + # errors. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::LastError)) } def last_error end @@ -75,6 +87,12 @@ module OpenAI def last_error=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -83,6 +101,7 @@ module OpenAI def metadata=(_) end + # The object type, which is always `thread.run.step`. sig { returns(Symbol) } def object end @@ -91,6 +110,8 @@ module OpenAI def object=(_) end + # The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that + # this run step is a part of. sig { returns(String) } def run_id end @@ -99,6 +120,8 @@ module OpenAI def run_id=(_) end + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. sig { returns(Symbol) } def status end @@ -107,6 +130,7 @@ module OpenAI def status=(_) end + # The details of the run step. sig do returns( T.any( @@ -135,6 +159,8 @@ module OpenAI def step_details=(_) end + # The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) + # that was run. 
sig { returns(String) } def thread_id end @@ -143,6 +169,7 @@ module OpenAI def thread_id=(_) end + # The type of run step, which can be either `message_creation` or `tool_calls`. sig { returns(Symbol) } def type end @@ -151,6 +178,8 @@ module OpenAI def type=(_) end + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. sig { returns(T.nilable(OpenAI::Models::Beta::Threads::Runs::RunStep::Usage)) } def usage end @@ -162,6 +191,7 @@ module OpenAI def usage=(_) end + # Represents a step in execution of a run. sig do params( id: String, @@ -236,6 +266,7 @@ module OpenAI end class LastError < OpenAI::BaseModel + # One of `server_error` or `rate_limit_exceeded`. sig { returns(Symbol) } def code end @@ -244,6 +275,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -252,6 +284,8 @@ module OpenAI def message=(_) end + # The last error associated with this run step. Will be `null` if there are no + # errors. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -260,6 +294,7 @@ module OpenAI def to_hash end + # One of `server_error` or `rate_limit_exceeded`. class Code < OpenAI::Enum abstract! @@ -274,6 +309,8 @@ module OpenAI end end + # The status of the run step, which can be either `in_progress`, `cancelled`, + # `failed`, `completed`, or `expired`. class Status < OpenAI::Enum abstract! @@ -290,10 +327,12 @@ module OpenAI end end + # The details of the run step. class StepDetails < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -305,6 +344,7 @@ module OpenAI end end + # The type of run step, which can be either `message_creation` or `tool_calls`. class Type < OpenAI::Enum abstract! @@ -319,6 +359,7 @@ module OpenAI end class Usage < OpenAI::BaseModel + # Number of completion tokens used over the course of the run step. sig { returns(Integer) } def completion_tokens end @@ -327,6 +368,7 @@ module OpenAI def completion_tokens=(_) end + # Number of prompt tokens used over the course of the run step. sig { returns(Integer) } def prompt_tokens end @@ -335,6 +377,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -343,6 +386,8 @@ module OpenAI def total_tokens=(_) end + # Usage statistics related to the run step. This value will be `null` while the + # run step's status is `in_progress`. sig do params( completion_tokens: Integer, diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi index 60805244..a5c04563 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDelta < OpenAI::BaseModel + # The details of the run step. sig do returns( T.nilable( @@ -38,6 +39,7 @@ module OpenAI def step_details=(_) end + # The delta containing the fields that have changed on the run step. sig do params( step_details: T.any( @@ -64,10 +66,12 @@ module OpenAI def to_hash end + # The details of the run step. class StepDetails < OpenAI::Union abstract! 
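+
+ # A hypothetical dispatch sketch over this union's variants (`step` is an
+ # assumed RunStep instance):
+ #
+ #   case step.step_details
+ #   when OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails
+ #     puts step.step_details.message_creation.message_id
+ #   when OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails
+ #     puts step.step_details.tool_calls.length
+ #   end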
class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi index 73076f3a..7cc4a3fb 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_event.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDeltaEvent < OpenAI::BaseModel + # The identifier of the run step, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -16,6 +17,7 @@ module OpenAI def id=(_) end + # The delta containing the fields that have changed on the run step. sig { returns(OpenAI::Models::Beta::Threads::Runs::RunStepDelta) } def delta end @@ -27,6 +29,7 @@ module OpenAI def delta=(_) end + # The object type, which is always `thread.run.step.delta`. sig { returns(Symbol) } def object end @@ -35,6 +38,8 @@ module OpenAI def object=(_) end + # Represents a run step delta i.e. any changed fields on a run step during + # streaming. sig do params(id: String, delta: OpenAI::Models::Beta::Threads::Runs::RunStepDelta, object: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi index e933c731..02216a5c 100644 --- a/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rbi @@ -8,6 +8,7 @@ module OpenAI module Runs class RunStepDeltaMessageDelta < OpenAI::BaseModel + # Always `message_creation`. sig { returns(Symbol) } def type end @@ -27,6 +28,7 @@ module OpenAI def message_creation=(_) end + # Details of the message creation by the run step. sig do params( message_creation: OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, @@ -50,6 +52,7 @@ module OpenAI end class MessageCreation < OpenAI::BaseModel + # The ID of the message that was created by this run step. sig { returns(T.nilable(String)) } def message_id end diff --git a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi index 56b21c91..81075759 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_list_params.rbi @@ -17,6 +17,10 @@ module OpenAI def thread_id=(_) end + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -25,6 +29,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -33,6 +41,13 @@ module OpenAI def before=(_) end + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. 
+ # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -41,6 +56,8 @@ module OpenAI def include=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -49,6 +66,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -89,6 +108,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi index 9778a156..71dc2e52 100644 --- a/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/step_retrieve_params.rbi @@ -25,6 +25,13 @@ module OpenAI def run_id=(_) end + # A list of additional fields to include in the response. Currently the only + # supported value is `step_details.tool_calls[*].file_search.results[*].content` + # to fetch the file search result content. + # + # See the + # [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + # for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi index 5d68bb14..9e16226b 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call.rbi @@ -5,10 +5,12 @@ module OpenAI module Beta module Threads module Runs + # Details of the Code Interpreter tool call the run step was involved in. class ToolCall < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi index fc5420d6..9fae60be 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta.rbi @@ -5,10 +5,12 @@ module OpenAI module Beta module Threads module Runs + # Details of the Code Interpreter tool call the run step was involved in. class ToolCallDelta < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi index f9cb13ff..3258b9d6 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_call_delta_object.rbi @@ -6,6 +6,7 @@ module OpenAI module Threads module Runs class ToolCallDeltaObject < OpenAI::BaseModel + # Always `tool_calls`. sig { returns(Symbol) } def type end @@ -14,6 +15,9 @@ module OpenAI def type=(_) end + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. 
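+ #
+ # An iteration sketch (illustrative only; `delta` is an assumed
+ # ToolCallDeltaObject, and several fields on each element may be nil while
+ # streaming):
+ #
+ #   delta.tool_calls&.each do |tool_call|
+ #     puts "#{tool_call.index}: #{tool_call.type}"
+ #   end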
sig do returns( T.nilable( @@ -53,6 +57,7 @@ module OpenAI def tool_calls=(_) end + # Details of the tool call. sig do params( tool_calls: T::Array[ diff --git a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi index 4d2d6cf2..3ad8cc7f 100644 --- a/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi +++ b/rbi/lib/openai/models/beta/threads/runs/tool_calls_step_details.rbi @@ -6,6 +6,9 @@ module OpenAI module Threads module Runs class ToolCallsStepDetails < OpenAI::BaseModel + # An array of tool calls the run step was involved in. These can be associated + # with one of three types of tools: `code_interpreter`, `file_search`, or + # `function`. sig do returns( T::Array[ @@ -43,6 +46,7 @@ module OpenAI def tool_calls=(_) end + # Always `tool_calls`. sig { returns(Symbol) } def type end @@ -51,6 +55,7 @@ module OpenAI def type=(_) end + # Details of the tool call. sig do params( tool_calls: T::Array[ diff --git a/rbi/lib/openai/models/beta/threads/text.rbi b/rbi/lib/openai/models/beta/threads/text.rbi index 97fe10b1..0ba8015c 100644 --- a/rbi/lib/openai/models/beta/threads/text.rbi +++ b/rbi/lib/openai/models/beta/threads/text.rbi @@ -39,6 +39,7 @@ module OpenAI def annotations=(_) end + # The data that makes up the text. sig { returns(String) } def value end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block.rbi b/rbi/lib/openai/models/beta/threads/text_content_block.rbi index d714859a..5a3f345f 100644 --- a/rbi/lib/openai/models/beta/threads/text_content_block.rbi +++ b/rbi/lib/openai/models/beta/threads/text_content_block.rbi @@ -13,6 +13,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -21,6 +22,7 @@ module OpenAI def type=(_) end + # The text content that is part of a message. sig { params(text: OpenAI::Models::Beta::Threads::Text, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi b/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi index dcfd074b..58764a6f 100644 --- a/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi +++ b/rbi/lib/openai/models/beta/threads/text_content_block_param.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class TextContentBlockParam < OpenAI::BaseModel + # Text content to be sent to the model sig { returns(String) } def text end @@ -13,6 +14,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -21,6 +23,7 @@ module OpenAI def type=(_) end + # The text content that is part of a message. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/beta/threads/text_delta.rbi b/rbi/lib/openai/models/beta/threads/text_delta.rbi index 3ffea679..12996683 100644 --- a/rbi/lib/openai/models/beta/threads/text_delta.rbi +++ b/rbi/lib/openai/models/beta/threads/text_delta.rbi @@ -41,6 +41,7 @@ module OpenAI def annotations=(_) end + # The data that makes up the text. 
sig { returns(T.nilable(String)) } def value end diff --git a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi b/rbi/lib/openai/models/beta/threads/text_delta_block.rbi index a3e06fc4..80145b24 100644 --- a/rbi/lib/openai/models/beta/threads/text_delta_block.rbi +++ b/rbi/lib/openai/models/beta/threads/text_delta_block.rbi @@ -5,6 +5,7 @@ module OpenAI module Beta module Threads class TextDeltaBlock < OpenAI::BaseModel + # The index of the content part in the message. sig { returns(Integer) } def index end @@ -13,6 +14,7 @@ module OpenAI def index=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -29,6 +31,7 @@ module OpenAI def text=(_) end + # The text content that is part of a message. sig do params(index: Integer, text: OpenAI::Models::Beta::Threads::TextDelta, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/chat/chat_completion.rbi b/rbi/lib/openai/models/chat/chat_completion.rbi index 85749208..361ee7ba 100644 --- a/rbi/lib/openai/models/chat/chat_completion.rbi +++ b/rbi/lib/openai/models/chat/chat_completion.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletion < OpenAI::BaseModel + # A unique identifier for the chat completion. sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # A list of chat completion choices. Can be more than one if `n` is greater + # than 1. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletion::Choice]) } def choices end @@ -25,6 +28,7 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the chat completion was created. sig { returns(Integer) } def created end @@ -33,6 +37,7 @@ module OpenAI def created=(_) end + # The model used for the chat completion. sig { returns(String) } def model end @@ -41,6 +46,7 @@ module OpenAI def model=(_) end + # The object type, which is always `chat.completion`. sig { returns(Symbol) } def object end @@ -49,6 +55,7 @@ module OpenAI def object=(_) end + # The service tier used for processing the request. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -57,6 +64,10 @@ module OpenAI def service_tier=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } def system_fingerprint end @@ -65,6 +76,7 @@ module OpenAI def system_fingerprint=(_) end + # Usage statistics for the completion request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -73,6 +85,8 @@ module OpenAI def usage=(_) end + # Represents a chat completion response returned by the model, based on the provided + # input. sig do params( id: String, @@ -117,6 +131,12 @@ module OpenAI end class Choice < OpenAI::BaseModel + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. sig { returns(Symbol) } def finish_reason end @@ -125,6 +145,7 @@ module OpenAI def finish_reason=(_) end + # The index of the choice in the list of choices.
sig { returns(Integer) } def index end @@ -133,6 +154,7 @@ module OpenAI def index=(_) end + # Log probability information for the choice. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs)) } def logprobs end @@ -144,6 +166,7 @@ module OpenAI def logprobs=(_) end + # A chat completion message generated by the model. sig { returns(OpenAI::Models::Chat::ChatCompletionMessage) } def message end @@ -180,6 +203,12 @@ module OpenAI def to_hash end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. class FinishReason < OpenAI::Enum abstract! @@ -197,6 +226,7 @@ module OpenAI end class Logprobs < OpenAI::BaseModel + # A list of message content tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def content end @@ -208,6 +238,7 @@ module OpenAI def content=(_) end + # A list of message refusal tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def refusal end @@ -219,6 +250,7 @@ module OpenAI def refusal=(_) end + # Log probability information for the choice. sig do params( content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), @@ -243,6 +275,7 @@ module OpenAI end end + # The service tier used for processing the request. class ServiceTier < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi index 10203986..a2c3cf1c 100644 --- a/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_assistant_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionAssistantMessageParam < OpenAI::BaseModel + # The role of the messages author, in this case `assistant`. sig { returns(Symbol) } def role end @@ -14,6 +15,8 @@ module OpenAI def role=(_) end + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio)) } def audio end @@ -25,6 +28,8 @@ module OpenAI def audio=(_) end + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. sig do returns( T.nilable( @@ -74,6 +79,8 @@ module OpenAI def content=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall)) } def function_call end @@ -85,6 +92,8 @@ module OpenAI def function_call=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -93,6 +102,7 @@ module OpenAI def name=(_) end + # The refusal message by the assistant. 
sig { returns(T.nilable(String)) } def refusal end @@ -101,6 +111,7 @@ module OpenAI def refusal=(_) end + # The tool calls generated by the model, such as function calls. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } def tool_calls end @@ -112,6 +123,7 @@ module OpenAI def tool_calls=(_) end + # Messages sent by the model in response to user messages. sig do params( audio: T.nilable(OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio), @@ -165,6 +177,7 @@ module OpenAI end class Audio < OpenAI::BaseModel + # Unique identifier for a previous audio response from the model. sig { returns(String) } def id end @@ -173,6 +186,8 @@ module OpenAI def id=(_) end + # Data about a previous audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { params(id: String).returns(T.attached_class) } def self.new(id:) end @@ -182,6 +197,8 @@ module OpenAI end end + # The contents of the assistant message. Required unless `tool_calls` or + # `function_call` is specified. class Content < OpenAI::Union abstract! @@ -194,10 +211,13 @@ module OpenAI ] end + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). class ArrayOfContentPart < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -210,6 +230,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -233,6 +254,10 @@ module OpenAI end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -241,6 +266,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -249,6 +275,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_audio.rbi index f8431d64..489f4145 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionAudio < OpenAI::BaseModel + # Unique identifier for this audio response. sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # Base64 encoded audio bytes generated by the model, in the format specified in + # the request. sig { returns(String) } def data end @@ -22,6 +25,8 @@ module OpenAI def data=(_) end + # The Unix timestamp (in seconds) for when this audio response will no longer be + # accessible on the server for use in multi-turn conversations. sig { returns(Integer) } def expires_at end @@ -30,6 +35,7 @@ module OpenAI def expires_at=(_) end + # Transcript of the audio generated by the model. sig { returns(String) } def transcript end @@ -38,6 +44,9 @@ module OpenAI def transcript=(_) end + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). 
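+ #
+ # A decoding sketch (assumes `"wav"` was the requested output format; `audio`
+ # is an assumed ChatCompletionAudio instance):
+ #
+ #   require "base64"
+ #   File.binwrite("reply.wav", Base64.decode64(audio.data))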
sig do params(id: String, data: String, expires_at: Integer, transcript: String).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi index 0cbbdba5..a8c18d75 100644 --- a/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_audio_param.rbi @@ -6,6 +6,8 @@ module OpenAI module Chat class ChatCompletionAudioParam < OpenAI::BaseModel + # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + # or `pcm16`. sig { returns(Symbol) } def format_ end @@ -14,6 +16,8 @@ module OpenAI def format_=(_) end + # The voice the model uses to respond. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. sig { returns(Symbol) } def voice end @@ -22,6 +26,9 @@ module OpenAI def voice=(_) end + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { params(format_: Symbol, voice: Symbol).returns(T.attached_class) } def self.new(format_:, voice:) end @@ -30,6 +37,8 @@ module OpenAI def to_hash end + # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, + # or `pcm16`. class Format < OpenAI::Enum abstract! @@ -46,6 +55,8 @@ module OpenAI end end + # The voice the model uses to respond. Supported voices are `alloy`, `ash`, + # `ballad`, `coral`, `echo`, `sage`, and `shimmer`. class Voice < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi index 12f6d8b5..7dbd7a66 100644 --- a/rbi/lib/openai/models/chat/chat_completion_chunk.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_chunk.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionChunk < OpenAI::BaseModel + # A unique identifier for the chat completion. Each chunk has the same ID. sig { returns(String) } def id end @@ -14,6 +15,9 @@ module OpenAI def id=(_) end + # A list of chat completion choices. Can contain more than one element if `n` is + # greater than 1. Can also be empty for the last chunk if you set + # `stream_options: {"include_usage": true}`. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionChunk::Choice]) } def choices end @@ -25,6 +29,8 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the chat completion was created. Each + # chunk has the same timestamp. sig { returns(Integer) } def created end @@ -33,6 +39,7 @@ module OpenAI def created=(_) end + # The model used to generate the completion. sig { returns(String) } def model end @@ -41,6 +48,7 @@ module OpenAI def model=(_) end + # The object type, which is always `chat.completion.chunk`. sig { returns(Symbol) } def object end @@ -49,6 +57,7 @@ module OpenAI def object=(_) end + # The service tier used for processing the request. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -57,6 +66,9 @@ module OpenAI def service_tier=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism.
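+ #
+ # For example (illustrative only), comparing this value across two responses
+ # generated with the same `seed` hints at whether the backend configuration
+ # changed between them:
+ #
+ #   same_backend = first.system_fingerprint == second.system_fingerprint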
sig { returns(T.nilable(String)) } def system_fingerprint end @@ -65,6 +77,10 @@ module OpenAI def system_fingerprint=(_) end + # An optional field that will only be present when you set + # `stream_options: {"include_usage": true}` in your request. When present, it + # contains a null value except for the last chunk which contains the token usage + # statistics for the entire request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -73,6 +89,9 @@ module OpenAI def usage=(_) end + # Represents a streamed chunk of a chat completion response returned by the model, + # based on the provided input. + # [Learn more](https://platform.openai.com/docs/guides/streaming-responses). sig do params( id: String, @@ -117,6 +136,7 @@ module OpenAI end class Choice < OpenAI::BaseModel + # A chat completion delta generated by streamed model responses. sig { returns(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta) } def delta end @@ -128,6 +148,12 @@ module OpenAI def delta=(_) end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. sig { returns(T.nilable(Symbol)) } def finish_reason end @@ -136,6 +162,7 @@ module OpenAI def finish_reason=(_) end + # The index of the choice in the list of choices. sig { returns(Integer) } def index end @@ -144,6 +171,7 @@ module OpenAI def index=(_) end + # Log probability information for the choice. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs)) } def logprobs end @@ -182,6 +210,7 @@ module OpenAI end class Delta < OpenAI::BaseModel + # The contents of the chunk message. sig { returns(T.nilable(String)) } def content end @@ -190,6 +219,8 @@ module OpenAI def content=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall)) } def function_call end @@ -201,6 +232,7 @@ module OpenAI def function_call=(_) end + # The refusal message generated by the model. sig { returns(T.nilable(String)) } def refusal end @@ -209,6 +241,7 @@ module OpenAI def refusal=(_) end + # The role of the author of this message. sig { returns(T.nilable(Symbol)) } def role end @@ -228,6 +261,7 @@ module OpenAI def tool_calls=(_) end + # A chat completion delta generated by streamed model responses. sig do params( content: T.nilable(String), @@ -257,6 +291,10 @@ module OpenAI end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } def arguments end @@ -265,6 +303,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(T.nilable(String)) } def name end @@ -273,6 +312,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. 
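+ #
+ # An accumulation sketch for streaming (`buffer` is an assumed String; argument
+ # fragments arrive incrementally across chunks):
+ #
+ #   buffer << delta.function_call.arguments.to_s if delta.function_call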
sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments: nil, name: nil) end @@ -282,6 +323,7 @@ module OpenAI end end + # The role of the author of this message. class Role < OpenAI::Enum abstract! @@ -307,6 +349,7 @@ module OpenAI def index=(_) end + # The ID of the tool call. sig { returns(T.nilable(String)) } def id end @@ -326,6 +369,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(T.nilable(Symbol)) } def type end @@ -361,6 +405,10 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(T.nilable(String)) } def arguments end @@ -369,6 +417,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(T.nilable(String)) } def name end @@ -386,6 +435,7 @@ module OpenAI end end + # The type of the tool. Currently, only `function` is supported. class Type < OpenAI::Enum abstract! @@ -400,6 +450,12 @@ module OpenAI end end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, `content_filter` if + # content was omitted due to a flag from our content filters, `tool_calls` if the + # model called a tool, or `function_call` (deprecated) if the model called a + # function. class FinishReason < OpenAI::Enum abstract! @@ -417,6 +473,7 @@ module OpenAI end class Logprobs < OpenAI::BaseModel + # A list of message content tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def content end @@ -428,6 +485,7 @@ module OpenAI def content=(_) end + # A list of message refusal tokens with log probability information. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob])) } def refusal end @@ -439,6 +497,7 @@ module OpenAI def refusal=(_) end + # Log probability information for the choice. sig do params( content: T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob]), @@ -463,6 +522,7 @@ module OpenAI end end + # The service tier used for processing the request. class ServiceTier < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi index 02a241f0..8ee6a1f4 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part.rbi @@ -5,6 +5,8 @@ module OpenAI ChatCompletionContentPart = T.type_alias { Chat::ChatCompletionContentPart } module Chat + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). class ChatCompletionContentPart < OpenAI::Union abstract! @@ -20,6 +22,7 @@ module OpenAI def file=(_) end + # The type of the content part. Always `file`. sig { returns(Symbol) } def type end @@ -28,6 +31,8 @@ module OpenAI def type=(_) end + # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + # generation. 
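+ #
+ # A construction sketch referencing an uploaded file (the ID is a placeholder,
+ # and the nested `File.new(file_id:)` keyword constructor is assumed from the
+ # fields below):
+ #
+ #   OpenAI::Models::Chat::ChatCompletionContentPart::File.new(
+ #     file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File.new(file_id: "file_123")
+ #   )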
sig do params(file: OpenAI::Models::Chat::ChatCompletionContentPart::File::File, type: Symbol) .returns(T.attached_class) @@ -40,6 +45,8 @@ module OpenAI end class File < OpenAI::BaseModel + # The base64 encoded file data, used when passing the file to the model as a + # string. sig { returns(T.nilable(String)) } def file_data end @@ -48,6 +55,7 @@ module OpenAI def file_data=(_) end + # The ID of an uploaded file to use as input. sig { returns(T.nilable(String)) } def file_id end @@ -56,6 +64,7 @@ module OpenAI def file_id=(_) end + # The name of the file, used when passing the file to the model as a string. sig { returns(T.nilable(String)) } def file_name end @@ -75,6 +84,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi index e74c6c80..4f78201d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_image.rbi @@ -17,6 +17,7 @@ module OpenAI def image_url=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Learn about [image inputs](https://platform.openai.com/docs/guides/vision). sig do params(image_url: OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL, type: Symbol) .returns(T.attached_class) @@ -39,6 +41,7 @@ module OpenAI end class ImageURL < OpenAI::BaseModel + # Either a URL of the image or the base64 encoded image data. sig { returns(String) } def url end @@ -47,6 +50,8 @@ module OpenAI def url=(_) end + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). sig { returns(T.nilable(Symbol)) } def detail end @@ -63,6 +68,8 @@ module OpenAI def to_hash end + # Specifies the detail level of the image. Learn more in the + # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). class Detail < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi index bff9cc3b..52dfed23 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_input_audio.rbi @@ -17,6 +17,7 @@ module OpenAI def input_audio=(_) end + # The type of the content part. Always `input_audio`. sig { returns(Symbol) } def type end @@ -25,6 +26,7 @@ module OpenAI def type=(_) end + # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). sig do params(input_audio: OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio, type: Symbol) .returns(T.attached_class) @@ -42,6 +44,7 @@ module OpenAI end class InputAudio < OpenAI::BaseModel + # Base64 encoded audio data. sig { returns(String) } def data end @@ -50,6 +53,7 @@ module OpenAI def data=(_) end + # The format of the encoded audio data. Currently supports "wav" and "mp3". sig { returns(Symbol) } def format_ end @@ -66,6 +70,7 @@ module OpenAI def to_hash end + # The format of the encoded audio data. Currently supports "wav" and "mp3". class Format < OpenAI::Enum abstract! 
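
A sketch of the audio content part documented above: the keyword names mirror the getters in this diff (`data`, `format_`), while the `:wav` symbol and the file path are assumptions.

require "base64"

input_audio = OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio.new(
  data: Base64.strict_encode64(File.binread("greeting.wav")), # Base64 encoded audio data
  format_: :wav # "wav" or "mp3"; assumed to be exposed as a Symbol here
)
audio_part = OpenAI::Models::Chat::ChatCompletionContentPartInputAudio.new(input_audio: input_audio)
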
diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi index 263c9c0b..392ce584 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_refusal.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionContentPartRefusal < OpenAI::BaseModel + # The refusal message generated by the model. sig { returns(String) } def refusal end @@ -14,6 +15,7 @@ module OpenAI def refusal=(_) end + # The type of the content part. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi index 84a24c5d..1154a72d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_content_part_text.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionContentPartText < OpenAI::BaseModel + # The text content. sig { returns(String) } def text end @@ -14,6 +15,7 @@ module OpenAI def text=(_) end + # The type of the content part. sig { returns(Symbol) } def type end @@ -22,6 +24,8 @@ module OpenAI def type=(_) end + # Learn about + # [text inputs](https://platform.openai.com/docs/guides/text-generation). sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi b/rbi/lib/openai/models/chat/chat_completion_deleted.rbi index 763165dc..72764c05 100644 --- a/rbi/lib/openai/models/chat/chat_completion_deleted.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_deleted.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionDeleted < OpenAI::BaseModel + # The ID of the chat completion that was deleted. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # Whether the chat completion was deleted. sig { returns(T::Boolean) } def deleted end @@ -22,6 +24,7 @@ module OpenAI def deleted=(_) end + # The type of object being deleted. sig { returns(Symbol) } def object end diff --git a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi index b7f2d9e7..5f435cb4 100644 --- a/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_developer_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionDeveloperMessageParam < OpenAI::BaseModel + # The contents of the developer message. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `developer`. sig { returns(Symbol) } def role end @@ -25,6 +27,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -33,6 +37,9 @@ module OpenAI def name=(_) end + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. 
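
A short sketch of the developer message documented above, using the constructor whose sig follows; the instruction text and `name` value are placeholders.

developer_message = OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam.new(
  content: "Answer in formal English and cite sources.",
  name: "style_guide" # optional; distinguishes participants sharing a role
)
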
sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -57,12 +64,14 @@ module OpenAI def to_hash end + # The contents of the developer message. class Content < OpenAI::Union abstract! ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } class << self + # @api private sig do override .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) diff --git a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi index abf316d2..a17d3350 100644 --- a/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_function_call_option.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionFunctionCallOption < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -14,6 +15,8 @@ module OpenAI def name=(_) end + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. sig { params(name: String).returns(T.attached_class) } def self.new(name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi index 1cf6ef40..1c035a2e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_function_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionFunctionMessageParam < OpenAI::BaseModel + # The contents of the function message. sig { returns(T.nilable(String)) } def content end @@ -14,6 +15,7 @@ module OpenAI def content=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -22,6 +24,7 @@ module OpenAI def name=(_) end + # The role of the messages author, in this case `function`. sig { returns(Symbol) } def role end diff --git a/rbi/lib/openai/models/chat/chat_completion_message.rbi b/rbi/lib/openai/models/chat/chat_completion_message.rbi index 91a6344d..a54250d7 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionMessage < OpenAI::BaseModel + # The contents of the message. sig { returns(T.nilable(String)) } def content end @@ -14,6 +15,7 @@ module OpenAI def content=(_) end + # The refusal message generated by the model. sig { returns(T.nilable(String)) } def refusal end @@ -22,6 +24,7 @@ module OpenAI def refusal=(_) end + # The role of the author of this message. sig { returns(Symbol) } def role end @@ -30,6 +33,8 @@ module OpenAI def role=(_) end + # Annotations for the message, when applicable, as when using the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessage::Annotation])) } def annotations end @@ -41,6 +46,9 @@ module OpenAI def annotations=(_) end + # If the audio output modality is requested, this object contains data about the + # audio response from the model. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudio)) } def audio end @@ -52,6 +60,8 @@ module OpenAI def audio=(_) end + # Deprecated and replaced by `tool_calls`. 
The name and arguments of a function + # that should be called, as generated by the model. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall)) } def function_call end @@ -63,6 +73,7 @@ module OpenAI def function_call=(_) end + # The tool calls generated by the model, such as function calls. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionMessageToolCall])) } def tool_calls end @@ -74,6 +85,7 @@ module OpenAI def tool_calls=(_) end + # A chat completion message generated by the model. sig do params( content: T.nilable(String), @@ -107,6 +119,7 @@ module OpenAI end class Annotation < OpenAI::BaseModel + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -115,6 +128,7 @@ module OpenAI def type=(_) end + # A URL citation when using web search. sig { returns(OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation) } def url_citation end @@ -126,6 +140,7 @@ module OpenAI def url_citation=(_) end + # A URL citation when using web search. sig do params(url_citation: OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation, type: Symbol) .returns(T.attached_class) @@ -143,6 +158,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. sig { returns(Integer) } def end_index end @@ -151,6 +167,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -159,6 +176,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -167,6 +185,7 @@ module OpenAI def title=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -175,6 +194,7 @@ module OpenAI def url=(_) end + # A URL citation when using web search. sig do params( end_index: Integer, @@ -193,6 +213,10 @@ module OpenAI end class FunctionCall < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -201,6 +225,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -209,6 +234,8 @@ module OpenAI def name=(_) end + # Deprecated and replaced by `tool_calls`. The name and arguments of a function + # that should be called, as generated by the model. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi index 471f5f89..1a47472e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_param.rbi @@ -5,10 +5,14 @@ module OpenAI ChatCompletionMessageParam = T.type_alias { Chat::ChatCompletionMessageParam } module Chat + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, `developer` messages + # replace the previous `system` messages. class ChatCompletionMessageParam < OpenAI::Union abstract! 
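
Reading the annotation fields documented above might look like the following sketch, where `completion` is assumed to be a chat completion returned by the client:

message = completion.choices.first.message
puts(message.content)
(message.annotations || []).each do |annotation|
  citation = annotation.url_citation
  puts("#{citation.title} <#{citation.url}> at #{citation.start_index}..#{citation.end_index}")
end
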
class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi index ca4c6ac0..fadd4257 100644 --- a/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_message_tool_call.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionMessageToolCall < OpenAI::BaseModel + # The ID of the tool call. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The function that the model called. sig { returns(OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function) } def function end @@ -25,6 +27,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end @@ -50,6 +53,10 @@ module OpenAI end class Function < OpenAI::BaseModel + # The arguments to call the function with, as generated by the model in JSON + # format. Note that the model does not always generate valid JSON, and may + # hallucinate parameters not defined by your function schema. Validate the + # arguments in your code before calling your function. sig { returns(String) } def arguments end @@ -58,6 +65,7 @@ module OpenAI def arguments=(_) end + # The name of the function to call. sig { returns(String) } def name end @@ -66,6 +74,7 @@ module OpenAI def name=(_) end + # The function that the model called. sig { params(arguments: String, name: String).returns(T.attached_class) } def self.new(arguments:, name:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi index 89db6837..d1d23deb 100644 --- a/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_named_tool_choice.rbi @@ -17,6 +17,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end @@ -25,6 +26,8 @@ module OpenAI def type=(_) end + # Specifies a tool the model should use. Use to force the model to call a specific + # function. sig do params(function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function, type: Symbol) .returns(T.attached_class) @@ -37,6 +40,7 @@ module OpenAI end class Function < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end diff --git a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi index 279b9fbe..b16430d8 100644 --- a/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_prediction_content.rbi @@ -6,6 +6,9 @@ module OpenAI module Chat class ChatCompletionPredictionContent < OpenAI::BaseModel + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +20,8 @@ module OpenAI def content=(_) end + # The type of the predicted content you want to provide. This type is currently + # always `content`. 
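
A sketch of the predicted-output shape documented above, assuming most of a regenerated file will match the original; the path is hypothetical and `type` is left at its default.

original_source = File.read("app/models/user.rb")
prediction = OpenAI::Models::Chat::ChatCompletionPredictionContent.new(content: original_source)
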
sig { returns(Symbol) } def type end @@ -25,6 +30,8 @@ module OpenAI def type=(_) end + # Static predicted output content, such as the content of a text file that is + # being regenerated. sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -44,12 +51,16 @@ module OpenAI def to_hash end + # The content that should be matched when generating a model response. If + # generated tokens would match this content, the entire model response can be + # returned much more quickly. class Content < OpenAI::Union abstract! ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } class << self + # @api private sig do override .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) diff --git a/rbi/lib/openai/models/chat/chat_completion_role.rbi b/rbi/lib/openai/models/chat/chat_completion_role.rbi index 807d6735..24a8acf6 100644 --- a/rbi/lib/openai/models/chat/chat_completion_role.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_role.rbi @@ -5,6 +5,7 @@ module OpenAI ChatCompletionRole = T.type_alias { Chat::ChatCompletionRole } module Chat + # The role of the author of a message class ChatCompletionRole < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi b/rbi/lib/openai/models/chat/chat_completion_store_message.rbi index 0c487bdc..a63c1c01 100644 --- a/rbi/lib/openai/models/chat/chat_completion_store_message.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_store_message.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage + # The identifier of the chat message. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # A chat completion message generated by the model. sig { params(id: String).returns(T.attached_class) } def self.new(id:) end diff --git a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi index 88104415..9fbf5879 100644 --- a/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_stream_options.rbi @@ -6,6 +6,10 @@ module OpenAI module Chat class ChatCompletionStreamOptions < OpenAI::BaseModel + # If set, an additional chunk will be streamed before the `data: [DONE]` message. + # The `usage` field on this chunk shows the token usage statistics for the entire + # request, and the `choices` field will always be an empty array. All other chunks + # will also include a `usage` field, but with a null value. sig { returns(T.nilable(T::Boolean)) } def include_usage end @@ -14,6 +18,7 @@ module OpenAI def include_usage=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { params(include_usage: T::Boolean).returns(T.attached_class) } def self.new(include_usage: nil) end diff --git a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi index 4c4e8def..b2a9408e 100644 --- a/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_system_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionSystemMessageParam < OpenAI::BaseModel + # The contents of the system message. 
sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `system`. sig { returns(Symbol) } def role end @@ -25,6 +27,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -33,6 +37,9 @@ module OpenAI def name=(_) end + # Developer-provided instructions that the model should follow, regardless of + # messages sent by the user. With o1 models and newer, use `developer` messages + # for this purpose instead. sig do params( content: T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]), @@ -57,12 +64,14 @@ module OpenAI def to_hash end + # The contents of the system message. class Content < OpenAI::Union abstract! ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } class << self + # @api private sig do override .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) diff --git a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi index c280c13e..2341c139 100644 --- a/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_token_logprob.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionTokenLogprob < OpenAI::BaseModel + # The token. sig { returns(String) } def token end @@ -14,6 +15,10 @@ module OpenAI def token=(_) end + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. sig { returns(T.nilable(T::Array[Integer])) } def bytes end @@ -22,6 +27,9 @@ module OpenAI def bytes=(_) end + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. sig { returns(Float) } def logprob end @@ -30,6 +38,9 @@ module OpenAI def logprob=(_) end + # List of the most likely tokens and their log probability, at this token + # position. In rare cases, there may be fewer than the number of requested + # `top_logprobs` returned. sig { returns(T::Array[OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob]) } def top_logprobs end @@ -68,6 +79,7 @@ module OpenAI end class TopLogprob < OpenAI::BaseModel + # The token. sig { returns(String) } def token end @@ -76,6 +88,10 @@ module OpenAI def token=(_) end + # A list of integers representing the UTF-8 bytes representation of the token. + # Useful in instances where characters are represented by multiple tokens and + # their byte representations must be combined to generate the correct text + # representation. Can be `null` if there is no bytes representation for the token. sig { returns(T.nilable(T::Array[Integer])) } def bytes end @@ -84,6 +100,9 @@ module OpenAI def bytes=(_) end + # The log probability of this token, if it is within the top 20 most likely + # tokens. Otherwise, the value `-9999.0` is used to signify that the token is very + # unlikely. 
sig { returns(Float) } def logprob end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool.rbi b/rbi/lib/openai/models/chat/chat_completion_tool.rbi index 363528f2..5a41a09d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool.rbi @@ -14,6 +14,7 @@ module OpenAI def function=(_) end + # The type of the tool. Currently, only `function` is supported. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi index 3d7a6aea..218b467d 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool_choice_option.rbi @@ -5,9 +5,21 @@ module OpenAI ChatCompletionToolChoiceOption = T.type_alias { Chat::ChatCompletionToolChoiceOption } module Chat + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. class ChatCompletionToolChoiceOption < OpenAI::Union abstract! + # `none` means the model will not call any tool and instead generates a message. + # `auto` means the model can pick between generating a message or calling one or + # more tools. `required` means the model must call one or more tools. class Auto < OpenAI::Enum abstract! @@ -23,6 +35,7 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionNamedToolChoice]]) } private def variants end diff --git a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi index e320aa6c..d0aa1120 100644 --- a/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_tool_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionToolMessageParam < OpenAI::BaseModel + # The contents of the tool message. sig { returns(T.any(String, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText])) } def content end @@ -17,6 +18,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `tool`. sig { returns(Symbol) } def role end @@ -25,6 +27,7 @@ module OpenAI def role=(_) end + # Tool call that this message is responding to. sig { returns(String) } def tool_call_id end @@ -57,12 +60,14 @@ module OpenAI def to_hash end + # The contents of the tool message. class Content < OpenAI::Union abstract! 
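
The two shapes documented above combine naturally: a named tool choice forces a specific function, and a tool message answers the resulting call. A sketch with a hypothetical `get_weather` function and tool-call ID:

forced_choice = OpenAI::Models::Chat::ChatCompletionNamedToolChoice.new(
  function: OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function.new(name: "get_weather")
)

tool_reply = OpenAI::Models::Chat::ChatCompletionToolMessageParam.new(
  content: '{"temp_c":18}',   # the tool's output
  tool_call_id: "call_abc123" # ID of the assistant tool call being answered
)
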
ChatCompletionContentPartTextArray = T.type_alias { T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText] } class << self + # @api private sig do override .returns([[NilClass, String], [NilClass, T::Array[OpenAI::Models::Chat::ChatCompletionContentPartText]]]) diff --git a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi index a41ad724..2ecc1036 100644 --- a/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi +++ b/rbi/lib/openai/models/chat/chat_completion_user_message_param.rbi @@ -6,6 +6,7 @@ module OpenAI module Chat class ChatCompletionUserMessageParam < OpenAI::BaseModel + # The contents of the user message. sig do returns( T.any( @@ -55,6 +56,7 @@ module OpenAI def content=(_) end + # The role of the messages author, in this case `user`. sig { returns(Symbol) } def role end @@ -63,6 +65,8 @@ module OpenAI def role=(_) end + # An optional name for the participant. Provides the model information to + # differentiate between participants of the same role. sig { returns(T.nilable(String)) } def name end @@ -71,6 +75,8 @@ module OpenAI def name=(_) end + # Messages sent by an end user, containing prompts or additional context + # information. sig do params( content: T.any( @@ -115,6 +121,7 @@ module OpenAI def to_hash end + # The contents of the user message. class Content < OpenAI::Union abstract! @@ -130,6 +137,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/chat/completion_create_params.rbi b/rbi/lib/openai/models/chat/completion_create_params.rbi index 77aab350..6aa2796e 100644 --- a/rbi/lib/openai/models/chat/completion_create_params.rbi +++ b/rbi/lib/openai/models/chat/completion_create_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of messages comprising the conversation so far. Depending on the + # [model](https://platform.openai.com/docs/models) you use, different message + # types (modalities) are supported, like + # [text](https://platform.openai.com/docs/guides/text-generation), + # [images](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio). sig do returns( T::Array[ @@ -53,6 +59,11 @@ module OpenAI def messages=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -61,6 +72,9 @@ module OpenAI def model=(_) end + # Parameters for audio output. Required when audio output is requested with + # `modalities: ["audio"]`. + # [Learn more](https://platform.openai.com/docs/guides/audio). sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionAudioParam)) } def audio end @@ -72,6 +86,9 @@ module OpenAI def audio=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. sig { returns(T.nilable(Float)) } def frequency_penalty end @@ -80,6 +97,20 @@ module OpenAI def frequency_penalty=(_) end + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. 
+ # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionFunctionCallOption))) } def function_call end @@ -91,6 +122,9 @@ module OpenAI def function_call=(_) end + # Deprecated in favor of `tools`. + # + # A list of functions the model may generate JSON inputs for. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::CompletionCreateParams::Function])) } def functions end @@ -102,6 +136,14 @@ module OpenAI def functions=(_) end + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the + # tokenizer) to an associated bias value from -100 to 100. Mathematically, the + # bias is added to the logits generated by the model prior to sampling. The exact + # effect will vary per model, but values between -1 and 1 should decrease or + # increase likelihood of selection; values like -100 or 100 should result in a ban + # or exclusive selection of the relevant token. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } def logit_bias end @@ -110,6 +152,9 @@ module OpenAI def logit_bias=(_) end + # Whether to return log probabilities of the output tokens or not. If true, + # returns the log probabilities of each output token returned in the `content` of + # `message`. sig { returns(T.nilable(T::Boolean)) } def logprobs end @@ -118,6 +163,9 @@ module OpenAI def logprobs=(_) end + # An upper bound for the number of tokens that can be generated for a completion, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_completion_tokens end @@ -126,6 +174,13 @@ module OpenAI def max_completion_tokens=(_) end + # The maximum number of [tokens](/tokenizer) that can be generated in the chat + # completion. This value can be used to control + # [costs](https://openai.com/api/pricing/) for text generated via API. + # + # This value is now deprecated in favor of `max_completion_tokens`, and is not + # compatible with + # [o1 series models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_tokens end @@ -134,6 +189,12 @@ module OpenAI def max_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -142,6 +203,16 @@ module OpenAI def metadata=(_) end + # Output types that you would like the model to generate. Most models are capable + # of generating text, which is the default: + # + # `["text"]` + # + # The `gpt-4o-audio-preview` model can also be used to + # [generate audio](https://platform.openai.com/docs/guides/audio). 
To request that + # this model generate both text and audio responses, you can use: + # + # `["text", "audio"]` sig { returns(T.nilable(T::Array[Symbol])) } def modalities end @@ -150,6 +221,9 @@ module OpenAI def modalities=(_) end + # How many chat completion choices to generate for each input message. Note that + # you will be charged based on the number of generated tokens across all of the + # choices. Keep `n` as `1` to minimize costs. sig { returns(T.nilable(Integer)) } def n end @@ -158,6 +232,9 @@ module OpenAI def n=(_) end + # Whether to enable + # [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + # during tool use. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -166,6 +243,8 @@ module OpenAI def parallel_tool_calls=(_) end + # Static predicted output content, such as the content of a text file that is + # being regenerated. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionPredictionContent)) } def prediction end @@ -177,6 +256,9 @@ module OpenAI def prediction=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. sig { returns(T.nilable(Float)) } def presence_penalty end @@ -185,6 +267,12 @@ module OpenAI def presence_penalty=(_) end + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def reasoning_effort end @@ -193,6 +281,16 @@ module OpenAI def reasoning_effort=(_) end + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. sig do returns( T.nilable( @@ -226,6 +324,11 @@ module OpenAI def response_format=(_) end + # This feature is in Beta. If specified, our system will make a best effort to + # sample deterministically, such that repeated requests with the same `seed` and + # parameters should return the same result. Determinism is not guaranteed, and you + # should refer to the `system_fingerprint` response parameter to monitor changes + # in the backend. sig { returns(T.nilable(Integer)) } def seed end @@ -234,6 +337,20 @@ module OpenAI def seed=(_) end + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. 
+ # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. sig { returns(T.nilable(Symbol)) } def service_tier end @@ -242,6 +359,8 @@ module OpenAI def service_tier=(_) end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. sig { returns(T.nilable(T.any(String, T::Array[String]))) } def stop end @@ -259,6 +378,9 @@ module OpenAI def stop=(_) end + # Whether or not to store the output of this chat completion request for use in + # our [model distillation](https://platform.openai.com/docs/guides/distillation) + # or [evals](https://platform.openai.com/docs/guides/evals) products. sig { returns(T.nilable(T::Boolean)) } def store end @@ -267,6 +389,7 @@ module OpenAI def store=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } def stream_options end @@ -278,6 +401,10 @@ module OpenAI def stream_options=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } def temperature end @@ -286,6 +413,15 @@ module OpenAI def temperature=(_) end + # Controls which (if any) tool is called by the model. `none` means the model will + # not call any tool and instead generates a message. `auto` means the model can + # pick between generating a message or calling one or more tools. `required` means + # the model must call one or more tools. Specifying a particular tool via + # `{"type": "function", "function": {"name": "my_function"}}` forces the model to + # call that tool. + # + # `none` is the default when no tools are present. `auto` is the default if tools + # are present. sig { returns(T.nilable(T.any(Symbol, OpenAI::Models::Chat::ChatCompletionNamedToolChoice))) } def tool_choice end @@ -297,6 +433,9 @@ module OpenAI def tool_choice=(_) end + # A list of tools the model may call. Currently, only functions are supported as a + # tool. Use this to provide a list of functions the model may generate JSON inputs + # for. A max of 128 functions are supported. sig { returns(T.nilable(T::Array[OpenAI::Models::Chat::ChatCompletionTool])) } def tools end @@ -308,6 +447,9 @@ module OpenAI def tools=(_) end + # An integer between 0 and 20 specifying the number of most likely tokens to + # return at each token position, each with an associated log probability. + # `logprobs` must be set to `true` if this parameter is used. sig { returns(T.nilable(Integer)) } def top_logprobs end @@ -316,6 +458,11 @@ module OpenAI def top_logprobs=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. 
sig { returns(T.nilable(Float)) } def top_p end @@ -324,6 +471,9 @@ module OpenAI def top_p=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -332,6 +482,9 @@ module OpenAI def user=(_) end + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions)) } def web_search_options end @@ -481,19 +634,42 @@ module OpenAI def to_hash end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # Deprecated in favor of `tool_choice`. + # + # Controls which (if any) function is called by the model. + # + # `none` means the model will not call a function and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling a + # function. + # + # Specifying a particular function via `{"name": "my_function"}` forces the model + # to call that function. + # + # `none` is the default when no functions are present. `auto` is the default if + # functions are present. class FunctionCall < OpenAI::Union abstract! + # `none` means the model will not call a function and instead generates a message. + # `auto` means the model can pick between generating a message or calling a + # function. class FunctionCallMode < OpenAI::Enum abstract! @@ -508,6 +684,7 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, OpenAI::Models::Chat::ChatCompletionFunctionCallOption]]) } private def variants end @@ -515,6 +692,8 @@ module OpenAI end class Function < OpenAI::BaseModel + # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + # underscores and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -523,6 +702,8 @@ module OpenAI def name=(_) end + # A description of what the function does, used by the model to choose when and + # how to call the function. sig { returns(T.nilable(String)) } def description end @@ -531,6 +712,13 @@ module OpenAI def description=(_) end + # The parameters the function accepts, described as a JSON Schema object. See the + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. + # + # Omitting `parameters` defines a function with an empty parameter list. sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } def parameters end @@ -564,10 +752,21 @@ module OpenAI end end + # An object specifying the format that the model must output. + # + # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + # Outputs which ensures the model will match your supplied JSON schema. 
Learn more + # in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. class ResponseFormat < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -579,6 +778,20 @@ module OpenAI end end + # Specifies the latency tier to use for processing the request. This parameter is + # relevant for customers subscribed to the scale tier service: + # + # - If set to 'auto', and the Project is Scale tier enabled, the system will + # utilize scale tier credits until they are exhausted. + # - If set to 'auto', and the Project is not Scale tier enabled, the request will + # be processed using the default service tier with a lower uptime SLA and no + # latency guarantee. + # - If set to 'default', the request will be processed using the default service + # tier with a lower uptime SLA and no latency guarantee. + # - When not set, the default behavior is 'auto'. + # + # When this parameter is set, the response body will include the `service_tier` + # utilized. class ServiceTier < OpenAI::Enum abstract! @@ -592,12 +805,15 @@ module OpenAI end end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. class Stop < OpenAI::Union abstract! StringArray = T.type_alias { T::Array[String] } class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } private def variants end @@ -605,6 +821,8 @@ module OpenAI end class WebSearchOptions < OpenAI::BaseModel + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. sig { returns(T.nilable(Symbol)) } def search_context_size end @@ -613,6 +831,7 @@ module OpenAI def search_context_size=(_) end + # Approximate location parameters for the search. sig { returns(T.nilable(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation)) } def user_location end @@ -624,6 +843,9 @@ module OpenAI def user_location=(_) end + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). sig do params( search_context_size: Symbol, @@ -646,6 +868,8 @@ module OpenAI def to_hash end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. class SearchContextSize < OpenAI::Enum abstract! @@ -661,6 +885,7 @@ module OpenAI end class UserLocation < OpenAI::BaseModel + # Approximate location parameters for the search. sig { returns(OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate) } def approximate end @@ -672,6 +897,7 @@ module OpenAI def approximate=(_) end + # The type of location approximation. Always `approximate`. sig { returns(Symbol) } def type end @@ -680,6 +906,7 @@ module OpenAI def type=(_) end + # Approximate location parameters for the search. sig do params( approximate: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate, @@ -703,6 +930,7 @@ module OpenAI end class Approximate < OpenAI::BaseModel + # Free text input for the city of the user, e.g. `San Francisco`. 
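
Assembled from the sigs in this diff, a web-search configuration with an approximate user location might look like the sketch below; the location values are placeholders and the context-size symbols are assumptions.

approximate = OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate.new(
  city: "San Francisco",
  country: "US",
  region: "California",
  timezone: "America/Los_Angeles"
)
web_search_options = OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions.new(
  search_context_size: :medium, # assumed symbols: :low, :medium, :high
  user_location: OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation.new(approximate: approximate)
)
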
sig { returns(T.nilable(String)) } def city end @@ -711,6 +939,8 @@ module OpenAI def city=(_) end + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. sig { returns(T.nilable(String)) } def country end @@ -719,6 +949,7 @@ module OpenAI def country=(_) end + # Free text input for the region of the user, e.g. `California`. sig { returns(T.nilable(String)) } def region end @@ -727,6 +958,8 @@ module OpenAI def region=(_) end + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } def timezone end @@ -735,6 +968,7 @@ module OpenAI def timezone=(_) end + # Approximate location parameters for the search. sig do params( city: String, diff --git a/rbi/lib/openai/models/chat/completion_list_params.rbi b/rbi/lib/openai/models/chat/completion_list_params.rbi index 224d64b7..66305617 100644 --- a/rbi/lib/openai/models/chat/completion_list_params.rbi +++ b/rbi/lib/openai/models/chat/completion_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last chat completion from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of Chat Completions to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -23,6 +25,9 @@ module OpenAI def limit=(_) end + # A list of metadata keys to filter the Chat Completions by. Example: + # + # `metadata[key1]=value1&metadata[key2]=value2` sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -31,6 +36,7 @@ module OpenAI def metadata=(_) end + # The model used to generate the Chat Completions. sig { returns(T.nilable(String)) } def model end @@ -39,6 +45,8 @@ module OpenAI def model=(_) end + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. sig { returns(T.nilable(Symbol)) } def order end @@ -77,6 +85,8 @@ module OpenAI def to_hash end + # Sort order for Chat Completions by timestamp. Use `asc` for ascending order or + # `desc` for descending order. Defaults to `asc`. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/chat/completion_update_params.rbi b/rbi/lib/openai/models/chat/completion_update_params.rbi index 7c557df2..0b3aa56f 100644 --- a/rbi/lib/openai/models/chat/completion_update_params.rbi +++ b/rbi/lib/openai/models/chat/completion_update_params.rbi @@ -7,6 +7,12 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. 
sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end diff --git a/rbi/lib/openai/models/chat/completions/message_list_params.rbi b/rbi/lib/openai/models/chat/completions/message_list_params.rbi index b5474b6c..b639be67 100644 --- a/rbi/lib/openai/models/chat/completions/message_list_params.rbi +++ b/rbi/lib/openai/models/chat/completions/message_list_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last message from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -16,6 +17,7 @@ module OpenAI def after=(_) end + # Number of messages to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -24,6 +26,8 @@ module OpenAI def limit=(_) end + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. sig { returns(T.nilable(Symbol)) } def order end @@ -57,6 +61,8 @@ module OpenAI def to_hash end + # Sort order for messages by timestamp. Use `asc` for ascending order or `desc` + # for descending order. Defaults to `asc`. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/comparison_filter.rbi b/rbi/lib/openai/models/comparison_filter.rbi index a44961a9..48f85bd6 100644 --- a/rbi/lib/openai/models/comparison_filter.rbi +++ b/rbi/lib/openai/models/comparison_filter.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ComparisonFilter < OpenAI::BaseModel + # The key to compare against the value. sig { returns(String) } def key end @@ -11,6 +12,14 @@ module OpenAI def key=(_) end + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal sig { returns(Symbol) } def type end @@ -19,6 +28,8 @@ module OpenAI def type=(_) end + # The value to compare against the attribute key; supports string, number, or + # boolean types. sig { returns(T.any(String, Float, T::Boolean)) } def value end @@ -27,6 +38,8 @@ module OpenAI def value=(_) end + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. sig do params(key: String, type: Symbol, value: T.any(String, Float, T::Boolean)).returns(T.attached_class) end @@ -37,6 +50,14 @@ module OpenAI def to_hash end + # Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. + # + # - `eq`: equals + # - `ne`: not equal + # - `gt`: greater than + # - `gte`: greater than or equal + # - `lt`: less than + # - `lte`: less than or equal class Type < OpenAI::Enum abstract! @@ -54,10 +75,13 @@ module OpenAI end end + # The value to compare against the attribute key; supports string, number, or + # boolean types. class Value < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/completion.rbi b/rbi/lib/openai/models/completion.rbi index 53205098..5c9f6e01 100644 --- a/rbi/lib/openai/models/completion.rbi +++ b/rbi/lib/openai/models/completion.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Completion < OpenAI::BaseModel + # A unique identifier for the completion. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The list of completion choices the model generated for the input prompt. 
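
As a sketch of the comparison filter documented earlier in `comparison_filter.rbi`, matching attributes whose `price` is at most 42 takes exactly the three keywords in its sig; the `:lte` symbol is assumed from the operator list.

filter = OpenAI::Models::ComparisonFilter.new(key: "price", type: :lte, value: 42.0)
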
sig { returns(T::Array[OpenAI::Models::CompletionChoice]) } def choices end @@ -19,6 +21,7 @@ module OpenAI def choices=(_) end + # The Unix timestamp (in seconds) of when the completion was created. sig { returns(Integer) } def created end @@ -27,6 +30,7 @@ module OpenAI def created=(_) end + # The model used for completion. sig { returns(String) } def model end @@ -35,6 +39,7 @@ module OpenAI def model=(_) end + # The object type, which is always "text_completion" sig { returns(Symbol) } def object end @@ -43,6 +48,10 @@ module OpenAI def object=(_) end + # This fingerprint represents the backend configuration that the model runs with. + # + # Can be used in conjunction with the `seed` request parameter to understand when + # backend changes have been made that might impact determinism. sig { returns(T.nilable(String)) } def system_fingerprint end @@ -51,6 +60,7 @@ module OpenAI def system_fingerprint=(_) end + # Usage statistics for the completion request. sig { returns(T.nilable(OpenAI::Models::CompletionUsage)) } def usage end @@ -59,6 +69,8 @@ module OpenAI def usage=(_) end + # Represents a completion response from the API. Note: both the streamed and + # non-streamed response objects share the same shape (unlike the chat endpoint). sig do params( id: String, diff --git a/rbi/lib/openai/models/completion_choice.rbi b/rbi/lib/openai/models/completion_choice.rbi index 0e80d12b..860f0c29 100644 --- a/rbi/lib/openai/models/completion_choice.rbi +++ b/rbi/lib/openai/models/completion_choice.rbi @@ -3,6 +3,10 @@ module OpenAI module Models class CompletionChoice < OpenAI::BaseModel + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. sig { returns(Symbol) } def finish_reason end @@ -64,6 +68,10 @@ module OpenAI def to_hash end + # The reason the model stopped generating tokens. This will be `stop` if the model + # hit a natural stop point or a provided stop sequence, `length` if the maximum + # number of tokens specified in the request was reached, or `content_filter` if + # content was omitted due to a flag from our content filters. class FinishReason < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/completion_create_params.rbi b/rbi/lib/openai/models/completion_create_params.rbi index dc1a4be1..ea1a4342 100644 --- a/rbi/lib/openai/models/completion_create_params.rbi +++ b/rbi/lib/openai/models/completion_create_params.rbi @@ -6,6 +6,11 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, Symbol)) } def model end @@ -14,6 +19,12 @@ module OpenAI def model=(_) end + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. 
sig do returns(T.nilable(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]))) end @@ -27,6 +38,15 @@ module OpenAI def prompt=(_) end + # Generates `best_of` completions server-side and returns the "best" (the one with + # the highest log probability per token). Results cannot be streamed. + # + # When used with `n`, `best_of` controls the number of candidate completions and + # `n` specifies how many to return – `best_of` must be greater than `n`. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`. sig { returns(T.nilable(Integer)) } def best_of end @@ -35,6 +55,7 @@ module OpenAI def best_of=(_) end + # Echo back the prompt in addition to the completion. sig { returns(T.nilable(T::Boolean)) } def echo end @@ -43,6 +64,11 @@ module OpenAI def echo=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on their + # existing frequency in the text so far, decreasing the model's likelihood to + # repeat the same line verbatim. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) sig { returns(T.nilable(Float)) } def frequency_penalty end @@ -51,6 +77,18 @@ module OpenAI def frequency_penalty=(_) end + # Modify the likelihood of specified tokens appearing in the completion. + # + # Accepts a JSON object that maps tokens (specified by their token ID in the GPT + # tokenizer) to an associated bias value from -100 to 100. You can use this + # [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. + # Mathematically, the bias is added to the logits generated by the model prior to + # sampling. The exact effect will vary per model, but values between -1 and 1 + # should decrease or increase likelihood of selection; values like -100 or 100 + # should result in a ban or exclusive selection of the relevant token. + # + # As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token + # from being generated. sig { returns(T.nilable(T::Hash[Symbol, Integer])) } def logit_bias end @@ -59,6 +97,12 @@ module OpenAI def logit_bias=(_) end + # Include the log probabilities on the `logprobs` most likely output tokens, as + # well as the chosen tokens. For example, if `logprobs` is 5, the API will return a + # list of the 5 most likely tokens. The API will always return the `logprob` of + # the sampled token, so there may be up to `logprobs+1` elements in the response. + # + # The maximum value for `logprobs` is 5. sig { returns(T.nilable(Integer)) } def logprobs end @@ -67,6 +111,13 @@ module OpenAI def logprobs=(_) end + # The maximum number of [tokens](/tokenizer) that can be generated in the + # completion. + # + # The token count of your prompt plus `max_tokens` cannot exceed the model's + # context length. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. sig { returns(T.nilable(Integer)) } def max_tokens end @@ -75,6 +126,11 @@ module OpenAI def max_tokens=(_) end + # How many completions to generate for each prompt. + # + # **Note:** Because this parameter generates many completions, it can quickly + # consume your token quota. Use carefully and ensure that you have reasonable + # settings for `max_tokens` and `stop`.
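[Editorial note] The `logit_bias` doc string above cites `{"50256": -100}` as a way to ban <|endoftext|>; a minimal usage sketch, assuming a configured `OpenAI::Client` and the `completions` resource from `lib/openai/resources/completions.rb` in this patch (model name and prompt are illustrative).

    # Sketch: request a completion while banning token 50256 (<|endoftext|>),
    # per the example in the doc string above.
    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
    completion = client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: "Say this is a test",
      max_tokens: 16,
      logprobs: 5,                  # 5 is the documented maximum
      logit_bias: {"50256": -100}   # -100 effectively bans the token
    )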
sig { returns(T.nilable(Integer)) } def n end @@ -83,6 +139,11 @@ module OpenAI def n=(_) end + # Number between -2.0 and 2.0. Positive values penalize new tokens based on + # whether they appear in the text so far, increasing the model's likelihood to + # talk about new topics. + # + # [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation) sig { returns(T.nilable(Float)) } def presence_penalty end @@ -91,6 +152,12 @@ module OpenAI def presence_penalty=(_) end + # If specified, our system will make a best effort to sample deterministically, + # such that repeated requests with the same `seed` and parameters should return + # the same result. + # + # Determinism is not guaranteed, and you should refer to the `system_fingerprint` + # response parameter to monitor changes in the backend. sig { returns(T.nilable(Integer)) } def seed end @@ -99,6 +166,8 @@ module OpenAI def seed=(_) end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. sig { returns(T.nilable(T.any(String, T::Array[String]))) } def stop end @@ -116,6 +185,7 @@ module OpenAI def stop=(_) end + # Options for streaming response. Only set this when you set `stream: true`. sig { returns(T.nilable(OpenAI::Models::Chat::ChatCompletionStreamOptions)) } def stream_options end @@ -127,6 +197,9 @@ module OpenAI def stream_options=(_) end + # The suffix that comes after a completion of inserted text. + # + # This parameter is only supported for `gpt-3.5-turbo-instruct`. sig { returns(T.nilable(String)) } def suffix end @@ -135,6 +208,11 @@ module OpenAI def suffix=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. + # + # We generally recommend altering this or `top_p` but not both. sig { returns(T.nilable(Float)) } def temperature end @@ -143,6 +221,11 @@ module OpenAI def temperature=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -151,6 +234,9 @@ module OpenAI def top_p=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -246,9 +332,19 @@ module OpenAI def to_hash end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Preset < OpenAI::Enum abstract! 
@@ -264,12 +360,19 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The prompt(s) to generate completions for, encoded as a string, array of + # strings, array of tokens, or array of token arrays. + # + # Note that <|endoftext|> is the document separator that the model sees during + # training, so if a prompt is not specified the model will generate as if from the + # beginning of a new document. class Prompt < OpenAI::Union abstract! @@ -280,6 +383,7 @@ module OpenAI ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } class << self + # @api private sig do override .returns( @@ -296,12 +400,15 @@ module OpenAI end end + # Up to 4 sequences where the API will stop generating further tokens. The + # returned text will not contain the stop sequence. class Stop < OpenAI::Union abstract! StringArray = T.type_alias { T::Array[String] } class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } private def variants end diff --git a/rbi/lib/openai/models/completion_usage.rbi b/rbi/lib/openai/models/completion_usage.rbi index 36ebba0c..de91da0f 100644 --- a/rbi/lib/openai/models/completion_usage.rbi +++ b/rbi/lib/openai/models/completion_usage.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class CompletionUsage < OpenAI::BaseModel + # Number of tokens in the generated completion. sig { returns(Integer) } def completion_tokens end @@ -11,6 +12,7 @@ module OpenAI def completion_tokens=(_) end + # Number of tokens in the prompt. sig { returns(Integer) } def prompt_tokens end @@ -19,6 +21,7 @@ module OpenAI def prompt_tokens=(_) end + # Total number of tokens used in the request (prompt + completion). sig { returns(Integer) } def total_tokens end @@ -27,6 +30,7 @@ module OpenAI def total_tokens=(_) end + # Breakdown of tokens used in a completion. sig { returns(T.nilable(OpenAI::Models::CompletionUsage::CompletionTokensDetails)) } def completion_tokens_details end @@ -38,6 +42,7 @@ module OpenAI def completion_tokens_details=(_) end + # Breakdown of tokens used in the prompt. sig { returns(T.nilable(OpenAI::Models::CompletionUsage::PromptTokensDetails)) } def prompt_tokens_details end @@ -49,6 +54,7 @@ module OpenAI def prompt_tokens_details=(_) end + # Usage statistics for the completion request. sig do params( completion_tokens: Integer, @@ -84,6 +90,8 @@ module OpenAI end class CompletionTokensDetails < OpenAI::BaseModel + # When using Predicted Outputs, the number of tokens in the prediction that + # appeared in the completion. sig { returns(T.nilable(Integer)) } def accepted_prediction_tokens end @@ -92,6 +100,7 @@ module OpenAI def accepted_prediction_tokens=(_) end + # Audio input tokens generated by the model. sig { returns(T.nilable(Integer)) } def audio_tokens end @@ -100,6 +109,7 @@ module OpenAI def audio_tokens=(_) end + # Tokens generated by the model for reasoning. sig { returns(T.nilable(Integer)) } def reasoning_tokens end @@ -108,6 +118,10 @@ module OpenAI def reasoning_tokens=(_) end + # When using Predicted Outputs, the number of tokens in the prediction that did + # not appear in the completion. However, like reasoning tokens, these tokens are + # still counted in the total completion tokens for purposes of billing, output, + # and context window limits. 
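[Editorial note] The `Prompt` and `Stop` unions above each accept several encodings; a hedged sketch of the accepted shapes, reusing the `client` from the previous sketch (the token IDs are illustrative placeholders, not verified encodings).

    # Each of these satisfies a variant of the `Prompt` union:
    client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: "One")        # String
    client.completions.create(model: "gpt-3.5-turbo-instruct", prompt: %w[One Two])  # array of strings
    client.completions.create(
      model: "gpt-3.5-turbo-instruct",
      prompt: [[3198], [7571]],  # array of token arrays (illustrative IDs)
      stop: ["\n", "END"]        # `Stop` union: a String or up to 4 strings
    )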
sig { returns(T.nilable(Integer)) } def rejected_prediction_tokens end @@ -116,6 +130,7 @@ module OpenAI def rejected_prediction_tokens=(_) end + # Breakdown of tokens used in a completion. sig do params( accepted_prediction_tokens: Integer, @@ -144,6 +159,7 @@ module OpenAI end class PromptTokensDetails < OpenAI::BaseModel + # Audio input tokens present in the prompt. sig { returns(T.nilable(Integer)) } def audio_tokens end @@ -152,6 +168,7 @@ module OpenAI def audio_tokens=(_) end + # Cached tokens present in the prompt. sig { returns(T.nilable(Integer)) } def cached_tokens end @@ -160,6 +177,7 @@ module OpenAI def cached_tokens=(_) end + # Breakdown of tokens used in the prompt. sig { params(audio_tokens: Integer, cached_tokens: Integer).returns(T.attached_class) } def self.new(audio_tokens: nil, cached_tokens: nil) end diff --git a/rbi/lib/openai/models/compound_filter.rbi b/rbi/lib/openai/models/compound_filter.rbi index e8e64bd4..90cc38f4 100644 --- a/rbi/lib/openai/models/compound_filter.rbi +++ b/rbi/lib/openai/models/compound_filter.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class CompoundFilter < OpenAI::BaseModel + # Array of filters to combine. Items can be `ComparisonFilter` or + # `CompoundFilter`. sig { returns(T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)]) } def filters end @@ -14,6 +16,7 @@ module OpenAI def filters=(_) end + # Type of operation: `and` or `or`. sig { returns(Symbol) } def type end @@ -22,6 +25,7 @@ module OpenAI def type=(_) end + # Combine multiple filters using `and` or `or`. sig do params(filters: T::Array[T.any(OpenAI::Models::ComparisonFilter, T.anything)], type: Symbol) .returns(T.attached_class) @@ -33,16 +37,20 @@ module OpenAI def to_hash end + # A filter used to compare a specified attribute key to a given value using a + # defined comparison operation. class Filter < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, T.anything]]) } private def variants end end end + # Type of operation: `and` or `or`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/create_embedding_response.rbi b/rbi/lib/openai/models/create_embedding_response.rbi index 6f823131..c095b791 100644 --- a/rbi/lib/openai/models/create_embedding_response.rbi +++ b/rbi/lib/openai/models/create_embedding_response.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class CreateEmbeddingResponse < OpenAI::BaseModel + # The list of embeddings generated by the model. sig { returns(T::Array[OpenAI::Models::Embedding]) } def data end @@ -11,6 +12,7 @@ module OpenAI def data=(_) end + # The name of the model used to generate the embedding. sig { returns(String) } def model end @@ -19,6 +21,7 @@ module OpenAI def model=(_) end + # The object type, which is always "list". sig { returns(Symbol) } def object end @@ -27,6 +30,7 @@ module OpenAI def object=(_) end + # The usage information for the request. sig { returns(OpenAI::Models::CreateEmbeddingResponse::Usage) } def usage end @@ -65,6 +69,7 @@ module OpenAI end class Usage < OpenAI::BaseModel + # The number of tokens used by the prompt. sig { returns(Integer) } def prompt_tokens end @@ -73,6 +78,7 @@ module OpenAI def prompt_tokens=(_) end + # The total number of tokens used by the request. sig { returns(Integer) } def total_tokens end @@ -81,6 +87,7 @@ module OpenAI def total_tokens=(_) end + # The usage information for the request. 
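[Editorial note] Combining the two filter models documented in this patch, a hypothetical compound filter (attribute names invented), following the `self.new(filters:, type:)` signature shown above.

    # Sketch: match items where price <= 42.0 AND in_stock == true.
    compound = OpenAI::Models::CompoundFilter.new(
      type: :and,  # `Type` enum: :and or :or
      filters: [
        OpenAI::Models::ComparisonFilter.new(key: "price", type: :lte, value: 42.0),
        OpenAI::Models::ComparisonFilter.new(key: "in_stock", type: :eq, value: true)
      ]
    )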
sig { params(prompt_tokens: Integer, total_tokens: Integer).returns(T.attached_class) } def self.new(prompt_tokens:, total_tokens:) end diff --git a/rbi/lib/openai/models/embedding.rbi b/rbi/lib/openai/models/embedding.rbi index 11cc9072..4218f3b9 100644 --- a/rbi/lib/openai/models/embedding.rbi +++ b/rbi/lib/openai/models/embedding.rbi @@ -3,6 +3,9 @@ module OpenAI module Models class Embedding < OpenAI::BaseModel + # The embedding vector, which is a list of floats. The length of the vector depends on + # the model as listed in the + # [embedding guide](https://platform.openai.com/docs/guides/embeddings). sig { returns(T::Array[Float]) } def embedding end @@ -11,6 +14,7 @@ module OpenAI def embedding=(_) end + # The index of the embedding in the list of embeddings. sig { returns(Integer) } def index end @@ -19,6 +23,7 @@ module OpenAI def index=(_) end + # The object type, which is always "embedding". sig { returns(Symbol) } def object end @@ -27,6 +32,7 @@ module OpenAI def object=(_) end + # Represents an embedding vector returned by the embedding endpoint. sig { params(embedding: T::Array[Float], index: Integer, object: Symbol).returns(T.attached_class) } def self.new(embedding:, index:, object: :embedding) end diff --git a/rbi/lib/openai/models/embedding_create_params.rbi b/rbi/lib/openai/models/embedding_create_params.rbi index 2a08c856..bc012b2b 100644 --- a/rbi/lib/openai/models/embedding_create_params.rbi +++ b/rbi/lib/openai/models/embedding_create_params.rbi @@ -6,6 +6,14 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Input text to embed, encoded as a string or array of tokens. To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. sig { returns(T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]])) } def input end @@ -17,6 +25,11 @@ module OpenAI def input=(_) end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. sig { returns(T.any(String, Symbol)) } def model end @@ -25,6 +38,8 @@ module OpenAI def model=(_) end + # The number of dimensions the resulting output embeddings should have. Only + # supported in `text-embedding-3` and later models. sig { returns(T.nilable(Integer)) } def dimensions end @@ -33,6 +48,8 @@ module OpenAI def dimensions=(_) end + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). sig { returns(T.nilable(Symbol)) } def encoding_format end @@ -41,6 +58,9 @@ module OpenAI def encoding_format=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -79,6 +99,14 @@ module OpenAI def to_hash end + # Input text to embed, encoded as a string or array of tokens.
To embed multiple + # inputs in a single request, pass an array of strings or array of token arrays. + # The input must not exceed the max input tokens for the model (8192 tokens for + # `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + # dimensions or less. + # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) + # for counting tokens. Some models may also impose a limit on total number of + # tokens summed across inputs. class Input < OpenAI::Union abstract! @@ -89,6 +117,7 @@ module OpenAI ArrayOfToken2DArray = T.type_alias { T::Array[T::Array[Integer]] } class << self + # @api private sig do override .returns( @@ -105,16 +134,24 @@ module OpenAI end end + # ID of the model to use. You can use the + # [List models](https://platform.openai.com/docs/api-reference/models/list) API to + # see all of your available models, or see our + # [Model overview](https://platform.openai.com/docs/models) for descriptions of + # them. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The format to return the embeddings in. Can be either `float` or + # [`base64`](https://pypi.org/project/pybase64/). class EncodingFormat < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/file_chunking_strategy.rbi b/rbi/lib/openai/models/file_chunking_strategy.rbi index b0b4a1b0..a7159b15 100644 --- a/rbi/lib/openai/models/file_chunking_strategy.rbi +++ b/rbi/lib/openai/models/file_chunking_strategy.rbi @@ -2,10 +2,12 @@ module OpenAI module Models + # The strategy used to chunk the file. class FileChunkingStrategy < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/file_chunking_strategy_param.rbi b/rbi/lib/openai/models/file_chunking_strategy_param.rbi index 9a360f39..ccabae20 100644 --- a/rbi/lib/openai/models/file_chunking_strategy_param.rbi +++ b/rbi/lib/openai/models/file_chunking_strategy_param.rbi @@ -2,10 +2,13 @@ module OpenAI module Models + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. class FileChunkingStrategyParam < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/file_create_params.rbi b/rbi/lib/openai/models/file_create_params.rbi index aa9afe8a..98619f88 100644 --- a/rbi/lib/openai/models/file_create_params.rbi +++ b/rbi/lib/openai/models/file_create_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The File object (not file name) to be uploaded. sig { returns(T.any(IO, StringIO)) } def file end @@ -14,6 +15,10 @@ module OpenAI def file=(_) end + # The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets sig { returns(Symbol) } def purpose end diff --git a/rbi/lib/openai/models/file_list_params.rbi b/rbi/lib/openai/models/file_list_params.rbi index 2da43a92..e5f795dc 100644 --- a/rbi/lib/openai/models/file_list_params.rbi +++ b/rbi/lib/openai/models/file_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,8 @@ module OpenAI def after=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 10,000, and the default is 10,000. sig { returns(T.nilable(Integer)) } def limit end @@ -22,6 +28,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -30,6 +38,7 @@ module OpenAI def order=(_) end + # Only return files with the given purpose. sig { returns(T.nilable(String)) } def purpose end @@ -66,6 +75,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/file_object.rbi b/rbi/lib/openai/models/file_object.rbi index 91a4d778..6659dc79 100644 --- a/rbi/lib/openai/models/file_object.rbi +++ b/rbi/lib/openai/models/file_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class FileObject < OpenAI::BaseModel + # The file identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The size of the file, in bytes. sig { returns(Integer) } def bytes end @@ -19,6 +21,7 @@ module OpenAI def bytes=(_) end + # The Unix timestamp (in seconds) for when the file was created. sig { returns(Integer) } def created_at end @@ -27,6 +30,7 @@ module OpenAI def created_at=(_) end + # The name of the file. sig { returns(String) } def filename end @@ -35,6 +39,7 @@ module OpenAI def filename=(_) end + # The object type, which is always `file`. sig { returns(Symbol) } def object end @@ -43,6 +48,9 @@ module OpenAI def object=(_) end + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. sig { returns(Symbol) } def purpose end @@ -51,6 +59,8 @@ module OpenAI def purpose=(_) end + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. sig { returns(Symbol) } def status end @@ -59,6 +69,7 @@ module OpenAI def status=(_) end + # The Unix timestamp (in seconds) for when the file will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -67,6 +78,8 @@ module OpenAI def expires_at=(_) end + # Deprecated. For details on why a fine-tuning training file failed validation, + # see the `error` field on `fine_tuning.job`. 
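[Editorial note] Tying together the `EmbeddingCreateParams` fields documented earlier (input encodings, `dimensions`, `encoding_format`), a minimal sketch assuming the same hypothetical `client`; the resource method mirrors `lib/openai/resources/embeddings.rb` in this patch.

    # Sketch: embed two strings at a reduced dimension count. `dimensions` is
    # only supported on `text-embedding-3` and later models, per the docs above.
    response = client.embeddings.create(
      model: "text-embedding-3-small",
      input: ["first document", "second document"],  # any `Input` union variant works
      dimensions: 256,
      encoding_format: :float  # :float or :base64
    )
    vector = response.data.first.embedding  # T::Array[Float]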
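[Editorial note] And for the file endpoints documented just above, a hypothetical upload-then-list round trip (the file name is invented; method names follow `lib/openai/resources/files.rb`).

    # Sketch: upload a JSONL file for fine-tuning, then page through files.
    file = client.files.create(
      file: File.open("training.jsonl"),  # an IO, per the `file` param
      purpose: :"fine-tune"               # see `FilePurpose` for the enum values
    )
    client.files.list(purpose: "fine-tune", limit: 100, order: :desc)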
sig { returns(T.nilable(String)) } def status_details end @@ -75,6 +88,7 @@ module OpenAI def status_details=(_) end + # The `File` object represents a document that has been uploaded to OpenAI. sig do params( id: String, @@ -121,6 +135,9 @@ module OpenAI def to_hash end + # The intended purpose of the file. Supported values are `assistants`, + # `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` + # and `vision`. class Purpose < OpenAI::Enum abstract! @@ -139,6 +156,8 @@ module OpenAI end end + # Deprecated. The current status of the file, which can be either `uploaded`, + # `processed`, or `error`. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/file_purpose.rbi b/rbi/lib/openai/models/file_purpose.rbi index a30abf94..edc943c5 100644 --- a/rbi/lib/openai/models/file_purpose.rbi +++ b/rbi/lib/openai/models/file_purpose.rbi @@ -2,6 +2,10 @@ module OpenAI module Models + # The intended purpose of the uploaded file. One of: - `assistants`: Used in the + # Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + # fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + # Flexible file type for any purpose - `evals`: Used for eval data sets class FilePurpose < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi index ece16391..b6221aef 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJob < OpenAI::BaseModel + # The object identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,8 @@ module OpenAI def created_at=(_) end + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Error)) } def error end @@ -33,6 +37,8 @@ module OpenAI def error=(_) end + # The name of the fine-tuned model that is being created. The value will be null + # if the fine-tuning job is still running. sig { returns(T.nilable(String)) } def fine_tuned_model end @@ -41,6 +47,8 @@ module OpenAI def fine_tuned_model=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was finished. The + # value will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } def finished_at end @@ -49,6 +57,8 @@ module OpenAI def finished_at=(_) end + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. sig { returns(OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters) } def hyperparameters end @@ -60,6 +70,7 @@ module OpenAI def hyperparameters=(_) end + # The base model that is being fine-tuned. sig { returns(String) } def model end @@ -68,6 +79,7 @@ module OpenAI def model=(_) end + # The object type, which is always "fine_tuning.job". sig { returns(Symbol) } def object end @@ -76,6 +88,7 @@ module OpenAI def object=(_) end + # The organization that owns the fine-tuning job. sig { returns(String) } def organization_id end @@ -84,6 +97,9 @@ module OpenAI def organization_id=(_) end + # The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the + # results with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T::Array[String]) } def result_files end @@ -92,6 +108,7 @@ module OpenAI def result_files=(_) end + # The seed used for the fine-tuning job. sig { returns(Integer) } def seed end @@ -100,6 +117,8 @@ module OpenAI def seed=(_) end + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. sig { returns(Symbol) } def status end @@ -108,6 +127,8 @@ module OpenAI def status=(_) end + # The total number of billable tokens processed by this fine-tuning job. The value + # will be null if the fine-tuning job is still running. sig { returns(T.nilable(Integer)) } def trained_tokens end @@ -116,6 +137,8 @@ module OpenAI def trained_tokens=(_) end + # The file ID used for training. You can retrieve the training data with the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(String) } def training_file end @@ -124,6 +147,9 @@ module OpenAI def training_file=(_) end + # The file ID used for validation. You can retrieve the validation results with + # the + # [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). sig { returns(T.nilable(String)) } def validation_file end @@ -132,6 +158,8 @@ module OpenAI def validation_file=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job is estimated to + # finish. The value will be null if the fine-tuning job is not running. sig { returns(T.nilable(Integer)) } def estimated_finish end @@ -140,6 +168,7 @@ module OpenAI def estimated_finish=(_) end + # A list of integrations to enable for this fine-tuning job. sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject])) } def integrations end @@ -151,6 +180,12 @@ module OpenAI def integrations=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -159,6 +194,7 @@ module OpenAI def metadata=(_) end + # The method used for fine-tuning. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method)) } def method_ end @@ -170,6 +206,8 @@ module OpenAI def method_=(_) end + # The `fine_tuning.job` object represents a fine-tuning job that has been created + # through the API. sig do params( id: String, @@ -247,6 +285,7 @@ module OpenAI end class Error < OpenAI::BaseModel + # A machine-readable error code. sig { returns(String) } def code end @@ -255,6 +294,7 @@ module OpenAI def code=(_) end + # A human-readable error message. sig { returns(String) } def message end @@ -263,6 +303,8 @@ module OpenAI def message=(_) end + # The parameter that was invalid, usually `training_file` or `validation_file`. + # This field will be null if the failure was not parameter-specific. sig { returns(T.nilable(String)) } def param end @@ -271,6 +313,8 @@ module OpenAI def param=(_) end + # For fine-tuning jobs that have `failed`, this will contain more information on + # the cause of the failure. 
sig { params(code: String, message: String, param: T.nilable(String)).returns(T.attached_class) } def self.new(code:, message:, param:) end @@ -281,6 +325,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -289,6 +335,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -297,6 +345,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -305,6 +355,8 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. This value will only be + # returned when running `supervised` jobs. sig do params( batch_size: T.any(Symbol, Integer), @@ -329,30 +381,39 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -360,6 +421,8 @@ module OpenAI end end + # The current status of the fine-tuning job, which can be either + # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. class Status < OpenAI::Enum abstract! @@ -378,6 +441,7 @@ module OpenAI end class Method < OpenAI::BaseModel + # Configuration for the DPO fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo)) } def dpo end @@ -389,6 +453,7 @@ module OpenAI def dpo=(_) end + # Configuration for the supervised fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised)) } def supervised end @@ -400,6 +465,7 @@ module OpenAI def supervised=(_) end + # The type of method. Is either `supervised` or `dpo`. sig { returns(T.nilable(Symbol)) } def type end @@ -408,6 +474,7 @@ module OpenAI def type=(_) end + # The method used for fine-tuning. sig do params( dpo: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo, @@ -433,6 +500,7 @@ module OpenAI end class Dpo < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters)) } def hyperparameters end @@ -444,6 +512,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the DPO fine-tuning method. 
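[Editorial note] Each hyperparameter above is a Symbol-or-numeric union, which is how the API's "auto" sentinel coexists with explicit values; a hedged sketch of constructing the model class shown in this hunk (the chosen values are arbitrary).

    # Sketch: :auto (the Symbol variant) or an explicit number (the numeric
    # variant) are both valid for each hyperparameter union documented above.
    hyperparameters = OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters.new(
      batch_size: :auto,              # Symbol variant
      learning_rate_multiplier: 0.1,  # Float variant
      n_epochs: 3                     # Integer variant
    )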
sig do params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Dpo::Hyperparameters) .returns(T.attached_class) @@ -459,6 +528,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -467,6 +538,8 @@ module OpenAI def batch_size=(_) end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } def beta end @@ -475,6 +548,8 @@ module OpenAI def beta=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -483,6 +558,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -491,6 +568,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -517,40 +595,52 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. class Beta < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -560,6 +650,7 @@ module OpenAI end class Supervised < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters)) } def hyperparameters end @@ -571,6 +662,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the supervised fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::FineTuningJob::Method::Supervised::Hyperparameters) .returns(T.attached_class) @@ -586,6 +678,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -594,6 +688,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. 
A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -602,6 +698,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -610,6 +708,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -634,30 +733,39 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -666,6 +774,7 @@ module OpenAI end end + # The type of method. Is either `supervised` or `dpo`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi index 15ce95c4..3f5027cb 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_event.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobEvent < OpenAI::BaseModel + # The object identifier. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the fine-tuning job was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,7 @@ module OpenAI def created_at=(_) end + # The log level of the event. sig { returns(Symbol) } def level end @@ -30,6 +33,7 @@ module OpenAI def level=(_) end + # The message of the event. sig { returns(String) } def message end @@ -38,6 +42,7 @@ module OpenAI def message=(_) end + # The object type, which is always "fine_tuning.job.event". sig { returns(Symbol) } def object end @@ -46,6 +51,7 @@ module OpenAI def object=(_) end + # The data associated with the event. sig { returns(T.nilable(T.anything)) } def data end @@ -54,6 +60,7 @@ module OpenAI def data=(_) end + # The type of event. sig { returns(T.nilable(Symbol)) } def type end @@ -62,6 +69,7 @@ module OpenAI def type=(_) end + # Fine-tuning job event object sig do params( id: String, @@ -94,6 +102,7 @@ module OpenAI def to_hash end + # The log level of the event. class Level < OpenAI::Enum abstract! @@ -108,6 +117,7 @@ module OpenAI end end + # The type of event. class Type < OpenAI::Enum abstract! 
diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi index 37b15696..f3899e11 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobWandbIntegration < OpenAI::BaseModel + # The name of the project that the new run will be created under. sig { returns(String) } def project end @@ -14,6 +15,9 @@ module OpenAI def project=(_) end + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } def entity end @@ -22,6 +26,8 @@ module OpenAI def entity=(_) end + # A display name to set for the run. If not set, we will use the Job ID as the + # name. sig { returns(T.nilable(String)) } def name end @@ -30,6 +36,9 @@ module OpenAI def name=(_) end + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } def tags end @@ -38,6 +47,10 @@ module OpenAI def tags=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig do params(project: String, entity: T.nilable(String), name: T.nilable(String), tags: T::Array[String]) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi index 148ab9c5..f6af3f86 100644 --- a/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi +++ b/rbi/lib/openai/models/fine_tuning/fine_tuning_job_wandb_integration_object.rbi @@ -6,6 +6,7 @@ module OpenAI module FineTuning class FineTuningJobWandbIntegrationObject < OpenAI::BaseModel + # The type of the integration being enabled for the fine-tuning job. sig { returns(Symbol) } def type end @@ -14,6 +15,10 @@ module OpenAI def type=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig { returns(OpenAI::Models::FineTuning::FineTuningJobWandbIntegration) } def wandb end diff --git a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi index 269a125d..9747dd2a 100644 --- a/rbi/lib/openai/models/fine_tuning/job_create_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_create_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
sig { returns(T.any(String, Symbol)) } def model end @@ -15,6 +17,23 @@ module OpenAI def model=(_) end + # The ID of an uploaded file that contains training data. + # + # See [upload file](https://platform.openai.com/docs/api-reference/files/create) + # for how to upload a file. + # + # Your dataset must be formatted as a JSONL file. Additionally, you must upload + # your file with the purpose `fine-tune`. + # + # The contents of the file should differ depending on whether the model uses the + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # format, or whether the fine-tuning method uses the + # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) + # format. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details. sig { returns(String) } def training_file end @@ -23,6 +42,8 @@ module OpenAI def training_file=(_) end + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters)) } def hyperparameters end @@ -34,6 +55,7 @@ module OpenAI def hyperparameters=(_) end + # A list of integrations to enable for your fine-tuning job. sig { returns(T.nilable(T::Array[OpenAI::Models::FineTuning::JobCreateParams::Integration])) } def integrations end @@ -45,6 +67,12 @@ module OpenAI def integrations=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -53,6 +81,7 @@ module OpenAI def metadata=(_) end + # The method used for fine-tuning. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method)) } def method_ end @@ -64,6 +93,9 @@ module OpenAI def method_=(_) end + # The seed controls the reproducibility of the job. Passing in the same seed and + # job parameters should produce the same results, but may differ in rare cases. If + # a seed is not specified, one will be generated for you. sig { returns(T.nilable(Integer)) } def seed end @@ -72,6 +104,11 @@ module OpenAI def seed=(_) end + # A string of up to 64 characters that will be added to your fine-tuned model + # name. + # + # For example, a `suffix` of "custom-model-name" would produce a model name like + # `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`. sig { returns(T.nilable(String)) } def suffix end @@ -80,6 +117,18 @@ module OpenAI def suffix=(_) end + # The ID of an uploaded file that contains validation data. + # + # If you provide this file, the data is used to generate validation metrics + # periodically during fine-tuning. These metrics can be viewed in the fine-tuning + # results file. The same data should not be present in both train and validation + # files. + # + # Your dataset must be formatted as a JSONL file. You must upload your file with + # the purpose `fine-tune`. + # + # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) + # for more details.
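[Editorial note] Putting the creation parameters above together, a minimal sketch assuming a configured `client`; the file IDs are placeholders, and the resource path follows `lib/openai/resources/fine_tuning/jobs.rb` in this patch.

    # Sketch: create a supervised fine-tuning job with a validation file.
    job = client.fine_tuning.jobs.create(
      model: "gpt-4o-mini-2024-07-18",
      training_file: "file-abc123",    # placeholder ID; uploaded with purpose `fine-tune`
      validation_file: "file-def456",  # placeholder ID
      suffix: "custom-model-name",     # yields ft:...:custom-model-name:...
      seed: 42,
      metadata: {team: "research"}     # up to 16 key-value pairs
    )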
sig { returns(T.nilable(String)) } def validation_file end @@ -137,9 +186,13 @@ module OpenAI def to_hash end + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). class Model < OpenAI::Union abstract! + # The name of the model to fine-tune. You can select one of the + # [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned). class Preset < OpenAI::Enum abstract! @@ -156,6 +209,7 @@ module OpenAI end class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end @@ -163,6 +217,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -171,6 +227,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -179,6 +237,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -187,6 +247,8 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. This value is now deprecated + # in favor of `method`, and should be passed in under the `method` parameter. sig do params( batch_size: T.any(Symbol, Integer), @@ -211,30 +273,39 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -243,6 +314,8 @@ module OpenAI end class Integration < OpenAI::BaseModel + # The type of integration to enable. Currently, only "wandb" (Weights and Biases) + # is supported. sig { returns(Symbol) } def type end @@ -251,6 +324,10 @@ module OpenAI def type=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig { returns(OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb) } def wandb end @@ -274,6 +351,7 @@ module OpenAI end class Wandb < OpenAI::BaseModel + # The name of the project that the new run will be created under. 
sig { returns(String) } def project end @@ -282,6 +360,9 @@ module OpenAI def project=(_) end + # The entity to use for the run. This allows you to set the team or username of + # the WandB user that you would like associated with the run. If not set, the + # default entity for the registered WandB API key is used. sig { returns(T.nilable(String)) } def entity end @@ -290,6 +371,8 @@ module OpenAI def entity=(_) end + # A display name to set for the run. If not set, we will use the Job ID as the + # name. sig { returns(T.nilable(String)) } def name end @@ -298,6 +381,9 @@ module OpenAI def name=(_) end + # A list of tags to be attached to the newly created run. These tags are passed + # through directly to WandB. Some default tags are generated by OpenAI: + # "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". sig { returns(T.nilable(T::Array[String])) } def tags end @@ -306,6 +392,10 @@ module OpenAI def tags=(_) end + # The settings for your integration with Weights and Biases. This payload + # specifies the project that metrics will be sent to. Optionally, you can set an + # explicit display name for your run, add tags to your run, and set a default + # entity (team, username, etc) to be associated with your run. sig do params( project: String, @@ -333,6 +423,7 @@ module OpenAI end class Method < OpenAI::BaseModel + # Configuration for the DPO fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo)) } def dpo end @@ -344,6 +435,7 @@ module OpenAI def dpo=(_) end + # Configuration for the supervised fine-tuning method. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised)) } def supervised end @@ -355,6 +447,7 @@ module OpenAI def supervised=(_) end + # The type of method. Is either `supervised` or `dpo`. sig { returns(T.nilable(Symbol)) } def type end @@ -363,6 +456,7 @@ module OpenAI def type=(_) end + # The method used for fine-tuning. sig do params( dpo: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo, @@ -388,6 +482,7 @@ module OpenAI end class Dpo < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters)) } def hyperparameters end @@ -399,6 +494,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the DPO fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Dpo::Hyperparameters) .returns(T.attached_class) @@ -414,6 +510,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -422,6 +520,8 @@ module OpenAI def batch_size=(_) end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. sig { returns(T.nilable(T.any(Symbol, Float))) } def beta end @@ -430,6 +530,8 @@ module OpenAI def beta=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -438,6 +540,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. 
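[Editorial note] A sketch of enabling the Weights and Biases integration described above when creating a job, using the `JobCreateParams::Integration` classes from this hunk (project, run name, and tag values are invented).

    # Sketch: attach a W&B integration to a fine-tuning job request.
    integration = OpenAI::Models::FineTuning::JobCreateParams::Integration.new(
      type: :wandb,  # currently the only documented integration type
      wandb: OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb.new(
        project: "my-wandb-project",
        name: "my-run",        # optional display name; defaults to the Job ID
        tags: ["custom-tag"]   # merged with OpenAI's default tags
      )
    )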
sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -446,6 +550,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -472,40 +577,52 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # The beta value for the DPO method. A higher beta value will increase the weight + # of the penalty between the policy and reference model. class Beta < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -515,6 +632,7 @@ module OpenAI end class Supervised < OpenAI::BaseModel + # The hyperparameters used for the fine-tuning job. sig { returns(T.nilable(OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters)) } def hyperparameters end @@ -526,6 +644,7 @@ module OpenAI def hyperparameters=(_) end + # Configuration for the supervised fine-tuning method. sig do params(hyperparameters: OpenAI::Models::FineTuning::JobCreateParams::Method::Supervised::Hyperparameters) .returns(T.attached_class) @@ -543,6 +662,8 @@ module OpenAI end class Hyperparameters < OpenAI::BaseModel + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. sig { returns(T.nilable(T.any(Symbol, Integer))) } def batch_size end @@ -551,6 +672,8 @@ module OpenAI def batch_size=(_) end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. sig { returns(T.nilable(T.any(Symbol, Float))) } def learning_rate_multiplier end @@ -559,6 +682,8 @@ module OpenAI def learning_rate_multiplier=(_) end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. sig { returns(T.nilable(T.any(Symbol, Integer))) } def n_epochs end @@ -567,6 +692,7 @@ module OpenAI def n_epochs=(_) end + # The hyperparameters used for the fine-tuning job. sig do params( batch_size: T.any(Symbol, Integer), @@ -591,30 +717,39 @@ module OpenAI def to_hash end + # Number of examples in each batch. A larger batch size means that model + # parameters are updated less frequently, but with lower variance. class BatchSize < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end end end + # Scaling factor for the learning rate. A smaller learning rate may be useful to + # avoid overfitting. class LearningRateMultiplier < OpenAI::Union abstract! 
class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Float]]) } private def variants end end end + # The number of epochs to train the model for. An epoch refers to one full cycle + # through the training dataset. class NEpochs < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, Symbol], [NilClass, Integer]]) } private def variants end @@ -623,6 +758,7 @@ module OpenAI end end + # The type of method. Is either `supervised` or `dpo`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi index ba90b85e..fc4ede17 100644 --- a/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_list_events_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last event from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of events to retrieve. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi index e217f2fe..6e667d46 100644 --- a/rbi/lib/openai/models/fine_tuning/job_list_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/job_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last job from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # Number of fine-tuning jobs to retrieve. sig { returns(T.nilable(Integer)) } def limit end @@ -23,6 +25,8 @@ module OpenAI def limit=(_) end + # Optional metadata filter. To filter, use the syntax `metadata[k]=v`. + # Alternatively, set `metadata=null` to indicate no metadata. sig { returns(T.nilable(T::Hash[Symbol, String])) } def metadata end diff --git a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi b/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi index a032fba4..27b1407f 100644 --- a/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi +++ b/rbi/lib/openai/models/fine_tuning/jobs/checkpoint_list_params.rbi @@ -8,6 +8,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Identifier for the last checkpoint ID from the previous pagination request. sig { returns(T.nilable(String)) } def after end @@ -16,6 +17,7 @@ module OpenAI def after=(_) end + # Number of checkpoints to retrieve. sig { returns(T.nilable(Integer)) } def limit end diff --git a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi index 077add44..54fe9d93 100644 --- a/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi +++ b/rbi/lib/openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint.rbi @@ -5,6 +5,7 @@ module OpenAI module FineTuning module Jobs class FineTuningJobCheckpoint < OpenAI::BaseModel + # The checkpoint identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -13,6 +14,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the checkpoint was created. 
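These `after`/`limit` parameters follow the SDK's cursor pagination shape. A sketch, reusing the `client` from the earlier sketch and assuming `list`/`list_events` methods matching the params classes above (the job ID is a placeholder):

# First page of fine-tuning jobs; `after:` would take the last job ID already seen.
page = client.fine_tuning.jobs.list(limit: 10)
page.each { |job| puts job.id } # assuming pages are enumerable, as with CursorPage

# Events for a single job use the same cursor shape.
client.fine_tuning.jobs.list_events("ftjob-abc123", limit: 5).each do |event|
  puts event.message
end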
sig { returns(Integer) } def created_at end @@ -21,6 +23,7 @@ module OpenAI def created_at=(_) end + # The name of the fine-tuned checkpoint model that is created. sig { returns(String) } def fine_tuned_model_checkpoint end @@ -29,6 +32,7 @@ module OpenAI def fine_tuned_model_checkpoint=(_) end + # The name of the fine-tuning job that this checkpoint was created from. sig { returns(String) } def fine_tuning_job_id end @@ -37,6 +41,7 @@ module OpenAI def fine_tuning_job_id=(_) end + # Metrics at the step number during the fine-tuning job. sig { returns(OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics) } def metrics end @@ -48,6 +53,7 @@ module OpenAI def metrics=(_) end + # The object type, which is always "fine_tuning.job.checkpoint". sig { returns(Symbol) } def object end @@ -56,6 +62,7 @@ module OpenAI def object=(_) end + # The step number that the checkpoint was created at. sig { returns(Integer) } def step_number end @@ -64,6 +71,8 @@ module OpenAI def step_number=(_) end + # The `fine_tuning.job.checkpoint` object represents a model checkpoint for a + # fine-tuning job that is ready to use. sig do params( id: String, @@ -161,6 +170,7 @@ module OpenAI def valid_mean_token_accuracy=(_) end + # Metrics at the step number during the fine-tuning job. sig do params( full_valid_loss: Float, diff --git a/rbi/lib/openai/models/function_definition.rbi b/rbi/lib/openai/models/function_definition.rbi index f4fe38c3..75a4000f 100644 --- a/rbi/lib/openai/models/function_definition.rbi +++ b/rbi/lib/openai/models/function_definition.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class FunctionDefinition < OpenAI::BaseModel + # The name of the function to be called. Must be a-z, A-Z, 0-9, or contain + # underscores and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -11,6 +13,8 @@ module OpenAI def name=(_) end + # A description of what the function does, used by the model to choose when and + # how to call the function. sig { returns(T.nilable(String)) } def description end @@ -19,6 +23,13 @@ module OpenAI def description=(_) end + # The parameters the function accepts, described as a JSON Schema object. See the + # [guide](https://platform.openai.com/docs/guides/function-calling) for examples, + # and the + # [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + # documentation about the format. + # + # Omitting `parameters` defines a function with an empty parameter list. sig { returns(T.nilable(OpenAI::Models::FunctionParameters)) } def parameters end @@ -27,6 +38,11 @@ module OpenAI def parameters=(_) end + # Whether to enable strict schema adherence when generating the function call. If + # set to true, the model will follow the exact schema defined in the `parameters` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn + # more about Structured Outputs in the + # [function calling guide](docs/guides/function-calling). sig { returns(T.nilable(T::Boolean)) } def strict end diff --git a/rbi/lib/openai/models/image.rbi b/rbi/lib/openai/models/image.rbi index 81607a7a..3c0fc4fa 100644 --- a/rbi/lib/openai/models/image.rbi +++ b/rbi/lib/openai/models/image.rbi @@ -3,6 +3,8 @@ module OpenAI module Models class Image < OpenAI::BaseModel + # The base64-encoded JSON of the generated image, if `response_format` is + # `b64_json`.
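As a concrete reading of the `FunctionDefinition` fields above, here is a sketch of a definition with a JSON Schema `parameters` hash and strict mode enabled (the function name and schema are illustrative, not canonical):

get_weather = OpenAI::Models::FunctionDefinition.new(
  name: "get_weather", # a-z, A-Z, 0-9, underscores and dashes; max length 64
  description: "Look up the current weather for a given city.",
  parameters: {
    type: "object",
    properties: {city: {type: "string"}},
    required: ["city"],
    additionalProperties: false
  },
  strict: true # model output must match the schema exactly
)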
sig { returns(T.nilable(String)) } def b64_json end @@ -11,6 +13,8 @@ module OpenAI def b64_json=(_) end + # The prompt that was used to generate the image, if there was any revision to the + # prompt. sig { returns(T.nilable(String)) } def revised_prompt end @@ -19,6 +23,7 @@ module OpenAI def revised_prompt=(_) end + # The URL of the generated image, if `response_format` is `url` (default). sig { returns(T.nilable(String)) } def url end @@ -27,6 +32,7 @@ module OpenAI def url=(_) end + # Represents the url or the content of an image generated by the OpenAI API. sig { params(b64_json: String, revised_prompt: String, url: String).returns(T.attached_class) } def self.new(b64_json: nil, revised_prompt: nil, url: nil) end diff --git a/rbi/lib/openai/models/image_create_variation_params.rbi b/rbi/lib/openai/models/image_create_variation_params.rbi index 1d40fb57..85b77ba9 100644 --- a/rbi/lib/openai/models/image_create_variation_params.rbi +++ b/rbi/lib/openai/models/image_create_variation_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The image to use as the basis for the variation(s). Must be a valid PNG file, + # less than 4MB, and square. sig { returns(T.any(IO, StringIO)) } def image end @@ -14,6 +16,8 @@ module OpenAI def image=(_) end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -22,6 +26,8 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. sig { returns(T.nilable(Integer)) } def n end @@ -30,6 +36,9 @@ module OpenAI def n=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -38,6 +47,8 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. sig { returns(T.nilable(Symbol)) } def size end @@ -46,6 +57,9 @@ module OpenAI def size=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -86,16 +100,22 @@ module OpenAI def to_hash end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! @@ -109,6 +129,8 @@ module OpenAI end end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. class Size < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/image_edit_params.rbi b/rbi/lib/openai/models/image_edit_params.rbi index fb3b78a8..e3241afe 100644 --- a/rbi/lib/openai/models/image_edit_params.rbi +++ b/rbi/lib/openai/models/image_edit_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The image to edit. 
Must be a valid PNG file, less than 4MB, and square. If mask + # is not provided, image must have transparency, which will be used as the mask. sig { returns(T.any(IO, StringIO)) } def image end @@ -14,6 +16,8 @@ module OpenAI def image=(_) end + # A text description of the desired image(s). The maximum length is 1000 + # characters. sig { returns(String) } def prompt end @@ -22,6 +26,9 @@ module OpenAI def prompt=(_) end + # An additional image whose fully transparent areas (e.g. where alpha is zero) + # indicate where `image` should be edited. Must be a valid PNG file, less than + # 4MB, and have the same dimensions as `image`. sig { returns(T.nilable(T.any(IO, StringIO))) } def mask end @@ -30,6 +37,8 @@ module OpenAI def mask=(_) end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -38,6 +47,7 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. sig { returns(T.nilable(Integer)) } def n end @@ -46,6 +56,9 @@ module OpenAI def n=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -54,6 +67,8 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. sig { returns(T.nilable(Symbol)) } def size end @@ -62,6 +77,9 @@ module OpenAI def size=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -116,16 +134,22 @@ module OpenAI def to_hash end + # The model to use for image generation. Only `dall-e-2` is supported at this + # time. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! @@ -139,6 +163,8 @@ module OpenAI end end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024`. class Size < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/image_generate_params.rbi b/rbi/lib/openai/models/image_generate_params.rbi index c5e39887..32f55ca9 100644 --- a/rbi/lib/openai/models/image_generate_params.rbi +++ b/rbi/lib/openai/models/image_generate_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A text description of the desired image(s). The maximum length is 1000 + # characters for `dall-e-2` and 4000 characters for `dall-e-3`. sig { returns(String) } def prompt end @@ -14,6 +16,7 @@ module OpenAI def prompt=(_) end + # The model to use for image generation. sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -22,6 +25,8 @@ module OpenAI def model=(_) end + # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + # `n=1` is supported. sig { returns(T.nilable(Integer)) } def n end @@ -30,6 +35,9 @@ module OpenAI def n=(_) end + # The quality of the image that will be generated. 
`hd` creates images with finer + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. sig { returns(T.nilable(Symbol)) } def quality end @@ -38,6 +46,9 @@ module OpenAI def quality=(_) end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. sig { returns(T.nilable(Symbol)) } def response_format end @@ -46,6 +57,9 @@ module OpenAI def response_format=(_) end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. sig { returns(T.nilable(Symbol)) } def size end @@ -54,6 +68,10 @@ module OpenAI def size=(_) end + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. sig { returns(T.nilable(Symbol)) } def style end @@ -62,6 +80,9 @@ module OpenAI def style=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -116,16 +137,21 @@ module OpenAI def to_hash end + # The model to use for image generation. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # The quality of the image that will be generated. `hd` creates images with finer + # details and greater consistency across the image. This param is only supported + # for `dall-e-3`. class Quality < OpenAI::Enum abstract! @@ -139,6 +165,9 @@ module OpenAI end end + # The format in which the generated images are returned. Must be one of `url` or + # `b64_json`. URLs are only valid for 60 minutes after the image has been + # generated. class ResponseFormat < OpenAI::Enum abstract! @@ -152,6 +181,9 @@ module OpenAI end end + # The size of the generated images. Must be one of `256x256`, `512x512`, or + # `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or + # `1024x1792` for `dall-e-3` models. class Size < OpenAI::Enum abstract! @@ -168,6 +200,10 @@ module OpenAI end end + # The style of the generated images. Must be one of `vivid` or `natural`. Vivid + # causes the model to lean towards generating hyper-real and dramatic images. + # Natural causes the model to produce more natural, less hyper-real looking + # images. This param is only supported for `dall-e-3`. class Style < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/model.rbi b/rbi/lib/openai/models/model.rbi index ad1c5f72..07b59908 100644 --- a/rbi/lib/openai/models/model.rbi +++ b/rbi/lib/openai/models/model.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Model < OpenAI::BaseModel + # The model identifier, which can be referenced in the API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) when the model was created. sig { returns(Integer) } def created end @@ -19,6 +21,7 @@ module OpenAI def created=(_) end + # The object type, which is always "model". 
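A sketch of how the image generation parameters above combine into one request, assuming the client exposes `client.images.generate` and the response exposes a `data` array of `Image` models (prompt and option values are just examples):

images = client.images.generate(
  prompt: "A watercolor lighthouse at dawn",
  model: "dall-e-3",
  n: 1,                  # dall-e-3 only supports n=1
  quality: :hd,          # dall-e-3 only
  size: :"1792x1024",    # a dall-e-3 size
  style: :natural,
  response_format: :url  # URLs expire 60 minutes after generation
)
puts images.data.first.url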
sig { returns(Symbol) } def object end @@ -27,6 +30,7 @@ module OpenAI def object=(_) end + # The organization that owns the model. sig { returns(String) } def owned_by end @@ -35,6 +39,7 @@ module OpenAI def owned_by=(_) end + # Describes an OpenAI model offering that can be used with the API. sig { params(id: String, created: Integer, owned_by: String, object: Symbol).returns(T.attached_class) } def self.new(id:, created:, owned_by:, object: :model) end diff --git a/rbi/lib/openai/models/moderation.rbi b/rbi/lib/openai/models/moderation.rbi index aba76998..672182dd 100644 --- a/rbi/lib/openai/models/moderation.rbi +++ b/rbi/lib/openai/models/moderation.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Moderation < OpenAI::BaseModel + # A list of the categories, and whether they are flagged or not. sig { returns(OpenAI::Models::Moderation::Categories) } def categories end @@ -11,6 +12,7 @@ module OpenAI def categories=(_) end + # A list of the categories along with the input type(s) that the score applies to. sig { returns(OpenAI::Models::Moderation::CategoryAppliedInputTypes) } def category_applied_input_types end @@ -22,6 +24,7 @@ module OpenAI def category_applied_input_types=(_) end + # A list of the categories along with their scores as predicted by the model. sig { returns(OpenAI::Models::Moderation::CategoryScores) } def category_scores end @@ -30,6 +33,7 @@ module OpenAI def category_scores=(_) end + # Whether any of the below categories are flagged. sig { returns(T::Boolean) } def flagged end @@ -65,6 +69,8 @@ module OpenAI end class Categories < OpenAI::BaseModel + # Content that expresses, incites, or promotes harassing language towards any + # target. sig { returns(T::Boolean) } def harassment end @@ -73,6 +79,8 @@ module OpenAI def harassment=(_) end + # Harassment content that also includes violence or serious harm towards any + # target. sig { returns(T::Boolean) } def harassment_threatening end @@ -81,6 +89,10 @@ module OpenAI def harassment_threatening=(_) end + # Content that expresses, incites, or promotes hate based on race, gender, + # ethnicity, religion, nationality, sexual orientation, disability status, or + # caste. Hateful content aimed at non-protected groups (e.g., chess players) is + # harassment. sig { returns(T::Boolean) } def hate end @@ -89,6 +101,9 @@ module OpenAI def hate=(_) end + # Hateful content that also includes violence or serious harm towards the targeted + # group based on race, gender, ethnicity, religion, nationality, sexual + # orientation, disability status, or caste. sig { returns(T::Boolean) } def hate_threatening end @@ -97,6 +112,9 @@ module OpenAI def hate_threatening=(_) end + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing, or that gives advice or instruction on how to commit + # illicit acts. For example, "how to shoplift" would fit this category. sig { returns(T.nilable(T::Boolean)) } def illicit end @@ -105,6 +123,9 @@ module OpenAI def illicit=(_) end + # Content that includes instructions or advice that facilitate the planning or + # execution of wrongdoing that also includes violence, or that gives advice or + # instruction on the procurement of any weapon. sig { returns(T.nilable(T::Boolean)) } def illicit_violent end @@ -113,6 +134,8 @@ module OpenAI def illicit_violent=(_) end + # Content that promotes, encourages, or depicts acts of self-harm, such as + # suicide, cutting, and eating disorders.
sig { returns(T::Boolean) } def self_harm end @@ -121,6 +144,9 @@ module OpenAI def self_harm=(_) end + # Content that encourages performing acts of self-harm, such as suicide, cutting, + # and eating disorders, or that gives instructions or advice on how to commit such + # acts. sig { returns(T::Boolean) } def self_harm_instructions end @@ -129,6 +155,8 @@ module OpenAI def self_harm_instructions=(_) end + # Content where the speaker expresses that they are engaging or intend to engage + # in acts of self-harm, such as suicide, cutting, and eating disorders. sig { returns(T::Boolean) } def self_harm_intent end @@ -137,6 +165,9 @@ module OpenAI def self_harm_intent=(_) end + # Content meant to arouse sexual excitement, such as the description of sexual + # activity, or that promotes sexual services (excluding sex education and + # wellness). sig { returns(T::Boolean) } def sexual end @@ -145,6 +176,7 @@ module OpenAI def sexual=(_) end + # Sexual content that includes an individual who is under 18 years old. sig { returns(T::Boolean) } def sexual_minors end @@ -153,6 +185,7 @@ module OpenAI def sexual_minors=(_) end + # Content that depicts death, violence, or physical injury. sig { returns(T::Boolean) } def violence end @@ -161,6 +194,7 @@ module OpenAI def violence=(_) end + # Content that depicts death, violence, or physical injury in graphic detail. sig { returns(T::Boolean) } def violence_graphic end @@ -169,6 +203,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories, and whether they are flagged or not. sig do params( harassment: T::Boolean, @@ -229,6 +264,7 @@ module OpenAI end class CategoryAppliedInputTypes < OpenAI::BaseModel + # The applied input type(s) for the category 'harassment'. sig { returns(T::Array[Symbol]) } def harassment end @@ -237,6 +273,7 @@ module OpenAI def harassment=(_) end + # The applied input type(s) for the category 'harassment/threatening'. sig { returns(T::Array[Symbol]) } def harassment_threatening end @@ -245,6 +282,7 @@ module OpenAI def harassment_threatening=(_) end + # The applied input type(s) for the category 'hate'. sig { returns(T::Array[Symbol]) } def hate end @@ -253,6 +291,7 @@ module OpenAI def hate=(_) end + # The applied input type(s) for the category 'hate/threatening'. sig { returns(T::Array[Symbol]) } def hate_threatening end @@ -261,6 +300,7 @@ module OpenAI def hate_threatening=(_) end + # The applied input type(s) for the category 'illicit'. sig { returns(T::Array[Symbol]) } def illicit end @@ -269,6 +309,7 @@ module OpenAI def illicit=(_) end + # The applied input type(s) for the category 'illicit/violent'. sig { returns(T::Array[Symbol]) } def illicit_violent end @@ -277,6 +318,7 @@ module OpenAI def illicit_violent=(_) end + # The applied input type(s) for the category 'self-harm'. sig { returns(T::Array[Symbol]) } def self_harm end @@ -285,6 +327,7 @@ module OpenAI def self_harm=(_) end + # The applied input type(s) for the category 'self-harm/instructions'. sig { returns(T::Array[Symbol]) } def self_harm_instructions end @@ -293,6 +336,7 @@ module OpenAI def self_harm_instructions=(_) end + # The applied input type(s) for the category 'self-harm/intent'. sig { returns(T::Array[Symbol]) } def self_harm_intent end @@ -301,6 +345,7 @@ module OpenAI def self_harm_intent=(_) end + # The applied input type(s) for the category 'sexual'. sig { returns(T::Array[Symbol]) } def sexual end @@ -309,6 +354,7 @@ module OpenAI def sexual=(_) end + # The applied input type(s) for the category 'sexual/minors'. 
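Each category appears three times on a `Moderation` result: as a boolean flag, as applied input types (above), and as a score (defined just below). A sketch of reading them together, given a `moderation` result obtained from the create call sketched further down:

# `moderation` is an OpenAI::Models::Moderation (see the create sketch below).
if moderation.categories.violence
  puts "violence flagged, score #{moderation.category_scores.violence}"
  puts "applies to: #{moderation.category_applied_input_types.violence.join(", ")}"
end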
sig { returns(T::Array[Symbol]) } def sexual_minors end @@ -317,6 +363,7 @@ module OpenAI def sexual_minors=(_) end + # The applied input type(s) for the category 'violence'. sig { returns(T::Array[Symbol]) } def violence end @@ -325,6 +372,7 @@ module OpenAI def violence=(_) end + # The applied input type(s) for the category 'violence/graphic'. sig { returns(T::Array[Symbol]) } def violence_graphic end @@ -333,6 +381,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories along with the input type(s) that the score applies to. sig do params( harassment: T::Array[Symbol], @@ -555,6 +604,7 @@ module OpenAI end class CategoryScores < OpenAI::BaseModel + # The score for the category 'harassment'. sig { returns(Float) } def harassment end @@ -563,6 +613,7 @@ module OpenAI def harassment=(_) end + # The score for the category 'harassment/threatening'. sig { returns(Float) } def harassment_threatening end @@ -571,6 +622,7 @@ module OpenAI def harassment_threatening=(_) end + # The score for the category 'hate'. sig { returns(Float) } def hate end @@ -579,6 +631,7 @@ module OpenAI def hate=(_) end + # The score for the category 'hate/threatening'. sig { returns(Float) } def hate_threatening end @@ -587,6 +640,7 @@ module OpenAI def hate_threatening=(_) end + # The score for the category 'illicit'. sig { returns(Float) } def illicit end @@ -595,6 +649,7 @@ module OpenAI def illicit=(_) end + # The score for the category 'illicit/violent'. sig { returns(Float) } def illicit_violent end @@ -603,6 +658,7 @@ module OpenAI def illicit_violent=(_) end + # The score for the category 'self-harm'. sig { returns(Float) } def self_harm end @@ -611,6 +667,7 @@ module OpenAI def self_harm=(_) end + # The score for the category 'self-harm/instructions'. sig { returns(Float) } def self_harm_instructions end @@ -619,6 +676,7 @@ module OpenAI def self_harm_instructions=(_) end + # The score for the category 'self-harm/intent'. sig { returns(Float) } def self_harm_intent end @@ -627,6 +685,7 @@ module OpenAI def self_harm_intent=(_) end + # The score for the category 'sexual'. sig { returns(Float) } def sexual end @@ -635,6 +694,7 @@ module OpenAI def sexual=(_) end + # The score for the category 'sexual/minors'. sig { returns(Float) } def sexual_minors end @@ -643,6 +703,7 @@ module OpenAI def sexual_minors=(_) end + # The score for the category 'violence'. sig { returns(Float) } def violence end @@ -651,6 +712,7 @@ module OpenAI def violence=(_) end + # The score for the category 'violence/graphic'. sig { returns(Float) } def violence_graphic end @@ -659,6 +721,7 @@ module OpenAI def violence_graphic=(_) end + # A list of the categories along with their scores as predicted by the model. sig do params( harassment: Float, diff --git a/rbi/lib/openai/models/moderation_create_params.rbi b/rbi/lib/openai/models/moderation_create_params.rbi index 998863d3..bac4a3a8 100644 --- a/rbi/lib/openai/models/moderation_create_params.rbi +++ b/rbi/lib/openai/models/moderation_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. sig do returns( T.any( String, T::Array[String], T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] ) ) end @@ -37,6 +39,10 @@ module OpenAI def input=(_) end + # The content moderation model you would like to use.
Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). sig { returns(T.nilable(T.any(String, Symbol))) } def model end @@ -77,6 +83,8 @@ module OpenAI def to_hash end + # Input (or inputs) to classify. Can be a single string, an array of strings, or + # an array of multi-modal input objects similar to other models. class Input < OpenAI::Union abstract! @@ -85,6 +93,7 @@ module OpenAI ModerationMultiModalInputArray = T.type_alias { T::Array[T.any(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)] } class << self + # @api private sig do override .returns( @@ -96,10 +105,15 @@ module OpenAI end end + # The content moderation model you would like to use. Learn more in + # [the moderation guide](https://platform.openai.com/docs/guides/moderation), and + # learn about available models + # [here](https://platform.openai.com/docs/models#moderation). class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end diff --git a/rbi/lib/openai/models/moderation_create_response.rbi b/rbi/lib/openai/models/moderation_create_response.rbi index 46b29878..bf831d82 100644 --- a/rbi/lib/openai/models/moderation_create_response.rbi +++ b/rbi/lib/openai/models/moderation_create_response.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationCreateResponse < OpenAI::BaseModel + # The unique identifier for the moderation request. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The model used to generate the moderation results. sig { returns(String) } def model end @@ -19,6 +21,7 @@ module OpenAI def model=(_) end + # A list of moderation objects. sig { returns(T::Array[OpenAI::Models::Moderation]) } def results end @@ -27,6 +30,7 @@ module OpenAI def results=(_) end + # Represents if a given text input is potentially harmful. sig { params(id: String, model: String, results: T::Array[OpenAI::Models::Moderation]).returns(T.attached_class) } def self.new(id:, model:, results:) end diff --git a/rbi/lib/openai/models/moderation_image_url_input.rbi b/rbi/lib/openai/models/moderation_image_url_input.rbi index a7ecaefe..222a1447 100644 --- a/rbi/lib/openai/models/moderation_image_url_input.rbi +++ b/rbi/lib/openai/models/moderation_image_url_input.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationImageURLInput < OpenAI::BaseModel + # Contains either an image URL or a data URL for a base64 encoded image. sig { returns(OpenAI::Models::ModerationImageURLInput::ImageURL) } def image_url end @@ -14,6 +15,7 @@ module OpenAI def image_url=(_) end + # Always `image_url`. sig { returns(Symbol) } def type end @@ -22,6 +24,7 @@ module OpenAI def type=(_) end + # An object describing an image to classify. sig do params(image_url: OpenAI::Models::ModerationImageURLInput::ImageURL, type: Symbol) .returns(T.attached_class) @@ -34,6 +37,7 @@ module OpenAI end class ImageURL < OpenAI::BaseModel + # Either a URL of the image or the base64 encoded image data. sig { returns(String) } def url end @@ -42,6 +46,7 @@ module OpenAI def url=(_) end + # Contains either an image URL or a data URL for a base64 encoded image. 
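A sketch of a multi-modal moderation request built from the text and image-URL input objects above, assuming the client exposes `client.moderations.create` (the image URL is a placeholder):

resp = client.moderations.create(
  model: "omni-moderation-latest",
  input: [
    {type: "text", text: "some text to classify"},
    {type: "image_url", image_url: {url: "https://example.com/photo.png"}} # placeholder URL
  ]
)
moderation = resp.results.first
puts moderation.flagged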
sig { params(url: String).returns(T.attached_class) } def self.new(url:) end diff --git a/rbi/lib/openai/models/moderation_multi_modal_input.rbi b/rbi/lib/openai/models/moderation_multi_modal_input.rbi index 2d658e57..1c24bbd3 100644 --- a/rbi/lib/openai/models/moderation_multi_modal_input.rbi +++ b/rbi/lib/openai/models/moderation_multi_modal_input.rbi @@ -2,10 +2,12 @@ module OpenAI module Models + # An object describing an image to classify. class ModerationMultiModalInput < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/moderation_text_input.rbi b/rbi/lib/openai/models/moderation_text_input.rbi index 85c34f7f..41888533 100644 --- a/rbi/lib/openai/models/moderation_text_input.rbi +++ b/rbi/lib/openai/models/moderation_text_input.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ModerationTextInput < OpenAI::BaseModel + # A string of text to classify. sig { returns(String) } def text end @@ -11,6 +12,7 @@ module OpenAI def text=(_) end + # Always `text`. sig { returns(Symbol) } def type end @@ -19,6 +21,7 @@ module OpenAI def type=(_) end + # An object describing text to classify. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :text) end diff --git a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi index c4d89d20..db3ddb71 100644 --- a/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi +++ b/rbi/lib/openai/models/other_file_chunking_strategy_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class OtherFileChunkingStrategyObject < OpenAI::BaseModel + # Always `other`. sig { returns(Symbol) } def type end @@ -11,6 +12,9 @@ module OpenAI def type=(_) end + # This is returned when the chunking strategy is unknown. Typically, this is + # because the file was indexed before the `chunking_strategy` concept was + # introduced in the API. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :other) end diff --git a/rbi/lib/openai/models/reasoning.rbi b/rbi/lib/openai/models/reasoning.rbi index de57d2db..3c5fb130 100644 --- a/rbi/lib/openai/models/reasoning.rbi +++ b/rbi/lib/openai/models/reasoning.rbi @@ -3,6 +3,12 @@ module OpenAI module Models class Reasoning < OpenAI::BaseModel + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. sig { returns(T.nilable(Symbol)) } def effort end @@ -11,6 +17,11 @@ module OpenAI def effort=(_) end + # **o-series models only** + # + # A summary of the reasoning performed by the model. This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. sig { returns(T.nilable(Symbol)) } def generate_summary end @@ -19,6 +30,10 @@ module OpenAI def generate_summary=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { params(effort: T.nilable(Symbol), generate_summary: T.nilable(Symbol)).returns(T.attached_class) } def self.new(effort:, generate_summary: nil) end @@ -27,6 +42,11 @@ module OpenAI def to_hash end + # **o-series models only** + # + # A summary of the reasoning performed by the model. 
This can be useful for + # debugging and understanding the model's reasoning process. One of `concise` or + # `detailed`. class GenerateSummary < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/reasoning_effort.rbi b/rbi/lib/openai/models/reasoning_effort.rbi index b4182a8d..8f9c3bc8 100644 --- a/rbi/lib/openai/models/reasoning_effort.rbi +++ b/rbi/lib/openai/models/reasoning_effort.rbi @@ -2,6 +2,12 @@ module OpenAI module Models + # **o-series models only** + # + # Constrains effort on reasoning for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + # result in faster responses and fewer tokens used on reasoning in a response. class ReasoningEffort < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/response_format_json_object.rbi b/rbi/lib/openai/models/response_format_json_object.rbi index ffd5658c..044c6ff6 100644 --- a/rbi/lib/openai/models/response_format_json_object.rbi +++ b/rbi/lib/openai/models/response_format_json_object.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatJSONObject < OpenAI::BaseModel + # The type of response format being defined. Always `json_object`. sig { returns(Symbol) } def type end @@ -11,6 +12,9 @@ module OpenAI def type=(_) end + # JSON object response format. An older method of generating JSON responses. Using + # `json_schema` is recommended for models that support it. Note that the model + # will not generate JSON without a system or user message instructing it to do so. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :json_object) end diff --git a/rbi/lib/openai/models/response_format_json_schema.rbi b/rbi/lib/openai/models/response_format_json_schema.rbi index b622a63b..de32d2a7 100644 --- a/rbi/lib/openai/models/response_format_json_schema.rbi +++ b/rbi/lib/openai/models/response_format_json_schema.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatJSONSchema < OpenAI::BaseModel + # Structured Outputs configuration options, including a JSON Schema. sig { returns(OpenAI::Models::ResponseFormatJSONSchema::JSONSchema) } def json_schema end @@ -14,6 +15,7 @@ module OpenAI def json_schema=(_) end + # The type of response format being defined. Always `json_schema`. sig { returns(Symbol) } def type end @@ -22,6 +24,9 @@ module OpenAI def type=(_) end + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params(json_schema: OpenAI::Models::ResponseFormatJSONSchema::JSONSchema, type: Symbol) .returns(T.attached_class) @@ -34,6 +39,8 @@ module OpenAI end class JSONSchema < OpenAI::BaseModel + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. sig { returns(String) } def name end @@ -42,6 +49,8 @@ module OpenAI def name=(_) end + # A description of what the response format is for, used by the model to determine + # how to respond in the format. sig { returns(T.nilable(String)) } def description end @@ -50,6 +59,8 @@ module OpenAI def description=(_) end + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). 
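Putting the four `json_schema` fields together, a sketch of a complete Structured Outputs response format (assuming plain hashes coerce into the nested `JSONSchema` model, as elsewhere in the SDK):

format = OpenAI::Models::ResponseFormatJSONSchema.new(
  json_schema: {
    name: "math_answer", # a-z, A-Z, 0-9, underscores and dashes; max length 64
    description: "A structured answer to a math question.",
    schema: {
      type: "object",
      properties: {answer: {type: "number"}, reasoning: {type: "string"}},
      required: %w[answer reasoning],
      additionalProperties: false
    },
    strict: true # output must follow the schema exactly
  }
)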
sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } def schema end @@ -58,6 +69,11 @@ module OpenAI def schema=(_) end + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). sig { returns(T.nilable(T::Boolean)) } def strict end @@ -66,6 +82,7 @@ module OpenAI def strict=(_) end + # Structured Outputs configuration options, including a JSON Schema. sig do params( name: String, diff --git a/rbi/lib/openai/models/response_format_text.rbi b/rbi/lib/openai/models/response_format_text.rbi index 6f3c8970..2894efdf 100644 --- a/rbi/lib/openai/models/response_format_text.rbi +++ b/rbi/lib/openai/models/response_format_text.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class ResponseFormatText < OpenAI::BaseModel + # The type of response format being defined. Always `text`. sig { returns(Symbol) } def type end @@ -11,6 +12,7 @@ module OpenAI def type=(_) end + # Default response format. Used to generate text responses. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :text) end diff --git a/rbi/lib/openai/models/responses/computer_tool.rbi b/rbi/lib/openai/models/responses/computer_tool.rbi index b6ba2c12..d37038e7 100644 --- a/rbi/lib/openai/models/responses/computer_tool.rbi +++ b/rbi/lib/openai/models/responses/computer_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ComputerTool < OpenAI::BaseModel + # The height of the computer display. sig { returns(Float) } def display_height end @@ -12,6 +13,7 @@ module OpenAI def display_height=(_) end + # The width of the computer display. sig { returns(Float) } def display_width end @@ -20,6 +22,7 @@ module OpenAI def display_width=(_) end + # The type of computer environment to control. sig { returns(Symbol) } def environment end @@ -28,6 +31,7 @@ module OpenAI def environment=(_) end + # The type of the computer use tool. Always `computer_use_preview`. sig { returns(Symbol) } def type end @@ -36,6 +40,8 @@ module OpenAI def type=(_) end + # A tool that controls a virtual computer. Learn more about the + # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). sig do params(display_height: Float, display_width: Float, environment: Symbol, type: Symbol) .returns(T.attached_class) @@ -49,6 +55,7 @@ module OpenAI def to_hash end + # The type of computer environment to control. class Environment < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/easy_input_message.rbi b/rbi/lib/openai/models/responses/easy_input_message.rbi index 428515ec..5d8b1737 100644 --- a/rbi/lib/openai/models/responses/easy_input_message.rbi +++ b/rbi/lib/openai/models/responses/easy_input_message.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class EasyInputMessage < OpenAI::BaseModel + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList)) } def content end @@ -15,6 +17,8 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. 
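A sketch of these message inputs in practice, assuming the generated client exposes `client.responses.create` (per the responses resources listed in this patch):

response = client.responses.create(
  model: "gpt-4o",
  input: [
    {role: :developer, content: "Answer in exactly one sentence."}, # outranks `user` instructions
    {role: :user, content: "Why is the sky blue?"}
  ]
)
puts response.id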
sig { returns(Symbol) } def role end @@ -23,6 +27,7 @@ module OpenAI def role=(_) end + # The type of the message input. Always `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -31,6 +36,11 @@ module OpenAI def type=(_) end + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. sig do params( content: T.any(String, OpenAI::Models::Responses::ResponseInputMessageContentList), @@ -51,10 +61,13 @@ module OpenAI def to_hash end + # Text, image, or audio input to the model, used to generate a response. Can also + # contain previous assistant responses. class Content < OpenAI::Union abstract! class << self + # @api private sig do override .returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInputMessageContentList]]) @@ -64,6 +77,8 @@ module OpenAI end end + # The role of the message input. One of `user`, `assistant`, `system`, or + # `developer`. class Role < OpenAI::Enum abstract! @@ -79,6 +94,7 @@ module OpenAI end end + # The type of the message input. Always `message`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/file_search_tool.rbi b/rbi/lib/openai/models/responses/file_search_tool.rbi index 204c6f9d..6854bd65 100644 --- a/rbi/lib/openai/models/responses/file_search_tool.rbi +++ b/rbi/lib/openai/models/responses/file_search_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class FileSearchTool < OpenAI::BaseModel + # The type of the file search tool. Always `file_search`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # The IDs of the vector stores to search. sig { returns(T::Array[String]) } def vector_store_ids end @@ -20,6 +22,7 @@ module OpenAI def vector_store_ids=(_) end + # A filter to apply based on file attributes. sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } def filters end @@ -31,6 +34,8 @@ module OpenAI def filters=(_) end + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -39,6 +44,7 @@ module OpenAI def max_num_results=(_) end + # Ranking options for search. sig { returns(T.nilable(OpenAI::Models::Responses::FileSearchTool::RankingOptions)) } def ranking_options end @@ -50,6 +56,9 @@ module OpenAI def ranking_options=(_) end + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). sig do params( vector_store_ids: T::Array[String], @@ -78,10 +87,12 @@ module OpenAI def to_hash end + # A filter to apply based on file attributes. class Filters < OpenAI::Union abstract! class << self + # @api private sig do override .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) @@ -92,6 +103,7 @@ module OpenAI end class RankingOptions < OpenAI::BaseModel + # The ranker to use for the file search. sig { returns(T.nilable(Symbol)) } def ranker end @@ -100,6 +112,9 @@ module OpenAI def ranker=(_) end + # The score threshold for the file search, a number between 0 and 1. 
Numbers + # closer to 1 will attempt to return only the most relevant results, but may + # return fewer results. sig { returns(T.nilable(Float)) } def score_threshold end @@ -108,6 +123,7 @@ module OpenAI def score_threshold=(_) end + # Ranking options for search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker: nil, score_threshold: nil) end @@ -116,6 +132,7 @@ module OpenAI def to_hash end + # The ranker to use for the file search. class Ranker < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/function_tool.rbi b/rbi/lib/openai/models/responses/function_tool.rbi index b1e8d293..8513be94 100644 --- a/rbi/lib/openai/models/responses/function_tool.rbi +++ b/rbi/lib/openai/models/responses/function_tool.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class FunctionTool < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -12,6 +13,7 @@ module OpenAI def name=(_) end + # A JSON schema object describing the parameters of the function. sig { returns(T::Hash[Symbol, T.anything]) } def parameters end @@ -20,6 +22,7 @@ module OpenAI def parameters=(_) end + # Whether to enforce strict parameter validation. Default `true`. sig { returns(T::Boolean) } def strict end @@ -28,6 +31,7 @@ module OpenAI def strict=(_) end + # The type of the function tool. Always `function`. sig { returns(Symbol) } def type end @@ -36,6 +40,8 @@ module OpenAI def type=(_) end + # A description of the function. Used by the model to determine whether or not to + # call the function. sig { returns(T.nilable(String)) } def description end @@ -44,6 +50,9 @@ module OpenAI def description=(_) end + # Defines a function in your own code the model can choose to call. Learn more + # about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do params( name: String, diff --git a/rbi/lib/openai/models/responses/input_item_list_params.rbi b/rbi/lib/openai/models/responses/input_item_list_params.rbi index 8f16ac93..77dd539f 100644 --- a/rbi/lib/openai/models/responses/input_item_list_params.rbi +++ b/rbi/lib/openai/models/responses/input_item_list_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # An item ID to list items after, used in pagination. sig { returns(T.nilable(String)) } def after end @@ -15,6 +16,7 @@ module OpenAI def after=(_) end + # An item ID to list items before, used in pagination. sig { returns(T.nilable(String)) } def before end @@ -23,6 +25,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -31,6 +35,10 @@ module OpenAI def limit=(_) end + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -67,6 +75,10 @@ module OpenAI def to_hash end + # The order to return the input items in. Default is `asc`. + # + # - `asc`: Return the input items in ascending order. + # - `desc`: Return the input items in descending order. class Order < OpenAI::Enum abstract! 
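Before moving on to the `Response` model itself, a sketch tying the file search and function tools above to a single request (the vector store ID and function are placeholders; again assumes `client.responses.create`):

client.responses.create(
  model: "gpt-4o",
  input: "What does the Q3 report say about revenue?",
  tools: [
    {
      type: "file_search",
      vector_store_ids: ["vs_abc123"],       # placeholder vector store ID
      max_num_results: 8,                    # between 1 and 50 inclusive
      ranking_options: {score_threshold: 0.5}
    },
    {
      type: "function",
      name: "get_revenue",
      strict: true,
      parameters: {type: "object", properties: {}, additionalProperties: false}
    }
  ]
)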
diff --git a/rbi/lib/openai/models/responses/response.rbi b/rbi/lib/openai/models/responses/response.rbi index c4764143..c5e9ed8f 100644 --- a/rbi/lib/openai/models/responses/response.rbi +++ b/rbi/lib/openai/models/responses/response.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class Response < OpenAI::BaseModel + # Unique identifier for this Response. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # Unix timestamp (in seconds) of when this Response was created. sig { returns(Float) } def created_at end @@ -20,6 +22,7 @@ module OpenAI def created_at=(_) end + # An error object returned when the model fails to generate a Response. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseError)) } def error end @@ -31,6 +34,7 @@ module OpenAI def error=(_) end + # Details about why the response is incomplete. sig { returns(T.nilable(OpenAI::Models::Responses::Response::IncompleteDetails)) } def incomplete_details end @@ -42,6 +46,12 @@ module OpenAI def incomplete_details=(_) end + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } def instructions end @@ -50,6 +60,12 @@ module OpenAI def instructions=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -58,6 +74,11 @@ module OpenAI def metadata=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -66,6 +87,7 @@ module OpenAI def model=(_) end + # The object type of this resource - always set to `response`. sig { returns(Symbol) } def object end @@ -74,6 +96,13 @@ module OpenAI def object=(_) end + # An array of content items generated by the model. + # + # - The length and order of items in the `output` array is dependent on the + # model's response. + # - Rather than accessing the first item in the `output` array and assuming it's + # an `assistant` message with the content generated by the model, you might + # consider using the `output_text` property where supported in SDKs. sig do returns( T::Array[ @@ -120,6 +149,7 @@ module OpenAI def output=(_) end + # Whether to allow the model to run tool calls in parallel. sig { returns(T::Boolean) } def parallel_tool_calls end @@ -128,6 +158,10 @@ module OpenAI def parallel_tool_calls=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both.
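The `instructions` and `previous_response_id` fields above are the multi-turn knobs; a sketch of chaining two responses:

first = client.responses.create(model: "gpt-4o", input: "Pick a random city.")

followup = client.responses.create(
  model: "gpt-4o",
  previous_response_id: first.id,          # carries the conversation state forward
  instructions: "Respond only in French.", # not inherited from the previous response
  input: "What country is that city in?"
)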
sig { returns(T.nilable(Float)) } def temperature end @@ -136,6 +170,9 @@ module OpenAI def temperature=(_) end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.any(Symbol, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction) @@ -155,6 +192,20 @@ module OpenAI def tool_choice=(_) end + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter. + # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T::Array[ @@ -195,6 +246,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -203,6 +259,9 @@ module OpenAI def top_p=(_) end + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_output_tokens end @@ -211,6 +270,9 @@ module OpenAI def max_output_tokens=(_) end + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). sig { returns(T.nilable(String)) } def previous_response_id end @@ -219,6 +281,10 @@ module OpenAI def previous_response_id=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } def reasoning end @@ -227,6 +293,8 @@ module OpenAI def reasoning=(_) end + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, or `incomplete`. sig { returns(T.nilable(Symbol)) } def status end @@ -235,6 +303,11 @@ module OpenAI def status=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } def text end @@ -246,6 +319,13 @@ module OpenAI def text=(_) end + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(Symbol)) } def truncation end @@ -254,6 +334,8 @@ module OpenAI def truncation=(_) end + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. sig { returns(T.nilable(OpenAI::Models::Responses::ResponseUsage)) } def usage end @@ -262,6 +344,9 @@ module OpenAI def usage=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -388,6 +473,7 @@ module OpenAI end class IncompleteDetails < OpenAI::BaseModel + # The reason why the response is incomplete. sig { returns(T.nilable(Symbol)) } def reason end @@ -396,6 +482,7 @@ module OpenAI def reason=(_) end + # Details about why the response is incomplete. sig { params(reason: Symbol).returns(T.attached_class) } def self.new(reason: nil) end @@ -404,6 +491,7 @@ module OpenAI def to_hash end + # The reason why the response is incomplete. class Reason < OpenAI::Enum abstract! @@ -418,20 +506,30 @@ module OpenAI end end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. class ToolChoice < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -443,6 +541,13 @@ module OpenAI end end + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. class Truncation < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_delta_event.rbi index cecdd81e..54ec9c86 100644 --- a/rbi/lib/openai/models/responses/response_audio_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioDeltaEvent < OpenAI::BaseModel + # A chunk of Base64 encoded response audio bytes. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The type of the event. Always `response.audio.delta`. 
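The `type` field on each event is the discriminator a consumer switches on. A sketch over a hypothetical `events` enumerable of these typed models (this diff does not show the SDK's actual streaming entry point):

audio = +"" # mutable string accumulator
events.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseAudioDeltaEvent
    audio << event.delta # chunk of Base64-encoded audio bytes
  when OpenAI::Models::Responses::ResponseAudioDoneEvent
    puts "audio complete: #{audio.bytesize} Base64 characters"
  end
end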
sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial audio response. sig { params(delta: String, type: Symbol).returns(T.attached_class) } def self.new(delta:, type: :"response.audio.delta") end diff --git a/rbi/lib/openai/models/responses/response_audio_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_done_event.rbi index c67012a1..d60d8ffe 100644 --- a/rbi/lib/openai/models/responses/response_audio_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioDoneEvent < OpenAI::BaseModel + # The type of the event. Always `response.audio.done`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Emitted when the audio response is complete. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :"response.audio.done") end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi index fe5f4c18..072b6541 100644 --- a/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_transcript_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioTranscriptDeltaEvent < OpenAI::BaseModel + # The partial transcript of the audio response. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The type of the event. Always `response.audio.transcript.delta`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial transcript of audio. sig { params(delta: String, type: Symbol).returns(T.attached_class) } def self.new(delta:, type: :"response.audio.transcript.delta") end diff --git a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi b/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi index 97204636..940f3497 100644 --- a/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_audio_transcript_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseAudioTranscriptDoneEvent < OpenAI::BaseModel + # The type of the event. Always `response.audio.transcript.done`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # Emitted when the full audio transcript is completed. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :"response.audio.transcript.done") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi index 15a3e9c4..21d55044 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::BaseModel + # The partial code snippet added by the code interpreter. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The index of the output item that contains the code interpreter call.
sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.code.delta`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a partial code snippet is added by the code interpreter. sig { params(delta: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(delta:, output_index:, type: :"response.code_interpreter_call.code.delta") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi index 25f31749..294664ab 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::BaseModel + # The final code snippet output by the code interpreter. sig { returns(String) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # The index of the output item that contains the code interpreter call. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.code.done`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when code snippet output is finalized by the code interpreter. sig { params(code: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(code:, output_index:, type: :"response.code_interpreter_call.code.done") end diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi index 93ae27fe..389d9f49 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallCompletedEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item that contains the code interpreter call. sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.completed`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when the code interpreter call is completed.
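+ #
+ # A hedged handling sketch, assuming a streaming Responses call that yields
+ # these typed events (`event` below is illustrative): the finished tool call,
+ # including the code that ran, can be read off this event:
+ #
+ # ```
+ # if event.type == :"response.code_interpreter_call.completed"
+ #   call = event.code_interpreter_call
+ #   puts("code interpreter ran: #{call.code}")
+ # end
+ # ```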
sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi index 815750a0..9d0d0524 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallInProgressEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item that contains the code interpreter call. sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.in_progress`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when a code interpreter call is in progress. sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi index fa22f0e6..4757018f 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::BaseModel + # A tool call to run code. sig { returns(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall) } def code_interpreter_call end @@ -15,6 +16,7 @@ module OpenAI def code_interpreter_call=(_) end + # The index of the output item that contains the code interpreter call. sig { returns(Integer) } def output_index end @@ -23,6 +25,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.code_interpreter_call.interpreting`. sig { returns(Symbol) } def type end @@ -31,6 +34,7 @@ module OpenAI def type=(_) end + # Emitted when the code interpreter is actively interpreting the code snippet. sig do params( code_interpreter_call: OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, diff --git a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi b/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi index d1a61cfa..c4e3d1ae 100644 --- a/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_code_interpreter_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCodeInterpreterToolCall < OpenAI::BaseModel + # The unique ID of the code interpreter tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The code to run. sig { returns(String) } def code end @@ -20,6 +22,7 @@ module OpenAI def code=(_) end + # The results of the code interpreter tool call. sig do returns( T::Array[ @@ -54,6 +57,7 @@ module OpenAI def results=(_) end + # The status of the code interpreter tool call.
sig { returns(Symbol) } def status end @@ -62,6 +66,7 @@ module OpenAI def status=(_) end + # The type of the code interpreter tool call. Always `code_interpreter_call`. sig { returns(Symbol) } def type end @@ -70,6 +75,7 @@ module OpenAI def type=(_) end + # A tool call to run code. sig do params( id: String, @@ -108,10 +114,12 @@ module OpenAI def to_hash end + # The output of a code interpreter tool call. class Result < OpenAI::Union abstract! class Logs < OpenAI::BaseModel + # The logs of the code interpreter tool call. sig { returns(String) } def logs end @@ -120,6 +128,7 @@ module OpenAI def logs=(_) end + # The type of the code interpreter text output. Always `logs`. sig { returns(Symbol) } def type end @@ -128,6 +137,7 @@ module OpenAI def type=(_) end + # The output of a code interpreter tool call that is text. sig { params(logs: String, type: Symbol).returns(T.attached_class) } def self.new(logs:, type: :logs) end @@ -149,6 +159,7 @@ module OpenAI def files=(_) end + # The type of the code interpreter file output. Always `files`. sig { returns(Symbol) } def type end @@ -157,6 +168,7 @@ module OpenAI def type=(_) end + # The output of a code interpreter tool call that is a file. sig do params( files: T::Array[OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Result::Files::File], @@ -180,6 +192,7 @@ module OpenAI end class File < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -188,6 +201,7 @@ module OpenAI def file_id=(_) end + # The MIME type of the file. sig { returns(String) } def mime_type end @@ -207,6 +221,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -218,6 +233,7 @@ module OpenAI end end + # The status of the code interpreter tool call. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_completed_event.rbi b/rbi/lib/openai/models/responses/response_completed_event.rbi index 6ae602db..7db04649 100644 --- a/rbi/lib/openai/models/responses/response_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCompletedEvent < OpenAI::BaseModel + # Properties of the completed response. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.completed`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when the model response is complete. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.completed") end diff --git a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi index 9360dc6c..cc58669a 100644 --- a/rbi/lib/openai/models/responses/response_computer_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_computer_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseComputerToolCall < OpenAI::BaseModel + # The unique ID of the computer call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # An action to be performed on the computer, such as a click, drag, or keypress. sig do returns( T.any( @@ -61,6 +63,7 @@ module OpenAI def action=(_) end + # An identifier used when responding to the tool call with output.
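+ #
+ # A hedged sketch of echoing this ID back: the hash below follows the
+ # `computer_call_output` input item shape defined later in this patch, and
+ # `tool_call` and `data_url` are illustrative:
+ #
+ # ```
+ # {
+ #   type: :computer_call_output,
+ #   call_id: tool_call.call_id,
+ #   output: {type: :computer_screenshot, image_url: data_url}
+ # }
+ # ```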
sig { returns(String) } def call_id end @@ -69,6 +72,7 @@ module OpenAI def call_id=(_) end + # The pending safety checks for the computer call. sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck]) } def pending_safety_checks end @@ -80,6 +84,8 @@ module OpenAI def pending_safety_checks=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(Symbol) } def status end @@ -88,6 +94,7 @@ module OpenAI def status=(_) end + # The type of the computer call. Always `computer_call`. sig { returns(Symbol) } def type end @@ -96,6 +103,9 @@ module OpenAI def type=(_) end + # A tool call to a computer use tool. See the + # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + # for more information. sig do params( id: String, @@ -146,10 +156,13 @@ module OpenAI def to_hash end + # An action to be performed on the computer, such as a click, drag, or keypress. class Action < OpenAI::Union abstract! class Click < OpenAI::BaseModel + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. sig { returns(Symbol) } def button end @@ -158,6 +171,8 @@ module OpenAI def button=(_) end + # Specifies the event type. For a click action, this property is always set to + # `click`. sig { returns(Symbol) } def type end @@ -166,6 +181,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the click occurred. sig { returns(Integer) } def x end @@ -174,6 +190,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the click occurred. sig { returns(Integer) } def y_ end @@ -182,6 +199,7 @@ module OpenAI def y_=(_) end + # A click action. sig { params(button: Symbol, x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(button:, x:, y_:, type: :click) end @@ -190,6 +208,8 @@ module OpenAI def to_hash end + # Indicates which mouse button was pressed during the click. One of `left`, + # `right`, `wheel`, `back`, or `forward`. class Button < OpenAI::Enum abstract! @@ -208,6 +228,8 @@ module OpenAI end class DoubleClick < OpenAI::BaseModel + # Specifies the event type. For a double click action, this property is always set + # to `double_click`. sig { returns(Symbol) } def type end @@ -216,6 +238,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the double click occurred. sig { returns(Integer) } def x end @@ -224,6 +247,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the double click occurred. sig { returns(Integer) } def y_ end @@ -232,6 +256,7 @@ module OpenAI def y_=(_) end + # A double click action. sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(x:, y_:, type: :double_click) end @@ -242,6 +267,15 @@ module OpenAI end class Drag < OpenAI::BaseModel + # An array of coordinates representing the path of the drag action. Coordinates + # will appear as an array of objects, e.g., + # + # ``` + # [ + # { x: 100, y: 200 }, + # { x: 200, y: 300 } + # ] + # ``` sig { returns(T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path]) } def path end @@ -253,6 +287,8 @@ module OpenAI def path=(_) end + # Specifies the event type. For a drag action, this property is always set to + # `drag`. sig { returns(Symbol) } def type end @@ -261,6 +297,7 @@ module OpenAI def type=(_) end + # A drag action.
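+ #
+ # A hedged construction sketch, assuming hashes are coerced into `Path`
+ # models as elsewhere in this SDK (note the `y_` keyword mirrors the `y_`
+ # accessor):
+ #
+ # ```
+ # OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag.new(
+ #   path: [{x: 100, y_: 200}, {x: 200, y_: 300}]
+ # )
+ # ```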
sig do params( path: T::Array[OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path], @@ -281,6 +318,7 @@ module OpenAI end class Path < OpenAI::BaseModel + # The x-coordinate. sig { returns(Integer) } def x end @@ -289,6 +327,7 @@ module OpenAI def x=(_) end + # The y-coordinate. sig { returns(Integer) } def y_ end @@ -297,6 +336,7 @@ module OpenAI def y_=(_) end + # A series of x/y coordinate pairs in the drag path. sig { params(x: Integer, y_: Integer).returns(T.attached_class) } def self.new(x:, y_:) end @@ -308,6 +348,8 @@ module OpenAI end class Keypress < OpenAI::BaseModel + # The combination of keys the model is requesting to be pressed. This is an array + # of strings, each representing a key. sig { returns(T::Array[String]) } def keys end @@ -316,6 +358,8 @@ module OpenAI def keys=(_) end + # Specifies the event type. For a keypress action, this property is always set to + # `keypress`. sig { returns(Symbol) } def type end @@ -324,6 +368,7 @@ module OpenAI def type=(_) end + # A collection of keypresses the model would like to perform. sig { params(keys: T::Array[String], type: Symbol).returns(T.attached_class) } def self.new(keys:, type: :keypress) end @@ -334,6 +379,8 @@ module OpenAI end class Move < OpenAI::BaseModel + # Specifies the event type. For a move action, this property is always set to + # `move`. sig { returns(Symbol) } def type end @@ -342,6 +389,7 @@ module OpenAI def type=(_) end + # The x-coordinate to move to. sig { returns(Integer) } def x end @@ -350,6 +398,7 @@ module OpenAI def x=(_) end + # The y-coordinate to move to. sig { returns(Integer) } def y_ end @@ -358,6 +407,7 @@ module OpenAI def y_=(_) end + # A mouse move action. sig { params(x: Integer, y_: Integer, type: Symbol).returns(T.attached_class) } def self.new(x:, y_:, type: :move) end @@ -368,6 +418,8 @@ module OpenAI end class Screenshot < OpenAI::BaseModel + # Specifies the event type. For a screenshot action, this property is always set + # to `screenshot`. sig { returns(Symbol) } def type end @@ -376,6 +428,7 @@ module OpenAI def type=(_) end + # A screenshot action. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :screenshot) end @@ -386,6 +439,7 @@ module OpenAI end class Scroll < OpenAI::BaseModel + # The horizontal scroll distance. sig { returns(Integer) } def scroll_x end @@ -394,6 +448,7 @@ module OpenAI def scroll_x=(_) end + # The vertical scroll distance. sig { returns(Integer) } def scroll_y end @@ -402,6 +457,8 @@ module OpenAI def scroll_y=(_) end + # Specifies the event type. For a scroll action, this property is always set to + # `scroll`. sig { returns(Symbol) } def type end @@ -410,6 +467,7 @@ module OpenAI def type=(_) end + # The x-coordinate where the scroll occurred. sig { returns(Integer) } def x end @@ -418,6 +476,7 @@ module OpenAI def x=(_) end + # The y-coordinate where the scroll occurred. sig { returns(Integer) } def y_ end @@ -426,6 +485,7 @@ module OpenAI def y_=(_) end + # A scroll action. sig do params(scroll_x: Integer, scroll_y: Integer, x: Integer, y_: Integer, type: Symbol) .returns(T.attached_class) @@ -441,6 +501,7 @@ module OpenAI end class Type < OpenAI::BaseModel + # The text to type. sig { returns(String) } def text end @@ -449,6 +510,8 @@ module OpenAI def text=(_) end + # Specifies the event type. For a type action, this property is always set to + # `type`. sig { returns(Symbol) } def type end @@ -457,6 +520,7 @@ module OpenAI def type=(_) end + # An action to type in text. 
sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :type) end @@ -467,6 +531,8 @@ module OpenAI end class Wait < OpenAI::BaseModel + # Specifies the event type. For a wait action, this property is always set to + # `wait`. sig { returns(Symbol) } def type end @@ -475,6 +541,7 @@ module OpenAI def type=(_) end + # A wait action. sig { params(type: Symbol).returns(T.attached_class) } def self.new(type: :wait) end @@ -485,6 +552,7 @@ module OpenAI end class << self + # @api private sig do override .returns( @@ -497,6 +565,7 @@ module OpenAI end class PendingSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -505,6 +574,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -513,6 +583,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -521,6 +592,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -530,6 +602,8 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! @@ -544,6 +618,7 @@ module OpenAI end end + # The type of the computer call. Always `computer_call`. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_content.rbi b/rbi/lib/openai/models/responses/response_content.rbi index c2e6aed1..3580164a 100644 --- a/rbi/lib/openai/models/responses/response_content.rbi +++ b/rbi/lib/openai/models/responses/response_content.rbi @@ -3,10 +3,12 @@ module OpenAI module Models module Responses + # Multi-modal input and output contents. class ResponseContent < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi b/rbi/lib/openai/models/responses/response_content_part_added_event.rbi index 6280062a..9a037fc4 100644 --- a/rbi/lib/openai/models/responses/response_content_part_added_event.rbi +++ b/rbi/lib/openai/models/responses/response_content_part_added_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseContentPartAddedEvent < OpenAI::BaseModel + # The index of the content part that was added. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the content part was added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the content part was added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The content part that was added. sig do returns( T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) @@ -47,6 +51,7 @@ module OpenAI def part=(_) end + # The type of the event. Always `response.content_part.added`. sig { returns(Symbol) } def type end @@ -55,6 +60,7 @@ module OpenAI def type=(_) end + # Emitted when a new content part is added. sig do params( content_index: Integer, @@ -83,10 +89,12 @@ module OpenAI def to_hash end + # The content part that was added. class Part < OpenAI::Union abstract! 
class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi b/rbi/lib/openai/models/responses/response_content_part_done_event.rbi index 01ea5776..c7102f5b 100644 --- a/rbi/lib/openai/models/responses/response_content_part_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_content_part_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseContentPartDoneEvent < OpenAI::BaseModel + # The index of the content part that is done. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the content part was added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the content part was added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The content part that is done. sig do returns( T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal) @@ -47,6 +51,7 @@ module OpenAI def part=(_) end + # The type of the event. Always `response.content_part.done`. sig { returns(Symbol) } def type end @@ -55,6 +60,7 @@ module OpenAI def type=(_) end + # Emitted when a content part is done. sig do params( content_index: Integer, @@ -83,10 +89,12 @@ module OpenAI def to_hash end + # The content part that is done. class Part < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_create_params.rbi b/rbi/lib/openai/models/responses/response_create_params.rbi index fe1d6f07..e7a3ffca 100644 --- a/rbi/lib/openai/models/responses/response_create_params.rbi +++ b/rbi/lib/openai/models/responses/response_create_params.rbi @@ -7,6 +7,15 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) sig { returns(T.any(String, OpenAI::Models::Responses::ResponseInput)) } def input end @@ -18,6 +27,11 @@ module OpenAI def input=(_) end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. sig { returns(T.any(String, Symbol)) } def model end @@ -26,6 +40,14 @@ module OpenAI def model=(_) end + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. 
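+ #
+ # A hedged usage sketch, assuming the `client.responses.create` surface from
+ # this SDK (model name and prompt are illustrative):
+ #
+ # ```
+ # client.responses.create(
+ #   model: "gpt-4o",
+ #   input: "Summarize the attached report.",
+ #   include: [:"file_search_call.results"]
+ # )
+ # ```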
sig { returns(T.nilable(T::Array[Symbol])) } def include end @@ -34,6 +56,12 @@ module OpenAI def include=(_) end + # Inserts a system (or developer) message as the first item in the model's + # context. + # + # When used along with `previous_response_id`, the instructions from a previous + # response will not be carried over to the next response. This makes it simple + # to swap out system (or developer) messages in new responses. sig { returns(T.nilable(String)) } def instructions end @@ -42,6 +70,9 @@ module OpenAI def instructions=(_) end + # An upper bound for the number of tokens that can be generated for a response, + # including visible output tokens and + # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(Integer)) } def max_output_tokens end @@ -50,6 +81,12 @@ module OpenAI def max_output_tokens=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -58,6 +95,7 @@ module OpenAI def metadata=(_) end + # Whether to allow the model to run tool calls in parallel. sig { returns(T.nilable(T::Boolean)) } def parallel_tool_calls end @@ -66,6 +104,9 @@ module OpenAI def parallel_tool_calls=(_) end + # The unique ID of the previous response to the model. Use this to create + # multi-turn conversations. Learn more about + # [conversation state](https://platform.openai.com/docs/guides/conversation-state). sig { returns(T.nilable(String)) } def previous_response_id end @@ -74,6 +115,10 @@ module OpenAI def previous_response_id=(_) end + # **o-series models only** + # + # Configuration options for + # [reasoning models](https://platform.openai.com/docs/guides/reasoning). sig { returns(T.nilable(OpenAI::Models::Reasoning)) } def reasoning end @@ -82,6 +127,7 @@ module OpenAI def reasoning=(_) end + # Whether to store the generated model response for later retrieval via API. sig { returns(T.nilable(T::Boolean)) } def store end @@ -90,6 +136,10 @@ module OpenAI def store=(_) end + # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + # make the output more random, while lower values like 0.2 will make it more + # focused and deterministic. We generally recommend altering this or `top_p` but + # not both. sig { returns(T.nilable(Float)) } def temperature end @@ -98,6 +148,11 @@ module OpenAI def temperature=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig { returns(T.nilable(OpenAI::Models::Responses::ResponseTextConfig)) } def text end @@ -109,6 +164,9 @@ module OpenAI def text=(_) end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. sig do returns( T.nilable( @@ -130,6 +188,20 @@ module OpenAI def tool_choice=(_) end + # An array of tools the model may call while generating a response. You can + # specify which tool to use by setting the `tool_choice` parameter.
+ # + # The two categories of tools you can provide the model are: + # + # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + # capabilities, like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search). + # Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # - **Function calls (custom tools)**: Functions that are defined by you, enabling + # the model to call your own code. Learn more about + # [function calling](https://platform.openai.com/docs/guides/function-calling). sig do returns( T.nilable( @@ -172,6 +244,11 @@ module OpenAI def tools=(_) end + # An alternative to sampling with temperature, called nucleus sampling, where the + # model considers the results of the tokens with top_p probability mass. So 0.1 + # means only the tokens comprising the top 10% probability mass are considered. + # + # We generally recommend altering this or `temperature` but not both. sig { returns(T.nilable(Float)) } def top_p end @@ -180,6 +257,13 @@ module OpenAI def top_p=(_) end + # The truncation strategy to use for the model response. + # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. sig { returns(T.nilable(Symbol)) } def truncation end @@ -188,6 +272,9 @@ module OpenAI def truncation=(_) end + # A unique identifier representing your end-user, which can help OpenAI to monitor + # and detect abuse. + # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). sig { returns(T.nilable(String)) } def user end @@ -283,30 +370,50 @@ module OpenAI def to_hash end + # Text, image, or file inputs to the model, used to generate a response. + # + # Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Image inputs](https://platform.openai.com/docs/guides/images) + # - [File inputs](https://platform.openai.com/docs/guides/pdf-files) + # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state) + # - [Function calling](https://platform.openai.com/docs/guides/function-calling) class Input < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, OpenAI::Models::Responses::ResponseInput]]) } private def variants end end end + # Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + # wide range of models with different capabilities, performance characteristics, + # and price points. Refer to the + # [model guide](https://platform.openai.com/docs/models) to browse and compare + # available models. class Model < OpenAI::Union abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Symbol]]) } private def variants end end end + # How the model should select which tool (or tools) to use when generating a + # response. See the `tools` parameter to see how to specify which tools the model + # can call. class ToolChoice < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -318,6 +425,13 @@ module OpenAI end end + # The truncation strategy to use for the model response. 
+ # + # - `auto`: If the context of this response and previous ones exceeds the model's + # context window size, the model will truncate the response to fit the context + # window by dropping input items in the middle of the conversation. + # - `disabled` (default): If a model response will exceed the context window size + # for a model, the request will fail with a 400 error. class Truncation < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_created_event.rbi b/rbi/lib/openai/models/responses/response_created_event.rbi index 2a500348..c68b3697 100644 --- a/rbi/lib/openai/models/responses/response_created_event.rbi +++ b/rbi/lib/openai/models/responses/response_created_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseCreatedEvent < OpenAI::BaseModel + # The response that was created. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.created`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response is created. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.created") end diff --git a/rbi/lib/openai/models/responses/response_error.rbi b/rbi/lib/openai/models/responses/response_error.rbi index 367eea43..f6a6c36d 100644 --- a/rbi/lib/openai/models/responses/response_error.rbi +++ b/rbi/lib/openai/models/responses/response_error.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseError < OpenAI::BaseModel + # The error code for the response. sig { returns(Symbol) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -20,6 +22,7 @@ module OpenAI def message=(_) end + # An error object returned when the model fails to generate a Response. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -28,6 +31,7 @@ module OpenAI def to_hash end + # The error code for the response. class Code < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_error_event.rbi b/rbi/lib/openai/models/responses/response_error_event.rbi index 03c5b3b3..f4c0e9f0 100644 --- a/rbi/lib/openai/models/responses/response_error_event.rbi +++ b/rbi/lib/openai/models/responses/response_error_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseErrorEvent < OpenAI::BaseModel + # The error code. sig { returns(T.nilable(String)) } def code end @@ -12,6 +13,7 @@ module OpenAI def code=(_) end + # The error message. sig { returns(String) } def message end @@ -20,6 +22,7 @@ module OpenAI def message=(_) end + # The error parameter. sig { returns(T.nilable(String)) } def param end @@ -28,6 +31,7 @@ module OpenAI def param=(_) end + # The type of the event. Always `error`. sig { returns(Symbol) } def type end @@ -36,6 +40,7 @@ module OpenAI def type=(_) end + # Emitted when an error occurs. 
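+ #
+ # A hedged handling sketch, assuming a streaming Responses call that yields
+ # these typed events (`event` below is illustrative):
+ #
+ # ```
+ # if event.type == :error
+ #   warn("response error #{event.code}: #{event.message}")
+ # end
+ # ```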
sig do params(code: T.nilable(String), message: String, param: T.nilable(String), type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_failed_event.rbi b/rbi/lib/openai/models/responses/response_failed_event.rbi index 5c3f69cb..c6d9fd32 100644 --- a/rbi/lib/openai/models/responses/response_failed_event.rbi +++ b/rbi/lib/openai/models/responses/response_failed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFailedEvent < OpenAI::BaseModel + # The response that failed. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.failed`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response fails. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.failed") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi index 77ecf89c..ffb5cae7 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallCompletedEvent < OpenAI::BaseModel + # The ID of the output item associated with the file search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item associated with the file search call. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.completed`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search call is completed (results found). sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.file_search_call.completed") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi index f4d4c09f..2feeebed 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallInProgressEvent < OpenAI::BaseModel + # The ID of the output item associated with the file search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item associated with the file search call. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.in_progress`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search call is initiated.
sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.file_search_call.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi index 3ea7ffd2..b340e2ff 100644 --- a/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_call_searching_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchCallSearchingEvent < OpenAI::BaseModel + # The ID of the output item associated with the file search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item associated with the file search call. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.file_search_call.searching`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a file search call is actively searching. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.file_search_call.searching") end diff --git a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi index c0c8564d..e3a52573 100644 --- a/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_file_search_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFileSearchToolCall < OpenAI::BaseModel + # The unique ID of the file search tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The queries used to search for files. sig { returns(T::Array[String]) } def queries end @@ -20,6 +22,8 @@ module OpenAI def queries=(_) end + # The status of the file search tool call. One of `in_progress`, `searching`, + # `incomplete`, or `failed`. sig { returns(Symbol) } def status end @@ -28,6 +32,7 @@ module OpenAI def status=(_) end + # The type of the file search tool call. Always `file_search_call`. sig { returns(Symbol) } def type end @@ -36,6 +41,7 @@ module OpenAI def type=(_) end + # The results of the file search tool call. sig { returns(T.nilable(T::Array[OpenAI::Models::Responses::ResponseFileSearchToolCall::Result])) } def results end @@ -47,6 +53,9 @@ module OpenAI def results=(_) end + # The results of a file search tool call. See the + # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) + # for more information. sig do params( id: String, @@ -75,6 +84,8 @@ module OpenAI def to_hash end + # The status of the file search tool call. One of `in_progress`, `searching`, + # `incomplete`, or `failed`. class Status < OpenAI::Enum abstract! @@ -92,6 +103,11 @@ module OpenAI end class Result < OpenAI::BaseModel + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers.
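+ #
+ # For example, an illustrative attributes hash (not from the API reference):
+ #
+ # ```
+ # {author: "Jane Doe", category: "blog-post", verified: true}
+ # ```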
sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -103,6 +119,7 @@ module OpenAI def attributes=(_) end + # The unique ID of the file. sig { returns(T.nilable(String)) } def file_id end @@ -111,6 +128,7 @@ module OpenAI def file_id=(_) end + # The name of the file. sig { returns(T.nilable(String)) } def filename end @@ -119,6 +137,7 @@ module OpenAI def filename=(_) end + # The relevance score of the file - a value between 0 and 1. sig { returns(T.nilable(Float)) } def score end @@ -127,6 +146,7 @@ module OpenAI def score=(_) end + # The text that was retrieved from the file. sig { returns(T.nilable(String)) } def text end @@ -167,6 +187,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/responses/response_format_text_config.rbi b/rbi/lib/openai/models/responses/response_format_text_config.rbi index 170c0610..68fa12af 100644 --- a/rbi/lib/openai/models/responses/response_format_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_config.rbi @@ -3,10 +3,24 @@ module OpenAI module Models module Responses + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. class ResponseFormatTextConfig < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi index 4899b49f..aab69ea5 100644 --- a/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi +++ b/rbi/lib/openai/models/responses/response_format_text_json_schema_config.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class ResponseFormatTextJSONSchemaConfig < OpenAI::BaseModel + # The schema for the response format, described as a JSON Schema object. Learn how + # to build JSON schemas [here](https://json-schema.org/). sig { returns(T::Hash[Symbol, T.anything]) } def schema end @@ -12,6 +14,7 @@ module OpenAI def schema=(_) end + # The type of response format being defined. Always `json_schema`. sig { returns(Symbol) } def type end @@ -20,6 +23,8 @@ module OpenAI def type=(_) end + # A description of what the response format is for, used by the model to determine + # how to respond in the format. sig { returns(T.nilable(String)) } def description end @@ -28,6 +33,8 @@ module OpenAI def description=(_) end + # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + # and dashes, with a maximum length of 64. sig { returns(T.nilable(String)) } def name end @@ -36,6 +43,11 @@ module OpenAI def name=(_) end + # Whether to enable strict schema adherence when generating the output. If set to + # true, the model will always follow the exact schema defined in the `schema` + # field. 
Only a subset of JSON Schema is supported when `strict` is `true`. To + # learn more, read the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). sig { returns(T.nilable(T::Boolean)) } def strict end @@ -44,6 +56,9 @@ module OpenAI def strict=(_) end + # JSON Schema response format. Used to generate structured JSON responses. Learn + # more about + # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). sig do params( schema: T::Hash[Symbol, T.anything], diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi index c543936a..bd790e94 100644 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_function_call_arguments_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::BaseModel + # The function-call arguments delta that is added. sig { returns(String) } def delta end @@ -12,6 +13,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the function-call arguments delta is added to. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the function-call arguments delta is added to. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.function_call_arguments.delta`. sig { returns(Symbol) } def type end @@ -36,6 +40,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial function-call arguments delta. sig do params( delta: String, diff --git a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi b/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi index 17234bf9..48684e3e 100644 --- a/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_function_call_arguments_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionCallArgumentsDoneEvent < OpenAI::BaseModel + # The function-call arguments. sig { returns(String) } def arguments end @@ -12,6 +13,7 @@ module OpenAI def arguments=(_) end + # The ID of the item. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item. sig { returns(Integer) } def output_index end @@ -36,6 +39,7 @@ module OpenAI def type=(_) end + # Emitted when function-call arguments are finalized. sig do params( arguments: String, diff --git a/rbi/lib/openai/models/responses/response_function_tool_call.rbi b/rbi/lib/openai/models/responses/response_function_tool_call.rbi index 97c8db84..49e2b6d9 100644 --- a/rbi/lib/openai/models/responses/response_function_tool_call.rbi +++ b/rbi/lib/openai/models/responses/response_function_tool_call.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionToolCall < OpenAI::BaseModel + # The unique ID of the function tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # A JSON string of the arguments to pass to the function. sig { returns(String) } def arguments end @@ -20,6 +22,7 @@ module OpenAI def arguments=(_) end + # The unique ID of the function tool call generated by the model. 
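+ #
+ # A hedged sketch of echoing this ID back: the hash below follows the
+ # `function_call_output` input item shape from the Responses function-calling
+ # docs, and `tool_call` and the output payload are illustrative:
+ #
+ # ```
+ # {
+ #   type: :function_call_output,
+ #   call_id: tool_call.call_id,
+ #   output: {temperature_c: 22}.to_json
+ # }
+ # ```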
sig { returns(String) } def call_id end @@ -28,6 +31,7 @@ module OpenAI def call_id=(_) end + # The name of the function to run. sig { returns(String) } def name end @@ -36,6 +40,7 @@ module OpenAI def name=(_) end + # The type of the function tool call. Always `function_call`. sig { returns(Symbol) } def type end @@ -44,6 +49,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -52,6 +59,9 @@ module OpenAI def status=(_) end + # A tool call to run a function. See the + # [function calling guide](https://platform.openai.com/docs/guides/function-calling) + # for more information. sig do params(id: String, arguments: String, call_id: String, name: String, status: Symbol, type: Symbol) .returns(T.attached_class) @@ -73,6 +83,8 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_function_web_search.rbi b/rbi/lib/openai/models/responses/response_function_web_search.rbi index fc8ec7a6..daf897ed 100644 --- a/rbi/lib/openai/models/responses/response_function_web_search.rbi +++ b/rbi/lib/openai/models/responses/response_function_web_search.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseFunctionWebSearch < OpenAI::BaseModel + # The unique ID of the web search tool call. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The status of the web search tool call. sig { returns(Symbol) } def status end @@ -20,6 +22,7 @@ module OpenAI def status=(_) end + # The type of the web search tool call. Always `web_search_call`. sig { returns(Symbol) } def type end @@ -28,6 +31,9 @@ module OpenAI def type=(_) end + # The results of a web search tool call. See the + # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + # more information. sig { params(id: String, status: Symbol, type: Symbol).returns(T.attached_class) } def self.new(id:, status:, type: :web_search_call) end @@ -36,6 +42,7 @@ module OpenAI def to_hash end + # The status of the web search tool call. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_in_progress_event.rbi index ad20d756..57a31950 100644 --- a/rbi/lib/openai/models/responses/response_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInProgressEvent < OpenAI::BaseModel + # The response that is in progress. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.in_progress`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Emitted when the response is in progress. 
sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_includable.rbi b/rbi/lib/openai/models/responses/response_includable.rbi index 6eab634e..f36ae216 100644 --- a/rbi/lib/openai/models/responses/response_includable.rbi +++ b/rbi/lib/openai/models/responses/response_includable.rbi @@ -3,6 +3,14 @@ module OpenAI module Models module Responses + # Specify additional output data to include in the model response. Currently + # supported values are: + # + # - `file_search_call.results`: Include the search results of the file search tool + # call. + # - `message.input_image.image_url`: Include image urls from the input message. + # - `computer_call_output.output.image_url`: Include image urls from the computer + # call output. class ResponseIncludable < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_incomplete_event.rbi b/rbi/lib/openai/models/responses/response_incomplete_event.rbi index ec47033d..9ca0c85e 100644 --- a/rbi/lib/openai/models/responses/response_incomplete_event.rbi +++ b/rbi/lib/openai/models/responses/response_incomplete_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseIncompleteEvent < OpenAI::BaseModel + # The response that was incomplete. sig { returns(OpenAI::Models::Responses::Response) } def response end @@ -12,6 +13,7 @@ module OpenAI def response=(_) end + # The type of the event. Always `response.incomplete`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # An event that is emitted when a response finishes as incomplete. sig { params(response: OpenAI::Models::Responses::Response, type: Symbol).returns(T.attached_class) } def self.new(response:, type: :"response.incomplete") end diff --git a/rbi/lib/openai/models/responses/response_input_audio.rbi b/rbi/lib/openai/models/responses/response_input_audio.rbi index d7f1eee2..4a8628f6 100644 --- a/rbi/lib/openai/models/responses/response_input_audio.rbi +++ b/rbi/lib/openai/models/responses/response_input_audio.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputAudio < OpenAI::BaseModel + # Base64-encoded audio data. sig { returns(String) } def data end @@ -12,6 +13,7 @@ module OpenAI def data=(_) end + # The format of the audio data. Currently supported formats are `mp3` and `wav`. sig { returns(Symbol) } def format_ end @@ -20,6 +22,7 @@ module OpenAI def format_=(_) end + # The type of the input item. Always `input_audio`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # An audio input to the model. sig { params(data: String, format_: Symbol, type: Symbol).returns(T.attached_class) } def self.new(data:, format_:, type: :input_audio) end @@ -36,6 +40,7 @@ module OpenAI def to_hash end + # The format of the audio data. Currently supported formats are `mp3` and `wav`. class Format < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_input_content.rbi b/rbi/lib/openai/models/responses/response_input_content.rbi index 5857ddd3..04d1918f 100644 --- a/rbi/lib/openai/models/responses/response_input_content.rbi +++ b/rbi/lib/openai/models/responses/response_input_content.rbi @@ -3,10 +3,12 @@ module OpenAI module Models module Responses + # A text input to the model. class ResponseInputContent < OpenAI::Union abstract! 
class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_input_file.rbi b/rbi/lib/openai/models/responses/response_input_file.rbi index e6245523..cedf90ec 100644 --- a/rbi/lib/openai/models/responses/response_input_file.rbi +++ b/rbi/lib/openai/models/responses/response_input_file.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputFile < OpenAI::BaseModel + # The type of the input item. Always `input_file`. sig { returns(Symbol) } def type end @@ -12,6 +13,7 @@ module OpenAI def type=(_) end + # The content of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_data end @@ -20,6 +22,7 @@ module OpenAI def file_data=(_) end + # The ID of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_id end @@ -28,6 +31,7 @@ module OpenAI def file_id=(_) end + # The name of the file to be sent to the model. sig { returns(T.nilable(String)) } def filename end @@ -36,6 +40,7 @@ module OpenAI def filename=(_) end + # A file input to the model. sig do params(file_data: String, file_id: String, filename: String, type: Symbol).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/responses/response_input_image.rbi b/rbi/lib/openai/models/responses/response_input_image.rbi index 62bd604a..2e1819a4 100644 --- a/rbi/lib/openai/models/responses/response_input_image.rbi +++ b/rbi/lib/openai/models/responses/response_input_image.rbi @@ -4,6 +4,8 @@ module OpenAI module Models module Responses class ResponseInputImage < OpenAI::BaseModel + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. sig { returns(Symbol) } def detail end @@ -12,6 +14,7 @@ module OpenAI def detail=(_) end + # The type of the input item. Always `input_image`. sig { returns(Symbol) } def type end @@ -20,6 +23,7 @@ module OpenAI def type=(_) end + # The ID of the file to be sent to the model. sig { returns(T.nilable(String)) } def file_id end @@ -28,6 +32,8 @@ module OpenAI def file_id=(_) end + # The URL of the image to be sent to the model. A fully qualified URL or base64 + # encoded image in a data URL. sig { returns(T.nilable(String)) } def image_url end @@ -36,6 +42,8 @@ module OpenAI def image_url=(_) end + # An image input to the model. Learn about + # [image inputs](https://platform.openai.com/docs/guides/vision). sig do params(detail: Symbol, file_id: T.nilable(String), image_url: T.nilable(String), type: Symbol) .returns(T.attached_class) @@ -56,6 +64,8 @@ module OpenAI def to_hash end + # The detail level of the image to be sent to the model. One of `high`, `low`, or + # `auto`. Defaults to `auto`. class Detail < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_input_item.rbi b/rbi/lib/openai/models/responses/response_input_item.rbi index a2ac733a..faf692fb 100644 --- a/rbi/lib/openai/models/responses/response_input_item.rbi +++ b/rbi/lib/openai/models/responses/response_input_item.rbi @@ -3,10 +3,17 @@ module OpenAI module Models module Responses + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. Messages with the + # `assistant` role are presumed to have been generated by the model in previous + # interactions. class ResponseInputItem < OpenAI::Union abstract! 
class Message < OpenAI::BaseModel + # A list of one or many input items to the model, containing different content + # types. sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } def content end @@ -18,6 +25,7 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `system`, or `developer`. sig { returns(Symbol) } def role end @@ -26,6 +34,8 @@ module OpenAI def role=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -34,6 +44,7 @@ module OpenAI def status=(_) end + # The type of the message input. Always set to `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -42,6 +53,9 @@ module OpenAI def type=(_) end + # A message input to the model with a role indicating instruction following + # hierarchy. Instructions given with the `developer` or `system` role take + # precedence over instructions given with the `user` role. sig do params( content: OpenAI::Models::Responses::ResponseInputMessageContentList, @@ -63,6 +77,7 @@ module OpenAI def to_hash end + # The role of the message input. One of `user`, `system`, or `developer`. class Role < OpenAI::Enum abstract! @@ -77,6 +92,8 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! @@ -91,6 +108,7 @@ module OpenAI end end + # The type of the message input. Always set to `message`. class Type < OpenAI::Enum abstract! @@ -105,6 +123,7 @@ module OpenAI end class ComputerCallOutput < OpenAI::BaseModel + # The ID of the computer tool call that produced the output. sig { returns(String) } def call_id end @@ -113,6 +132,7 @@ module OpenAI def call_id=(_) end + # A computer screenshot image used with the computer use tool. sig { returns(OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Output) } def output end @@ -124,6 +144,7 @@ module OpenAI def output=(_) end + # The type of the computer tool call output. Always `computer_call_output`. sig { returns(Symbol) } def type end @@ -132,6 +153,7 @@ module OpenAI def type=(_) end + # The ID of the computer tool call output. sig { returns(T.nilable(String)) } def id end @@ -140,6 +162,8 @@ module OpenAI def id=(_) end + # The safety checks reported by the API that have been acknowledged by the + # developer. sig do returns( T.nilable( @@ -161,6 +185,8 @@ module OpenAI def acknowledged_safety_checks=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -169,6 +195,7 @@ module OpenAI def status=(_) end + # The output of a computer tool call. sig do params( call_id: String, @@ -200,6 +227,8 @@ module OpenAI end class Output < OpenAI::BaseModel + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. sig { returns(Symbol) } def type end @@ -208,6 +237,7 @@ module OpenAI def type=(_) end + # The identifier of an uploaded file that contains the screenshot. sig { returns(T.nilable(String)) } def file_id end @@ -216,6 +246,7 @@ module OpenAI def file_id=(_) end + # The URL of the screenshot image. sig { returns(T.nilable(String)) } def image_url end @@ -224,6 +255,7 @@ module OpenAI def image_url=(_) end + # A computer screenshot image used with the computer use tool.
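A minimal sketch of the `Message` input documented above. Per the doc string, `developer` instructions take precedence over `user` ones; the content element type is assumed to accept the `ResponseInputText` model documented later in this patch:

```ruby
require "openai"

# A developer-role input message; `status` and `type` are optional per the sigs.
msg = OpenAI::Models::Responses::ResponseInputItem::Message.new(
  content: [OpenAI::Models::Responses::ResponseInputText.new(text: "Answer in French.")],
  role: :developer
)
```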
sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) end @@ -234,6 +266,7 @@ module OpenAI end class AcknowledgedSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -242,6 +275,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -250,6 +284,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -258,6 +293,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -267,6 +303,8 @@ module OpenAI end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! @@ -283,6 +321,7 @@ module OpenAI end class FunctionCallOutput < OpenAI::BaseModel + # The unique ID of the function tool call generated by the model. sig { returns(String) } def call_id end @@ -291,6 +330,7 @@ module OpenAI def call_id=(_) end + # A JSON string of the output of the function tool call. sig { returns(String) } def output end @@ -299,6 +339,7 @@ module OpenAI def output=(_) end + # The type of the function tool call output. Always `function_call_output`. sig { returns(Symbol) } def type end @@ -307,6 +348,8 @@ module OpenAI def type=(_) end + # The unique ID of the function tool call output. Populated when this item is + # returned via API. sig { returns(T.nilable(String)) } def id end @@ -315,6 +358,8 @@ module OpenAI def id=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -323,6 +368,7 @@ module OpenAI def status=(_) end + # The output of a function tool call. sig do params( call_id: String, @@ -341,6 +387,8 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! @@ -357,6 +405,7 @@ module OpenAI end class ItemReference < OpenAI::BaseModel + # The ID of the item to reference. sig { returns(String) } def id end @@ -365,6 +414,7 @@ module OpenAI def id=(_) end + # The type of item to reference. Always `item_reference`. sig { returns(Symbol) } def type end @@ -373,6 +423,7 @@ module OpenAI def type=(_) end + # An internal identifier for an item to reference. sig { params(id: String, type: Symbol).returns(T.attached_class) } def self.new(id:, type: :item_reference) end @@ -383,6 +434,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_input_text.rbi b/rbi/lib/openai/models/responses/response_input_text.rbi index df900197..16fc4040 100644 --- a/rbi/lib/openai/models/responses/response_input_text.rbi +++ b/rbi/lib/openai/models/responses/response_input_text.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseInputText < OpenAI::BaseModel + # The text input to the model. sig { returns(String) } def text end @@ -12,6 +13,7 @@ module OpenAI def text=(_) end + # The type of the input item. Always `input_text`. 
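And a matching sketch for `FunctionCallOutput`, echoing a tool result back as the documented JSON string; the call ID is hypothetical, and the optional `id`/`status` fields are assumed to default to `nil` as elsewhere in this patch:

```ruby
require "json"
require "openai"

# Return a function result for a prior model-generated function call.
result = OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput.new(
  call_id: "call_abc123", # hypothetical; comes from the model's function call
  output: JSON.generate({temperature_c: 21})
)
```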
sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # A text input to the model. sig { params(text: String, type: Symbol).returns(T.attached_class) } def self.new(text:, type: :input_text) end diff --git a/rbi/lib/openai/models/responses/response_item_list.rbi b/rbi/lib/openai/models/responses/response_item_list.rbi index c24e41f8..6b49186e 100644 --- a/rbi/lib/openai/models/responses/response_item_list.rbi +++ b/rbi/lib/openai/models/responses/response_item_list.rbi @@ -6,6 +6,7 @@ module OpenAI module Responses class ResponseItemList < OpenAI::BaseModel + # A list of items used to generate this response. sig do returns( T::Array[ @@ -58,6 +59,7 @@ module OpenAI def data=(_) end + # The ID of the first item in the list. sig { returns(String) } def first_id end @@ -66,6 +68,7 @@ module OpenAI def first_id=(_) end + # Whether there are more items available. sig { returns(T::Boolean) } def has_more end @@ -74,6 +77,7 @@ module OpenAI def has_more=(_) end + # The ID of the last item in the list. sig { returns(String) } def last_id end @@ -82,6 +86,7 @@ module OpenAI def last_id=(_) end + # The type of object returned, must be `list`. sig { returns(Symbol) } def object end @@ -90,6 +95,7 @@ module OpenAI def object=(_) end + # A list of Response items. sig do params( data: T::Array[ @@ -140,10 +146,12 @@ module OpenAI def to_hash end + # Content item used to generate a response. class Data < OpenAI::Union abstract! class Message < OpenAI::BaseModel + # The unique ID of the message input. sig { returns(String) } def id end @@ -152,6 +160,8 @@ module OpenAI def id=(_) end + # A list of one or many input items to the model, containing different content + # types. sig { returns(OpenAI::Models::Responses::ResponseInputMessageContentList) } def content end @@ -163,6 +173,7 @@ module OpenAI def content=(_) end + # The role of the message input. One of `user`, `system`, or `developer`. sig { returns(Symbol) } def role end @@ -171,6 +182,8 @@ module OpenAI def role=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -179,6 +192,7 @@ module OpenAI def status=(_) end + # The type of the message input. Always set to `message`. sig { returns(T.nilable(Symbol)) } def type end @@ -215,6 +229,7 @@ module OpenAI def to_hash end + # The role of the message input. One of `user`, `system`, or `developer`. class Role < OpenAI::Enum abstract! @@ -229,6 +244,8 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! @@ -243,6 +260,7 @@ module OpenAI end end + # The type of the message input. Always set to `message`. class Type < OpenAI::Enum abstract! @@ -257,6 +275,7 @@ module OpenAI end class ComputerCallOutput < OpenAI::BaseModel + # The unique ID of the computer call tool output. sig { returns(String) } def id end @@ -265,6 +284,7 @@ module OpenAI def id=(_) end + # The ID of the computer tool call that produced the output. sig { returns(String) } def call_id end @@ -273,6 +293,7 @@ module OpenAI def call_id=(_) end + # A computer screenshot image used with the computer use tool. sig { returns(OpenAI::Models::Responses::ResponseItemList::Data::ComputerCallOutput::Output) } def output end @@ -284,6 +305,7 @@ module OpenAI def output=(_) end + # The type of the computer tool call output. Always `computer_call_output`.
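The cursor fields documented above (`data`, `has_more`, `last_id`) support manual pagination. A sketch, where `fetch_page` is a hypothetical helper standing in for an input-items list call:

```ruby
# Walk a paginated list using only the documented cursor fields.
page = fetch_page(after: nil)
loop do
  page.data.each { |item| puts item.class }
  break unless page.has_more
  page = fetch_page(after: page.last_id)
end
```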
sig { returns(Symbol) } def type end @@ -292,6 +314,8 @@ module OpenAI def type=(_) end + # The safety checks reported by the API that have been acknowledged by the + # developer. sig do returns( T.nilable( @@ -313,6 +337,8 @@ module OpenAI def acknowledged_safety_checks=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -352,6 +378,8 @@ module OpenAI end class Output < OpenAI::BaseModel + # Specifies the event type. For a computer screenshot, this property is always set + # to `computer_screenshot`. sig { returns(Symbol) } def type end @@ -360,6 +388,7 @@ module OpenAI def type=(_) end + # The identifier of an uploaded file that contains the screenshot. sig { returns(T.nilable(String)) } def file_id end @@ -368,6 +397,7 @@ module OpenAI def file_id=(_) end + # The URL of the screenshot image. sig { returns(T.nilable(String)) } def image_url end @@ -376,6 +406,7 @@ module OpenAI def image_url=(_) end + # A computer screenshot image used with the computer use tool. sig { params(file_id: String, image_url: String, type: Symbol).returns(T.attached_class) } def self.new(file_id: nil, image_url: nil, type: :computer_screenshot) end @@ -386,6 +417,7 @@ module OpenAI end class AcknowledgedSafetyCheck < OpenAI::BaseModel + # The ID of the pending safety check. sig { returns(String) } def id end @@ -394,6 +426,7 @@ module OpenAI def id=(_) end + # The type of the pending safety check. sig { returns(String) } def code end @@ -402,6 +435,7 @@ module OpenAI def code=(_) end + # Details about the pending safety check. sig { returns(String) } def message end @@ -410,6 +444,7 @@ module OpenAI def message=(_) end + # A pending safety check for the computer call. sig { params(id: String, code: String, message: String).returns(T.attached_class) } def self.new(id:, code:, message:) end @@ -419,6 +454,8 @@ module OpenAI end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! @@ -435,6 +472,7 @@ module OpenAI end class FunctionCallOutput < OpenAI::BaseModel + # The unique ID of the function call tool output. sig { returns(String) } def id end @@ -443,6 +481,7 @@ module OpenAI def id=(_) end + # The unique ID of the function tool call generated by the model. sig { returns(String) } def call_id end @@ -451,6 +490,7 @@ module OpenAI def call_id=(_) end + # A JSON string of the output of the function tool call. sig { returns(String) } def output end @@ -459,6 +499,7 @@ module OpenAI def output=(_) end + # The type of the function tool call output. Always `function_call_output`. sig { returns(Symbol) } def type end @@ -467,6 +508,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -493,6 +536,8 @@ module OpenAI def to_hash end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! 
@@ -509,6 +554,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_output_audio.rbi b/rbi/lib/openai/models/responses/response_output_audio.rbi index 1aed1ddd..162e5138 100644 --- a/rbi/lib/openai/models/responses/response_output_audio.rbi +++ b/rbi/lib/openai/models/responses/response_output_audio.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputAudio < OpenAI::BaseModel + # Base64-encoded audio data from the model. sig { returns(String) } def data end @@ -12,6 +13,7 @@ module OpenAI def data=(_) end + # The transcript of the audio data from the model. sig { returns(String) } def transcript end @@ -20,6 +22,7 @@ module OpenAI def transcript=(_) end + # The type of the output audio. Always `output_audio`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # An audio output from the model. sig { params(data: String, transcript: String, type: Symbol).returns(T.attached_class) } def self.new(data:, transcript:, type: :output_audio) end diff --git a/rbi/lib/openai/models/responses/response_output_item.rbi b/rbi/lib/openai/models/responses/response_output_item.rbi index 8a2bf039..f508403c 100644 --- a/rbi/lib/openai/models/responses/response_output_item.rbi +++ b/rbi/lib/openai/models/responses/response_output_item.rbi @@ -3,10 +3,12 @@ module OpenAI module Models module Responses + # An output message from the model. class ResponseOutputItem < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi b/rbi/lib/openai/models/responses/response_output_item_added_event.rbi index 837047df..0e49a206 100644 --- a/rbi/lib/openai/models/responses/response_output_item_added_event.rbi +++ b/rbi/lib/openai/models/responses/response_output_item_added_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputItemAddedEvent < OpenAI::BaseModel + # The output item that was added. sig do returns( T.any( @@ -44,6 +45,7 @@ module OpenAI def item=(_) end + # The index of the output item that was added. sig { returns(Integer) } def output_index end @@ -52,6 +54,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_item.added`. sig { returns(Symbol) } def type end @@ -60,6 +63,7 @@ module OpenAI def type=(_) end + # Emitted when a new output item is added. sig do params( item: T.any( diff --git a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi b/rbi/lib/openai/models/responses/response_output_item_done_event.rbi index 75961890..e53adef5 100644 --- a/rbi/lib/openai/models/responses/response_output_item_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_output_item_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputItemDoneEvent < OpenAI::BaseModel + # The output item that was marked done. sig do returns( T.any( @@ -44,6 +45,7 @@ module OpenAI def item=(_) end + # The index of the output item that was marked done. sig { returns(Integer) } def output_index end @@ -52,6 +54,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_item.done`. sig { returns(Symbol) } def type end @@ -60,6 +63,7 @@ module OpenAI def type=(_) end + # Emitted when an output item is marked done. 
sig do params( item: T.any( diff --git a/rbi/lib/openai/models/responses/response_output_message.rbi b/rbi/lib/openai/models/responses/response_output_message.rbi index d2cbf773..80dfb0e2 100644 --- a/rbi/lib/openai/models/responses/response_output_message.rbi +++ b/rbi/lib/openai/models/responses/response_output_message.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputMessage < OpenAI::BaseModel + # The unique ID of the output message. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # The content of the output message. sig do returns( T::Array[T.any(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)] ) end def content end @@ -31,6 +33,7 @@ module OpenAI def content=(_) end + # The role of the output message. Always `assistant`. sig { returns(Symbol) } def role end @@ -39,6 +42,8 @@ module OpenAI def role=(_) end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. sig { returns(Symbol) } def status end @@ -47,6 +52,7 @@ module OpenAI def status=(_) end + # The type of the output message. Always `message`. sig { returns(Symbol) } def type end @@ -55,6 +61,7 @@ module OpenAI def type=(_) end + # An output message from the model. sig do params( id: String, @@ -83,10 +90,12 @@ module OpenAI def to_hash end + # A text output from the model. class Content < OpenAI::Union abstract! class << self + # @api private sig do override .returns( @@ -98,6 +107,8 @@ module OpenAI end end + # The status of the message input. One of `in_progress`, `completed`, or + # `incomplete`. Populated when input items are returned via API. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_output_refusal.rbi b/rbi/lib/openai/models/responses/response_output_refusal.rbi index f573eb83..1db3c101 100644 --- a/rbi/lib/openai/models/responses/response_output_refusal.rbi +++ b/rbi/lib/openai/models/responses/response_output_refusal.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputRefusal < OpenAI::BaseModel + # The refusal explanation from the model. sig { returns(String) } def refusal end @@ -12,6 +13,7 @@ module OpenAI def refusal=(_) end + # The type of the refusal. Always `refusal`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # A refusal from the model. sig { params(refusal: String, type: Symbol).returns(T.attached_class) } def self.new(refusal:, type: :refusal) end diff --git a/rbi/lib/openai/models/responses/response_output_text.rbi b/rbi/lib/openai/models/responses/response_output_text.rbi index 7f9e510e..e9ce0233 100644 --- a/rbi/lib/openai/models/responses/response_output_text.rbi +++ b/rbi/lib/openai/models/responses/response_output_text.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseOutputText < OpenAI::BaseModel + # The annotations of the text output. sig do returns( T::Array[ @@ -41,6 +42,7 @@ module OpenAI def annotations=(_) end + # The text output from the model. sig { returns(String) } def text end @@ -49,6 +51,7 @@ module OpenAI def text=(_) end + # The type of the output text. Always `output_text`. sig { returns(Symbol) } def type end @@ -57,6 +60,7 @@ module OpenAI def type=(_) end + # A text output from the model. sig do params( annotations: T::Array[ @@ -93,10 +97,12 @@ module OpenAI def to_hash end + # A citation to a file. class Annotation < OpenAI::Union abstract!
class FileCitation < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -105,6 +111,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -113,6 +120,7 @@ module OpenAI def index=(_) end + # The type of the file citation. Always `file_citation`. sig { returns(Symbol) } def type end @@ -121,6 +129,7 @@ module OpenAI def type=(_) end + # A citation to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_citation) end @@ -131,6 +140,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. sig { returns(Integer) } def end_index end @@ -139,6 +149,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -147,6 +158,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -155,6 +167,7 @@ module OpenAI def title=(_) end + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -163,6 +176,7 @@ module OpenAI def type=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -171,6 +185,7 @@ module OpenAI def url=(_) end + # A citation for a web resource used to generate a model response. sig do params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) .returns(T.attached_class) @@ -194,6 +209,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -202,6 +218,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -210,6 +227,7 @@ module OpenAI def index=(_) end + # The type of the file path. Always `file_path`. sig { returns(Symbol) } def type end @@ -218,6 +236,7 @@ module OpenAI def type=(_) end + # A path to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_path) end @@ -228,6 +247,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_reasoning_item.rbi b/rbi/lib/openai/models/responses/response_reasoning_item.rbi index 4733fee4..9207ae25 100644 --- a/rbi/lib/openai/models/responses/response_reasoning_item.rbi +++ b/rbi/lib/openai/models/responses/response_reasoning_item.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseReasoningItem < OpenAI::BaseModel + # The unique identifier of the reasoning content. sig { returns(String) } def id end @@ -12,6 +13,7 @@ module OpenAI def id=(_) end + # Reasoning text contents. sig { returns(T::Array[OpenAI::Models::Responses::ResponseReasoningItem::Summary]) } def summary end @@ -23,6 +25,7 @@ module OpenAI def summary=(_) end + # The type of the object. Always `reasoning`. sig { returns(Symbol) } def type end @@ -31,6 +34,8 @@ module OpenAI def type=(_) end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. sig { returns(T.nilable(Symbol)) } def status end @@ -39,6 +44,8 @@ module OpenAI def status=(_) end + # A description of the chain of thought used by a reasoning model while generating + # a response. 
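Since `Annotation` is a union, a `case`/`when` dispatch over the three variants documented above is the natural way to consume it. A sketch, assuming `output_text` holds a parsed `ResponseOutputText`:

```ruby
output_text.annotations.each do |ann|
  case ann
  when OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation
    puts "cited file #{ann.file_id} (index #{ann.index})"
  when OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation
    puts "#{ann.title}: #{ann.url} [#{ann.start_index}..#{ann.end_index}]"
  when OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath
    puts "file path #{ann.file_id} (index #{ann.index})"
  end
end
```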
sig do params( id: String, @@ -66,6 +73,7 @@ module OpenAI end class Summary < OpenAI::BaseModel + # A short summary of the reasoning used by the model when generating the response. sig { returns(String) } def text end @@ -74,6 +82,7 @@ module OpenAI def text=(_) end + # The type of the object. Always `summary_text`. sig { returns(Symbol) } def type end @@ -91,6 +100,8 @@ module OpenAI end end + # The status of the item. One of `in_progress`, `completed`, or `incomplete`. + # Populated when items are returned via API. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi b/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi index 7517aba1..f4a53f02 100644 --- a/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_refusal_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseRefusalDeltaEvent < OpenAI::BaseModel + # The index of the content part that the refusal text is added to. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The refusal text that is added. sig { returns(String) } def delta end @@ -20,6 +22,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the refusal text is added to. sig { returns(String) } def item_id end @@ -28,6 +31,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the refusal text is added to. sig { returns(Integer) } def output_index end @@ -36,6 +40,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.refusal.delta`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when there is a partial refusal text. sig do params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi b/rbi/lib/openai/models/responses/response_refusal_done_event.rbi index 560a27ef..1e4bf80b 100644 --- a/rbi/lib/openai/models/responses/response_refusal_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_refusal_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseRefusalDoneEvent < OpenAI::BaseModel + # The index of the content part that the refusal text is finalized. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the refusal text is finalized. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the refusal text is finalized. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The refusal text that is finalized. sig { returns(String) } def refusal end @@ -36,6 +40,7 @@ module OpenAI def refusal=(_) end + # The type of the event. Always `response.refusal.done`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when refusal text is finalized. 
sig do params( content_index: Integer, diff --git a/rbi/lib/openai/models/responses/response_retrieve_params.rbi b/rbi/lib/openai/models/responses/response_retrieve_params.rbi index 229f8fbb..8e817f0d 100644 --- a/rbi/lib/openai/models/responses/response_retrieve_params.rbi +++ b/rbi/lib/openai/models/responses/response_retrieve_params.rbi @@ -7,6 +7,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # Additional fields to include in the response. See the `include` parameter for + # Response creation above for more information. sig { returns(T.nilable(T::Array[Symbol])) } def include end diff --git a/rbi/lib/openai/models/responses/response_status.rbi b/rbi/lib/openai/models/responses/response_status.rbi index 11ae0a2a..9fcb3ab3 100644 --- a/rbi/lib/openai/models/responses/response_status.rbi +++ b/rbi/lib/openai/models/responses/response_status.rbi @@ -3,6 +3,8 @@ module OpenAI module Models module Responses + # The status of the response generation. One of `completed`, `failed`, + # `in_progress`, or `incomplete`. class ResponseStatus < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/response_stream_event.rbi b/rbi/lib/openai/models/responses/response_stream_event.rbi index a0f3b82c..6e6608c2 100644 --- a/rbi/lib/openai/models/responses/response_stream_event.rbi +++ b/rbi/lib/openai/models/responses/response_stream_event.rbi @@ -3,10 +3,12 @@ module OpenAI module Models module Responses + # Emitted when there is a partial audio response. class ResponseStreamEvent < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi index 2d60b64e..bafadc88 100644 --- a/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_annotation_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextAnnotationDeltaEvent < OpenAI::BaseModel + # A citation to a file. sig do returns( T.any( @@ -35,6 +36,7 @@ module OpenAI def annotation=(_) end + # The index of the annotation that was added. sig { returns(Integer) } def annotation_index end @@ -43,6 +45,7 @@ module OpenAI def annotation_index=(_) end + # The index of the content part that the text annotation was added to. sig { returns(Integer) } def content_index end @@ -51,6 +54,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the text annotation was added to. sig { returns(String) } def item_id end @@ -59,6 +63,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text annotation was added to. sig { returns(Integer) } def output_index end @@ -67,6 +72,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_text.annotation.added`. sig { returns(Symbol) } def type end @@ -75,6 +81,7 @@ module OpenAI def type=(_) end + # Emitted when a text annotation is added. sig do params( annotation: T.any( @@ -120,10 +127,12 @@ module OpenAI def to_hash end + # A citation to a file. class Annotation < OpenAI::Union abstract! class FileCitation < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -132,6 +141,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. 
sig { returns(Integer) } def index end @@ -140,6 +150,7 @@ module OpenAI def index=(_) end + # The type of the file citation. Always `file_citation`. sig { returns(Symbol) } def type end @@ -148,6 +159,7 @@ module OpenAI def type=(_) end + # A citation to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_citation) end @@ -158,6 +170,7 @@ module OpenAI end class URLCitation < OpenAI::BaseModel + # The index of the last character of the URL citation in the message. sig { returns(Integer) } def end_index end @@ -166,6 +179,7 @@ module OpenAI def end_index=(_) end + # The index of the first character of the URL citation in the message. sig { returns(Integer) } def start_index end @@ -174,6 +188,7 @@ module OpenAI def start_index=(_) end + # The title of the web resource. sig { returns(String) } def title end @@ -182,6 +197,7 @@ module OpenAI def title=(_) end + # The type of the URL citation. Always `url_citation`. sig { returns(Symbol) } def type end @@ -190,6 +206,7 @@ module OpenAI def type=(_) end + # The URL of the web resource. sig { returns(String) } def url end @@ -198,6 +215,7 @@ module OpenAI def url=(_) end + # A citation for a web resource used to generate a model response. sig do params(end_index: Integer, start_index: Integer, title: String, url: String, type: Symbol) .returns(T.attached_class) @@ -221,6 +239,7 @@ module OpenAI end class FilePath < OpenAI::BaseModel + # The ID of the file. sig { returns(String) } def file_id end @@ -229,6 +248,7 @@ module OpenAI def file_id=(_) end + # The index of the file in the list of files. sig { returns(Integer) } def index end @@ -237,6 +257,7 @@ module OpenAI def index=(_) end + # The type of the file path. Always `file_path`. sig { returns(Symbol) } def type end @@ -245,6 +266,7 @@ module OpenAI def type=(_) end + # A path to a file. sig { params(file_id: String, index: Integer, type: Symbol).returns(T.attached_class) } def self.new(file_id:, index:, type: :file_path) end @@ -255,6 +277,7 @@ module OpenAI end class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/response_text_config.rbi b/rbi/lib/openai/models/responses/response_text_config.rbi index a04a35f9..2287e496 100644 --- a/rbi/lib/openai/models/responses/response_text_config.rbi +++ b/rbi/lib/openai/models/responses/response_text_config.rbi @@ -4,6 +4,19 @@ module OpenAI module Models module Responses class ResponseTextConfig < OpenAI::BaseModel + # An object specifying the format that the model must output. + # + # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + # ensures the model will match your supplied JSON schema. Learn more in the + # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + # + # The default format is `{ "type": "text" }` with no additional options. + # + # **Not recommended for gpt-4o and newer models:** + # + # Setting to `{ "type": "json_object" }` enables the older JSON mode, which + # ensures the message the model generates is valid JSON. Using `json_schema` is + # preferred for models that support it. sig do returns( T.nilable( @@ -37,6 +50,11 @@ module OpenAI def format_=(_) end + # Configuration options for a text response from the model. Can be plain text or + # structured JSON data. 
Learn more: + # + # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) sig do params( format_: T.any( diff --git a/rbi/lib/openai/models/responses/response_text_delta_event.rbi b/rbi/lib/openai/models/responses/response_text_delta_event.rbi index acc2fa04..50307018 100644 --- a/rbi/lib/openai/models/responses/response_text_delta_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_delta_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextDeltaEvent < OpenAI::BaseModel + # The index of the content part that the text delta was added to. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The text delta that was added. sig { returns(String) } def delta end @@ -20,6 +22,7 @@ module OpenAI def delta=(_) end + # The ID of the output item that the text delta was added to. sig { returns(String) } def item_id end @@ -28,6 +31,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text delta was added to. sig { returns(Integer) } def output_index end @@ -36,6 +40,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.output_text.delta`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when there is an additional text delta. sig do params(content_index: Integer, delta: String, item_id: String, output_index: Integer, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_text_done_event.rbi b/rbi/lib/openai/models/responses/response_text_done_event.rbi index 8b2ece95..56e6ddd7 100644 --- a/rbi/lib/openai/models/responses/response_text_done_event.rbi +++ b/rbi/lib/openai/models/responses/response_text_done_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseTextDoneEvent < OpenAI::BaseModel + # The index of the content part that the text content is finalized. sig { returns(Integer) } def content_index end @@ -12,6 +13,7 @@ module OpenAI def content_index=(_) end + # The ID of the output item that the text content is finalized. sig { returns(String) } def item_id end @@ -20,6 +22,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the text content is finalized. sig { returns(Integer) } def output_index end @@ -28,6 +31,7 @@ module OpenAI def output_index=(_) end + # The text content that is finalized. sig { returns(String) } def text end @@ -36,6 +40,7 @@ module OpenAI def text=(_) end + # The type of the event. Always `response.output_text.done`. sig { returns(Symbol) } def type end @@ -44,6 +49,7 @@ module OpenAI def type=(_) end + # Emitted when text content is finalized. sig do params(content_index: Integer, item_id: String, output_index: Integer, text: String, type: Symbol) .returns(T.attached_class) diff --git a/rbi/lib/openai/models/responses/response_usage.rbi b/rbi/lib/openai/models/responses/response_usage.rbi index 87f7c238..b46f45aa 100644 --- a/rbi/lib/openai/models/responses/response_usage.rbi +++ b/rbi/lib/openai/models/responses/response_usage.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseUsage < OpenAI::BaseModel + # The number of input tokens. sig { returns(Integer) } def input_tokens end @@ -12,6 +13,7 @@ module OpenAI def input_tokens=(_) end + # The number of output tokens. 
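For the `ResponseTextConfig` documented above, the hash below mirrors the `{ "type": "json_schema" }` configuration from the doc string. Only that `type` key comes from this patch; the `name` and `schema` keys are assumptions about the wire format:

```ruby
# Hash form of a Structured Outputs text configuration (shape partly assumed).
text_config = {
  format: {
    type: "json_schema",
    name: "weather_report", # hypothetical schema name
    schema: {
      type: "object",
      properties: {temp_c: {type: "number"}},
      required: ["temp_c"]
    }
  }
}
```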
sig { returns(Integer) } def output_tokens end @@ -20,6 +22,7 @@ module OpenAI def output_tokens=(_) end + # A detailed breakdown of the output tokens. sig { returns(OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails) } def output_tokens_details end @@ -31,6 +34,7 @@ module OpenAI def output_tokens_details=(_) end + # The total number of tokens used. sig { returns(Integer) } def total_tokens end @@ -39,6 +43,8 @@ module OpenAI def total_tokens=(_) end + # Represents token usage details including input tokens, output tokens, a + # breakdown of output tokens, and the total tokens used. sig do params( input_tokens: Integer, @@ -66,6 +72,7 @@ module OpenAI end class OutputTokensDetails < OpenAI::BaseModel + # The number of reasoning tokens. sig { returns(Integer) } def reasoning_tokens end @@ -74,6 +81,7 @@ module OpenAI def reasoning_tokens=(_) end + # A detailed breakdown of the output tokens. sig { params(reasoning_tokens: Integer).returns(T.attached_class) } def self.new(reasoning_tokens:) end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi index 16bde4c4..1348fded 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_completed_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallCompletedEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.completed`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is completed. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.completed") end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi index 654b7293..891725d6 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_in_progress_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallInProgressEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.in_progress`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is initiated. 
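The delta/done pairs documented above compose naturally into a streaming accumulator. A sketch, where `events` is a hypothetical enumerable of response stream events:

```ruby
buffer = +""
events.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseTextDeltaEvent
    buffer << event.delta # partial text as it arrives
  when OpenAI::Models::Responses::ResponseTextDoneEvent
    puts "final text for item #{event.item_id}: #{event.text}"
  end
end
```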
sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.in_progress") end diff --git a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi b/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi index c0de9efa..a2f0a421 100644 --- a/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi +++ b/rbi/lib/openai/models/responses/response_web_search_call_searching_event.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ResponseWebSearchCallSearchingEvent < OpenAI::BaseModel + # Unique ID for the output item associated with the web search call. sig { returns(String) } def item_id end @@ -12,6 +13,7 @@ module OpenAI def item_id=(_) end + # The index of the output item that the web search call is associated with. sig { returns(Integer) } def output_index end @@ -20,6 +22,7 @@ module OpenAI def output_index=(_) end + # The type of the event. Always `response.web_search_call.searching`. sig { returns(Symbol) } def type end @@ -28,6 +31,7 @@ module OpenAI def type=(_) end + # Emitted when a web search call is executing. sig { params(item_id: String, output_index: Integer, type: Symbol).returns(T.attached_class) } def self.new(item_id:, output_index:, type: :"response.web_search_call.searching") end diff --git a/rbi/lib/openai/models/responses/tool.rbi b/rbi/lib/openai/models/responses/tool.rbi index 00e4ecf8..104f7bf0 100644 --- a/rbi/lib/openai/models/responses/tool.rbi +++ b/rbi/lib/openai/models/responses/tool.rbi @@ -3,10 +3,14 @@ module OpenAI module Models module Responses + # A tool that searches for relevant content from uploaded files. Learn more about + # the + # [file search tool](https://platform.openai.com/docs/guides/tools-file-search). class Tool < OpenAI::Union abstract! class << self + # @api private sig do override .returns( diff --git a/rbi/lib/openai/models/responses/tool_choice_function.rbi b/rbi/lib/openai/models/responses/tool_choice_function.rbi index c11c91a4..a8afd2d4 100644 --- a/rbi/lib/openai/models/responses/tool_choice_function.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_function.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module Responses class ToolChoiceFunction < OpenAI::BaseModel + # The name of the function to call. sig { returns(String) } def name end @@ -12,6 +13,7 @@ module OpenAI def name=(_) end + # For function calling, the type is always `function`. sig { returns(Symbol) } def type end @@ -20,6 +22,7 @@ module OpenAI def type=(_) end + # Use this option to force the model to call a specific function. sig { params(name: String, type: Symbol).returns(T.attached_class) } def self.new(name:, type: :function) end diff --git a/rbi/lib/openai/models/responses/tool_choice_options.rbi b/rbi/lib/openai/models/responses/tool_choice_options.rbi index 7e1f9984..f2fdff28 100644 --- a/rbi/lib/openai/models/responses/tool_choice_options.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_options.rbi @@ -3,6 +3,14 @@ module OpenAI module Models module Responses + # Controls which (if any) tool is called by the model. + # + # `none` means the model will not call any tool and instead generates a message. + # + # `auto` means the model can pick between generating a message or calling one or + # more tools. + # + # `required` means the model must call one or more tools. class ToolChoiceOptions < OpenAI::Enum abstract! 
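The three documented values map onto a request like the sketch below; the surrounding `responses.create` call and its keyword names are assumptions about the client surface, not shown in this hunk:

```ruby
# `client` is assumed to be an OpenAI::Client; `tools` holds tool definitions.
response = client.responses.create(
  model: "gpt-4o",
  input: "What is on my calendar today?",
  tools: tools,
  tool_choice: :required # or :none / :auto, per the enum above
)
```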
diff --git a/rbi/lib/openai/models/responses/tool_choice_types.rbi b/rbi/lib/openai/models/responses/tool_choice_types.rbi index 8176455f..7f039150 100644 --- a/rbi/lib/openai/models/responses/tool_choice_types.rbi +++ b/rbi/lib/openai/models/responses/tool_choice_types.rbi @@ -4,6 +4,14 @@ module OpenAI module Models module Responses class ToolChoiceTypes < OpenAI::BaseModel + # The type of hosted tool the model should use. Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # + # Allowed values are: + # + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` sig { returns(Symbol) } def type end @@ -12,6 +20,8 @@ module OpenAI def type=(_) end + # Indicates that the model should use a built-in tool to generate a response. + # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). sig { params(type: Symbol).returns(T.attached_class) } def self.new(type:) end @@ -20,6 +30,14 @@ module OpenAI def to_hash end + # The type of hosted tool the model should use. Learn more about + # [built-in tools](https://platform.openai.com/docs/guides/tools). + # + # Allowed values are: + # + # - `file_search` + # - `web_search_preview` + # - `computer_use_preview` class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/responses/web_search_tool.rbi b/rbi/lib/openai/models/responses/web_search_tool.rbi index be37fbf0..71b37e66 100644 --- a/rbi/lib/openai/models/responses/web_search_tool.rbi +++ b/rbi/lib/openai/models/responses/web_search_tool.rbi @@ -4,6 +4,10 @@ module OpenAI module Models module Responses class WebSearchTool < OpenAI::BaseModel + # The type of the web search tool. One of: + # + # - `web_search_preview` + # - `web_search_preview_2025_03_11` sig { returns(Symbol) } def type end @@ -12,6 +16,8 @@ module OpenAI def type=(_) end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. sig { returns(T.nilable(Symbol)) } def search_context_size end @@ -31,6 +37,9 @@ module OpenAI def user_location=(_) end + # This tool searches the web for relevant results to use in a response. Learn more + # about the + # [web search tool](https://platform.openai.com/docs/guides/tools-web-search). sig do params( type: Symbol, @@ -55,6 +64,10 @@ module OpenAI def to_hash end + # The type of the web search tool. One of: + # + # - `web_search_preview` + # - `web_search_preview_2025_03_11` class Type < OpenAI::Enum abstract! @@ -68,6 +81,8 @@ module OpenAI end end + # High level guidance for the amount of context window space to use for the + # search. One of `low`, `medium`, or `high`. `medium` is the default. class SearchContextSize < OpenAI::Enum abstract! @@ -83,6 +98,7 @@ module OpenAI end class UserLocation < OpenAI::BaseModel + # The type of location approximation. Always `approximate`. sig { returns(Symbol) } def type end @@ -91,6 +107,7 @@ module OpenAI def type=(_) end + # Free text input for the city of the user, e.g. `San Francisco`. sig { returns(T.nilable(String)) } def city end @@ -99,6 +116,8 @@ module OpenAI def city=(_) end + # The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + # the user, e.g. `US`. sig { returns(T.nilable(String)) } def country end @@ -107,6 +126,7 @@ module OpenAI def country=(_) end + # Free text input for the region of the user, e.g. `California`.
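A sketch combining the `WebSearchTool` attributes documented above; the `UserLocation` constructor is assumed to default `type:` to `:approximate` per its sig:

```ruby
require "openai"

tool = OpenAI::Models::Responses::WebSearchTool.new(
  type: :web_search_preview,
  search_context_size: :low, # one of the documented :low / :medium / :high
  user_location: OpenAI::Models::Responses::WebSearchTool::UserLocation.new(
    city: "San Francisco", # free-text, per the docs above
    country: "US"          # two-letter ISO country code
  )
)
```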
sig { returns(T.nilable(String)) } def region end @@ -115,6 +135,8 @@ module OpenAI def region=(_) end + # The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + # user, e.g. `America/Los_Angeles`. sig { returns(T.nilable(String)) } def timezone end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy.rbi b/rbi/lib/openai/models/static_file_chunking_strategy.rbi index b076e093..1d8d219c 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy.rbi @@ -3,6 +3,9 @@ module OpenAI module Models class StaticFileChunkingStrategy < OpenAI::BaseModel + # The number of tokens that overlap between chunks. The default value is `400`. + # + # Note that the overlap must not exceed half of `max_chunk_size_tokens`. sig { returns(Integer) } def chunk_overlap_tokens end @@ -11,6 +14,8 @@ module OpenAI def chunk_overlap_tokens=(_) end + # The maximum number of tokens in each chunk. The default value is `800`. The + # minimum value is `100` and the maximum value is `4096`. sig { returns(Integer) } def max_chunk_size_tokens end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi index 94e5e78c..99645473 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy_object.rbi @@ -11,6 +11,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end diff --git a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi b/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi index 3eed65cb..a7ed94e1 100644 --- a/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi +++ b/rbi/lib/openai/models/static_file_chunking_strategy_object_param.rbi @@ -11,6 +11,7 @@ module OpenAI def static=(_) end + # Always `static`. sig { returns(Symbol) } def type end @@ -19,6 +20,7 @@ module OpenAI def type=(_) end + # Customize your own chunking strategy by setting chunk size and chunk overlap. sig { params(static: OpenAI::Models::StaticFileChunkingStrategy, type: Symbol).returns(T.attached_class) } def self.new(static:, type: :static) end diff --git a/rbi/lib/openai/models/upload.rbi b/rbi/lib/openai/models/upload.rbi index 422b80e9..6d1b30b0 100644 --- a/rbi/lib/openai/models/upload.rbi +++ b/rbi/lib/openai/models/upload.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class Upload < OpenAI::BaseModel + # The Upload unique identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The intended number of bytes to be uploaded. sig { returns(Integer) } def bytes end @@ -19,6 +21,7 @@ module OpenAI def bytes=(_) end + # The Unix timestamp (in seconds) for when the Upload was created. sig { returns(Integer) } def created_at end @@ -27,6 +30,7 @@ module OpenAI def created_at=(_) end + # The Unix timestamp (in seconds) for when the Upload will expire. sig { returns(Integer) } def expires_at end @@ -35,6 +39,7 @@ module OpenAI def expires_at=(_) end + # The name of the file to be uploaded. sig { returns(String) } def filename end @@ -43,6 +48,7 @@ module OpenAI def filename=(_) end + # The object type, which is always "upload". sig { returns(Symbol) } def object end @@ -51,6 +57,9 @@ module OpenAI def object=(_) end + # The intended purpose of the file. 
+ # [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) + # for acceptable values. sig { returns(String) } def purpose end @@ -59,6 +68,7 @@ module OpenAI def purpose=(_) end + # The status of the Upload. sig { returns(Symbol) } def status end @@ -67,6 +77,7 @@ module OpenAI def status=(_) end + # The `File` object represents a document that has been uploaded to OpenAI. sig { returns(T.nilable(OpenAI::Models::FileObject)) } def file end @@ -75,6 +86,7 @@ module OpenAI def file=(_) end + # The Upload object can accept byte chunks in the form of Parts. sig do params( id: String, @@ -111,6 +123,7 @@ module OpenAI def to_hash end + # The status of the Upload. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/upload_complete_params.rbi b/rbi/lib/openai/models/upload_complete_params.rbi index 840707e6..f8550617 100644 --- a/rbi/lib/openai/models/upload_complete_params.rbi +++ b/rbi/lib/openai/models/upload_complete_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The ordered list of Part IDs. sig { returns(T::Array[String]) } def part_ids end @@ -14,6 +15,8 @@ module OpenAI def part_ids=(_) end + # The optional md5 checksum for the file contents to verify if the bytes uploaded + # match what you expect. sig { returns(T.nilable(String)) } def md5 end diff --git a/rbi/lib/openai/models/upload_create_params.rbi b/rbi/lib/openai/models/upload_create_params.rbi index 6d144a54..22555f0e 100644 --- a/rbi/lib/openai/models/upload_create_params.rbi +++ b/rbi/lib/openai/models/upload_create_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The number of bytes in the file you are uploading. sig { returns(Integer) } def bytes end @@ -14,6 +15,7 @@ module OpenAI def bytes=(_) end + # The name of the file to upload. sig { returns(String) } def filename end @@ -22,6 +24,10 @@ module OpenAI def filename=(_) end + # The MIME type of the file. + # + # This must fall within the supported MIME types for your file purpose. See the + # supported MIME types for assistants and vision. sig { returns(String) } def mime_type end @@ -30,6 +36,10 @@ module OpenAI def mime_type=(_) end + # The intended purpose of the uploaded file. + # + # See the + # [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). sig { returns(Symbol) } def purpose end diff --git a/rbi/lib/openai/models/uploads/part_create_params.rbi b/rbi/lib/openai/models/uploads/part_create_params.rbi index 4e2fe8c7..7ef6052a 100644 --- a/rbi/lib/openai/models/uploads/part_create_params.rbi +++ b/rbi/lib/openai/models/uploads/part_create_params.rbi @@ -7,6 +7,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The chunk of bytes for this Part. sig { returns(T.any(IO, StringIO)) } def data end diff --git a/rbi/lib/openai/models/uploads/upload_part.rbi b/rbi/lib/openai/models/uploads/upload_part.rbi index 6801e859..ae805178 100644 --- a/rbi/lib/openai/models/uploads/upload_part.rbi +++ b/rbi/lib/openai/models/uploads/upload_part.rbi @@ -6,6 +6,7 @@ module OpenAI module Uploads class UploadPart < OpenAI::BaseModel + # The upload Part unique identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the Part was created.
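The create/complete parameter sets documented above pair up as in this sketch. The file path, part IDs, and MIME type are hypothetical, and `md5` is the documented optional integrity check:

```ruby
require "digest"

create_params = {
  bytes: File.size("train.jsonl"),
  filename: "train.jsonl",
  mime_type: "text/jsonl", # must fall within the MIME types for the purpose
  purpose: :"fine-tune"
}
complete_params = {
  part_ids: %w[part_abc part_def], # ordered, per the docs above
  md5: Digest::MD5.hexdigest(File.binread("train.jsonl"))
}
```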
sig { returns(Integer) } def created_at end @@ -22,6 +24,7 @@ module OpenAI def created_at=(_) end + # The object type, which is always `upload.part`. sig { returns(Symbol) } def object end @@ -30,6 +33,7 @@ module OpenAI def object=(_) end + # The ID of the Upload object that this Part was added to. sig { returns(String) } def upload_id end @@ -38,6 +42,7 @@ module OpenAI def upload_id=(_) end + # The upload Part represents a chunk of bytes we can add to an Upload object. sig do params(id: String, created_at: Integer, upload_id: String, object: Symbol).returns(T.attached_class) end diff --git a/rbi/lib/openai/models/vector_store.rbi b/rbi/lib/openai/models/vector_store.rbi index e60530c4..bf4bb14f 100644 --- a/rbi/lib/openai/models/vector_store.rbi +++ b/rbi/lib/openai/models/vector_store.rbi @@ -3,6 +3,7 @@ module OpenAI module Models class VectorStore < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -11,6 +12,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store was created. sig { returns(Integer) } def created_at end @@ -27,6 +29,7 @@ module OpenAI def file_counts=(_) end + # The Unix timestamp (in seconds) for when the vector store was last active. sig { returns(T.nilable(Integer)) } def last_active_at end @@ -35,6 +38,12 @@ module OpenAI def last_active_at=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -43,6 +52,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. sig { returns(String) } def name end @@ -51,6 +61,7 @@ module OpenAI def name=(_) end + # The object type, which is always `vector_store`. sig { returns(Symbol) } def object end @@ -59,6 +70,9 @@ module OpenAI def object=(_) end + # The status of the vector store, which can be either `expired`, `in_progress`, or + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. sig { returns(Symbol) } def status end @@ -67,6 +81,7 @@ module OpenAI def status=(_) end + # The total number of bytes used by the files in the vector store. sig { returns(Integer) } def usage_bytes end @@ -75,6 +90,7 @@ module OpenAI def usage_bytes=(_) end + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStore::ExpiresAfter)) } def expires_after end @@ -83,6 +99,7 @@ module OpenAI def expires_after=(_) end + # The Unix timestamp (in seconds) for when the vector store will expire. sig { returns(T.nilable(Integer)) } def expires_at end @@ -91,6 +108,8 @@ module OpenAI def expires_at=(_) end + # A vector store is a collection of processed files that can be used by the + # `file_search` tool. sig do params( id: String, @@ -144,6 +163,7 @@ module OpenAI end class FileCounts < OpenAI::BaseModel + # The number of files that were cancelled. sig { returns(Integer) } def cancelled end @@ -152,6 +172,7 @@ module OpenAI def cancelled=(_) end + # The number of files that have been successfully processed. sig { returns(Integer) } def completed end @@ -160,6 +181,7 @@ module OpenAI def completed=(_) end + # The number of files that have failed to process.
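The `status` and `file_counts` fields documented above are what callers poll while ingestion runs. A minimal sketch, assuming a `client` built with `OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])` (client construction is outside this patch) and a placeholder store ID:

    store = client.vector_stores.retrieve("vs_abc123")
    # Enum-typed fields surface as Ruby symbols, per the Symbol returns in these sigs.
    if store.status == :completed
      puts "#{store.file_counts.completed}/#{store.file_counts.total} files ready"
    end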
sig { returns(Integer) } def failed end @@ -168,6 +190,7 @@ module OpenAI def failed=(_) end + # The number of files that are currently being processed. sig { returns(Integer) } def in_progress end @@ -176,6 +199,7 @@ module OpenAI def in_progress=(_) end + # The total number of files. sig { returns(Integer) } def total end @@ -211,6 +235,9 @@ module OpenAI end end + # The status of the vector store, which can be either `expired`, `in_progress`, or + # `completed`. A status of `completed` indicates that the vector store is ready + # for use. class Status < OpenAI::Enum abstract! @@ -226,6 +253,8 @@ module OpenAI end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -234,6 +263,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -242,6 +272,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store. sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_store_create_params.rbi b/rbi/lib/openai/models/vector_store_create_params.rbi index 4444751d..02ffb8d9 100644 --- a/rbi/lib/openai/models/vector_store_create_params.rbi +++ b/rbi/lib/openai/models/vector_store_create_params.rbi @@ -6,6 +6,8 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -36,6 +38,7 @@ module OpenAI def chunking_strategy=(_) end + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStoreCreateParams::ExpiresAfter)) } def expires_after end @@ -47,6 +50,9 @@ module OpenAI def expires_after=(_) end + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T.nilable(T::Array[String])) } def file_ids end @@ -55,6 +61,12 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -63,6 +75,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. sig { returns(T.nilable(String)) } def name end @@ -108,6 +121,8 @@ module OpenAI end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -116,6 +131,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -124,6 +140,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store. 
sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_store_list_params.rbi b/rbi/lib/openai/models/vector_store_list_params.rbi index a0c37415..91af6210 100644 --- a/rbi/lib/openai/models/vector_store_list_params.rbi +++ b/rbi/lib/openai/models/vector_store_list_params.rbi @@ -6,6 +6,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -14,6 +18,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -22,6 +30,8 @@ module OpenAI def before=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -30,6 +40,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -66,6 +78,8 @@ module OpenAI def to_hash end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/vector_store_search_params.rbi b/rbi/lib/openai/models/vector_store_search_params.rbi index 52ea39e4..593b1417 100644 --- a/rbi/lib/openai/models/vector_store_search_params.rbi +++ b/rbi/lib/openai/models/vector_store_search_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A query string for a search sig { returns(T.any(String, T::Array[String])) } def query end @@ -14,6 +15,7 @@ module OpenAI def query=(_) end + # A filter to apply based on file attributes. sig { returns(T.nilable(T.any(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter))) } def filters end @@ -25,6 +27,8 @@ module OpenAI def filters=(_) end + # The maximum number of results to return. This number should be between 1 and 50 + # inclusive. sig { returns(T.nilable(Integer)) } def max_num_results end @@ -33,6 +37,7 @@ module OpenAI def max_num_results=(_) end + # Ranking options for search. sig { returns(T.nilable(OpenAI::Models::VectorStoreSearchParams::RankingOptions)) } def ranking_options end @@ -44,6 +49,7 @@ module OpenAI def ranking_options=(_) end + # Whether to rewrite the natural language query for vector search. sig { returns(T.nilable(T::Boolean)) } def rewrite_query end @@ -82,22 +88,26 @@ module OpenAI def to_hash end + # A query string for a search class Query < OpenAI::Union abstract! StringArray = T.type_alias { T::Array[String] } class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, T::Array[String]]]) } private def variants end end end + # A filter to apply based on file attributes. 
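The creation parameters documented above pass as ordinary keywords on the resource method. A sketch, under the assumption that the resource method's keyword names mirror `VectorStoreCreateParams` (the full `create` signature is elided in a later hunk):

    # Expire the store 7 days after it was last active; :last_active_at is the
    # default anchor per the signature above.
    expires = OpenAI::Models::VectorStoreCreateParams::ExpiresAfter.new(days: 7)
    store = client.vector_stores.create(
      name: "support-docs",
      file_ids: ["file-abc123"],  # placeholder File ID
      expires_after: expires
    )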
class Filters < OpenAI::Union abstract! class << self + # @api private sig do override .returns([[NilClass, OpenAI::Models::ComparisonFilter], [NilClass, OpenAI::Models::CompoundFilter]]) @@ -124,6 +134,7 @@ module OpenAI def score_threshold=(_) end + # Ranking options for search. sig { params(ranker: Symbol, score_threshold: Float).returns(T.attached_class) } def self.new(ranker: nil, score_threshold: nil) end diff --git a/rbi/lib/openai/models/vector_store_search_response.rbi b/rbi/lib/openai/models/vector_store_search_response.rbi index d8ffa5fa..ab3f35c5 100644 --- a/rbi/lib/openai/models/vector_store_search_response.rbi +++ b/rbi/lib/openai/models/vector_store_search_response.rbi @@ -3,6 +3,11 @@ module OpenAI module Models class VectorStoreSearchResponse < OpenAI::BaseModel + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -14,6 +19,7 @@ module OpenAI def attributes=(_) end + # Content chunks from the file. sig { returns(T::Array[OpenAI::Models::VectorStoreSearchResponse::Content]) } def content end @@ -25,6 +31,7 @@ module OpenAI def content=(_) end + # The ID of the vector store file. sig { returns(String) } def file_id end @@ -33,6 +40,7 @@ module OpenAI def file_id=(_) end + # The name of the vector store file. sig { returns(String) } def filename end @@ -41,6 +49,7 @@ module OpenAI def filename=(_) end + # The similarity score for the result. sig { returns(Float) } def score end @@ -81,6 +90,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end @@ -88,6 +98,7 @@ module OpenAI end class Content < OpenAI::BaseModel + # The text content returned from search. sig { returns(String) } def text end @@ -96,6 +107,7 @@ module OpenAI def text=(_) end + # The type of content. sig { returns(Symbol) } def type end @@ -112,6 +124,7 @@ module OpenAI def to_hash end + # The type of content. class Type < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/vector_store_update_params.rbi b/rbi/lib/openai/models/vector_store_update_params.rbi index 609a1ea7..88614cee 100644 --- a/rbi/lib/openai/models/vector_store_update_params.rbi +++ b/rbi/lib/openai/models/vector_store_update_params.rbi @@ -6,6 +6,7 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # The expiration policy for a vector store. sig { returns(T.nilable(OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter)) } def expires_after end @@ -17,6 +18,12 @@ module OpenAI def expires_after=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. + # + # Keys are strings with a maximum length of 64 characters. Values are strings with + # a maximum length of 512 characters. sig { returns(T.nilable(OpenAI::Models::Metadata)) } def metadata end @@ -25,6 +32,7 @@ module OpenAI def metadata=(_) end + # The name of the vector store. 
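Those search parameters combine a query string with optional attribute filters and ranking controls. A hedged sketch (IDs are placeholders, and reading results via `.data` assumes the page type this SDK returns elsewhere):

    results = client.vector_stores.search(
      "vs_abc123",
      query: "return policy",
      max_num_results: 5,   # must be between 1 and 50 inclusive
      rewrite_query: true   # let the API rewrite the query for vector search
    )
    results.data.each { |hit| puts "#{hit.filename}: #{hit.score}" }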
sig { returns(T.nilable(String)) } def name end @@ -60,6 +68,8 @@ module OpenAI end class ExpiresAfter < OpenAI::BaseModel + # Anchor timestamp after which the expiration policy applies. Supported anchors: + # `last_active_at`. sig { returns(Symbol) } def anchor end @@ -68,6 +78,7 @@ module OpenAI def anchor=(_) end + # The number of days after the anchor time that the vector store will expire. sig { returns(Integer) } def days end @@ -76,6 +87,7 @@ module OpenAI def days=(_) end + # The expiration policy for a vector store. sig { params(days: Integer, anchor: Symbol).returns(T.attached_class) } def self.new(days:, anchor: :last_active_at) end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi index 50d4260c..fd90edaf 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_create_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + # the vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(T::Array[String]) } def file_ids end @@ -15,6 +18,11 @@ module OpenAI def file_ids=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -26,6 +34,8 @@ module OpenAI def attributes=(_) end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -92,6 +102,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi index 6847460c..c1cde336 100644 --- a/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_batch_list_files_params.rbi @@ -15,6 +15,10 @@ module OpenAI def vector_store_id=(_) end + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -23,6 +27,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -31,6 +39,7 @@ module OpenAI def before=(_) end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
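Batching several files into a vector store, per the params above, is one call plus a status poll; a sketch with placeholder IDs:

    batch = client.vector_stores.file_batches.create(
      "vs_abc123",
      file_ids: ["file-abc123", "file-def456"]
    )
    batch = client.vector_stores.file_batches.retrieve(batch.id, vector_store_id: "vs_abc123")
    puts batch.status  # :in_progress, :completed, :cancelled, or :failed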
sig { returns(T.nilable(Symbol)) } def filter end @@ -39,6 +48,8 @@ module OpenAI def filter=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -47,6 +58,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -87,6 +100,7 @@ module OpenAI def to_hash end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. class Filter < OpenAI::Enum abstract! @@ -102,6 +116,8 @@ module OpenAI end end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/vector_stores/file_content_response.rbi b/rbi/lib/openai/models/vector_stores/file_content_response.rbi index da110839..7986b030 100644 --- a/rbi/lib/openai/models/vector_stores/file_content_response.rbi +++ b/rbi/lib/openai/models/vector_stores/file_content_response.rbi @@ -4,6 +4,7 @@ module OpenAI module Models module VectorStores class FileContentResponse < OpenAI::BaseModel + # The text content sig { returns(T.nilable(String)) } def text end @@ -12,6 +13,7 @@ module OpenAI def text=(_) end + # The content type (currently only `"text"`) sig { returns(T.nilable(String)) } def type end diff --git a/rbi/lib/openai/models/vector_stores/file_create_params.rbi b/rbi/lib/openai/models/vector_stores/file_create_params.rbi index 9c1d277d..e29d8b47 100644 --- a/rbi/lib/openai/models/vector_stores/file_create_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_create_params.rbi @@ -7,6 +7,9 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A [File](https://platform.openai.com/docs/api-reference/files) ID that the + # vector store should use. Useful for tools like `file_search` that can access + # files. sig { returns(String) } def file_id end @@ -15,6 +18,11 @@ module OpenAI def file_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -26,6 +34,8 @@ module OpenAI def attributes=(_) end + # The chunking strategy used to chunk the file(s). If not set, will use the `auto` + # strategy. Only applicable if `file_ids` is non-empty. sig do returns( T.nilable( @@ -92,6 +102,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/vector_stores/file_list_params.rbi b/rbi/lib/openai/models/vector_stores/file_list_params.rbi index 42032f29..6613871b 100644 --- a/rbi/lib/openai/models/vector_stores/file_list_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_list_params.rbi @@ -7,6 +7,10 @@ module OpenAI extend OpenAI::RequestParameters::Converter include OpenAI::RequestParameters + # A cursor for use in pagination. `after` is an object ID that defines your place + # in the list. 
For instance, if you make a list request and receive 100 objects, + # ending with obj_foo, your subsequent call can include after=obj_foo in order to + # fetch the next page of the list. sig { returns(T.nilable(String)) } def after end @@ -15,6 +19,10 @@ module OpenAI def after=(_) end + # A cursor for use in pagination. `before` is an object ID that defines your place + # in the list. For instance, if you make a list request and receive 100 objects, + # starting with obj_foo, your subsequent call can include before=obj_foo in order + # to fetch the previous page of the list. sig { returns(T.nilable(String)) } def before end @@ -23,6 +31,7 @@ module OpenAI def before=(_) end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. sig { returns(T.nilable(Symbol)) } def filter end @@ -31,6 +40,8 @@ module OpenAI def filter=(_) end + # A limit on the number of objects to be returned. Limit can range between 1 and + # 100, and the default is 20. sig { returns(T.nilable(Integer)) } def limit end @@ -39,6 +50,8 @@ module OpenAI def limit=(_) end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. sig { returns(T.nilable(Symbol)) } def order end @@ -77,6 +90,7 @@ module OpenAI def to_hash end + # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. class Filter < OpenAI::Enum abstract! @@ -92,6 +106,8 @@ module OpenAI end end + # Sort order by the `created_at` timestamp of the objects. `asc` for ascending + # order and `desc` for descending order. class Order < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/models/vector_stores/file_update_params.rbi b/rbi/lib/openai/models/vector_stores/file_update_params.rbi index 865c29c6..ea9cd54d 100644 --- a/rbi/lib/openai/models/vector_stores/file_update_params.rbi +++ b/rbi/lib/openai/models/vector_stores/file_update_params.rbi @@ -15,6 +15,11 @@ module OpenAI def vector_store_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -54,6 +59,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi index 102bd1c5..ca194b2c 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file.rbi @@ -6,6 +6,7 @@ module OpenAI module VectorStores class VectorStoreFile < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. sig { returns(String) } def id end @@ -14,6 +15,7 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store file was created. sig { returns(Integer) } def created_at end @@ -22,6 +24,8 @@ module OpenAI def created_at=(_) end + # The last error associated with this vector store file. Will be `null` if there + # are no errors. 
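The `after`/`before` cursors documented above drive every list endpoint in this patch the same way. A manual paging sketch (placeholder IDs; `.data` on the returned page is assumed from this SDK's cursor-page type, which also offers auto-pagination helpers):

    page = client.vector_stores.files.list("vs_abc123", limit: 20, order: :desc)
    page.data.each { |f| puts f.id }
    # Pass the last object ID as the cursor to fetch the next page.
    page = client.vector_stores.files.list("vs_abc123", limit: 20, after: page.data.last.id)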
sig { returns(T.nilable(OpenAI::Models::VectorStores::VectorStoreFile::LastError)) } def last_error end @@ -33,6 +37,7 @@ module OpenAI def last_error=(_) end + # The object type, which is always `vector_store.file`. sig { returns(Symbol) } def object end @@ -41,6 +46,9 @@ module OpenAI def object=(_) end + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. sig { returns(Symbol) } def status end @@ -49,6 +57,8 @@ module OpenAI def status=(_) end + # The total vector store usage in bytes. Note that this may be different from the + # original file size. sig { returns(Integer) } def usage_bytes end @@ -57,6 +67,10 @@ module OpenAI def usage_bytes=(_) end + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } def vector_store_id end @@ -65,6 +79,11 @@ module OpenAI def vector_store_id=(_) end + # Set of 16 key-value pairs that can be attached to an object. This can be useful + # for storing additional information about the object in a structured format, and + # querying for objects via API or the dashboard. Keys are strings with a maximum + # length of 64 characters. Values are strings with a maximum length of 512 + # characters, booleans, or numbers. sig { returns(T.nilable(T::Hash[Symbol, T.any(String, Float, T::Boolean)])) } def attributes end @@ -76,6 +95,7 @@ module OpenAI def attributes=(_) end + # The strategy used to chunk the file. sig do returns( T.nilable( @@ -97,6 +117,7 @@ module OpenAI def chunking_strategy=(_) end + # A list of files attached to a vector store. sig do params( id: String, @@ -144,6 +165,7 @@ module OpenAI end class LastError < OpenAI::BaseModel + # One of `server_error` or `rate_limit_exceeded`. sig { returns(Symbol) } def code end @@ -152,6 +174,7 @@ module OpenAI def code=(_) end + # A human-readable description of the error. sig { returns(String) } def message end @@ -160,6 +183,8 @@ module OpenAI def message=(_) end + # The last error associated with this vector store file. Will be `null` if there + # are no errors. sig { params(code: Symbol, message: String).returns(T.attached_class) } def self.new(code:, message:) end @@ -168,6 +193,7 @@ module OpenAI def to_hash end + # One of `server_error` or `rate_limit_exceeded`. class Code < OpenAI::Enum abstract! @@ -183,6 +209,9 @@ module OpenAI end end + # The status of the vector store file, which can be either `in_progress`, + # `completed`, `cancelled`, or `failed`. The status `completed` indicates that the + # vector store file is ready for use. class Status < OpenAI::Enum abstract! @@ -202,6 +231,7 @@ module OpenAI abstract! class << self + # @api private sig { override.returns([[NilClass, String], [NilClass, Float], [NilClass, T::Boolean]]) } private def variants end diff --git a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi index 1fa22e5f..497044cd 100644 --- a/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi +++ b/rbi/lib/openai/models/vector_stores/vector_store_file_batch.rbi @@ -6,6 +6,7 @@ module OpenAI module VectorStores class VectorStoreFileBatch < OpenAI::BaseModel + # The identifier, which can be referenced in API endpoints. 
sig { returns(String) } def id end @@ -14,6 +15,8 @@ module OpenAI def id=(_) end + # The Unix timestamp (in seconds) for when the vector store files batch was + # created. sig { returns(Integer) } def created_at end @@ -33,6 +36,7 @@ module OpenAI def file_counts=(_) end + # The object type, which is always `vector_store.file_batch`. sig { returns(Symbol) } def object end @@ -41,6 +45,8 @@ module OpenAI def object=(_) end + # The status of the vector store files batch, which can be either `in_progress`, + # `completed`, `cancelled` or `failed`. sig { returns(Symbol) } def status end @@ -49,6 +55,10 @@ module OpenAI def status=(_) end + # The ID of the + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + # that the [File](https://platform.openai.com/docs/api-reference/files) is + # attached to. sig { returns(String) } def vector_store_id end @@ -57,6 +67,7 @@ module OpenAI def vector_store_id=(_) end + # A batch of files attached to a vector store. sig do params( id: String, @@ -88,6 +99,7 @@ module OpenAI end class FileCounts < OpenAI::BaseModel + # The number of files that were cancelled. sig { returns(Integer) } def cancelled end @@ -96,6 +108,7 @@ module OpenAI def cancelled=(_) end + # The number of files that have been processed. sig { returns(Integer) } def completed end @@ -104,6 +117,7 @@ module OpenAI def completed=(_) end + # The number of files that have failed to process. sig { returns(Integer) } def failed end @@ -112,6 +126,7 @@ module OpenAI def failed=(_) end + # The number of files that are currently being processed. sig { returns(Integer) } def in_progress end @@ -120,6 +135,7 @@ module OpenAI def in_progress=(_) end + # The total number of files. sig { returns(Integer) } def total end @@ -155,6 +171,8 @@ module OpenAI end end + # The status of the vector store files batch, which can be either `in_progress`, + # `completed`, `cancelled` or `failed`. class Status < OpenAI::Enum abstract! diff --git a/rbi/lib/openai/pooled_net_requester.rbi b/rbi/lib/openai/pooled_net_requester.rbi index 9d80cd5f..e940c4f4 100644 --- a/rbi/lib/openai/pooled_net_requester.rbi +++ b/rbi/lib/openai/pooled_net_requester.rbi @@ -7,14 +7,17 @@ module OpenAI end class << self + # @api private sig { params(url: URI::Generic).returns(Net::HTTP) } def connect(url) end + # @api private sig { params(conn: Net::HTTP, deadline: Float).void } def calibrate_socket_timeout(conn, deadline) end + # @api private sig do params(request: OpenAI::PooledNetRequester::RequestShape, blk: T.proc.params(arg0: String).void) .returns(Net::HTTPGenericRequest) @@ -23,10 +26,12 @@ end end + # @api private sig { params(url: URI::Generic, blk: T.proc.params(arg0: Net::HTTP).void).void } private def with_pool(url, &blk) end + # @api private sig do params(request: OpenAI::PooledNetRequester::RequestShape) .returns([Net::HTTPResponse, T::Enumerable[String]]) @@ -34,6 +39,7 @@ def execute(request) end + # @api private sig { params(size: Integer).returns(T.attached_class) } def self.new(size: Etc.nprocessors) end diff --git a/rbi/lib/openai/request_options.rbi b/rbi/lib/openai/request_options.rbi index f55d6e56..efd2de36 100644 --- a/rbi/lib/openai/request_options.rbi +++ b/rbi/lib/openai/request_options.rbi @@ -1,9 +1,11 @@ # typed: strong module OpenAI + # @api private module RequestParameters abstract! + # Options to specify HTTP behaviour for this request.
sig { returns(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything])) } def request_options end @@ -15,18 +17,28 @@ module OpenAI def request_options=(_) end + # @api private module Converter + # @api private sig { params(params: T.anything).returns([T.anything, T::Hash[Symbol, T.anything]]) } def dump_request(params) end end end + # Specify HTTP behaviour to use for a specific request. These options supplement + # or override those provided at the client level. + # + # When making a request, you can pass an actual {RequestOptions} instance, or + # simply pass a Hash with symbol keys matching the attributes on this class. class RequestOptions < OpenAI::BaseModel + # @api private sig { params(opts: T.any(T.self_type, T::Hash[Symbol, T.anything])).void } def self.validate!(opts) end + # Idempotency key to send with request and all associated retries. Will only be + # sent for write requests. sig { returns(T.nilable(String)) } def idempotency_key end @@ -35,6 +47,8 @@ module OpenAI def idempotency_key=(_) end + # Extra query params to send with the request. These are `.merge`’d into any + # `query` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) } def extra_query end @@ -46,6 +60,8 @@ module OpenAI def extra_query=(_) end + # Extra headers to send with the request. These are `.merge`’d into any + # `extra_headers` given at the client level. sig { returns(T.nilable(T::Hash[String, T.nilable(String)])) } def extra_headers end @@ -57,6 +73,8 @@ module OpenAI def extra_headers=(_) end + # Extra data to send with the request. These are deep merged into any data + # generated as part of the normal request. sig { returns(T.nilable(T::Hash[Symbol, T.anything])) } def extra_body end @@ -65,6 +83,7 @@ module OpenAI def extra_body=(_) end + # Maximum number of retries to attempt after a failed initial request. sig { returns(T.nilable(Integer)) } def max_retries end @@ -73,6 +92,7 @@ module OpenAI def max_retries=(_) end + # Request timeout in seconds. sig { returns(T.nilable(Float)) } def timeout end @@ -81,6 +101,7 @@ module OpenAI def timeout=(_) end + # Returns a new instance of RequestOptions. sig { params(values: T::Hash[Symbol, T.anything]).returns(T.attached_class) } def self.new(values = {}) end diff --git a/rbi/lib/openai/resources/audio/speech.rbi b/rbi/lib/openai/resources/audio/speech.rbi index 5ad154db..ac1162aa 100644 --- a/rbi/lib/openai/resources/audio/speech.rbi +++ b/rbi/lib/openai/resources/audio/speech.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Speech + # Generates audio from the input text. sig do params( input: String, diff --git a/rbi/lib/openai/resources/audio/transcriptions.rbi b/rbi/lib/openai/resources/audio/transcriptions.rbi index a88bec77..b0455588 100644 --- a/rbi/lib/openai/resources/audio/transcriptions.rbi +++ b/rbi/lib/openai/resources/audio/transcriptions.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Transcriptions + # Transcribes audio into the input language. sig do params( file: T.any(IO, StringIO), diff --git a/rbi/lib/openai/resources/audio/translations.rbi b/rbi/lib/openai/resources/audio/translations.rbi index 2b323900..02565a2d 100644 --- a/rbi/lib/openai/resources/audio/translations.rbi +++ b/rbi/lib/openai/resources/audio/translations.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Audio class Translations + # Translates audio into English.
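The per-request options documented above attach to any resource method through its `request_options:` keyword, as either a `RequestOptions` instance or a plain symbol-keyed Hash. A sketch using an arbitrary endpoint:

    models = client.models.list(
      request_options: {
        timeout: 30,     # seconds; overrides the client-level timeout
        max_retries: 2,  # retries after a failed initial request
        extra_headers: {"x-example-trace" => "1"},  # merged into client headers
        extra_query: {"debug" => "true"}            # merged into the query string
      }
    )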
sig do params( file: T.any(IO, StringIO), diff --git a/rbi/lib/openai/resources/batches.rbi b/rbi/lib/openai/resources/batches.rbi index ec305bd4..fa633d8c 100644 --- a/rbi/lib/openai/resources/batches.rbi +++ b/rbi/lib/openai/resources/batches.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Batches + # Creates and executes a batch from an uploaded file of requests sig do params( completion_window: Symbol, @@ -16,6 +17,7 @@ module OpenAI def create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {}) end + # Retrieves a batch. sig do params( batch_id: String, @@ -26,6 +28,7 @@ module OpenAI def retrieve(batch_id, request_options: {}) end + # List your organization's batches. sig do params( after: String, @@ -37,6 +40,9 @@ module OpenAI def list(after: nil, limit: nil, request_options: {}) end + # Cancels an in-progress batch. The batch will be in status `cancelling` for up to + # 10 minutes, before changing to `cancelled`, where it will have partial results + # (if any) available in the output file. sig do params( batch_id: String, diff --git a/rbi/lib/openai/resources/beta/assistants.rbi b/rbi/lib/openai/resources/beta/assistants.rbi index 72828f08..5a235b78 100644 --- a/rbi/lib/openai/resources/beta/assistants.rbi +++ b/rbi/lib/openai/resources/beta/assistants.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Beta class Assistants + # Create an assistant with a model and instructions. sig do params( model: T.any(String, Symbol), @@ -50,6 +51,7 @@ module OpenAI ) end + # Retrieves an assistant. sig do params( assistant_id: String, @@ -60,6 +62,7 @@ module OpenAI def retrieve(assistant_id, request_options: {}) end + # Modifies an assistant. sig do params( assistant_id: String, @@ -108,6 +111,7 @@ module OpenAI ) end + # Returns a list of assistants. sig do params( after: String, @@ -121,6 +125,7 @@ module OpenAI def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) end + # Delete an assistant. sig do params( assistant_id: String, diff --git a/rbi/lib/openai/resources/beta/threads.rbi b/rbi/lib/openai/resources/beta/threads.rbi index 02bc8060..b2f631a3 100644 --- a/rbi/lib/openai/resources/beta/threads.rbi +++ b/rbi/lib/openai/resources/beta/threads.rbi @@ -12,6 +12,7 @@ module OpenAI def messages end + # Create a thread. sig do params( messages: T::Array[OpenAI::Models::Beta::ThreadCreateParams::Message], @@ -24,6 +25,7 @@ module OpenAI def create(messages: nil, metadata: nil, tool_resources: nil, request_options: {}) end + # Retrieves a thread. sig do params( thread_id: String, @@ -34,6 +36,7 @@ module OpenAI def retrieve(thread_id, request_options: {}) end + # Modifies a thread. sig do params( thread_id: String, @@ -46,6 +49,7 @@ module OpenAI def update(thread_id, metadata: nil, tool_resources: nil, request_options: {}) end + # Delete a thread. sig do params( thread_id: String, @@ -56,6 +60,7 @@ module OpenAI def delete(thread_id, request_options: {}) end + # Create a thread and run it in one request. sig do params( assistant_id: String, @@ -112,6 +117,7 @@ module OpenAI ) end + # Create a thread and run it in one request. sig do params( assistant_id: String, diff --git a/rbi/lib/openai/resources/beta/threads/messages.rbi b/rbi/lib/openai/resources/beta/threads/messages.rbi index 28c889d1..2e0009e8 100644 --- a/rbi/lib/openai/resources/beta/threads/messages.rbi +++ b/rbi/lib/openai/resources/beta/threads/messages.rbi @@ -5,6 +5,7 @@ module OpenAI class Beta class Threads class Messages + # Create a message. 
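The batch lifecycle documented above runs off an uploaded `.jsonl` file of requests. A sketch (values are illustrative; `:"24h"` is currently the only completion window the API accepts):

    batch = client.batches.create(
      completion_window: :"24h",
      endpoint: :"/v1/chat/completions",
      input_file_id: "file-abc123"  # a File previously uploaded for batching
    )
    batch = client.batches.retrieve(batch.id)
    # Status comparison assumes enum values surface as symbols in this SDK.
    client.batches.cancel(batch.id) if batch.status == :in_progress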
sig do params( thread_id: String, @@ -28,6 +29,7 @@ module OpenAI def create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {}) end + # Retrieve a message. sig do params( message_id: String, @@ -39,6 +41,7 @@ module OpenAI def retrieve(message_id, thread_id:, request_options: {}) end + # Modifies a message. sig do params( message_id: String, @@ -51,6 +54,7 @@ module OpenAI def update(message_id, thread_id:, metadata: nil, request_options: {}) end + # Returns a list of messages for a given thread. sig do params( thread_id: String, @@ -74,6 +78,7 @@ module OpenAI ) end + # Deletes a message. sig do params( message_id: String, diff --git a/rbi/lib/openai/resources/beta/threads/runs.rbi b/rbi/lib/openai/resources/beta/threads/runs.rbi index 7f5df3f4..ecf30e5f 100644 --- a/rbi/lib/openai/resources/beta/threads/runs.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs.rbi @@ -9,6 +9,7 @@ module OpenAI def steps end + # Create a run. sig do params( thread_id: String, @@ -71,6 +72,7 @@ module OpenAI ) end + # Create a run. sig do params( thread_id: String, @@ -162,6 +164,7 @@ module OpenAI ) end + # Retrieves a run. sig do params( run_id: String, @@ -173,6 +176,7 @@ module OpenAI def retrieve(run_id, thread_id:, request_options: {}) end + # Modifies a run. sig do params( run_id: String, @@ -185,6 +189,7 @@ module OpenAI def update(run_id, thread_id:, metadata: nil, request_options: {}) end + # Returns a list of runs belonging to a thread. sig do params( thread_id: String, @@ -199,6 +204,7 @@ module OpenAI def list(thread_id, after: nil, before: nil, limit: nil, order: nil, request_options: {}) end + # Cancels a run that is `in_progress`. sig do params( run_id: String, @@ -210,6 +216,10 @@ module OpenAI def cancel(run_id, thread_id:, request_options: {}) end + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. sig do params( run_id: String, @@ -222,6 +232,10 @@ module OpenAI def submit_tool_outputs(run_id, thread_id:, tool_outputs:, request_options: {}) end + # When a run has the `status: "requires_action"` and `required_action.type` is + # `submit_tool_outputs`, this endpoint can be used to submit the outputs from the + # tool calls once they're all completed. All outputs must be submitted in a single + # request. sig do params( run_id: String, diff --git a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi index 1714ab4d..0987c273 100644 --- a/rbi/lib/openai/resources/beta/threads/runs/steps.rbi +++ b/rbi/lib/openai/resources/beta/threads/runs/steps.rbi @@ -6,6 +6,7 @@ module OpenAI class Threads class Runs class Steps + # Retrieves a run step. sig do params( step_id: String, @@ -19,6 +20,7 @@ module OpenAI def retrieve(step_id, thread_id:, run_id:, include: nil, request_options: {}) end + # Returns a list of run steps belonging to a run. 
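Submitting tool outputs, per the doc comments above, is a single call once every tool call under `required_action` has a result. A sketch with placeholder IDs (hash elements are assumed to convert to the tool-output model, as hashes do elsewhere in this SDK):

    run = client.beta.threads.runs.submit_tool_outputs(
      "run_abc123",
      thread_id: "thread_abc123",
      tool_outputs: [
        {tool_call_id: "call_abc123", output: "70 degrees and sunny"}
      ]
    )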
sig do params( run_id: String, diff --git a/rbi/lib/openai/resources/chat/completions.rbi b/rbi/lib/openai/resources/chat/completions.rbi index 2ea5223f..b4a45a99 100644 --- a/rbi/lib/openai/resources/chat/completions.rbi +++ b/rbi/lib/openai/resources/chat/completions.rbi @@ -8,6 +8,23 @@ module OpenAI def messages end + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). sig do params( messages: T::Array[ @@ -92,6 +109,23 @@ module OpenAI ) end + # **Starting a new project?** We recommend trying + # [Responses](https://platform.openai.com/docs/api-reference/responses) to take + # advantage of the latest OpenAI platform features. Compare + # [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + # + # --- + # + # Creates a model response for the given chat conversation. Learn more in the + # [text generation](https://platform.openai.com/docs/guides/text-generation), + # [vision](https://platform.openai.com/docs/guides/vision), and + # [audio](https://platform.openai.com/docs/guides/audio) guides. + # + # Parameter support can differ depending on the model used to generate the + # response, particularly for newer reasoning models. Parameters that are only + # supported for reasoning models are noted below. For the current state of + # unsupported parameters in reasoning models, + # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). sig do params( messages: T::Array[ @@ -176,6 +210,8 @@ module OpenAI ) end + # Get a stored chat completion. Only Chat Completions that have been created with + # the `store` parameter set to `true` will be returned. sig do params( completion_id: String, @@ -186,6 +222,9 @@ module OpenAI def retrieve(completion_id, request_options: {}) end + # Modify a stored chat completion. Only Chat Completions that have been created + # with the `store` parameter set to `true` can be modified. Currently, the only + # supported modification is to update the `metadata` field. sig do params( completion_id: String, @@ -197,6 +236,8 @@ module OpenAI def update(completion_id, metadata:, request_options: {}) end + # List stored Chat Completions. Only Chat Completions that have been stored with + # the `store` parameter set to `true` will be returned. sig do params( after: String, @@ -211,6 +252,8 @@ module OpenAI def list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {}) end + # Delete a stored chat completion. 
Only Chat Completions that have been created + # with the `store` parameter set to `true` can be deleted. sig do params( completion_id: String, diff --git a/rbi/lib/openai/resources/chat/completions/messages.rbi b/rbi/lib/openai/resources/chat/completions/messages.rbi index 4f39f196..26b43645 100644 --- a/rbi/lib/openai/resources/chat/completions/messages.rbi +++ b/rbi/lib/openai/resources/chat/completions/messages.rbi @@ -5,6 +5,8 @@ module OpenAI class Chat class Completions class Messages + # Get the messages in a stored chat completion. Only Chat Completions that have + # been created with the `store` parameter set to `true` will be returned. sig do params( completion_id: String, diff --git a/rbi/lib/openai/resources/completions.rbi b/rbi/lib/openai/resources/completions.rbi index 5f018bc1..2291fad9 100644 --- a/rbi/lib/openai/resources/completions.rbi +++ b/rbi/lib/openai/resources/completions.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Completions + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, Symbol), @@ -55,6 +56,7 @@ module OpenAI ) end + # Creates a completion for the provided prompt and parameters. sig do params( model: T.any(String, Symbol), diff --git a/rbi/lib/openai/resources/embeddings.rbi b/rbi/lib/openai/resources/embeddings.rbi index 86d61392..a251c27d 100644 --- a/rbi/lib/openai/resources/embeddings.rbi +++ b/rbi/lib/openai/resources/embeddings.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Embeddings + # Creates an embedding vector representing the input text. sig do params( input: T.any(String, T::Array[String], T::Array[Integer], T::Array[T::Array[Integer]]), diff --git a/rbi/lib/openai/resources/files.rbi b/rbi/lib/openai/resources/files.rbi index 508e9d63..39b009e9 100644 --- a/rbi/lib/openai/resources/files.rbi +++ b/rbi/lib/openai/resources/files.rbi @@ -3,6 +3,27 @@ module OpenAI module Resources class Files + # Upload a file that can be used across various endpoints. Individual files can be + # up to 512 MB, and the size of all files uploaded by one organization can be up + # to 100 GB. + # + # The Assistants API supports files up to 2 million tokens and of specific file + # types. See the + # [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for + # details. + # + # The Fine-tuning API only supports `.jsonl` files. The input also has certain + # required formats for fine-tuning + # [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or + # [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) + # models. + # + # The Batch API only supports `.jsonl` files up to 200 MB in size. The input also + # has a specific required + # [format](https://platform.openai.com/docs/api-reference/batch/request-input). + # + # Please [contact us](https://help.openai.com/) if you need to increase these + # storage limits. sig do params( file: T.any(IO, StringIO), @@ -14,6 +35,7 @@ module OpenAI def create(file:, purpose:, request_options: {}) end + # Returns information about a specific file. sig do params( file_id: String, @@ -24,6 +46,7 @@ module OpenAI def retrieve(file_id, request_options: {}) end + # Returns a list of files. sig do params( after: String, @@ -37,6 +60,7 @@ module OpenAI def list(after: nil, limit: nil, order: nil, purpose: nil, request_options: {}) end + # Delete a file. 
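Everything in the stored-completion docs above hinges on storing the completion at creation time. A hedged sketch (the `model:` and `store:` keywords are elided from the visible hunks and assumed from the API reference; message hashes are assumed to convert to the message-param models):

    completion = client.chat.completions.create(
      model: :"gpt-4o",  # placeholder model
      store: true,
      messages: [{role: :user, content: "Say hello"}]
    )
    # Stored completions can later be retrieved, listed, updated, or deleted.
    client.chat.completions.retrieve(completion.id)
    client.chat.completions.update(completion.id, metadata: {topic: "greeting"})
    client.chat.completions.delete(completion.id)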
sig do params( file_id: String, @@ -47,6 +71,7 @@ module OpenAI def delete(file_id, request_options: {}) end + # Returns the contents of the specified file. sig do params( file_id: String, diff --git a/rbi/lib/openai/resources/fine_tuning/jobs.rbi b/rbi/lib/openai/resources/fine_tuning/jobs.rbi index 8ace3da3..5c720561 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs.rbi @@ -8,6 +8,13 @@ module OpenAI def checkpoints end + # Creates a fine-tuning job which begins the process of creating a new model from + # a given dataset. + # + # Response includes details of the enqueued job including job status and the name + # of the fine-tuned models once complete. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( model: T.any(String, Symbol), @@ -37,6 +44,9 @@ module OpenAI ) end + # Get info about a fine-tuning job. + # + # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) sig do params( fine_tuning_job_id: String, @@ -47,6 +57,7 @@ module OpenAI def retrieve(fine_tuning_job_id, request_options: {}) end + # List your organization's fine-tuning jobs sig do params( after: String, @@ -59,6 +70,7 @@ module OpenAI def list(after: nil, limit: nil, metadata: nil, request_options: {}) end + # Immediately cancel a fine-tune job. sig do params( fine_tuning_job_id: String, @@ -69,6 +81,7 @@ module OpenAI def cancel(fine_tuning_job_id, request_options: {}) end + # Get status updates for a fine-tuning job. sig do params( fine_tuning_job_id: String, diff --git a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi index 297b57f1..d0a7bb83 100644 --- a/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi +++ b/rbi/lib/openai/resources/fine_tuning/jobs/checkpoints.rbi @@ -5,6 +5,7 @@ module OpenAI class FineTuning class Jobs class Checkpoints + # List checkpoints for a fine-tuning job. sig do params( fine_tuning_job_id: String, diff --git a/rbi/lib/openai/resources/images.rbi b/rbi/lib/openai/resources/images.rbi index 357eccc3..3655f172 100644 --- a/rbi/lib/openai/resources/images.rbi +++ b/rbi/lib/openai/resources/images.rbi @@ -3,6 +3,7 @@ module OpenAI module Resources class Images + # Creates a variation of a given image. sig do params( image: T.any(IO, StringIO), @@ -26,6 +27,7 @@ module OpenAI ) end + # Creates an edited or extended image given an original image and a prompt. sig do params( image: T.any(IO, StringIO), @@ -53,6 +55,7 @@ module OpenAI ) end + # Creates an image given a prompt. sig do params( prompt: String, diff --git a/rbi/lib/openai/resources/models.rbi b/rbi/lib/openai/resources/models.rbi index 2e4b916b..04a4cf0d 100644 --- a/rbi/lib/openai/resources/models.rbi +++ b/rbi/lib/openai/resources/models.rbi @@ -3,6 +3,8 @@ module OpenAI module Resources class Models + # Retrieves a model instance, providing basic information about the model such as + # the owner and permissioning. sig do params( model: String, @@ -13,6 +15,8 @@ module OpenAI def retrieve(model, request_options: {}) end + # Lists the currently available models, and provides basic information about each + # one such as the owner and availability. sig do params(request_options: T.nilable(T.any(OpenAI::RequestOptions, T::Hash[Symbol, T.anything]))) .returns(OpenAI::Page[OpenAI::Models::Model]) @@ -20,6 +24,8 @@ module OpenAI def list(request_options: {}) end + # Delete a fine-tuned model. 
You must have the Owner role in your organization to + # delete a model. sig do params( model: String, diff --git a/rbi/lib/openai/resources/moderations.rbi b/rbi/lib/openai/resources/moderations.rbi index 8b836716..3b856753 100644 --- a/rbi/lib/openai/resources/moderations.rbi +++ b/rbi/lib/openai/resources/moderations.rbi @@ -3,6 +3,8 @@ module OpenAI module Resources class Moderations + # Classifies if text and/or image inputs are potentially harmful. Learn more in + # the [moderation guide](https://platform.openai.com/docs/guides/moderation). sig do params( input: T.any( diff --git a/rbi/lib/openai/resources/responses.rbi b/rbi/lib/openai/resources/responses.rbi index d225d52c..ded8cf36 100644 --- a/rbi/lib/openai/resources/responses.rbi +++ b/rbi/lib/openai/resources/responses.rbi @@ -7,6 +7,17 @@ module OpenAI def input_items end + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any(String, OpenAI::Models::Responses::ResponseInput), @@ -59,6 +70,17 @@ module OpenAI ) end + # Creates a model response. Provide + # [text](https://platform.openai.com/docs/guides/text) or + # [image](https://platform.openai.com/docs/guides/images) inputs to generate + # [text](https://platform.openai.com/docs/guides/text) or + # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + # the model call your own + # [custom code](https://platform.openai.com/docs/guides/function-calling) or use + # built-in [tools](https://platform.openai.com/docs/guides/tools) like + # [web search](https://platform.openai.com/docs/guides/tools-web-search) or + # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + # your own data as input for the model's response. sig do params( input: T.any(String, OpenAI::Models::Responses::ResponseInput), @@ -148,6 +170,7 @@ module OpenAI ) end + # Retrieves a model response with the given ID. sig do params( response_id: String, @@ -159,6 +182,7 @@ module OpenAI def retrieve(response_id, include: nil, request_options: {}) end + # Deletes a model response with the given ID. sig do params( response_id: String, diff --git a/rbi/lib/openai/resources/responses/input_items.rbi b/rbi/lib/openai/resources/responses/input_items.rbi index 5c8d359e..afd82a10 100644 --- a/rbi/lib/openai/resources/responses/input_items.rbi +++ b/rbi/lib/openai/resources/responses/input_items.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class Responses class InputItems + # Returns a list of input items for a given response. 
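Per the Responses doc comment above, the simplest call pairs a string `input` with a model. A sketch (only `input:` appears in the visible hunk, so `model:` here is an assumption drawn from the API reference):

    response = client.responses.create(
      model: :"gpt-4o",  # placeholder model
      input: "Write a haiku about type signatures."
    )
    puts response.id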
sig do params( response_id: String, diff --git a/rbi/lib/openai/resources/uploads.rbi b/rbi/lib/openai/resources/uploads.rbi index 094b93ea..561a624f 100644 --- a/rbi/lib/openai/resources/uploads.rbi +++ b/rbi/lib/openai/resources/uploads.rbi @@ -7,6 +7,25 @@ module OpenAI def parts end + # Creates an intermediate + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object + # that you can add + # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to. + # Currently, an Upload can accept at most 8 GB in total and expires an hour + # after you create it. + # + # Once you complete the Upload, we will create a + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # contains all the parts you uploaded. This File is usable in the rest of our + # platform as a regular File object. + # + # For certain `purpose` values, the correct `mime_type` must be specified. Please + # refer to documentation for the + # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). + # + # For guidance on the proper filename extensions for each purpose, please follow + # the documentation on + # [creating a File](https://platform.openai.com/docs/api-reference/files/create). sig do params( bytes: Integer, @@ -20,6 +39,7 @@ module OpenAI def create(bytes:, filename:, mime_type:, purpose:, request_options: {}) end + # Cancels the Upload. No Parts may be added after an Upload is cancelled. sig do params( upload_id: String, @@ -30,6 +50,19 @@ module OpenAI def cancel(upload_id, request_options: {}) end + # Completes the + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object). + # + # Within the returned Upload object, there is a nested + # [File](https://platform.openai.com/docs/api-reference/files/object) object that + # is ready to use in the rest of the platform. + # + # You can specify the order of the Parts by passing in an ordered list of the Part + # IDs. + # + # The number of bytes uploaded upon completion must match the number of bytes + # initially specified when creating the Upload object. No Parts may be added after + # an Upload is completed. sig do params( upload_id: String, diff --git a/rbi/lib/openai/resources/uploads/parts.rbi b/rbi/lib/openai/resources/uploads/parts.rbi index e32900d9..347903f3 100644 --- a/rbi/lib/openai/resources/uploads/parts.rbi +++ b/rbi/lib/openai/resources/uploads/parts.rbi @@ -4,6 +4,17 @@ module OpenAI module Resources class Uploads class Parts + # Adds a + # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an + # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object. + # A Part represents a chunk of bytes from the file you are trying to upload. + # + # Each Part can be at most 64 MB, and you can add Parts until you hit the Upload + # maximum of 8 GB. + # + # It is possible to add multiple Parts in parallel. You can decide the intended + # order of the Parts when you + # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete). sig do params( upload_id: String, diff --git a/rbi/lib/openai/resources/vector_stores.rbi b/rbi/lib/openai/resources/vector_stores.rbi index fdbb2632..4420a17f 100644 --- a/rbi/lib/openai/resources/vector_stores.rbi +++ b/rbi/lib/openai/resources/vector_stores.rbi @@ -11,6 +11,7 @@ module OpenAI def file_batches end + # Create a vector store.
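The three Upload calls documented above chain into one flow: create the Upload, add Parts of at most 64 MB each, then complete it. A sketch with illustrative values (`md5:` is optional and omitted here):

    require "stringio"

    upload = client.uploads.create(
      bytes: File.size("training.jsonl"),
      filename: "training.jsonl",
      mime_type: "text/jsonl",
      purpose: :"fine-tune"
    )
    part_ids = []
    File.open("training.jsonl", "rb") do |io|
      while (chunk = io.read(64 * 1024 * 1024))  # 64 MB per Part, per the docs above
        part = client.uploads.parts.create(upload.id, data: StringIO.new(chunk))
        part_ids << part.id
      end
    end
    client.uploads.complete(upload.id, part_ids: part_ids)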
sig do params( chunking_strategy: T.any( @@ -35,6 +36,7 @@ module OpenAI ) end + # Retrieves a vector store. sig do params( vector_store_id: String, @@ -45,6 +47,7 @@ module OpenAI def retrieve(vector_store_id, request_options: {}) end + # Modifies a vector store. sig do params( vector_store_id: String, @@ -58,6 +61,7 @@ module OpenAI def update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {}) end + # Returns a list of vector stores. sig do params( after: String, @@ -71,6 +75,7 @@ module OpenAI def list(after: nil, before: nil, limit: nil, order: nil, request_options: {}) end + # Delete a vector store. sig do params( vector_store_id: String, @@ -81,6 +86,8 @@ module OpenAI def delete(vector_store_id, request_options: {}) end + # Search a vector store for relevant chunks based on a query and a file attribute + # filter. sig do params( vector_store_id: String, diff --git a/rbi/lib/openai/resources/vector_stores/file_batches.rbi b/rbi/lib/openai/resources/vector_stores/file_batches.rbi index bcec19cb..dbed991d 100644 --- a/rbi/lib/openai/resources/vector_stores/file_batches.rbi +++ b/rbi/lib/openai/resources/vector_stores/file_batches.rbi @@ -4,6 +4,7 @@ module OpenAI module Resources class VectorStores class FileBatches + # Create a vector store file batch. sig do params( vector_store_id: String, @@ -20,6 +21,7 @@ module OpenAI def create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {}) end + # Retrieves a vector store file batch. sig do params( batch_id: String, @@ -31,6 +33,8 @@ module OpenAI def retrieve(batch_id, vector_store_id:, request_options: {}) end + # Cancel a vector store file batch. This attempts to cancel the processing of + # files in this batch as soon as possible. sig do params( batch_id: String, @@ -42,6 +46,7 @@ module OpenAI def cancel(batch_id, vector_store_id:, request_options: {}) end + # Returns a list of vector store files in a batch. sig do params( batch_id: String, diff --git a/rbi/lib/openai/resources/vector_stores/files.rbi b/rbi/lib/openai/resources/vector_stores/files.rbi index 85c53733..daf2b28a 100644 --- a/rbi/lib/openai/resources/vector_stores/files.rbi +++ b/rbi/lib/openai/resources/vector_stores/files.rbi @@ -4,6 +4,9 @@ module OpenAI module Resources class VectorStores class Files + # Create a vector store file by attaching a + # [File](https://platform.openai.com/docs/api-reference/files) to a + # [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object). sig do params( vector_store_id: String, @@ -20,6 +23,7 @@ module OpenAI def create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {}) end + # Retrieves a vector store file. sig do params( file_id: String, @@ -31,6 +35,7 @@ module OpenAI def retrieve(file_id, vector_store_id:, request_options: {}) end + # Update attributes on a vector store file. sig do params( file_id: String, @@ -43,6 +48,7 @@ module OpenAI def update(file_id, vector_store_id:, attributes:, request_options: {}) end + # Returns a list of vector store files. sig do params( vector_store_id: String, @@ -66,6 +72,10 @@ module OpenAI ) end + # Delete a vector store file. This will remove the file from the vector store, but + # the file itself will not be deleted. To delete the file, use the + # [delete file](https://platform.openai.com/docs/api-reference/files/delete) + # endpoint.
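The vector store file signatures above follow a consistent shape: the acted-on ID is positional and the owning resource ID is a keyword. A brief sketch with placeholder IDs (`vs_abc123` and `file-abc123` are hypothetical):

```ruby
# Attach an existing File to a vector store.
vs_file = client.vector_stores.files.create("vs_abc123", file_id: "file-abc123")

# Attributes can be updated in place on the attached file.
client.vector_stores.files.update(
  vs_file.id,
  vector_store_id: "vs_abc123",
  attributes: {"category" => "reference"}
)

# Detach the file; the underlying File object itself is not deleted.
client.vector_stores.files.delete(vs_file.id, vector_store_id: "vs_abc123")
```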
sig do params( file_id: String, @@ -77,6 +87,7 @@ module OpenAI def delete(file_id, vector_store_id:, request_options: {}) end + # Retrieve the parsed contents of a vector store file. sig do params( file_id: String, diff --git a/rbi/lib/openai/stream.rbi b/rbi/lib/openai/stream.rbi index 01f98197..75e469ce 100644 --- a/rbi/lib/openai/stream.rbi +++ b/rbi/lib/openai/stream.rbi @@ -7,6 +7,7 @@ module OpenAI Message = type_member(:in) { {fixed: OpenAI::Util::ServerSentEvent} } Elem = type_member(:out) + # @api private sig { override.returns(T::Enumerable[Elem]) } private def iterator end diff --git a/rbi/lib/openai/util.rbi b/rbi/lib/openai/util.rbi index 0888f1da..62ce155d 100644 --- a/rbi/lib/openai/util.rbi +++ b/rbi/lib/openai/util.rbi @@ -1,42 +1,52 @@ # typed: strong module OpenAI + # @api private module Util + # @api private sig { returns(Float) } def self.monotonic_secs end class << self + # @api private sig { returns(String) } def arch end + # @api private sig { returns(String) } def os end end class << self + # @api private sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } def primitive?(input) end + # @api private sig { params(input: T.anything).returns(T.any(T::Boolean, T.anything)) } def coerce_boolean(input) end + # @api private sig { params(input: T.anything).returns(T.nilable(T::Boolean)) } def coerce_boolean!(input) end + # @api private sig { params(input: T.anything).returns(T.any(Integer, T.anything)) } def coerce_integer(input) end + # @api private sig { params(input: T.anything).returns(T.any(Float, T.anything)) } def coerce_float(input) end + # @api private sig { params(input: T.anything).returns(T.any(T::Hash[T.anything, T.anything], T.anything)) } def coerce_hash(input) end @@ -45,10 +55,15 @@ module OpenAI OMIT = T.let(T.anything, T.anything) class << self + # @api private sig { params(lhs: T.anything, rhs: T.anything, concat: T::Boolean).returns(T.anything) } private def deep_merge_lr(lhs, rhs, concat: false) end + # @api private + # + # Recursively merge one hash with another. If the values at a given key are not + # both hashes, just take the new value. 
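`deep_merge` is private API, but the comment above fully specifies its core rule. An illustrative re-implementation of just that rule, ignoring the `sentinel:` and `concat:` options visible in the sig that follows:

```ruby
# Recursively merge: descend when both sides are hashes, otherwise take rhs.
def deep_merge(lhs, rhs)
  if lhs.is_a?(Hash) && rhs.is_a?(Hash)
    lhs.merge(rhs) { |_key, l, r| deep_merge(l, r) }
  else
    rhs
  end
end

deep_merge({a: {b: 1, c: 2}}, {a: {b: 9}}) # => {:a=>{:b=>9, :c=>2}}
```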
sig do params(values: T::Array[T.anything], sentinel: T.nilable(T.anything), concat: T::Boolean) .returns(T.anything) @@ -56,6 +71,7 @@ module OpenAI def deep_merge(*values, sentinel: nil, concat: false) end + # @api private sig do params( data: T.any(T::Hash[Symbol, T.anything], T::Array[T.anything], T.anything), @@ -70,20 +86,24 @@ module OpenAI end class << self + # @api private sig { params(uri: URI::Generic).returns(String) } def uri_origin(uri) end + # @api private sig { params(path: T.any(String, T::Array[String])).returns(String) } def interpolate_path(path) end end class << self + # @api private sig { params(query: T.nilable(String)).returns(T::Hash[String, T::Array[String]]) } def decode_query(query) end + # @api private sig do params(query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))])) .returns(T.nilable(String)) @@ -103,14 +123,17 @@ module OpenAI end class << self + # @api private sig { params(url: T.any(URI::Generic, String)).returns(OpenAI::Util::ParsedUriShape) } def parse_uri(url) end + # @api private sig { params(parsed: OpenAI::Util::ParsedUriShape).returns(URI::Generic) } def unparse_uri(parsed) end + # @api private sig do params(lhs: OpenAI::Util::ParsedUriShape, rhs: OpenAI::Util::ParsedUriShape).returns(URI::Generic) end @@ -119,6 +142,7 @@ module OpenAI end class << self + # @api private sig do params( headers: T::Hash[String, @@ -130,15 +154,19 @@ module OpenAI end end + # An adapter that satisfies the IO interface required by `::IO.copy_stream` class ReadIOAdapter + # @api private sig { params(max_len: T.nilable(Integer)).returns(String) } private def read_enum(max_len) end + # @api private sig { params(max_len: T.nilable(Integer), out_string: T.nilable(String)).returns(T.nilable(String)) } def read(max_len = nil, out_string = nil) end + # @api private sig do params( stream: T.any(String, IO, StringIO, T::Enumerable[String]), @@ -157,20 +185,24 @@ module OpenAI end class << self + # @api private sig do params(y: Enumerator::Yielder, boundary: String, key: T.any(Symbol, String), val: T.anything).void end private def encode_multipart_formdata(y, boundary:, key:, val:) end + # @api private sig { params(body: T.anything).returns([String, T::Enumerable[String]]) } private def encode_multipart_streaming(body) end + # @api private sig { params(headers: T::Hash[String, String], body: T.anything).returns(T.anything) } def encode_content(headers, body) end + # @api private sig do params( headers: T.any(T::Hash[String, String], Net::HTTPHeader), @@ -184,6 +216,9 @@ module OpenAI end class << self + # @api private + # + # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html sig do params(enum: T::Enumerable[T.anything], external: T::Boolean, close: T.proc.void) .returns(T::Enumerable[T.anything]) @@ -191,10 +226,12 @@ module OpenAI def fused_enum(enum, external: false, &close) end + # @api private sig { params(enum: T.nilable(T::Enumerable[T.anything])).void } def close_fused!(enum) end + # @api private sig do params( enum: T.nilable(T::Enumerable[T.anything]), @@ -210,10 +247,14 @@ module OpenAI end class << self + # @api private sig { params(enum: T::Enumerable[String]).returns(T::Enumerable[String]) } def decode_lines(enum) end + # @api private + # + # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream sig { params(lines: T::Enumerable[String]).returns(OpenAI::Util::ServerSentEvent) } def decode_sse(lines) end diff --git a/sig/openai/base_client.rbs b/sig/openai/base_client.rbs index 4ca67417..d685733f 
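The `decode_sse` comment above links to the WHATWG event-stream algorithm. A minimal sketch of that parsing loop, illustrative only (the SDK's private implementation yields a richer `ServerSentEvent` shape):

```ruby
# Parse decoded lines into events: ":"-prefixed lines are comments, a blank
# line dispatches the accumulated event, and repeated `data:` fields are
# joined with newlines, per the WHATWG algorithm.
def each_sse_event(lines)
  return enum_for(:each_sse_event, lines) unless block_given?
  event = {}
  lines.each do |raw|
    line = raw.chomp
    if line.empty?
      yield event unless event.empty?
      event = {}
    elsif !line.start_with?(":")
      field, _, value = line.partition(":")
      value = value.delete_prefix(" ")
      if field == "data"
        event["data"] = [event["data"], value].compact.join("\n")
      else
        event[field] = value
      end
    end
  end
end

each_sse_event(["event: message\n", "data: hello\n", "\n"]) { |e| p e }
# prints {"event"=>"message", "data"=>"hello"}
```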
100644 --- a/sig/openai/base_client.rbs +++ b/sig/openai/base_client.rbs @@ -43,7 +43,7 @@ module OpenAI response_headers: ::Hash[String, String] ) -> OpenAI::BaseClient::request_input - # @private + # @api private attr_accessor requester: top def initialize: ( diff --git a/test/openai/client_test.rb b/test/openai/client_test.rb index ced591bb..4147e2ac 100644 --- a/test/openai/client_test.rb +++ b/test/openai/client_test.rb @@ -24,7 +24,6 @@ class MockResponse # @param code [Integer] # @param headers [Hash{String=>String}] - # def initialize(code, headers) @code = code @headers = {"content-type" => "application/json", **headers} @@ -33,7 +32,6 @@ def initialize(code, headers) # @param header [String] # # @return [String, nil] - # def [](header) @headers[header] end @@ -41,7 +39,6 @@ def [](header) # @param header [String] # # @return [Boolean] - # def key?(header) @headers.key?(header) end @@ -63,7 +60,6 @@ class MockRequester # @param response_code [Integer] # @param response_headers [Hash{String=>String}] # @param response_data [Object] - # def initialize(response_code, response_headers, response_data) @response_code = response_code @response_headers = response_headers @@ -72,7 +68,6 @@ def initialize(response_code, response_headers, response_data) end # @param req [Hash{Symbol=>Object}] - # def execute(req) # Deep copy the request because it is mutated on each retry. attempts.push(Marshal.load(Marshal.dump(req)))
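One detail in the test diff above deserves a note: `MockRequester#execute` snapshots each request with `Marshal.load(Marshal.dump(req))` rather than `dup`, precisely because the request is mutated on each retry. A small demonstration of why the shallow copy would not suffice:

```ruby
req = {headers: {"x-attempt" => "1"}}
shallow = req.dup                       # copies only the outer hash
deep = Marshal.load(Marshal.dump(req))  # copies the entire object tree

req[:headers]["x-attempt"] = "2"        # a retry mutates the nested hash
shallow[:headers]["x-attempt"] # => "2" (nested hash is shared)
deep[:headers]["x-attempt"]    # => "1" (snapshot is independent)
```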