From 9a993f2261b6524aa30b955e006c7ea89f086968 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 5 Dec 2025 11:04:29 +0000 Subject: [PATCH 1/5] chore(internal): update docstring --- src/openai/resources/realtime/realtime.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/openai/resources/realtime/realtime.py b/src/openai/resources/realtime/realtime.py index 33caba1871..44f14cd3aa 100644 --- a/src/openai/resources/realtime/realtime.py +++ b/src/openai/resources/realtime/realtime.py @@ -232,7 +232,7 @@ def calls(self) -> AsyncCallsWithStreamingResponse: class AsyncRealtimeConnection: - """Represents a live websocket connection to the Realtime API""" + """Represents a live WebSocket connection to the Realtime API""" session: AsyncRealtimeSessionResource response: AsyncRealtimeResponseResource @@ -421,7 +421,7 @@ async def __aexit__( class RealtimeConnection: - """Represents a live websocket connection to the Realtime API""" + """Represents a live WebSocket connection to the Realtime API""" session: RealtimeSessionResource response: RealtimeResponseResource From 8f0d23066c1edc38a6e9858b054dceaf92ae001b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 18:37:33 +0000 Subject: [PATCH 2/5] fix(types): allow pyright to infer TypedDict types within SequenceNotStr --- src/openai/_types.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/openai/_types.py b/src/openai/_types.py index 2387d7e01c..d7e2eaac5f 100644 --- a/src/openai/_types.py +++ b/src/openai/_types.py @@ -247,6 +247,9 @@ class HttpxSendArgs(TypedDict, total=False): if TYPE_CHECKING: # This works because str.__contains__ does not accept object (either in typeshed or at runtime) # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + # + # Note: index() and count() methods are intentionally omitted to allow pyright to properly + # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr. class SequenceNotStr(Protocol[_T_co]): @overload def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... @@ -255,8 +258,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... def __contains__(self, value: object, /) -> bool: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[_T_co]: ... - def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ... - def count(self, value: Any, /) -> int: ... def __reversed__(self) -> Iterator[_T_co]: ... 
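A minimal caller-side sketch of the inference the commit message above describes; `MessageParam` and `send` are hypothetical names used only for illustration, and the only real name is `SequenceNotStr` imported from `openai._types` as defined in the diff:

    from typing import TypedDict

    from openai._types import SequenceNotStr


    class MessageParam(TypedDict):
        # hypothetical TypedDict standing in for any request param type
        role: str
        content: str


    def send(messages: SequenceNotStr[MessageParam]) -> None:
        # hypothetical sink; only the parameter annotation matters here
        ...


    # Per the note above, with index()/count() omitted from the protocol,
    # pyright can infer MessageParam for these dict literals, so a missing
    # or misspelled key in the literal would be reported at type-check time.
    send([{"role": "user", "content": "hello"}])
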
else: # just point this to a normal `Sequence` at runtime to avoid having to special case From f20a9a18a421ba69622c77ab539509d218e774eb Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 19:28:06 +0000 Subject: [PATCH 3/5] chore: add missing docstrings --- src/openai/types/audio/transcription.py | 10 ++ .../types/audio/transcription_diarized.py | 10 ++ .../audio/transcription_diarized_segment.py | 2 + .../audio/transcription_text_delta_event.py | 5 + .../audio/transcription_text_done_event.py | 9 ++ .../audio/transcription_text_segment_event.py | 4 + .../types/audio/transcription_verbose.py | 6 ++ .../auto_file_chunking_strategy_param.py | 5 + src/openai/types/batch_create_params.py | 4 + src/openai/types/batch_request_counts.py | 2 + src/openai/types/batch_usage.py | 10 ++ src/openai/types/beta/assistant.py | 7 ++ .../types/beta/assistant_create_params.py | 10 ++ .../types/beta/assistant_stream_event.py | 96 +++++++++++++++++++ .../types/beta/assistant_tool_choice.py | 5 + .../types/beta/assistant_tool_choice_param.py | 5 + .../types/beta/assistant_update_params.py | 5 + src/openai/types/beta/chatkit/chat_session.py | 2 + .../chat_session_automatic_thread_titling.py | 2 + .../chat_session_chatkit_configuration.py | 2 + ...hat_session_chatkit_configuration_param.py | 17 ++++ .../chat_session_expires_after_param.py | 2 + .../beta/chatkit/chat_session_file_upload.py | 2 + .../beta/chatkit/chat_session_history.py | 2 + .../beta/chatkit/chat_session_rate_limits.py | 2 + .../chatkit/chat_session_rate_limits_param.py | 2 + .../chatkit/chat_session_workflow_param.py | 7 ++ .../types/beta/chatkit/chatkit_attachment.py | 2 + .../chatkit/chatkit_response_output_text.py | 10 ++ .../types/beta/chatkit/chatkit_thread.py | 8 ++ .../chatkit_thread_assistant_message_item.py | 2 + .../beta/chatkit/chatkit_thread_item_list.py | 10 ++ .../chatkit_thread_user_message_item.py | 10 ++ .../types/beta/chatkit/chatkit_widget_item.py | 2 + .../beta/chatkit/thread_delete_response.py | 2 + src/openai/types/beta/chatkit_workflow.py | 4 + src/openai/types/beta/file_search_tool.py | 9 ++ .../types/beta/file_search_tool_param.py | 9 ++ src/openai/types/beta/thread.py | 8 ++ .../beta/thread_create_and_run_params.py | 25 +++++ src/openai/types/beta/thread_create_params.py | 9 ++ src/openai/types/beta/thread_update_params.py | 4 + .../beta/threads/file_citation_annotation.py | 4 + .../threads/file_citation_delta_annotation.py | 4 + .../beta/threads/file_path_annotation.py | 4 + .../threads/file_path_delta_annotation.py | 4 + .../beta/threads/image_file_content_block.py | 4 + .../threads/image_file_content_block_param.py | 4 + .../beta/threads/image_file_delta_block.py | 4 + .../beta/threads/image_url_content_block.py | 2 + .../threads/image_url_content_block_param.py | 2 + .../beta/threads/image_url_delta_block.py | 2 + src/openai/types/beta/threads/message.py | 6 ++ .../types/beta/threads/message_delta.py | 2 + .../types/beta/threads/message_delta_event.py | 5 + .../beta/threads/refusal_content_block.py | 2 + .../types/beta/threads/refusal_delta_block.py | 2 + .../required_action_function_tool_call.py | 4 + src/openai/types/beta/threads/run.py | 28 ++++++ .../types/beta/threads/run_create_params.py | 5 + .../threads/runs/code_interpreter_logs.py | 2 + .../runs/code_interpreter_tool_call.py | 6 ++ .../runs/code_interpreter_tool_call_delta.py | 4 + .../threads/runs/file_search_tool_call.py | 6 ++ .../beta/threads/runs/function_tool_call.py | 2 + 
.../threads/runs/function_tool_call_delta.py | 2 + .../runs/message_creation_step_details.py | 2 + .../types/beta/threads/runs/run_step.py | 12 +++ .../types/beta/threads/runs/run_step_delta.py | 2 + .../beta/threads/runs/run_step_delta_event.py | 5 + .../runs/run_step_delta_message_delta.py | 2 + .../threads/runs/tool_call_delta_object.py | 2 + .../threads/runs/tool_calls_step_details.py | 2 + .../types/beta/threads/text_content_block.py | 2 + .../beta/threads/text_content_block_param.py | 2 + .../types/beta/threads/text_delta_block.py | 2 + src/openai/types/chat/chat_completion.py | 6 ++ ...at_completion_allowed_tool_choice_param.py | 2 + .../chat_completion_allowed_tools_param.py | 2 + ...chat_completion_assistant_message_param.py | 12 +++ .../types/chat/chat_completion_audio.py | 5 + .../types/chat/chat_completion_audio_param.py | 6 ++ .../types/chat/chat_completion_chunk.py | 15 +++ .../chat_completion_content_part_image.py | 2 + ...hat_completion_content_part_image_param.py | 2 + ...mpletion_content_part_input_audio_param.py | 2 + .../chat_completion_content_part_param.py | 4 + .../chat/chat_completion_content_part_text.py | 4 + ...chat_completion_content_part_text_param.py | 4 + .../chat/chat_completion_custom_tool_param.py | 10 ++ ...chat_completion_developer_message_param.py | 6 ++ ...t_completion_function_call_option_param.py | 4 + .../chat/chat_completion_function_tool.py | 2 + .../chat_completion_function_tool_param.py | 2 + .../types/chat/chat_completion_message.py | 11 +++ ...hat_completion_message_custom_tool_call.py | 4 + ...mpletion_message_custom_tool_call_param.py | 4 + ...t_completion_message_function_tool_call.py | 4 + ...letion_message_function_tool_call_param.py | 4 + ...mpletion_named_tool_choice_custom_param.py | 5 + ...chat_completion_named_tool_choice_param.py | 5 + ...hat_completion_prediction_content_param.py | 5 + .../chat/chat_completion_store_message.py | 2 + .../chat_completion_stream_options_param.py | 2 + .../chat_completion_system_message_param.py | 6 ++ .../chat_completion_user_message_param.py | 5 + .../types/chat/completion_create_params.py | 9 ++ src/openai/types/completion.py | 5 + src/openai/types/completion_usage.py | 6 ++ src/openai/types/container_create_params.py | 2 + src/openai/types/container_create_response.py | 6 ++ src/openai/types/container_list_response.py | 6 ++ .../types/container_retrieve_response.py | 6 ++ .../computer_screenshot_content.py | 2 + .../types/conversations/conversation_item.py | 18 ++++ .../conversations/conversation_item_list.py | 2 + src/openai/types/conversations/message.py | 4 + .../conversations/summary_text_content.py | 2 + .../types/conversations/text_content.py | 2 + src/openai/types/create_embedding_response.py | 2 + src/openai/types/embedding.py | 2 + src/openai/types/eval_create_params.py | 37 +++++++ src/openai/types/eval_create_response.py | 19 ++++ .../types/eval_custom_data_source_config.py | 7 ++ src/openai/types/eval_list_response.py | 19 ++++ src/openai/types/eval_retrieve_response.py | 19 ++++ ...l_stored_completions_data_source_config.py | 2 + src/openai/types/eval_update_response.py | 19 ++++ ...create_eval_completions_run_data_source.py | 16 ++++ ..._eval_completions_run_data_source_param.py | 16 ++++ .../create_eval_jsonl_run_data_source.py | 4 + ...create_eval_jsonl_run_data_source_param.py | 4 + src/openai/types/evals/eval_api_error.py | 2 + src/openai/types/evals/run_cancel_response.py | 28 ++++++ src/openai/types/evals/run_create_params.py | 24 +++++ 
src/openai/types/evals/run_create_response.py | 28 ++++++ src/openai/types/evals/run_list_response.py | 28 ++++++ .../types/evals/run_retrieve_response.py | 28 ++++++ .../evals/runs/output_item_list_response.py | 10 ++ .../runs/output_item_retrieve_response.py | 10 ++ src/openai/types/file_create_params.py | 5 + src/openai/types/file_object.py | 2 + .../checkpoints/permission_create_response.py | 4 + .../permission_retrieve_response.py | 4 + .../types/fine_tuning/dpo_hyperparameters.py | 2 + .../fine_tuning/dpo_hyperparameters_param.py | 2 + src/openai/types/fine_tuning/dpo_method.py | 2 + .../types/fine_tuning/dpo_method_param.py | 2 + .../types/fine_tuning/fine_tuning_job.py | 15 +++ .../fine_tuning/fine_tuning_job_event.py | 2 + .../fine_tuning_job_wandb_integration.py | 7 ++ .../types/fine_tuning/job_create_params.py | 14 +++ .../jobs/fine_tuning_job_checkpoint.py | 6 ++ .../reinforcement_hyperparameters.py | 2 + .../reinforcement_hyperparameters_param.py | 2 + .../types/fine_tuning/reinforcement_method.py | 2 + .../fine_tuning/reinforcement_method_param.py | 2 + .../fine_tuning/supervised_hyperparameters.py | 2 + .../supervised_hyperparameters_param.py | 2 + .../types/fine_tuning/supervised_method.py | 2 + .../fine_tuning/supervised_method_param.py | 2 + .../types/graders/label_model_grader.py | 17 ++++ .../types/graders/label_model_grader_param.py | 17 ++++ src/openai/types/graders/multi_grader.py | 4 + .../types/graders/multi_grader_param.py | 4 + src/openai/types/graders/python_grader.py | 2 + .../types/graders/python_grader_param.py | 2 + .../types/graders/score_model_grader.py | 16 ++++ .../types/graders/score_model_grader_param.py | 16 ++++ .../types/graders/string_check_grader.py | 4 + .../graders/string_check_grader_param.py | 4 + .../types/graders/text_similarity_grader.py | 2 + .../graders/text_similarity_grader_param.py | 2 + src/openai/types/image.py | 2 + .../types/image_edit_completed_event.py | 6 ++ .../types/image_edit_partial_image_event.py | 2 + src/openai/types/image_gen_completed_event.py | 6 ++ .../types/image_gen_partial_image_event.py | 2 + src/openai/types/images_response.py | 6 ++ src/openai/types/model.py | 2 + src/openai/types/moderation.py | 8 ++ .../types/moderation_create_response.py | 2 + .../types/moderation_image_url_input_param.py | 4 + .../types/moderation_text_input_param.py | 2 + .../other_file_chunking_strategy_object.py | 5 + .../realtime/client_secret_create_params.py | 8 ++ .../realtime/client_secret_create_response.py | 2 + .../realtime/conversation_created_event.py | 4 + .../types/realtime/conversation_item_added.py | 10 ++ .../conversation_item_create_event.py | 10 ++ .../conversation_item_create_event_param.py | 10 ++ .../conversation_item_created_event.py | 13 +++ .../conversation_item_delete_event.py | 8 ++ .../conversation_item_delete_event_param.py | 8 ++ .../conversation_item_deleted_event.py | 6 ++ .../types/realtime/conversation_item_done.py | 5 + ...put_audio_transcription_completed_event.py | 19 ++++ ...m_input_audio_transcription_delta_event.py | 4 + ..._input_audio_transcription_failed_event.py | 8 ++ ..._item_input_audio_transcription_segment.py | 2 + .../conversation_item_retrieve_event.py | 7 ++ .../conversation_item_retrieve_event_param.py | 7 ++ .../conversation_item_truncate_event.py | 15 +++ .../conversation_item_truncate_event_param.py | 15 +++ .../conversation_item_truncated_event.py | 9 ++ .../input_audio_buffer_append_event.py | 17 ++++ .../input_audio_buffer_append_event_param.py | 17 ++++ 
.../input_audio_buffer_clear_event.py | 6 ++ .../input_audio_buffer_clear_event_param.py | 6 ++ .../input_audio_buffer_cleared_event.py | 5 + .../input_audio_buffer_commit_event.py | 6 ++ .../input_audio_buffer_commit_event_param.py | 6 ++ .../input_audio_buffer_committed_event.py | 7 ++ ..._audio_buffer_dtmf_event_received_event.py | 8 ++ ...input_audio_buffer_speech_started_event.py | 13 +++ ...input_audio_buffer_speech_stopped_event.py | 6 ++ .../input_audio_buffer_timeout_triggered.py | 17 ++++ .../types/realtime/log_prob_properties.py | 2 + .../realtime/mcp_list_tools_completed.py | 2 + .../types/realtime/mcp_list_tools_failed.py | 2 + .../realtime/mcp_list_tools_in_progress.py | 2 + .../output_audio_buffer_clear_event.py | 9 ++ .../output_audio_buffer_clear_event_param.py | 9 ++ .../realtime/rate_limits_updated_event.py | 8 ++ .../types/realtime/realtime_audio_config.py | 2 + .../realtime/realtime_audio_config_input.py | 7 ++ .../realtime_audio_config_input_param.py | 7 ++ .../realtime/realtime_audio_config_param.py | 2 + .../types/realtime/realtime_audio_formats.py | 6 ++ .../realtime/realtime_audio_formats_param.py | 6 ++ .../realtime_audio_input_turn_detection.py | 8 ++ ...altime_audio_input_turn_detection_param.py | 8 ++ ...ime_conversation_item_assistant_message.py | 2 + ...nversation_item_assistant_message_param.py | 2 + ...ealtime_conversation_item_function_call.py | 2 + ..._conversation_item_function_call_output.py | 2 + ...rsation_item_function_call_output_param.py | 2 + ...e_conversation_item_function_call_param.py | 2 + ...altime_conversation_item_system_message.py | 4 + ..._conversation_item_system_message_param.py | 4 + ...realtime_conversation_item_user_message.py | 2 + ...me_conversation_item_user_message_param.py | 2 + src/openai/types/realtime/realtime_error.py | 2 + .../types/realtime/realtime_error_event.py | 6 ++ .../realtime/realtime_mcp_approval_request.py | 2 + .../realtime_mcp_approval_request_param.py | 2 + .../realtime_mcp_approval_response.py | 2 + .../realtime_mcp_approval_response_param.py | 2 + .../types/realtime/realtime_mcp_list_tools.py | 4 + .../realtime/realtime_mcp_list_tools_param.py | 4 + .../types/realtime/realtime_mcp_tool_call.py | 2 + .../realtime/realtime_mcp_tool_call_param.py | 2 + .../types/realtime/realtime_response.py | 4 + .../realtime_response_create_audio_output.py | 2 + ...time_response_create_audio_output_param.py | 2 + .../realtime_response_create_mcp_tool.py | 18 ++++ ...realtime_response_create_mcp_tool_param.py | 18 ++++ .../realtime_response_create_params.py | 2 + .../realtime_response_create_params_param.py | 2 + .../realtime/realtime_response_status.py | 7 ++ .../types/realtime/realtime_response_usage.py | 8 ++ ...time_response_usage_input_token_details.py | 7 ++ ...ime_response_usage_output_token_details.py | 2 + .../types/realtime/realtime_server_event.py | 28 ++++++ .../realtime_session_client_secret.py | 2 + .../realtime_session_create_request.py | 2 + .../realtime_session_create_request_param.py | 2 + .../realtime_session_create_response.py | 43 +++++++++ .../realtime/realtime_tools_config_param.py | 18 ++++ .../realtime/realtime_tools_config_union.py | 18 ++++ .../realtime_tools_config_union_param.py | 18 ++++ .../types/realtime/realtime_tracing_config.py | 2 + .../realtime/realtime_tracing_config_param.py | 2 + .../realtime_transcription_session_audio.py | 2 + ...ltime_transcription_session_audio_input.py | 7 ++ ...transcription_session_audio_input_param.py | 7 ++ ...tion_session_audio_input_turn_detection.py | 8 ++ 
...ession_audio_input_turn_detection_param.py | 8 ++ ...ltime_transcription_session_audio_param.py | 2 + ...me_transcription_session_create_request.py | 2 + ...nscription_session_create_request_param.py | 2 + ...e_transcription_session_create_response.py | 6 ++ ...me_transcription_session_turn_detection.py | 7 ++ .../realtime_truncation_retention_ratio.py | 9 ++ ...altime_truncation_retention_ratio_param.py | 9 ++ .../realtime/response_audio_delta_event.py | 2 + .../realtime/response_audio_done_event.py | 6 ++ .../response_audio_transcript_delta_event.py | 2 + .../response_audio_transcript_done_event.py | 6 ++ .../types/realtime/response_cancel_event.py | 9 ++ .../realtime/response_cancel_event_param.py | 9 ++ .../response_content_part_added_event.py | 7 ++ .../response_content_part_done_event.py | 7 ++ .../types/realtime/response_create_event.py | 28 ++++++ .../realtime/response_create_event_param.py | 28 ++++++ .../types/realtime/response_created_event.py | 6 ++ .../types/realtime/response_done_event.py | 13 +++ ...nse_function_call_arguments_delta_event.py | 2 + ...onse_function_call_arguments_done_event.py | 5 + .../response_mcp_call_arguments_delta.py | 2 + .../response_mcp_call_arguments_done.py | 2 + .../realtime/response_mcp_call_completed.py | 2 + .../realtime/response_mcp_call_failed.py | 2 + .../realtime/response_mcp_call_in_progress.py | 2 + .../response_output_item_added_event.py | 2 + .../response_output_item_done_event.py | 6 ++ .../realtime/response_text_delta_event.py | 2 + .../realtime/response_text_done_event.py | 6 ++ .../types/realtime/session_created_event.py | 7 ++ .../types/realtime/session_update_event.py | 12 +++ .../realtime/session_update_event_param.py | 12 +++ .../types/realtime/session_updated_event.py | 5 + .../types/responses/apply_patch_tool.py | 2 + .../types/responses/apply_patch_tool_param.py | 2 + src/openai/types/responses/computer_tool.py | 5 + .../types/responses/computer_tool_param.py | 5 + src/openai/types/responses/custom_tool.py | 5 + .../types/responses/custom_tool_param.py | 5 + .../types/responses/easy_input_message.py | 8 ++ .../responses/easy_input_message_param.py | 8 ++ .../types/responses/file_search_tool.py | 11 +++ .../types/responses/file_search_tool_param.py | 11 +++ .../types/responses/function_shell_tool.py | 2 + .../responses/function_shell_tool_param.py | 2 + src/openai/types/responses/function_tool.py | 5 + .../types/responses/function_tool_param.py | 5 + .../responses/input_token_count_params.py | 8 ++ src/openai/types/responses/response.py | 7 ++ .../response_apply_patch_tool_call.py | 8 ++ .../response_apply_patch_tool_call_output.py | 2 + .../responses/response_audio_delta_event.py | 2 + .../responses/response_audio_done_event.py | 2 + .../response_audio_transcript_delta_event.py | 2 + .../response_audio_transcript_done_event.py | 2 + ..._code_interpreter_call_code_delta_event.py | 2 + ...e_code_interpreter_call_code_done_event.py | 2 + ...e_code_interpreter_call_completed_event.py | 2 + ...code_interpreter_call_in_progress_event.py | 2 + ...ode_interpreter_call_interpreting_event.py | 2 + .../response_code_interpreter_tool_call.py | 6 ++ ...sponse_code_interpreter_tool_call_param.py | 6 ++ .../responses/response_compaction_item.py | 4 + .../response_compaction_item_param.py | 4 + .../response_compaction_item_param_param.py | 4 + .../responses/response_completed_event.py | 2 + .../responses/response_computer_tool_call.py | 28 ++++++ ...response_computer_tool_call_output_item.py | 2 + 
...se_computer_tool_call_output_screenshot.py | 2 + ...puter_tool_call_output_screenshot_param.py | 2 + .../response_computer_tool_call_param.py | 28 ++++++ .../response_content_part_added_event.py | 4 + .../response_content_part_done_event.py | 4 + .../responses/response_conversation_param.py | 2 + .../types/responses/response_create_params.py | 2 + .../types/responses/response_created_event.py | 2 + .../responses/response_custom_tool_call.py | 2 + ...onse_custom_tool_call_input_delta_event.py | 2 + ...ponse_custom_tool_call_input_done_event.py | 2 + .../response_custom_tool_call_output.py | 2 + .../response_custom_tool_call_output_param.py | 2 + .../response_custom_tool_call_param.py | 2 + src/openai/types/responses/response_error.py | 2 + .../types/responses/response_error_event.py | 2 + .../types/responses/response_failed_event.py | 2 + ...sponse_file_search_call_completed_event.py | 2 + ...onse_file_search_call_in_progress_event.py | 2 + ...sponse_file_search_call_searching_event.py | 2 + .../response_file_search_tool_call.py | 6 ++ .../response_file_search_tool_call_param.py | 6 ++ ...response_format_text_json_schema_config.py | 6 ++ ...se_format_text_json_schema_config_param.py | 6 ++ ...nse_function_call_arguments_delta_event.py | 2 + ...onse_function_call_arguments_done_event.py | 2 + ...onse_function_shell_call_output_content.py | 6 ++ ...unction_shell_call_output_content_param.py | 6 ++ .../response_function_shell_tool_call.py | 4 + ...esponse_function_shell_tool_call_output.py | 8 ++ .../responses/response_function_tool_call.py | 6 ++ .../response_function_tool_call_item.py | 6 ++ .../response_function_tool_call_param.py | 6 ++ .../responses/response_function_web_search.py | 14 +++ .../response_function_web_search_param.py | 14 +++ ...response_image_gen_call_completed_event.py | 4 + ...esponse_image_gen_call_generating_event.py | 4 + ...sponse_image_gen_call_in_progress_event.py | 2 + ...onse_image_gen_call_partial_image_event.py | 2 + .../responses/response_in_progress_event.py | 2 + .../responses/response_incomplete_event.py | 2 + .../types/responses/response_input_audio.py | 2 + .../responses/response_input_audio_param.py | 2 + .../types/responses/response_input_file.py | 2 + .../responses/response_input_file_content.py | 2 + .../response_input_file_content_param.py | 2 + .../responses/response_input_file_param.py | 2 + .../types/responses/response_input_image.py | 5 + .../responses/response_input_image_content.py | 5 + .../response_input_image_content_param.py | 5 + .../responses/response_input_image_param.py | 5 + .../types/responses/response_input_item.py | 50 ++++++++++ .../responses/response_input_item_param.py | 50 ++++++++++ .../types/responses/response_input_param.py | 50 ++++++++++ .../types/responses/response_input_text.py | 2 + .../responses/response_input_text_content.py | 2 + .../response_input_text_content_param.py | 2 + .../responses/response_input_text_param.py | 2 + src/openai/types/responses/response_item.py | 18 ++++ .../types/responses/response_item_list.py | 2 + ...response_mcp_call_arguments_delta_event.py | 4 + .../response_mcp_call_arguments_done_event.py | 2 + .../response_mcp_call_completed_event.py | 2 + .../response_mcp_call_failed_event.py | 2 + .../response_mcp_call_in_progress_event.py | 2 + ...response_mcp_list_tools_completed_event.py | 2 + .../response_mcp_list_tools_failed_event.py | 2 + ...sponse_mcp_list_tools_in_progress_event.py | 4 + .../types/responses/response_output_item.py | 14 +++ .../response_output_item_added_event.py | 2 + 
.../response_output_item_done_event.py | 2 + .../responses/response_output_message.py | 2 + .../response_output_message_param.py | 2 + .../responses/response_output_refusal.py | 2 + .../response_output_refusal_param.py | 2 + .../types/responses/response_output_text.py | 14 +++ ...onse_output_text_annotation_added_event.py | 2 + .../responses/response_output_text_param.py | 14 +++ src/openai/types/responses/response_prompt.py | 5 + .../types/responses/response_prompt_param.py | 5 + .../types/responses/response_queued_event.py | 2 + .../responses/response_reasoning_item.py | 11 +++ .../response_reasoning_item_param.py | 11 +++ ...onse_reasoning_summary_part_added_event.py | 4 + ...ponse_reasoning_summary_part_done_event.py | 4 + ...onse_reasoning_summary_text_delta_event.py | 2 + ...ponse_reasoning_summary_text_done_event.py | 2 + .../response_reasoning_text_delta_event.py | 2 + .../response_reasoning_text_done_event.py | 2 + .../responses/response_refusal_delta_event.py | 2 + .../responses/response_refusal_done_event.py | 2 + .../types/responses/response_text_config.py | 8 ++ .../responses/response_text_config_param.py | 8 ++ .../responses/response_text_delta_event.py | 8 ++ .../responses/response_text_done_event.py | 8 ++ src/openai/types/responses/response_usage.py | 9 ++ ...esponse_web_search_call_completed_event.py | 2 + ...ponse_web_search_call_in_progress_event.py | 2 + ...esponse_web_search_call_searching_event.py | 2 + src/openai/types/responses/tool.py | 35 +++++++ .../types/responses/tool_choice_allowed.py | 2 + .../responses/tool_choice_allowed_param.py | 2 + .../responses/tool_choice_apply_patch.py | 2 + .../tool_choice_apply_patch_param.py | 2 + .../types/responses/tool_choice_custom.py | 2 + .../responses/tool_choice_custom_param.py | 2 + .../types/responses/tool_choice_function.py | 2 + .../responses/tool_choice_function_param.py | 2 + src/openai/types/responses/tool_choice_mcp.py | 4 + .../types/responses/tool_choice_mcp_param.py | 4 + .../types/responses/tool_choice_shell.py | 2 + .../responses/tool_choice_shell_param.py | 2 + .../types/responses/tool_choice_types.py | 5 + .../responses/tool_choice_types_param.py | 5 + src/openai/types/responses/tool_param.py | 35 +++++++ .../responses/web_search_preview_tool.py | 7 ++ .../web_search_preview_tool_param.py | 7 ++ src/openai/types/responses/web_search_tool.py | 10 ++ .../types/responses/web_search_tool_param.py | 10 ++ src/openai/types/shared/comparison_filter.py | 4 + src/openai/types/shared/compound_filter.py | 2 + .../types/shared/custom_tool_input_format.py | 4 + src/openai/types/shared/reasoning.py | 6 ++ .../shared/response_format_json_object.py | 8 ++ .../shared/response_format_json_schema.py | 8 ++ .../types/shared/response_format_text.py | 2 + .../shared/response_format_text_grammar.py | 5 + .../shared/response_format_text_python.py | 6 ++ .../types/shared_params/comparison_filter.py | 4 + .../types/shared_params/compound_filter.py | 2 + .../shared_params/custom_tool_input_format.py | 4 + src/openai/types/shared_params/reasoning.py | 6 ++ .../response_format_json_object.py | 8 ++ .../response_format_json_schema.py | 8 ++ .../shared_params/response_format_text.py | 2 + ...tic_file_chunking_strategy_object_param.py | 2 + src/openai/types/upload.py | 2 + src/openai/types/upload_create_params.py | 5 + src/openai/types/uploads/upload_part.py | 2 + src/openai/types/vector_store.py | 6 ++ .../types/vector_store_create_params.py | 2 + .../types/vector_store_search_params.py | 2 + .../types/vector_store_update_params.py | 2 + 
.../types/vector_stores/vector_store_file.py | 7 ++ .../vector_stores/vector_store_file_batch.py | 2 + src/openai/types/video.py | 2 + src/openai/types/video_delete_response.py | 2 + .../webhooks/batch_cancelled_webhook_event.py | 4 + .../webhooks/batch_completed_webhook_event.py | 4 + .../webhooks/batch_expired_webhook_event.py | 4 + .../webhooks/batch_failed_webhook_event.py | 4 + .../eval_run_canceled_webhook_event.py | 4 + .../webhooks/eval_run_failed_webhook_event.py | 4 + .../eval_run_succeeded_webhook_event.py | 4 + ...fine_tuning_job_cancelled_webhook_event.py | 4 + .../fine_tuning_job_failed_webhook_event.py | 4 + ...fine_tuning_job_succeeded_webhook_event.py | 4 + .../realtime_call_incoming_webhook_event.py | 6 ++ .../response_cancelled_webhook_event.py | 4 + .../response_completed_webhook_event.py | 4 + .../webhooks/response_failed_webhook_event.py | 4 + .../response_incomplete_webhook_event.py | 4 + 509 files changed, 3202 insertions(+) diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index 4c5882152d..cbae8bf750 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -21,6 +21,8 @@ class Logprob(BaseModel): class UsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -29,6 +31,8 @@ class UsageTokensInputTokenDetails(BaseModel): class UsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -46,6 +50,8 @@ class UsageTokens(BaseModel): class UsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -57,6 +63,10 @@ class UsageDuration(BaseModel): class Transcription(BaseModel): + """ + Represents a transcription response returned by model, based on the provided input. + """ + text: str """The transcribed text.""" diff --git a/src/openai/types/audio/transcription_diarized.py b/src/openai/types/audio/transcription_diarized.py index b7dd2b8ebb..07585fe239 100644 --- a/src/openai/types/audio/transcription_diarized.py +++ b/src/openai/types/audio/transcription_diarized.py @@ -11,6 +11,8 @@ class UsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -19,6 +21,8 @@ class UsageTokensInputTokenDetails(BaseModel): class UsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -36,6 +40,8 @@ class UsageTokens(BaseModel): class UsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -47,6 +53,10 @@ class UsageDuration(BaseModel): class TranscriptionDiarized(BaseModel): + """ + Represents a diarized transcription response returned by the model, including the combined transcript and speaker-segment annotations. 
+ """ + duration: float """Duration of the input audio in seconds.""" diff --git a/src/openai/types/audio/transcription_diarized_segment.py b/src/openai/types/audio/transcription_diarized_segment.py index fe87bb4fb8..fcfdb3634f 100644 --- a/src/openai/types/audio/transcription_diarized_segment.py +++ b/src/openai/types/audio/transcription_diarized_segment.py @@ -8,6 +8,8 @@ class TranscriptionDiarizedSegment(BaseModel): + """A segment of diarized transcript text with speaker metadata.""" + id: str """Unique identifier for the segment.""" diff --git a/src/openai/types/audio/transcription_text_delta_event.py b/src/openai/types/audio/transcription_text_delta_event.py index 363b6a6335..a6e83133c8 100644 --- a/src/openai/types/audio/transcription_text_delta_event.py +++ b/src/openai/types/audio/transcription_text_delta_event.py @@ -20,6 +20,11 @@ class Logprob(BaseModel): class TranscriptionTextDeltaEvent(BaseModel): + """Emitted when there is an additional text delta. + + This is also the first event emitted when the transcription starts. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. + """ + delta: str """The text delta that was additionally transcribed.""" diff --git a/src/openai/types/audio/transcription_text_done_event.py b/src/openai/types/audio/transcription_text_done_event.py index 9665edc565..c8f7fc0769 100644 --- a/src/openai/types/audio/transcription_text_done_event.py +++ b/src/openai/types/audio/transcription_text_done_event.py @@ -20,6 +20,8 @@ class Logprob(BaseModel): class UsageInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -28,6 +30,8 @@ class UsageInputTokenDetails(BaseModel): class Usage(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -45,6 +49,11 @@ class Usage(BaseModel): class TranscriptionTextDoneEvent(BaseModel): + """Emitted when the transcription is complete. + + Contains the complete transcription text. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`. + """ + text: str """The text that was transcribed.""" diff --git a/src/openai/types/audio/transcription_text_segment_event.py b/src/openai/types/audio/transcription_text_segment_event.py index d4f7664578..e95472e6c6 100644 --- a/src/openai/types/audio/transcription_text_segment_event.py +++ b/src/openai/types/audio/transcription_text_segment_event.py @@ -8,6 +8,10 @@ class TranscriptionTextSegmentEvent(BaseModel): + """ + Emitted when a diarized transcription returns a completed segment with speaker information. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with `stream` set to `true` and `response_format` set to `diarized_json`. 
+ """ + id: str """Unique identifier for the segment.""" diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py index addda71ec6..b1a95e9c72 100644 --- a/src/openai/types/audio/transcription_verbose.py +++ b/src/openai/types/audio/transcription_verbose.py @@ -11,6 +11,8 @@ class Usage(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -19,6 +21,10 @@ class Usage(BaseModel): class TranscriptionVerbose(BaseModel): + """ + Represents a verbose json transcription response returned by model, based on the provided input. + """ + duration: float """The duration of the input audio.""" diff --git a/src/openai/types/auto_file_chunking_strategy_param.py b/src/openai/types/auto_file_chunking_strategy_param.py index 6f17836bac..db7cbf596d 100644 --- a/src/openai/types/auto_file_chunking_strategy_param.py +++ b/src/openai/types/auto_file_chunking_strategy_param.py @@ -8,5 +8,10 @@ class AutoFileChunkingStrategyParam(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index c182a87e7f..1088aab380 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -58,6 +58,10 @@ class BatchCreateParams(TypedDict, total=False): class OutputExpiresAfter(TypedDict, total=False): + """ + The expiration policy for the output and/or error file that are generated for a batch. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py index 068b071af1..64a570747d 100644 --- a/src/openai/types/batch_request_counts.py +++ b/src/openai/types/batch_request_counts.py @@ -6,6 +6,8 @@ class BatchRequestCounts(BaseModel): + """The request counts for different statuses within the batch.""" + completed: int """Number of requests that have been completed successfully.""" diff --git a/src/openai/types/batch_usage.py b/src/openai/types/batch_usage.py index 578f64a5e2..d68d7110ac 100644 --- a/src/openai/types/batch_usage.py +++ b/src/openai/types/batch_usage.py @@ -6,6 +6,8 @@ class InputTokensDetails(BaseModel): + """A detailed breakdown of the input tokens.""" + cached_tokens: int """The number of tokens that were retrieved from the cache. @@ -14,11 +16,19 @@ class InputTokensDetails(BaseModel): class OutputTokensDetails(BaseModel): + """A detailed breakdown of the output tokens.""" + reasoning_tokens: int """The number of reasoning tokens.""" class BatchUsage(BaseModel): + """ + Represents token usage details including input tokens, output tokens, a + breakdown of output tokens, and the total tokens used. Only populated on + batches created after September 7, 2025. + """ + input_tokens: int """The number of input tokens.""" diff --git a/src/openai/types/beta/assistant.py b/src/openai/types/beta/assistant.py index 58421e0f66..61344f85a1 100644 --- a/src/openai/types/beta/assistant.py +++ b/src/openai/types/beta/assistant.py @@ -31,12 +31,19 @@ class ToolResourcesFileSearch(BaseModel): class ToolResources(BaseModel): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. 
For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None file_search: Optional[ToolResourcesFileSearch] = None class Assistant(BaseModel): + """Represents an `assistant` that can call the model and use tools.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 38b30f212f..49e7af2d67 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -141,6 +141,11 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -216,6 +221,11 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/src/openai/types/beta/assistant_stream_event.py b/src/openai/types/beta/assistant_stream_event.py index 41d3a0c5ea..87620a11d0 100644 --- a/src/openai/types/beta/assistant_stream_event.py +++ b/src/openai/types/beta/assistant_stream_event.py @@ -43,6 +43,10 @@ class ThreadCreated(BaseModel): + """ + Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created. + """ + data: Thread """ Represents a thread that contains @@ -56,6 +60,10 @@ class ThreadCreated(BaseModel): class ThreadRunCreated(BaseModel): + """ + Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created. + """ + data: Run """ Represents an execution run on a @@ -66,6 +74,10 @@ class ThreadRunCreated(BaseModel): class ThreadRunQueued(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status. + """ + data: Run """ Represents an execution run on a @@ -76,6 +88,10 @@ class ThreadRunQueued(BaseModel): class ThreadRunInProgress(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status. + """ + data: Run """ Represents an execution run on a @@ -86,6 +102,10 @@ class ThreadRunInProgress(BaseModel): class ThreadRunRequiresAction(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status. + """ + data: Run """ Represents an execution run on a @@ -96,6 +116,10 @@ class ThreadRunRequiresAction(BaseModel): class ThreadRunCompleted(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed. + """ + data: Run """ Represents an execution run on a @@ -106,6 +130,10 @@ class ThreadRunCompleted(BaseModel): class ThreadRunIncomplete(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`. 
+ """ + data: Run """ Represents an execution run on a @@ -116,6 +144,10 @@ class ThreadRunIncomplete(BaseModel): class ThreadRunFailed(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. + """ + data: Run """ Represents an execution run on a @@ -126,6 +158,10 @@ class ThreadRunFailed(BaseModel): class ThreadRunCancelling(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status. + """ + data: Run """ Represents an execution run on a @@ -136,6 +172,10 @@ class ThreadRunCancelling(BaseModel): class ThreadRunCancelled(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled. + """ + data: Run """ Represents an execution run on a @@ -146,6 +186,10 @@ class ThreadRunCancelled(BaseModel): class ThreadRunExpired(BaseModel): + """ + Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. + """ + data: Run """ Represents an execution run on a @@ -156,6 +200,10 @@ class ThreadRunExpired(BaseModel): class ThreadRunStepCreated(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -163,6 +211,10 @@ class ThreadRunStepCreated(BaseModel): class ThreadRunStepInProgress(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -170,6 +222,10 @@ class ThreadRunStepInProgress(BaseModel): class ThreadRunStepDelta(BaseModel): + """ + Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed. + """ + data: RunStepDeltaEvent """Represents a run step delta i.e. @@ -180,6 +236,10 @@ class ThreadRunStepDelta(BaseModel): class ThreadRunStepCompleted(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -187,6 +247,10 @@ class ThreadRunStepCompleted(BaseModel): class ThreadRunStepFailed(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -194,6 +258,10 @@ class ThreadRunStepFailed(BaseModel): class ThreadRunStepCancelled(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -201,6 +269,10 @@ class ThreadRunStepCancelled(BaseModel): class ThreadRunStepExpired(BaseModel): + """ + Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires. + """ + data: RunStep """Represents a step in execution of a run.""" @@ -208,6 +280,10 @@ class ThreadRunStepExpired(BaseModel): class ThreadMessageCreated(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created. 
+ """ + data: Message """ Represents a message within a @@ -218,6 +294,10 @@ class ThreadMessageCreated(BaseModel): class ThreadMessageInProgress(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state. + """ + data: Message """ Represents a message within a @@ -228,6 +308,10 @@ class ThreadMessageInProgress(BaseModel): class ThreadMessageDelta(BaseModel): + """ + Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed. + """ + data: MessageDeltaEvent """Represents a message delta i.e. @@ -238,6 +322,10 @@ class ThreadMessageDelta(BaseModel): class ThreadMessageCompleted(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed. + """ + data: Message """ Represents a message within a @@ -248,6 +336,10 @@ class ThreadMessageCompleted(BaseModel): class ThreadMessageIncomplete(BaseModel): + """ + Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed. + """ + data: Message """ Represents a message within a @@ -258,6 +350,10 @@ class ThreadMessageIncomplete(BaseModel): class ErrorEvent(BaseModel): + """ + Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout. + """ + data: ErrorObject event: Literal["error"] diff --git a/src/openai/types/beta/assistant_tool_choice.py b/src/openai/types/beta/assistant_tool_choice.py index d73439f006..cabded0b3c 100644 --- a/src/openai/types/beta/assistant_tool_choice.py +++ b/src/openai/types/beta/assistant_tool_choice.py @@ -10,6 +10,11 @@ class AssistantToolChoice(BaseModel): + """Specifies a tool the model should use. + + Use to force the model to call a specific tool. + """ + type: Literal["function", "code_interpreter", "file_search"] """The type of the tool. If type is `function`, the function name must be set""" diff --git a/src/openai/types/beta/assistant_tool_choice_param.py b/src/openai/types/beta/assistant_tool_choice_param.py index 904f489e26..05916bb668 100644 --- a/src/openai/types/beta/assistant_tool_choice_param.py +++ b/src/openai/types/beta/assistant_tool_choice_param.py @@ -10,6 +10,11 @@ class AssistantToolChoiceParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific tool. + """ + type: Required[Literal["function", "code_interpreter", "file_search"]] """The type of the tool. If type is `function`, the function name must be set""" diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index 8f774c4e6c..d84b15cc5b 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -187,6 +187,11 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
+ """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/src/openai/types/beta/chatkit/chat_session.py b/src/openai/types/beta/chatkit/chat_session.py index 82baea211c..9db9fc93a0 100644 --- a/src/openai/types/beta/chatkit/chat_session.py +++ b/src/openai/types/beta/chatkit/chat_session.py @@ -12,6 +12,8 @@ class ChatSession(BaseModel): + """Represents a ChatKit session and its resolved configuration.""" + id: str """Identifier for the ChatKit session.""" diff --git a/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py b/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py index 4fa96a4433..1d95255e06 100644 --- a/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +++ b/src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py @@ -6,5 +6,7 @@ class ChatSessionAutomaticThreadTitling(BaseModel): + """Automatic thread title preferences for the session.""" + enabled: bool """Whether automatic thread titling is enabled.""" diff --git a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py index 6205b172cf..f9fa0ceff5 100644 --- a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py +++ b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py @@ -9,6 +9,8 @@ class ChatSessionChatKitConfiguration(BaseModel): + """ChatKit configuration for the session.""" + automatic_thread_titling: ChatSessionAutomaticThreadTitling """Automatic thread titling preferences.""" diff --git a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py index 0a5ae80a76..834de71e71 100644 --- a/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +++ b/src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py @@ -8,11 +8,21 @@ class AutomaticThreadTitling(TypedDict, total=False): + """Configuration for automatic thread titling. + + When omitted, automatic thread titling is enabled by default. + """ + enabled: bool """Enable automatic thread title generation. Defaults to true.""" class FileUpload(TypedDict, total=False): + """Configuration for upload enablement and limits. + + When omitted, uploads are disabled by default (max_files 10, max_file_size 512 MB). + """ + enabled: bool """Enable uploads for this session. Defaults to false.""" @@ -27,6 +37,11 @@ class FileUpload(TypedDict, total=False): class History(TypedDict, total=False): + """Configuration for chat history retention. + + When omitted, history is enabled by default with no limit on recent_threads (null). + """ + enabled: bool """Enables chat users to access previous ChatKit threads. Defaults to true.""" @@ -38,6 +53,8 @@ class History(TypedDict, total=False): class ChatSessionChatKitConfigurationParam(TypedDict, total=False): + """Optional per-session configuration settings for ChatKit behavior.""" + automatic_thread_titling: AutomaticThreadTitling """Configuration for automatic thread titling. 
diff --git a/src/openai/types/beta/chatkit/chat_session_expires_after_param.py b/src/openai/types/beta/chatkit/chat_session_expires_after_param.py index ceb5a984c5..c1de8a767a 100644 --- a/src/openai/types/beta/chatkit/chat_session_expires_after_param.py +++ b/src/openai/types/beta/chatkit/chat_session_expires_after_param.py @@ -8,6 +8,8 @@ class ChatSessionExpiresAfterParam(TypedDict, total=False): + """Controls when the session expires relative to an anchor timestamp.""" + anchor: Required[Literal["created_at"]] """Base timestamp used to calculate expiration. Currently fixed to `created_at`.""" diff --git a/src/openai/types/beta/chatkit/chat_session_file_upload.py b/src/openai/types/beta/chatkit/chat_session_file_upload.py index c63c7a0149..0275859d27 100644 --- a/src/openai/types/beta/chatkit/chat_session_file_upload.py +++ b/src/openai/types/beta/chatkit/chat_session_file_upload.py @@ -8,6 +8,8 @@ class ChatSessionFileUpload(BaseModel): + """Upload permissions and limits applied to the session.""" + enabled: bool """Indicates if uploads are enabled for the session.""" diff --git a/src/openai/types/beta/chatkit/chat_session_history.py b/src/openai/types/beta/chatkit/chat_session_history.py index 66ebe00877..54690009c2 100644 --- a/src/openai/types/beta/chatkit/chat_session_history.py +++ b/src/openai/types/beta/chatkit/chat_session_history.py @@ -8,6 +8,8 @@ class ChatSessionHistory(BaseModel): + """History retention preferences returned for the session.""" + enabled: bool """Indicates if chat history is persisted for the session.""" diff --git a/src/openai/types/beta/chatkit/chat_session_rate_limits.py b/src/openai/types/beta/chatkit/chat_session_rate_limits.py index 392225e347..7c5bd94e76 100644 --- a/src/openai/types/beta/chatkit/chat_session_rate_limits.py +++ b/src/openai/types/beta/chatkit/chat_session_rate_limits.py @@ -6,5 +6,7 @@ class ChatSessionRateLimits(BaseModel): + """Active per-minute request limit for the session.""" + max_requests_per_1_minute: int """Maximum allowed requests per one-minute window.""" diff --git a/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py b/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py index 7894c06484..578f20b0c3 100644 --- a/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py +++ b/src/openai/types/beta/chatkit/chat_session_rate_limits_param.py @@ -8,5 +8,7 @@ class ChatSessionRateLimitsParam(TypedDict, total=False): + """Controls request rate limits for the session.""" + max_requests_per_1_minute: int """Maximum number of requests allowed per minute for the session. Defaults to 10.""" diff --git a/src/openai/types/beta/chatkit/chat_session_workflow_param.py b/src/openai/types/beta/chatkit/chat_session_workflow_param.py index 5542922102..abf52de526 100644 --- a/src/openai/types/beta/chatkit/chat_session_workflow_param.py +++ b/src/openai/types/beta/chatkit/chat_session_workflow_param.py @@ -9,11 +9,18 @@ class Tracing(TypedDict, total=False): + """Optional tracing overrides for the workflow invocation. + + When omitted, tracing is enabled by default. + """ + enabled: bool """Whether tracing is enabled during the session. 
Defaults to true.""" class ChatSessionWorkflowParam(TypedDict, total=False): + """Workflow reference and overrides applied to the chat session.""" + id: Required[str] """Identifier for the workflow invoked by the session.""" diff --git a/src/openai/types/beta/chatkit/chatkit_attachment.py b/src/openai/types/beta/chatkit/chatkit_attachment.py index 8d8ad3e128..7750925e03 100644 --- a/src/openai/types/beta/chatkit/chatkit_attachment.py +++ b/src/openai/types/beta/chatkit/chatkit_attachment.py @@ -9,6 +9,8 @@ class ChatKitAttachment(BaseModel): + """Attachment metadata included on thread items.""" + id: str """Identifier for the attachment.""" diff --git a/src/openai/types/beta/chatkit/chatkit_response_output_text.py b/src/openai/types/beta/chatkit/chatkit_response_output_text.py index 116b797ec2..1348fed2b2 100644 --- a/src/openai/types/beta/chatkit/chatkit_response_output_text.py +++ b/src/openai/types/beta/chatkit/chatkit_response_output_text.py @@ -17,6 +17,8 @@ class AnnotationFileSource(BaseModel): + """File attachment referenced by the annotation.""" + filename: str """Filename referenced by the annotation.""" @@ -25,6 +27,8 @@ class AnnotationFileSource(BaseModel): class AnnotationFile(BaseModel): + """Annotation that references an uploaded file.""" + source: AnnotationFileSource """File attachment referenced by the annotation.""" @@ -33,6 +37,8 @@ class AnnotationFile(BaseModel): class AnnotationURLSource(BaseModel): + """URL referenced by the annotation.""" + type: Literal["url"] """Type discriminator that is always `url`.""" @@ -41,6 +47,8 @@ class AnnotationURLSource(BaseModel): class AnnotationURL(BaseModel): + """Annotation that references a URL.""" + source: AnnotationURLSource """URL referenced by the annotation.""" @@ -52,6 +60,8 @@ class AnnotationURL(BaseModel): class ChatKitResponseOutputText(BaseModel): + """Assistant response text accompanied by optional annotations.""" + annotations: List[Annotation] """Ordered list of annotations attached to the response text.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread.py b/src/openai/types/beta/chatkit/chatkit_thread.py index abd1a9ea01..32075233d8 100644 --- a/src/openai/types/beta/chatkit/chatkit_thread.py +++ b/src/openai/types/beta/chatkit/chatkit_thread.py @@ -10,11 +10,15 @@ class StatusActive(BaseModel): + """Indicates that a thread is active.""" + type: Literal["active"] """Status discriminator that is always `active`.""" class StatusLocked(BaseModel): + """Indicates that a thread is locked and cannot accept new input.""" + reason: Optional[str] = None """Reason that the thread was locked. Defaults to null when no reason is recorded.""" @@ -23,6 +27,8 @@ class StatusLocked(BaseModel): class StatusClosed(BaseModel): + """Indicates that a thread has been closed.""" + reason: Optional[str] = None """Reason that the thread was closed. 
Defaults to null when no reason is recorded.""" @@ -34,6 +40,8 @@ class StatusClosed(BaseModel): class ChatKitThread(BaseModel): + """Represents a ChatKit thread and its current status.""" + id: str """Identifier of the thread.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py b/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py index f4afd053b6..337f53a83d 100644 --- a/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +++ b/src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py @@ -10,6 +10,8 @@ class ChatKitThreadAssistantMessageItem(BaseModel): + """Assistant-authored message within a thread.""" + id: str """Identifier of the thread item.""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_item_list.py b/src/openai/types/beta/chatkit/chatkit_thread_item_list.py index 173bd15055..049ca54429 100644 --- a/src/openai/types/beta/chatkit/chatkit_thread_item_list.py +++ b/src/openai/types/beta/chatkit/chatkit_thread_item_list.py @@ -20,6 +20,8 @@ class DataChatKitClientToolCall(BaseModel): + """Record of a client side tool invocation initiated by the assistant.""" + id: str """Identifier of the thread item.""" @@ -55,6 +57,8 @@ class DataChatKitClientToolCall(BaseModel): class DataChatKitTask(BaseModel): + """Task emitted by the workflow to show progress and status updates.""" + id: str """Identifier of the thread item.""" @@ -81,6 +85,8 @@ class DataChatKitTask(BaseModel): class DataChatKitTaskGroupTask(BaseModel): + """Task entry that appears within a TaskGroup.""" + heading: Optional[str] = None """Optional heading for the grouped task. Defaults to null when not provided.""" @@ -95,6 +101,8 @@ class DataChatKitTaskGroupTask(BaseModel): class DataChatKitTaskGroup(BaseModel): + """Collection of workflow tasks grouped together in the thread.""" + id: str """Identifier of the thread item.""" @@ -128,6 +136,8 @@ class DataChatKitTaskGroup(BaseModel): class ChatKitThreadItemList(BaseModel): + """A paginated list of thread items rendered for the ChatKit API.""" + data: List[Data] """A list of items""" diff --git a/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py b/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py index 233d07232f..d7552c4f2e 100644 --- a/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py +++ b/src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py @@ -18,6 +18,8 @@ class ContentInputText(BaseModel): + """Text block that a user contributed to the thread.""" + text: str """Plain-text content supplied by the user.""" @@ -26,6 +28,8 @@ class ContentInputText(BaseModel): class ContentQuotedText(BaseModel): + """Quoted snippet that the user referenced in their message.""" + text: str """Quoted text content.""" @@ -37,11 +41,15 @@ class ContentQuotedText(BaseModel): class InferenceOptionsToolChoice(BaseModel): + """Preferred tool to invoke. Defaults to null when ChatKit should auto-select.""" + id: str """Identifier of the requested tool.""" class InferenceOptions(BaseModel): + """Inference overrides applied to the message. Defaults to null when unset.""" + model: Optional[str] = None """Model name that generated the response. 
@@ -53,6 +61,8 @@ class InferenceOptions(BaseModel): class ChatKitThreadUserMessageItem(BaseModel): + """User-authored message within a thread.""" + id: str """Identifier of the thread item.""" diff --git a/src/openai/types/beta/chatkit/chatkit_widget_item.py b/src/openai/types/beta/chatkit/chatkit_widget_item.py index c7f182259a..a269c736fb 100644 --- a/src/openai/types/beta/chatkit/chatkit_widget_item.py +++ b/src/openai/types/beta/chatkit/chatkit_widget_item.py @@ -8,6 +8,8 @@ class ChatKitWidgetItem(BaseModel): + """Thread item that renders a widget payload.""" + id: str """Identifier of the thread item.""" diff --git a/src/openai/types/beta/chatkit/thread_delete_response.py b/src/openai/types/beta/chatkit/thread_delete_response.py index 03fdec9c2c..45b686bf8b 100644 --- a/src/openai/types/beta/chatkit/thread_delete_response.py +++ b/src/openai/types/beta/chatkit/thread_delete_response.py @@ -8,6 +8,8 @@ class ThreadDeleteResponse(BaseModel): + """Confirmation payload returned after deleting a thread.""" + id: str """Identifier of the deleted thread.""" diff --git a/src/openai/types/beta/chatkit_workflow.py b/src/openai/types/beta/chatkit_workflow.py index 00fbcf41ce..b6f5b55b4a 100644 --- a/src/openai/types/beta/chatkit_workflow.py +++ b/src/openai/types/beta/chatkit_workflow.py @@ -8,11 +8,15 @@ class Tracing(BaseModel): + """Tracing settings applied to the workflow.""" + enabled: bool """Indicates whether tracing is enabled.""" class ChatKitWorkflow(BaseModel): + """Workflow metadata and state returned for the session.""" + id: str """Identifier of the workflow backing the session.""" diff --git a/src/openai/types/beta/file_search_tool.py b/src/openai/types/beta/file_search_tool.py index 89fc16c04c..9e33249e0b 100644 --- a/src/openai/types/beta/file_search_tool.py +++ b/src/openai/types/beta/file_search_tool.py @@ -9,6 +9,13 @@ class FileSearchRankingOptions(BaseModel): + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + """ + score_threshold: float """The score threshold for the file search. @@ -23,6 +30,8 @@ class FileSearchRankingOptions(BaseModel): class FileSearch(BaseModel): + """Overrides for the file search tool.""" + max_num_results: Optional[int] = None """The maximum number of results the file search tool should output. diff --git a/src/openai/types/beta/file_search_tool_param.py b/src/openai/types/beta/file_search_tool_param.py index c73d0af79d..9906b4b2a4 100644 --- a/src/openai/types/beta/file_search_tool_param.py +++ b/src/openai/types/beta/file_search_tool_param.py @@ -8,6 +8,13 @@ class FileSearchRankingOptions(TypedDict, total=False): + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0. + + See the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information. + """ + score_threshold: Required[float] """The score threshold for the file search. @@ -22,6 +29,8 @@ class FileSearchRankingOptions(TypedDict, total=False): class FileSearch(TypedDict, total=False): + """Overrides for the file search tool.""" + max_num_results: int """The maximum number of results the file search tool should output. 
diff --git a/src/openai/types/beta/thread.py b/src/openai/types/beta/thread.py index 789f66e48b..83d9055194 100644 --- a/src/openai/types/beta/thread.py +++ b/src/openai/types/beta/thread.py @@ -29,12 +29,20 @@ class ToolResourcesFileSearch(BaseModel): class ToolResources(BaseModel): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None file_search: Optional[ToolResourcesFileSearch] = None class Thread(BaseModel): + """ + Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 734e5e2a4e..c0aee3e9f8 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -227,6 +227,11 @@ class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): class ThreadToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. + + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -303,12 +308,22 @@ class ThreadToolResourcesFileSearch(TypedDict, total=False): class ThreadToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ThreadToolResourcesCodeInterpreter file_search: ThreadToolResourcesFileSearch class Thread(TypedDict, total=False): + """Options to create a new thread. + + If no thread is provided when running a + request, an empty thread will be created. + """ + messages: Iterable[ThreadMessage] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to @@ -354,12 +369,22 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch class TruncationStrategy(TypedDict, total=False): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Required[Literal["auto", "last_messages"]] """The truncation strategy to use for the thread. diff --git a/src/openai/types/beta/thread_create_params.py b/src/openai/types/beta/thread_create_params.py index 8fd9f38df7..ef83e3d465 100644 --- a/src/openai/types/beta/thread_create_params.py +++ b/src/openai/types/beta/thread_create_params.py @@ -106,6 +106,11 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False): class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False): + """The default strategy. 
+ + This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -181,6 +186,10 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/src/openai/types/beta/thread_update_params.py b/src/openai/types/beta/thread_update_params.py index 464ea8d7eb..e000edc05f 100644 --- a/src/openai/types/beta/thread_update_params.py +++ b/src/openai/types/beta/thread_update_params.py @@ -51,6 +51,10 @@ class ToolResourcesFileSearch(TypedDict, total=False): class ToolResources(TypedDict, total=False): + """ + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + """ + code_interpreter: ToolResourcesCodeInterpreter file_search: ToolResourcesFileSearch diff --git a/src/openai/types/beta/threads/file_citation_annotation.py b/src/openai/types/beta/threads/file_citation_annotation.py index c3085aed9b..929da0ac56 100644 --- a/src/openai/types/beta/threads/file_citation_annotation.py +++ b/src/openai/types/beta/threads/file_citation_annotation.py @@ -13,6 +13,10 @@ class FileCitation(BaseModel): class FileCitationAnnotation(BaseModel): + """ + A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + """ + end_index: int file_citation: FileCitation diff --git a/src/openai/types/beta/threads/file_citation_delta_annotation.py b/src/openai/types/beta/threads/file_citation_delta_annotation.py index b40c0d123e..591e322332 100644 --- a/src/openai/types/beta/threads/file_citation_delta_annotation.py +++ b/src/openai/types/beta/threads/file_citation_delta_annotation.py @@ -17,6 +17,10 @@ class FileCitation(BaseModel): class FileCitationDeltaAnnotation(BaseModel): + """ + A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + """ + index: int """The index of the annotation in the text content part.""" diff --git a/src/openai/types/beta/threads/file_path_annotation.py b/src/openai/types/beta/threads/file_path_annotation.py index 9812737ece..d3c144c2fc 100644 --- a/src/openai/types/beta/threads/file_path_annotation.py +++ b/src/openai/types/beta/threads/file_path_annotation.py @@ -13,6 +13,10 @@ class FilePath(BaseModel): class FilePathAnnotation(BaseModel): + """ + A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. 
+ """ + end_index: int file_path: FilePath diff --git a/src/openai/types/beta/threads/file_path_delta_annotation.py b/src/openai/types/beta/threads/file_path_delta_annotation.py index 0cbb445e48..5416874749 100644 --- a/src/openai/types/beta/threads/file_path_delta_annotation.py +++ b/src/openai/types/beta/threads/file_path_delta_annotation.py @@ -14,6 +14,10 @@ class FilePath(BaseModel): class FilePathDeltaAnnotation(BaseModel): + """ + A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + """ + index: int """The index of the annotation in the text content part.""" diff --git a/src/openai/types/beta/threads/image_file_content_block.py b/src/openai/types/beta/threads/image_file_content_block.py index a909999065..5a082cd488 100644 --- a/src/openai/types/beta/threads/image_file_content_block.py +++ b/src/openai/types/beta/threads/image_file_content_block.py @@ -9,6 +9,10 @@ class ImageFileContentBlock(BaseModel): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. + """ + image_file: ImageFile type: Literal["image_file"] diff --git a/src/openai/types/beta/threads/image_file_content_block_param.py b/src/openai/types/beta/threads/image_file_content_block_param.py index 48d94bee36..da095a5ff6 100644 --- a/src/openai/types/beta/threads/image_file_content_block_param.py +++ b/src/openai/types/beta/threads/image_file_content_block_param.py @@ -10,6 +10,10 @@ class ImageFileContentBlockParam(TypedDict, total=False): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. + """ + image_file: Required[ImageFileParam] type: Required[Literal["image_file"]] diff --git a/src/openai/types/beta/threads/image_file_delta_block.py b/src/openai/types/beta/threads/image_file_delta_block.py index 0a5a2e8a5f..ed17f7ff3b 100644 --- a/src/openai/types/beta/threads/image_file_delta_block.py +++ b/src/openai/types/beta/threads/image_file_delta_block.py @@ -10,6 +10,10 @@ class ImageFileDeltaBlock(BaseModel): + """ + References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message. 
+ """ + index: int """The index of the content part in the message.""" diff --git a/src/openai/types/beta/threads/image_url_content_block.py b/src/openai/types/beta/threads/image_url_content_block.py index 40a16c1df8..8dc1f16a7a 100644 --- a/src/openai/types/beta/threads/image_url_content_block.py +++ b/src/openai/types/beta/threads/image_url_content_block.py @@ -9,6 +9,8 @@ class ImageURLContentBlock(BaseModel): + """References an image URL in the content of a message.""" + image_url: ImageURL type: Literal["image_url"] diff --git a/src/openai/types/beta/threads/image_url_content_block_param.py b/src/openai/types/beta/threads/image_url_content_block_param.py index 585b926c58..a5c59e02c2 100644 --- a/src/openai/types/beta/threads/image_url_content_block_param.py +++ b/src/openai/types/beta/threads/image_url_content_block_param.py @@ -10,6 +10,8 @@ class ImageURLContentBlockParam(TypedDict, total=False): + """References an image URL in the content of a message.""" + image_url: Required[ImageURLParam] type: Required[Literal["image_url"]] diff --git a/src/openai/types/beta/threads/image_url_delta_block.py b/src/openai/types/beta/threads/image_url_delta_block.py index 5252da12dd..3128d8e709 100644 --- a/src/openai/types/beta/threads/image_url_delta_block.py +++ b/src/openai/types/beta/threads/image_url_delta_block.py @@ -10,6 +10,8 @@ class ImageURLDeltaBlock(BaseModel): + """References an image URL in the content of a message.""" + index: int """The index of the content part in the message.""" diff --git a/src/openai/types/beta/threads/message.py b/src/openai/types/beta/threads/message.py index 4a05a128eb..fc7f73f091 100644 --- a/src/openai/types/beta/threads/message.py +++ b/src/openai/types/beta/threads/message.py @@ -34,11 +34,17 @@ class Attachment(BaseModel): class IncompleteDetails(BaseModel): + """On an incomplete message, details about why the message is incomplete.""" + reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] """The reason the message is incomplete.""" class Message(BaseModel): + """ + Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/threads/message_delta.py b/src/openai/types/beta/threads/message_delta.py index ecd0dfe319..fdeebb3a12 100644 --- a/src/openai/types/beta/threads/message_delta.py +++ b/src/openai/types/beta/threads/message_delta.py @@ -10,6 +10,8 @@ class MessageDelta(BaseModel): + """The delta containing the fields that have changed on the Message.""" + content: Optional[List[MessageContentDelta]] = None """The content of the message in array of text and/or images.""" diff --git a/src/openai/types/beta/threads/message_delta_event.py b/src/openai/types/beta/threads/message_delta_event.py index 3811cef679..d5ba1e172d 100644 --- a/src/openai/types/beta/threads/message_delta_event.py +++ b/src/openai/types/beta/threads/message_delta_event.py @@ -9,6 +9,11 @@ class MessageDeltaEvent(BaseModel): + """Represents a message delta i.e. + + any changed fields on a message during streaming. 
+ """ + id: str """The identifier of the message, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/threads/refusal_content_block.py b/src/openai/types/beta/threads/refusal_content_block.py index d54f948554..b4512b3ccb 100644 --- a/src/openai/types/beta/threads/refusal_content_block.py +++ b/src/openai/types/beta/threads/refusal_content_block.py @@ -8,6 +8,8 @@ class RefusalContentBlock(BaseModel): + """The refusal content generated by the assistant.""" + refusal: str type: Literal["refusal"] diff --git a/src/openai/types/beta/threads/refusal_delta_block.py b/src/openai/types/beta/threads/refusal_delta_block.py index dbd8e62697..85a1f08db1 100644 --- a/src/openai/types/beta/threads/refusal_delta_block.py +++ b/src/openai/types/beta/threads/refusal_delta_block.py @@ -9,6 +9,8 @@ class RefusalDeltaBlock(BaseModel): + """The refusal content that is part of a message.""" + index: int """The index of the refusal part in the message.""" diff --git a/src/openai/types/beta/threads/required_action_function_tool_call.py b/src/openai/types/beta/threads/required_action_function_tool_call.py index a24dfd068b..3cec8514ca 100644 --- a/src/openai/types/beta/threads/required_action_function_tool_call.py +++ b/src/openai/types/beta/threads/required_action_function_tool_call.py @@ -8,6 +8,8 @@ class Function(BaseModel): + """The function definition.""" + arguments: str """The arguments that the model expects you to pass to the function.""" @@ -16,6 +18,8 @@ class Function(BaseModel): class RequiredActionFunctionToolCall(BaseModel): + """Tool call objects""" + id: str """The ID of the tool call. diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index c545cc3759..8a88fa1673 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -23,6 +23,11 @@ class IncompleteDetails(BaseModel): + """Details on why the run is incomplete. + + Will be `null` if the run is not incomplete. + """ + reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]] = None """The reason why the run is incomplete. @@ -32,6 +37,8 @@ class IncompleteDetails(BaseModel): class LastError(BaseModel): + """The last error associated with this run. Will be `null` if there are no errors.""" + code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"] """One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.""" @@ -40,11 +47,18 @@ class LastError(BaseModel): class RequiredActionSubmitToolOutputs(BaseModel): + """Details on the tool outputs needed for this run to continue.""" + tool_calls: List[RequiredActionFunctionToolCall] """A list of the relevant tool calls.""" class RequiredAction(BaseModel): + """Details on the action required to continue the run. + + Will be `null` if no action is required. + """ + submit_tool_outputs: RequiredActionSubmitToolOutputs """Details on the tool outputs needed for this run to continue.""" @@ -53,6 +67,11 @@ class RequiredAction(BaseModel): class TruncationStrategy(BaseModel): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Literal["auto", "last_messages"] """The truncation strategy to use for the thread. @@ -70,6 +89,11 @@ class TruncationStrategy(BaseModel): class Usage(BaseModel): + """Usage statistics related to the run. + + This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). 
+ """ + completion_tokens: int """Number of completion tokens used over the course of the run.""" @@ -81,6 +105,10 @@ class Usage(BaseModel): class Run(BaseModel): + """ + Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index df789decbc..f4c56feb56 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -232,6 +232,11 @@ class AdditionalMessage(TypedDict, total=False): class TruncationStrategy(TypedDict, total=False): + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + type: Required[Literal["auto", "last_messages"]] """The truncation strategy to use for the thread. diff --git a/src/openai/types/beta/threads/runs/code_interpreter_logs.py b/src/openai/types/beta/threads/runs/code_interpreter_logs.py index 0bf8c1dac2..722fd2b4c4 100644 --- a/src/openai/types/beta/threads/runs/code_interpreter_logs.py +++ b/src/openai/types/beta/threads/runs/code_interpreter_logs.py @@ -9,6 +9,8 @@ class CodeInterpreterLogs(BaseModel): + """Text output from the Code Interpreter tool call as part of a run step.""" + index: int """The index of the output in the outputs array.""" diff --git a/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py b/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py index e7df4e19c4..bc78b5fa3d 100644 --- a/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py +++ b/src/openai/types/beta/threads/runs/code_interpreter_tool_call.py @@ -17,6 +17,8 @@ class CodeInterpreterOutputLogs(BaseModel): + """Text output from the Code Interpreter tool call as part of a run step.""" + logs: str """The text output from the Code Interpreter tool call.""" @@ -45,6 +47,8 @@ class CodeInterpreterOutputImage(BaseModel): class CodeInterpreter(BaseModel): + """The Code Interpreter tool call definition.""" + input: str """The input to the Code Interpreter tool call.""" @@ -57,6 +61,8 @@ class CodeInterpreter(BaseModel): class CodeInterpreterToolCall(BaseModel): + """Details of the Code Interpreter tool call the run step was involved in.""" + id: str """The ID of the tool call.""" diff --git a/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py b/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py index 9d7a1563cd..efedac795c 100644 --- a/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +++ b/src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py @@ -16,6 +16,8 @@ class CodeInterpreter(BaseModel): + """The Code Interpreter tool call definition.""" + input: Optional[str] = None """The input to the Code Interpreter tool call.""" @@ -28,6 +30,8 @@ class CodeInterpreter(BaseModel): class CodeInterpreterToolCallDelta(BaseModel): + """Details of the Code Interpreter tool call the run step was involved in.""" + index: int """The index of the tool call in the tool calls array.""" diff --git a/src/openai/types/beta/threads/runs/file_search_tool_call.py b/src/openai/types/beta/threads/runs/file_search_tool_call.py index a2068daad1..291a93ec65 100644 --- a/src/openai/types/beta/threads/runs/file_search_tool_call.py +++ b/src/openai/types/beta/threads/runs/file_search_tool_call.py @@ -15,6 +15,8 @@ class 
FileSearchRankingOptions(BaseModel): + """The ranking options for the file search.""" + ranker: Literal["auto", "default_2024_08_21"] """The ranker to use for the file search. @@ -37,6 +39,8 @@ class FileSearchResultContent(BaseModel): class FileSearchResult(BaseModel): + """A result instance of the file search.""" + file_id: str """The ID of the file that result was found in.""" @@ -57,6 +61,8 @@ class FileSearchResult(BaseModel): class FileSearch(BaseModel): + """For now, this is always going to be an empty object.""" + ranking_options: Optional[FileSearchRankingOptions] = None """The ranking options for the file search.""" diff --git a/src/openai/types/beta/threads/runs/function_tool_call.py b/src/openai/types/beta/threads/runs/function_tool_call.py index b1d354f894..dd0e22cfb1 100644 --- a/src/openai/types/beta/threads/runs/function_tool_call.py +++ b/src/openai/types/beta/threads/runs/function_tool_call.py @@ -9,6 +9,8 @@ class Function(BaseModel): + """The definition of the function that was called.""" + arguments: str """The arguments passed to the function.""" diff --git a/src/openai/types/beta/threads/runs/function_tool_call_delta.py b/src/openai/types/beta/threads/runs/function_tool_call_delta.py index faaf026f7f..4107e1b873 100644 --- a/src/openai/types/beta/threads/runs/function_tool_call_delta.py +++ b/src/openai/types/beta/threads/runs/function_tool_call_delta.py @@ -9,6 +9,8 @@ class Function(BaseModel): + """The definition of the function that was called.""" + arguments: Optional[str] = None """The arguments passed to the function.""" diff --git a/src/openai/types/beta/threads/runs/message_creation_step_details.py b/src/openai/types/beta/threads/runs/message_creation_step_details.py index 73439079d3..cd925b57ce 100644 --- a/src/openai/types/beta/threads/runs/message_creation_step_details.py +++ b/src/openai/types/beta/threads/runs/message_creation_step_details.py @@ -13,6 +13,8 @@ class MessageCreation(BaseModel): class MessageCreationStepDetails(BaseModel): + """Details of the message creation by the run step.""" + message_creation: MessageCreation type: Literal["message_creation"] diff --git a/src/openai/types/beta/threads/runs/run_step.py b/src/openai/types/beta/threads/runs/run_step.py index b5f380c7b1..97451229fc 100644 --- a/src/openai/types/beta/threads/runs/run_step.py +++ b/src/openai/types/beta/threads/runs/run_step.py @@ -13,6 +13,11 @@ class LastError(BaseModel): + """The last error associated with this run step. + + Will be `null` if there are no errors. + """ + code: Literal["server_error", "rate_limit_exceeded"] """One of `server_error` or `rate_limit_exceeded`.""" @@ -26,6 +31,11 @@ class LastError(BaseModel): class Usage(BaseModel): + """Usage statistics related to the run step. + + This value will be `null` while the run step's status is `in_progress`. 
+ """ + completion_tokens: int """Number of completion tokens used over the course of the run step.""" @@ -37,6 +47,8 @@ class Usage(BaseModel): class RunStep(BaseModel): + """Represents a step in execution of a run.""" + id: str """The identifier of the run step, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/threads/runs/run_step_delta.py b/src/openai/types/beta/threads/runs/run_step_delta.py index 1139088fb4..2ccb770d57 100644 --- a/src/openai/types/beta/threads/runs/run_step_delta.py +++ b/src/openai/types/beta/threads/runs/run_step_delta.py @@ -16,5 +16,7 @@ class RunStepDelta(BaseModel): + """The delta containing the fields that have changed on the run step.""" + step_details: Optional[StepDetails] = None """The details of the run step.""" diff --git a/src/openai/types/beta/threads/runs/run_step_delta_event.py b/src/openai/types/beta/threads/runs/run_step_delta_event.py index 7f3f92aabf..8f1c095ae4 100644 --- a/src/openai/types/beta/threads/runs/run_step_delta_event.py +++ b/src/openai/types/beta/threads/runs/run_step_delta_event.py @@ -9,6 +9,11 @@ class RunStepDeltaEvent(BaseModel): + """Represents a run step delta i.e. + + any changed fields on a run step during streaming. + """ + id: str """The identifier of the run step, which can be referenced in API endpoints.""" diff --git a/src/openai/types/beta/threads/runs/run_step_delta_message_delta.py b/src/openai/types/beta/threads/runs/run_step_delta_message_delta.py index f58ed3d96d..4b18277c18 100644 --- a/src/openai/types/beta/threads/runs/run_step_delta_message_delta.py +++ b/src/openai/types/beta/threads/runs/run_step_delta_message_delta.py @@ -14,6 +14,8 @@ class MessageCreation(BaseModel): class RunStepDeltaMessageDelta(BaseModel): + """Details of the message creation by the run step.""" + type: Literal["message_creation"] """Always `message_creation`.""" diff --git a/src/openai/types/beta/threads/runs/tool_call_delta_object.py b/src/openai/types/beta/threads/runs/tool_call_delta_object.py index 189dce772c..dbd1096ad6 100644 --- a/src/openai/types/beta/threads/runs/tool_call_delta_object.py +++ b/src/openai/types/beta/threads/runs/tool_call_delta_object.py @@ -10,6 +10,8 @@ class ToolCallDeltaObject(BaseModel): + """Details of the tool call.""" + type: Literal["tool_calls"] """Always `tool_calls`.""" diff --git a/src/openai/types/beta/threads/runs/tool_calls_step_details.py b/src/openai/types/beta/threads/runs/tool_calls_step_details.py index a084d387c7..1f54a6aa71 100644 --- a/src/openai/types/beta/threads/runs/tool_calls_step_details.py +++ b/src/openai/types/beta/threads/runs/tool_calls_step_details.py @@ -10,6 +10,8 @@ class ToolCallsStepDetails(BaseModel): + """Details of the tool call.""" + tool_calls: List[ToolCall] """An array of tool calls the run step was involved in. 
diff --git a/src/openai/types/beta/threads/text_content_block.py b/src/openai/types/beta/threads/text_content_block.py index 3706d6b9d8..b9b1368a17 100644 --- a/src/openai/types/beta/threads/text_content_block.py +++ b/src/openai/types/beta/threads/text_content_block.py @@ -9,6 +9,8 @@ class TextContentBlock(BaseModel): + """The text content that is part of a message.""" + text: Text type: Literal["text"] diff --git a/src/openai/types/beta/threads/text_content_block_param.py b/src/openai/types/beta/threads/text_content_block_param.py index 6313de32cc..22c864438d 100644 --- a/src/openai/types/beta/threads/text_content_block_param.py +++ b/src/openai/types/beta/threads/text_content_block_param.py @@ -8,6 +8,8 @@ class TextContentBlockParam(TypedDict, total=False): + """The text content that is part of a message.""" + text: Required[str] """Text content to be sent to the model""" diff --git a/src/openai/types/beta/threads/text_delta_block.py b/src/openai/types/beta/threads/text_delta_block.py index 586116e0d6..a3d339ccad 100644 --- a/src/openai/types/beta/threads/text_delta_block.py +++ b/src/openai/types/beta/threads/text_delta_block.py @@ -10,6 +10,8 @@ class TextDeltaBlock(BaseModel): + """The text content that is part of a message.""" + index: int """The index of the content part in the message.""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 6bc4bafe79..31219aa812 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -12,6 +12,8 @@ class ChoiceLogprobs(BaseModel): + """Log probability information for the choice.""" + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" @@ -41,6 +43,10 @@ class Choice(BaseModel): class ChatCompletion(BaseModel): + """ + Represents a chat completion response returned by model, based on the provided input. + """ + id: str """A unique identifier for the chat completion.""" diff --git a/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py b/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py index 813e6293f9..c5ba21626d 100644 --- a/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py +++ b/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py @@ -10,6 +10,8 @@ class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + allowed_tools: Required[ChatCompletionAllowedToolsParam] """Constrains the tools available to the model to a pre-defined set.""" diff --git a/src/openai/types/chat/chat_completion_allowed_tools_param.py b/src/openai/types/chat/chat_completion_allowed_tools_param.py index d9b72d8f34..ac31fcb543 100644 --- a/src/openai/types/chat/chat_completion_allowed_tools_param.py +++ b/src/openai/types/chat/chat_completion_allowed_tools_param.py @@ -9,6 +9,8 @@ class ChatCompletionAllowedToolsParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Required[Literal["auto", "required"]] """Constrains the tools available to the model to a pre-defined set. 
diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 1a08a959db..16a218438a 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -13,6 +13,11 @@ class Audio(TypedDict, total=False): + """ + Data about a previous audio response from the model. + [Learn more](https://platform.openai.com/docs/guides/audio). + """ + id: Required[str] """Unique identifier for a previous audio response from the model.""" @@ -21,6 +26,11 @@ class Audio(TypedDict, total=False): class FunctionCall(TypedDict, total=False): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. + """ + arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON @@ -34,6 +44,8 @@ class FunctionCall(TypedDict, total=False): class ChatCompletionAssistantMessageParam(TypedDict, total=False): + """Messages sent by the model in response to user messages.""" + role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" diff --git a/src/openai/types/chat/chat_completion_audio.py b/src/openai/types/chat/chat_completion_audio.py index 232d60563d..df346d8c9d 100644 --- a/src/openai/types/chat/chat_completion_audio.py +++ b/src/openai/types/chat/chat_completion_audio.py @@ -6,6 +6,11 @@ class ChatCompletionAudio(BaseModel): + """ + If the audio output modality is requested, this object contains data + about the audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). + """ + id: str """Unique identifier for this audio response.""" diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index b1576b41df..cac3c8b9d4 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -9,6 +9,12 @@ class ChatCompletionAudioParam(TypedDict, total=False): + """Parameters for audio output. + + Required when audio output is requested with + `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). + """ + format: Required[Literal["wav", "aac", "mp3", "flac", "opus", "pcm16"]] """Specifies the output audio format. diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index ea32d157ef..ecbfd0a5aa 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -19,6 +19,11 @@ class ChoiceDeltaFunctionCall(BaseModel): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. 
+ """ + arguments: Optional[str] = None """ The arguments to call the function with, as generated by the model in JSON @@ -57,6 +62,8 @@ class ChoiceDeltaToolCall(BaseModel): class ChoiceDelta(BaseModel): + """A chat completion delta generated by streamed model responses.""" + content: Optional[str] = None """The contents of the chunk message.""" @@ -77,6 +84,8 @@ class ChoiceDelta(BaseModel): class ChoiceLogprobs(BaseModel): + """Log probability information for the choice.""" + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" @@ -106,6 +115,12 @@ class Choice(BaseModel): class ChatCompletionChunk(BaseModel): + """ + Represents a streamed chunk of a chat completion response returned + by the model, based on the provided input. + [Learn more](https://platform.openai.com/docs/guides/streaming-responses). + """ + id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" diff --git a/src/openai/types/chat/chat_completion_content_part_image.py b/src/openai/types/chat/chat_completion_content_part_image.py index c1386b9dd3..a636c51fb4 100644 --- a/src/openai/types/chat/chat_completion_content_part_image.py +++ b/src/openai/types/chat/chat_completion_content_part_image.py @@ -21,6 +21,8 @@ class ImageURL(BaseModel): class ChatCompletionContentPartImage(BaseModel): + """Learn about [image inputs](https://platform.openai.com/docs/guides/vision).""" + image_url: ImageURL type: Literal["image_url"] diff --git a/src/openai/types/chat/chat_completion_content_part_image_param.py b/src/openai/types/chat/chat_completion_content_part_image_param.py index 9d407324d0..a230a340a7 100644 --- a/src/openai/types/chat/chat_completion_content_part_image_param.py +++ b/src/openai/types/chat/chat_completion_content_part_image_param.py @@ -20,6 +20,8 @@ class ImageURL(TypedDict, total=False): class ChatCompletionContentPartImageParam(TypedDict, total=False): + """Learn about [image inputs](https://platform.openai.com/docs/guides/vision).""" + image_url: Required[ImageURL] type: Required[Literal["image_url"]] diff --git a/src/openai/types/chat/chat_completion_content_part_input_audio_param.py b/src/openai/types/chat/chat_completion_content_part_input_audio_param.py index 0b1b1a80b1..98d9e3c5eb 100644 --- a/src/openai/types/chat/chat_completion_content_part_input_audio_param.py +++ b/src/openai/types/chat/chat_completion_content_part_input_audio_param.py @@ -16,6 +16,8 @@ class InputAudio(TypedDict, total=False): class ChatCompletionContentPartInputAudioParam(TypedDict, total=False): + """Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).""" + input_audio: Required[InputAudio] type: Required[Literal["input_audio"]] diff --git a/src/openai/types/chat/chat_completion_content_part_param.py b/src/openai/types/chat/chat_completion_content_part_param.py index cbedc853ba..b8c710a980 100644 --- a/src/openai/types/chat/chat_completion_content_part_param.py +++ b/src/openai/types/chat/chat_completion_content_part_param.py @@ -27,6 +27,10 @@ class FileFile(TypedDict, total=False): class File(TypedDict, total=False): + """ + Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation. 
+ """ + file: Required[FileFile] type: Required[Literal["file"]] diff --git a/src/openai/types/chat/chat_completion_content_part_text.py b/src/openai/types/chat/chat_completion_content_part_text.py index f09f35f708..e6d1bf1ec0 100644 --- a/src/openai/types/chat/chat_completion_content_part_text.py +++ b/src/openai/types/chat/chat_completion_content_part_text.py @@ -8,6 +8,10 @@ class ChatCompletionContentPartText(BaseModel): + """ + Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). + """ + text: str """The text content.""" diff --git a/src/openai/types/chat/chat_completion_content_part_text_param.py b/src/openai/types/chat/chat_completion_content_part_text_param.py index a270744417..be69bf66fa 100644 --- a/src/openai/types/chat/chat_completion_content_part_text_param.py +++ b/src/openai/types/chat/chat_completion_content_part_text_param.py @@ -8,6 +8,10 @@ class ChatCompletionContentPartTextParam(TypedDict, total=False): + """ + Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). + """ + text: Required[str] """The text content.""" diff --git a/src/openai/types/chat/chat_completion_custom_tool_param.py b/src/openai/types/chat/chat_completion_custom_tool_param.py index 14959ee449..d4f21ba0ca 100644 --- a/src/openai/types/chat/chat_completion_custom_tool_param.py +++ b/src/openai/types/chat/chat_completion_custom_tool_param.py @@ -16,11 +16,15 @@ class CustomFormatText(TypedDict, total=False): + """Unconstrained free-form text.""" + type: Required[Literal["text"]] """Unconstrained text format. Always `text`.""" class CustomFormatGrammarGrammar(TypedDict, total=False): + """Your chosen grammar.""" + definition: Required[str] """The grammar definition.""" @@ -29,6 +33,8 @@ class CustomFormatGrammarGrammar(TypedDict, total=False): class CustomFormatGrammar(TypedDict, total=False): + """A grammar defined by the user.""" + grammar: Required[CustomFormatGrammarGrammar] """Your chosen grammar.""" @@ -40,6 +46,8 @@ class CustomFormatGrammar(TypedDict, total=False): class Custom(TypedDict, total=False): + """Properties of the custom tool.""" + name: Required[str] """The name of the custom tool, used to identify it in tool calls.""" @@ -51,6 +59,8 @@ class Custom(TypedDict, total=False): class ChatCompletionCustomToolParam(TypedDict, total=False): + """A custom tool that processes input using a specified format.""" + custom: Required[Custom] """Properties of the custom tool.""" diff --git a/src/openai/types/chat/chat_completion_developer_message_param.py b/src/openai/types/chat/chat_completion_developer_message_param.py index 01e4fdb654..94fb3359f6 100644 --- a/src/openai/types/chat/chat_completion_developer_message_param.py +++ b/src/openai/types/chat/chat_completion_developer_message_param.py @@ -11,6 +11,12 @@ class ChatCompletionDeveloperMessageParam(TypedDict, total=False): + """ + Developer-provided instructions that the model should follow, regardless of + messages sent by the user. With o1 models and newer, `developer` messages + replace the previous `system` messages. 
+ """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the developer message.""" diff --git a/src/openai/types/chat/chat_completion_function_call_option_param.py b/src/openai/types/chat/chat_completion_function_call_option_param.py index 2bc014af7a..b1ca37bf58 100644 --- a/src/openai/types/chat/chat_completion_function_call_option_param.py +++ b/src/openai/types/chat/chat_completion_function_call_option_param.py @@ -8,5 +8,9 @@ class ChatCompletionFunctionCallOptionParam(TypedDict, total=False): + """ + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + """ + name: Required[str] """The name of the function to call.""" diff --git a/src/openai/types/chat/chat_completion_function_tool.py b/src/openai/types/chat/chat_completion_function_tool.py index 641568acf1..5d43a1e836 100644 --- a/src/openai/types/chat/chat_completion_function_tool.py +++ b/src/openai/types/chat/chat_completion_function_tool.py @@ -9,6 +9,8 @@ class ChatCompletionFunctionTool(BaseModel): + """A function tool that can be used to generate a response.""" + function: FunctionDefinition type: Literal["function"] diff --git a/src/openai/types/chat/chat_completion_function_tool_param.py b/src/openai/types/chat/chat_completion_function_tool_param.py index a39feea542..d336e8c08c 100644 --- a/src/openai/types/chat/chat_completion_function_tool_param.py +++ b/src/openai/types/chat/chat_completion_function_tool_param.py @@ -10,6 +10,8 @@ class ChatCompletionFunctionToolParam(TypedDict, total=False): + """A function tool that can be used to generate a response.""" + function: Required[FunctionDefinition] type: Required[Literal["function"]] diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index 5bb153fe3f..3f88f776b9 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -11,6 +11,8 @@ class AnnotationURLCitation(BaseModel): + """A URL citation when using web search.""" + end_index: int """The index of the last character of the URL citation in the message.""" @@ -25,6 +27,8 @@ class AnnotationURLCitation(BaseModel): class Annotation(BaseModel): + """A URL citation when using web search.""" + type: Literal["url_citation"] """The type of the URL citation. Always `url_citation`.""" @@ -33,6 +37,11 @@ class Annotation(BaseModel): class FunctionCall(BaseModel): + """Deprecated and replaced by `tool_calls`. + + The name and arguments of a function that should be called, as generated by the model. 
+ """ + arguments: str """ The arguments to call the function with, as generated by the model in JSON @@ -46,6 +55,8 @@ class FunctionCall(BaseModel): class ChatCompletionMessage(BaseModel): + """A chat completion message generated by the model.""" + content: Optional[str] = None """The contents of the message.""" diff --git a/src/openai/types/chat/chat_completion_message_custom_tool_call.py b/src/openai/types/chat/chat_completion_message_custom_tool_call.py index b13c176afe..9542d8b924 100644 --- a/src/openai/types/chat/chat_completion_message_custom_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_custom_tool_call.py @@ -8,6 +8,8 @@ class Custom(BaseModel): + """The custom tool that the model called.""" + input: str """The input for the custom tool call generated by the model.""" @@ -16,6 +18,8 @@ class Custom(BaseModel): class ChatCompletionMessageCustomToolCall(BaseModel): + """A call to a custom tool created by the model.""" + id: str """The ID of the tool call.""" diff --git a/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py b/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py index 3753e0f200..3d03f0a93c 100644 --- a/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py +++ b/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py @@ -8,6 +8,8 @@ class Custom(TypedDict, total=False): + """The custom tool that the model called.""" + input: Required[str] """The input for the custom tool call generated by the model.""" @@ -16,6 +18,8 @@ class Custom(TypedDict, total=False): class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False): + """A call to a custom tool created by the model.""" + id: Required[str] """The ID of the tool call.""" diff --git a/src/openai/types/chat/chat_completion_message_function_tool_call.py b/src/openai/types/chat/chat_completion_message_function_tool_call.py index d056d9aff6..e7278b923c 100644 --- a/src/openai/types/chat/chat_completion_message_function_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_function_tool_call.py @@ -8,6 +8,8 @@ class Function(BaseModel): + """The function that the model called.""" + arguments: str """ The arguments to call the function with, as generated by the model in JSON @@ -21,6 +23,8 @@ class Function(BaseModel): class ChatCompletionMessageFunctionToolCall(BaseModel): + """A call to a function tool created by the model.""" + id: str """The ID of the tool call.""" diff --git a/src/openai/types/chat/chat_completion_message_function_tool_call_param.py b/src/openai/types/chat/chat_completion_message_function_tool_call_param.py index 7c827edd2c..a8094ea63a 100644 --- a/src/openai/types/chat/chat_completion_message_function_tool_call_param.py +++ b/src/openai/types/chat/chat_completion_message_function_tool_call_param.py @@ -8,6 +8,8 @@ class Function(TypedDict, total=False): + """The function that the model called.""" + arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON @@ -21,6 +23,8 @@ class Function(TypedDict, total=False): class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False): + """A call to a function tool created by the model.""" + id: Required[str] """The ID of the tool call.""" diff --git a/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py b/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py index 1c123c0acb..147fb87965 100644 --- 
a/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py +++ b/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py @@ -13,6 +13,11 @@ class Custom(TypedDict, total=False): class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific custom tool. + """ + custom: Required[Custom] type: Required[Literal["custom"]] diff --git a/src/openai/types/chat/chat_completion_named_tool_choice_param.py b/src/openai/types/chat/chat_completion_named_tool_choice_param.py index ae1acfb909..f684fcea5e 100644 --- a/src/openai/types/chat/chat_completion_named_tool_choice_param.py +++ b/src/openai/types/chat/chat_completion_named_tool_choice_param.py @@ -13,6 +13,11 @@ class Function(TypedDict, total=False): class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): + """Specifies a tool the model should use. + + Use to force the model to call a specific function. + """ + function: Required[Function] type: Required[Literal["function"]] diff --git a/src/openai/types/chat/chat_completion_prediction_content_param.py b/src/openai/types/chat/chat_completion_prediction_content_param.py index c44e6e3653..6184a314b5 100644 --- a/src/openai/types/chat/chat_completion_prediction_content_param.py +++ b/src/openai/types/chat/chat_completion_prediction_content_param.py @@ -11,6 +11,11 @@ class ChatCompletionPredictionContentParam(TypedDict, total=False): + """ + Static predicted output content, such as the content of a text file that is + being regenerated. + """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """ The content that should be matched when generating a model response. If diff --git a/src/openai/types/chat/chat_completion_store_message.py b/src/openai/types/chat/chat_completion_store_message.py index 661342716b..6a805cce76 100644 --- a/src/openai/types/chat/chat_completion_store_message.py +++ b/src/openai/types/chat/chat_completion_store_message.py @@ -13,6 +13,8 @@ class ChatCompletionStoreMessage(ChatCompletionMessage): + """A chat completion message generated by the model.""" + id: str """The identifier of the chat message.""" diff --git a/src/openai/types/chat/chat_completion_stream_options_param.py b/src/openai/types/chat/chat_completion_stream_options_param.py index fc3191d2d1..9b881fff02 100644 --- a/src/openai/types/chat/chat_completion_stream_options_param.py +++ b/src/openai/types/chat/chat_completion_stream_options_param.py @@ -8,6 +8,8 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): + """Options for streaming response. Only set this when you set `stream: true`.""" + include_obfuscation: bool """When true, stream obfuscation will be enabled. diff --git a/src/openai/types/chat/chat_completion_system_message_param.py b/src/openai/types/chat/chat_completion_system_message_param.py index 172ccea09e..9dcc5e07f9 100644 --- a/src/openai/types/chat/chat_completion_system_message_param.py +++ b/src/openai/types/chat/chat_completion_system_message_param.py @@ -11,6 +11,12 @@ class ChatCompletionSystemMessageParam(TypedDict, total=False): + """ + Developer-provided instructions that the model should follow, regardless of + messages sent by the user. With o1 models and newer, use `developer` messages + for this purpose instead. 
+ """ + content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]] """The contents of the system message.""" diff --git a/src/openai/types/chat/chat_completion_user_message_param.py b/src/openai/types/chat/chat_completion_user_message_param.py index 5c15322a22..c97ba535eb 100644 --- a/src/openai/types/chat/chat_completion_user_message_param.py +++ b/src/openai/types/chat/chat_completion_user_message_param.py @@ -11,6 +11,11 @@ class ChatCompletionUserMessageParam(TypedDict, total=False): + """ + Messages sent by an end user, containing prompts or additional context + information. + """ + content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]] """The contents of the user message.""" diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index f2d55f7ec4..613787e9b5 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -382,6 +382,8 @@ class Function(TypedDict, total=False): class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): + """Approximate location parameters for the search.""" + city: str """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -402,6 +404,8 @@ class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): class WebSearchOptionsUserLocation(TypedDict, total=False): + """Approximate location parameters for the search.""" + approximate: Required[WebSearchOptionsUserLocationApproximate] """Approximate location parameters for the search.""" @@ -410,6 +414,11 @@ class WebSearchOptionsUserLocation(TypedDict, total=False): class WebSearchOptions(TypedDict, total=False): + """ + This tool searches the web for relevant results to use in a response. + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + """ + search_context_size: Literal["low", "medium", "high"] """ High level guidance for the amount of context window space to use for the diff --git a/src/openai/types/completion.py b/src/openai/types/completion.py index d3b3102a4a..ee59b2e209 100644 --- a/src/openai/types/completion.py +++ b/src/openai/types/completion.py @@ -11,6 +11,11 @@ class Completion(BaseModel): + """Represents a completion response from the API. + + Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). 
+ """ + id: str """A unique identifier for the completion.""" diff --git a/src/openai/types/completion_usage.py b/src/openai/types/completion_usage.py index d8c4e84cf7..9b5202da14 100644 --- a/src/openai/types/completion_usage.py +++ b/src/openai/types/completion_usage.py @@ -8,6 +8,8 @@ class CompletionTokensDetails(BaseModel): + """Breakdown of tokens used in a completion.""" + accepted_prediction_tokens: Optional[int] = None """ When using Predicted Outputs, the number of tokens in the prediction that @@ -30,6 +32,8 @@ class CompletionTokensDetails(BaseModel): class PromptTokensDetails(BaseModel): + """Breakdown of tokens used in the prompt.""" + audio_tokens: Optional[int] = None """Audio input tokens present in the prompt.""" @@ -38,6 +42,8 @@ class PromptTokensDetails(BaseModel): class CompletionUsage(BaseModel): + """Usage statistics for the completion request.""" + completion_tokens: int """Number of tokens in the generated completion.""" diff --git a/src/openai/types/container_create_params.py b/src/openai/types/container_create_params.py index d629c24d38..47101ecdb6 100644 --- a/src/openai/types/container_create_params.py +++ b/src/openai/types/container_create_params.py @@ -24,6 +24,8 @@ class ContainerCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """Container expiration time in seconds relative to the 'anchor' time.""" + anchor: Required[Literal["last_active_at"]] """Time anchor for the expiration time. diff --git a/src/openai/types/container_create_response.py b/src/openai/types/container_create_response.py index cbad914283..0ebcc04062 100644 --- a/src/openai/types/container_create_response.py +++ b/src/openai/types/container_create_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. + """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" diff --git a/src/openai/types/container_list_response.py b/src/openai/types/container_list_response.py index 29416f0941..8f39548201 100644 --- a/src/openai/types/container_list_response.py +++ b/src/openai/types/container_list_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. + """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" diff --git a/src/openai/types/container_retrieve_response.py b/src/openai/types/container_retrieve_response.py index 31fedeac64..9ba3e18c3a 100644 --- a/src/openai/types/container_retrieve_response.py +++ b/src/openai/types/container_retrieve_response.py @@ -9,6 +9,12 @@ class ExpiresAfter(BaseModel): + """ + The container will expire after this time period. + The anchor is the reference point for the expiration. + The minutes is the number of minutes after the anchor before the container expires. 
+ """ + anchor: Optional[Literal["last_active_at"]] = None """The reference point for the expiration.""" diff --git a/src/openai/types/conversations/computer_screenshot_content.py b/src/openai/types/conversations/computer_screenshot_content.py index 897b7ada0d..e42096eba2 100644 --- a/src/openai/types/conversations/computer_screenshot_content.py +++ b/src/openai/types/conversations/computer_screenshot_content.py @@ -9,6 +9,8 @@ class ComputerScreenshotContent(BaseModel): + """A screenshot of a computer.""" + file_id: Optional[str] = None """The identifier of an uploaded file that contains the screenshot.""" diff --git a/src/openai/types/conversations/conversation_item.py b/src/openai/types/conversations/conversation_item.py index 052d09ce77..46268d381c 100644 --- a/src/openai/types/conversations/conversation_item.py +++ b/src/openai/types/conversations/conversation_item.py @@ -36,6 +36,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -50,6 +52,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -70,6 +74,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -87,6 +93,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -101,6 +109,8 @@ class LocalShellCallOutput(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -115,6 +125,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -132,6 +144,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -149,6 +163,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + id: str """The unique ID of the approval response""" @@ -166,6 +182,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" diff --git a/src/openai/types/conversations/conversation_item_list.py b/src/openai/types/conversations/conversation_item_list.py index 20091102cb..74d945d864 100644 --- a/src/openai/types/conversations/conversation_item_list.py +++ b/src/openai/types/conversations/conversation_item_list.py @@ -10,6 +10,8 @@ class ConversationItemList(BaseModel): + """A list of Conversation items.""" + data: List[ConversationItem] """A list of conversation items.""" diff --git a/src/openai/types/conversations/message.py b/src/openai/types/conversations/message.py index dbf5a14680..86c8860da8 100644 --- a/src/openai/types/conversations/message.py +++ b/src/openai/types/conversations/message.py @@ -18,6 +18,8 @@ class ContentReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -42,6 +44,8 @@ class ContentReasoningText(BaseModel): class 
Message(BaseModel): + """A message to or from the model.""" + id: str """The unique ID of the message.""" diff --git a/src/openai/types/conversations/summary_text_content.py b/src/openai/types/conversations/summary_text_content.py index d357b15725..6464a36599 100644 --- a/src/openai/types/conversations/summary_text_content.py +++ b/src/openai/types/conversations/summary_text_content.py @@ -8,6 +8,8 @@ class SummaryTextContent(BaseModel): + """A summary text from the model.""" + text: str """A summary of the reasoning output from the model so far.""" diff --git a/src/openai/types/conversations/text_content.py b/src/openai/types/conversations/text_content.py index f1ae079597..e602466c47 100644 --- a/src/openai/types/conversations/text_content.py +++ b/src/openai/types/conversations/text_content.py @@ -8,6 +8,8 @@ class TextContent(BaseModel): + """A text content.""" + text: str type: Literal["text"] diff --git a/src/openai/types/create_embedding_response.py b/src/openai/types/create_embedding_response.py index eff247a112..314a7f9afc 100644 --- a/src/openai/types/create_embedding_response.py +++ b/src/openai/types/create_embedding_response.py @@ -10,6 +10,8 @@ class Usage(BaseModel): + """The usage information for the request.""" + prompt_tokens: int """The number of tokens used by the prompt.""" diff --git a/src/openai/types/embedding.py b/src/openai/types/embedding.py index 769b1d165f..fbffec01e0 100644 --- a/src/openai/types/embedding.py +++ b/src/openai/types/embedding.py @@ -9,6 +9,8 @@ class Embedding(BaseModel): + """Represents an embedding vector returned by embedding endpoint.""" + embedding: List[float] """The embedding vector, which is a list of floats. diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index eb7f86cd92..0f2100b718 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -64,6 +64,13 @@ class EvalCreateParams(TypedDict, total=False): class DataSourceConfigCustom(TypedDict, total=False): + """ + A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs. + This schema is used to define the shape of the data that will be: + - Used to define your testing criteria and + - What data is required when creating a run + """ + item_schema: Required[Dict[str, object]] """The json schema for each row in the data source.""" @@ -78,6 +85,11 @@ class DataSourceConfigCustom(TypedDict, total=False): class DataSourceConfigLogs(TypedDict, total=False): + """ + A data source config which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + """ + type: Required[Literal["logs"]] """The type of data source. Always `logs`.""" @@ -86,6 +98,8 @@ class DataSourceConfigLogs(TypedDict, total=False): class DataSourceConfigStoredCompletions(TypedDict, total=False): + """Deprecated in favor of LogsDataSourceConfig.""" + type: Required[Literal["stored_completions"]] """The type of data source. 
Always `stored_completions`.""" @@ -105,6 +119,8 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False): class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -113,6 +129,8 @@ class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total= class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total=False): + """An image input to the model.""" + image_url: Required[str] """The URL of the image input.""" @@ -137,6 +155,14 @@ class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total= class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[TestingCriterionLabelModelInputEvalItemContent] """Inputs to the model - can contain template strings.""" @@ -156,6 +182,11 @@ class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): class TestingCriterionLabelModel(TypedDict, total=False): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. + """ + input: Required[Iterable[TestingCriterionLabelModelInput]] """A list of chat messages forming the prompt or context. @@ -179,16 +210,22 @@ class TestingCriterionLabelModel(TypedDict, total=False): class TestingCriterionTextSimilarity(TextSimilarityGraderParam, total=False): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + pass_threshold: Required[float] """The threshold for the score.""" class TestingCriterionPython(PythonGraderParam, total=False): + """A PythonGrader object that runs a python script on the input.""" + pass_threshold: float """The threshold for the score.""" class TestingCriterionScoreModel(ScoreModelGraderParam, total=False): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + pass_threshold: float """The threshold for the score.""" diff --git a/src/openai/types/eval_create_response.py b/src/openai/types/eval_create_response.py index 20b0e3127f..f3166422ba 100644 --- a/src/openai/types/eval_create_response.py +++ b/src/openai/types/eval_create_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. 
Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalCreateResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/src/openai/types/eval_custom_data_source_config.py b/src/openai/types/eval_custom_data_source_config.py index d99701cc71..6234c4f47a 100644 --- a/src/openai/types/eval_custom_data_source_config.py +++ b/src/openai/types/eval_custom_data_source_config.py @@ -11,6 +11,13 @@ class EvalCustomDataSourceConfig(BaseModel): + """ + A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces. + The response schema defines the shape of the data that will be: + - Used to define your testing criteria and + - What data is required when creating a run + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas diff --git a/src/openai/types/eval_list_response.py b/src/openai/types/eval_list_response.py index 5ac4997cf6..7cd92c5a09 100644 --- a/src/openai/types/eval_list_response.py +++ b/src/openai/types/eval_list_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. 
Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalListResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/src/openai/types/eval_retrieve_response.py b/src/openai/types/eval_retrieve_response.py index 758f9cc040..56db7d6bc1 100644 --- a/src/openai/types/eval_retrieve_response.py +++ b/src/openai/types/eval_retrieve_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalRetrieveResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. 
+ Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/src/openai/types/eval_stored_completions_data_source_config.py b/src/openai/types/eval_stored_completions_data_source_config.py index 98f86a4719..d11f6ae14c 100644 --- a/src/openai/types/eval_stored_completions_data_source_config.py +++ b/src/openai/types/eval_stored_completions_data_source_config.py @@ -12,6 +12,8 @@ class EvalStoredCompletionsDataSourceConfig(BaseModel): + """Deprecated in favor of LogsDataSourceConfig.""" + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas diff --git a/src/openai/types/eval_update_response.py b/src/openai/types/eval_update_response.py index 3f0b90ae03..30d4dbc3a1 100644 --- a/src/openai/types/eval_update_response.py +++ b/src/openai/types/eval_update_response.py @@ -28,6 +28,13 @@ class DataSourceConfigLogs(BaseModel): + """ + A LogsDataSourceConfig which specifies the metadata property of your logs query. + This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + The schema returned by this data source config is used to defined what variables are available in your evals. + `item` and `sample` are both defined when using this data source config. + """ + schema_: Dict[str, object] = FieldInfo(alias="schema") """ The json schema for the run data source items. Learn how to build JSON schemas @@ -56,18 +63,21 @@ class DataSourceConfigLogs(BaseModel): class TestingCriterionEvalGraderTextSimilarity(TextSimilarityGrader): __test__ = False + """A TextSimilarityGrader object which grades text based on similarity metrics.""" pass_threshold: float """The threshold for the score.""" class TestingCriterionEvalGraderPython(PythonGrader): __test__ = False + """A PythonGrader object that runs a python script on the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): __test__ = False + """A ScoreModelGrader object that uses a model to assign a score to the input.""" pass_threshold: Optional[float] = None """The threshold for the score.""" @@ -82,6 +92,15 @@ class TestingCriterionEvalGraderScoreModel(ScoreModelGrader): class EvalUpdateResponse(BaseModel): + """ + An Eval object with a data source config and testing criteria. + An Eval represents a task to be done for your LLM integration. + Like: + - Improve the quality of my chatbot + - See how well my chatbot handles customer support + - Check if o4-mini is better at my usecase than gpt-4o + """ + id: str """Unique identifier for the evaluation.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 4236746a17..6ec39873b7 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -58,6 +58,8 @@ class SourceFileID(BaseModel): class SourceStoredCompletions(BaseModel): + """A StoredCompletionsRunDataSource configuration describing a set of filters""" + type: Literal["stored_completions"] """The type of source. 
Always `stored_completions`.""" @@ -90,6 +92,8 @@ class SourceStoredCompletions(BaseModel): class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -98,6 +102,8 @@ class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -122,6 +128,14 @@ class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): class InputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: InputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" @@ -217,6 +231,8 @@ class SamplingParams(BaseModel): class CreateEvalCompletionsRunDataSource(BaseModel): + """A CompletionsRunDataSource object describing a model sampling configuration.""" + source: Source """Determines what populates the `item` namespace in this run's data source.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 751a1432b8..22d07c5598 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -58,6 +58,8 @@ class SourceFileID(TypedDict, total=False): class SourceStoredCompletions(TypedDict, total=False): + """A StoredCompletionsRunDataSource configuration describing a set of filters""" + type: Required[Literal["stored_completions"]] """The type of source. Always `stored_completions`.""" @@ -88,6 +90,8 @@ class SourceStoredCompletions(TypedDict, total=False): class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -96,6 +100,8 @@ class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=Fa class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False): + """An image input to the model.""" + image_url: Required[str] """The URL of the image input.""" @@ -120,6 +126,14 @@ class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=Fa class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. 
+ """ + content: Required[InputMessagesTemplateTemplateEvalItemContent] """Inputs to the model - can contain template strings.""" @@ -213,6 +227,8 @@ class SamplingParams(TypedDict, total=False): class CreateEvalCompletionsRunDataSourceParam(TypedDict, total=False): + """A CompletionsRunDataSource object describing a model sampling configuration.""" + source: Required[Source] """Determines what populates the `item` namespace in this run's data source.""" diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source.py b/src/openai/types/evals/create_eval_jsonl_run_data_source.py index ae36f8c55f..36ede2d9eb 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source.py @@ -35,6 +35,10 @@ class SourceFileID(BaseModel): class CreateEvalJSONLRunDataSource(BaseModel): + """ + A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + """ + source: Source """Determines what populates the `item` namespace in the data source.""" diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py index 217ee36346..b87ba9c5df 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py @@ -40,6 +40,10 @@ class SourceFileID(TypedDict, total=False): class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False): + """ + A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + """ + source: Required[Source] """Determines what populates the `item` namespace in the data source.""" diff --git a/src/openai/types/evals/eval_api_error.py b/src/openai/types/evals/eval_api_error.py index fe76871024..9b2c1871fb 100644 --- a/src/openai/types/evals/eval_api_error.py +++ b/src/openai/types/evals/eval_api_error.py @@ -6,6 +6,8 @@ class EvalAPIError(BaseModel): + """An object representing an error response from the Eval API.""" + code: str """The error code.""" diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index f7fb0ec4ad..40f071c959 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -66,6 +66,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -144,6 +146,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -152,6 +156,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -176,6 +182,14 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. 
Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" @@ -221,6 +235,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. @@ -297,6 +319,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -355,6 +379,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -369,6 +395,8 @@ class ResultCounts(BaseModel): class RunCancelResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index a70d1923e5..993e10c738 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -79,6 +79,8 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=Fa class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Required[Literal["responses"]] """The type of run data source. Always `responses`.""" @@ -160,6 +162,8 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateCha class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText( TypedDict, total=False ): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -170,6 +174,8 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage( TypedDict, total=False ): + """An image input to the model.""" + image_url: Required[str] """The URL of the image input.""" @@ -194,6 +200,14 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. 
+ """ + content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] """Inputs to the model - can contain template strings.""" @@ -239,6 +253,14 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(Typed class DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. @@ -315,6 +337,8 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: Required[DataSourceCreateEvalResponsesRunDataSourceSource] """Determines what populates the `item` namespace in this run's data source.""" diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index fb2220b3a1..93dae7adde 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -66,6 +66,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -144,6 +146,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -152,6 +156,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -176,6 +182,14 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" @@ -221,6 +235,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. 
@@ -297,6 +319,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -355,6 +379,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -369,6 +395,8 @@ class ResultCounts(BaseModel): class RunCreateResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index adac4ffdc8..c3c745b21c 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -66,6 +66,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -144,6 +146,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -152,6 +156,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -176,6 +182,14 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" @@ -221,6 +235,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. 
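As a rough sketch of reading these run response models back, the snippet below fetches a run and prints the `errored` counter documented above; the `status` and `result_counts` attribute names are assumed from the API reference rather than visible in this hunk, and the IDs are placeholders.

from openai import OpenAI

client = OpenAI()

run = client.evals.runs.retrieve("run_123", eval_id="eval_123")
print(run.status)                 # assumed attribute name
print(run.result_counts.errored)  # `errored` is the counter described above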
@@ -297,6 +319,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -355,6 +379,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -369,6 +395,8 @@ class ResultCounts(BaseModel): class RunListResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index abdc5ebae5..d02256c679 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -66,6 +66,8 @@ class DataSourceResponsesSourceFileID(BaseModel): class DataSourceResponsesSourceResponses(BaseModel): + """A EvalResponsesSource object describing a run data source configuration.""" + type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -144,6 +146,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -152,6 +156,8 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -176,6 +182,14 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage( class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" @@ -221,6 +235,14 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): class DataSourceResponsesSamplingParamsText(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. 
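Runs recorded under an eval can also be enumerated; a short sketch with a placeholder eval ID, relying on the returned page auto-paginating when iterated.

from openai import OpenAI

client = OpenAI()

# Walk every run recorded under one eval; iteration fetches further pages on demand.
for run in client.evals.runs.list(eval_id="eval_123"):
    print(run.id)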
@@ -297,6 +319,8 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): + """A ResponsesRunDataSource object describing a model sampling configuration.""" + source: DataSourceResponsesSource """Determines what populates the `item` namespace in this run's data source.""" @@ -355,6 +379,8 @@ class PerTestingCriteriaResult(BaseModel): class ResultCounts(BaseModel): + """Counters summarizing the outcomes of the evaluation run.""" + errored: int """Number of output items that resulted in an error.""" @@ -369,6 +395,8 @@ class ResultCounts(BaseModel): class RunRetrieveResponse(BaseModel): + """A schema representing an evaluation run.""" + id: str """Unique identifier for the evaluation run.""" diff --git a/src/openai/types/evals/runs/output_item_list_response.py b/src/openai/types/evals/runs/output_item_list_response.py index e88c21766f..a906a29df7 100644 --- a/src/openai/types/evals/runs/output_item_list_response.py +++ b/src/openai/types/evals/runs/output_item_list_response.py @@ -12,6 +12,8 @@ class Result(BaseModel): + """A single grader result for an evaluation run output item.""" + name: str """The name of the grader.""" @@ -41,6 +43,8 @@ def __getattr__(self, attr: str) -> object: ... class SampleInput(BaseModel): + """An input message.""" + content: str """The content of the message.""" @@ -57,6 +61,8 @@ class SampleOutput(BaseModel): class SampleUsage(BaseModel): + """Token usage details for the sample.""" + cached_tokens: int """The number of tokens retrieved from cache.""" @@ -71,6 +77,8 @@ class SampleUsage(BaseModel): class Sample(BaseModel): + """A sample containing the input and output of the evaluation run.""" + error: EvalAPIError """An object representing an error response from the Eval API.""" @@ -103,6 +111,8 @@ class Sample(BaseModel): class OutputItemListResponse(BaseModel): + """A schema representing an evaluation run output item.""" + id: str """Unique identifier for the evaluation run output item.""" diff --git a/src/openai/types/evals/runs/output_item_retrieve_response.py b/src/openai/types/evals/runs/output_item_retrieve_response.py index c728629b41..42ba4b2864 100644 --- a/src/openai/types/evals/runs/output_item_retrieve_response.py +++ b/src/openai/types/evals/runs/output_item_retrieve_response.py @@ -12,6 +12,8 @@ class Result(BaseModel): + """A single grader result for an evaluation run output item.""" + name: str """The name of the grader.""" @@ -41,6 +43,8 @@ def __getattr__(self, attr: str) -> object: ... 
class SampleInput(BaseModel): + """An input message.""" + content: str """The content of the message.""" @@ -57,6 +61,8 @@ class SampleOutput(BaseModel): class SampleUsage(BaseModel): + """Token usage details for the sample.""" + cached_tokens: int """The number of tokens retrieved from cache.""" @@ -71,6 +77,8 @@ class SampleUsage(BaseModel): class Sample(BaseModel): + """A sample containing the input and output of the evaluation run.""" + error: EvalAPIError """An object representing an error response from the Eval API.""" @@ -103,6 +111,8 @@ class Sample(BaseModel): class OutputItemRetrieveResponse(BaseModel): + """A schema representing an evaluation run output item.""" + id: str """Unique identifier for the evaluation run output item.""" diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index f4583b16a3..f4367f7a7d 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -32,6 +32,11 @@ class FileCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/file_object.py b/src/openai/types/file_object.py index 883c2de019..4a9901fd3f 100644 --- a/src/openai/types/file_object.py +++ b/src/openai/types/file_object.py @@ -9,6 +9,8 @@ class FileObject(BaseModel): + """The `File` object represents a document that has been uploaded to OpenAI.""" + id: str """The file identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/fine_tuning/checkpoints/permission_create_response.py b/src/openai/types/fine_tuning/checkpoints/permission_create_response.py index 9bc14c00cc..459fa9dee7 100644 --- a/src/openai/types/fine_tuning/checkpoints/permission_create_response.py +++ b/src/openai/types/fine_tuning/checkpoints/permission_create_response.py @@ -8,6 +8,10 @@ class PermissionCreateResponse(BaseModel): + """ + The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint. + """ + id: str """The permission identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py b/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py index 14c73b55d0..34208958ef 100644 --- a/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +++ b/src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py @@ -9,6 +9,10 @@ class Data(BaseModel): + """ + The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint. + """ + id: str """The permission identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/fine_tuning/dpo_hyperparameters.py b/src/openai/types/fine_tuning/dpo_hyperparameters.py index b0b3f0581b..cd39f308a4 100644 --- a/src/openai/types/fine_tuning/dpo_hyperparameters.py +++ b/src/openai/types/fine_tuning/dpo_hyperparameters.py @@ -9,6 +9,8 @@ class DpoHyperparameters(BaseModel): + """The hyperparameters used for the DPO fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. 
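Output items connect a run back to individual graded rows; a sketch of listing them with placeholder IDs, assuming the `results`, `sample`, and `usage` attribute paths (only the `name` and `cached_tokens` fields are documented in the hunks above).

from openai import OpenAI

client = OpenAI()

for item in client.evals.runs.output_items.list("run_123", eval_id="eval_123"):
    print(item.id)
    for result in item.results:             # assumed attribute name
        print(result.name)                  # grader name, documented above
    print(item.sample.usage.cached_tokens)  # assumed path to SampleUsage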
diff --git a/src/openai/types/fine_tuning/dpo_hyperparameters_param.py b/src/openai/types/fine_tuning/dpo_hyperparameters_param.py index 87c6ee80a5..12b2c41ca8 100644 --- a/src/openai/types/fine_tuning/dpo_hyperparameters_param.py +++ b/src/openai/types/fine_tuning/dpo_hyperparameters_param.py @@ -9,6 +9,8 @@ class DpoHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the DPO fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. diff --git a/src/openai/types/fine_tuning/dpo_method.py b/src/openai/types/fine_tuning/dpo_method.py index 3e20f360dd..452c182016 100644 --- a/src/openai/types/fine_tuning/dpo_method.py +++ b/src/openai/types/fine_tuning/dpo_method.py @@ -9,5 +9,7 @@ class DpoMethod(BaseModel): + """Configuration for the DPO fine-tuning method.""" + hyperparameters: Optional[DpoHyperparameters] = None """The hyperparameters used for the DPO fine-tuning job.""" diff --git a/src/openai/types/fine_tuning/dpo_method_param.py b/src/openai/types/fine_tuning/dpo_method_param.py index ce6b6510f6..6bd74d9760 100644 --- a/src/openai/types/fine_tuning/dpo_method_param.py +++ b/src/openai/types/fine_tuning/dpo_method_param.py @@ -10,5 +10,7 @@ class DpoMethodParam(TypedDict, total=False): + """Configuration for the DPO fine-tuning method.""" + hyperparameters: DpoHyperparametersParam """The hyperparameters used for the DPO fine-tuning job.""" diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py index f626fbba64..bb8a4d597b 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job.py +++ b/src/openai/types/fine_tuning/fine_tuning_job.py @@ -14,6 +14,10 @@ class Error(BaseModel): + """ + For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + """ + code: str """A machine-readable error code.""" @@ -28,6 +32,11 @@ class Error(BaseModel): class Hyperparameters(BaseModel): + """The hyperparameters used for the fine-tuning job. + + This value will only be returned when running `supervised` jobs. + """ + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. @@ -49,6 +58,8 @@ class Hyperparameters(BaseModel): class Method(BaseModel): + """The method used for fine-tuning.""" + type: Literal["supervised", "dpo", "reinforcement"] """The type of method. Is either `supervised`, `dpo`, or `reinforcement`.""" @@ -63,6 +74,10 @@ class Method(BaseModel): class FineTuningJob(BaseModel): + """ + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. 
+ """ + id: str """The object identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/fine_tuning/fine_tuning_job_event.py b/src/openai/types/fine_tuning/fine_tuning_job_event.py index 1d728bd765..7452b818c6 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_event.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_event.py @@ -10,6 +10,8 @@ class FineTuningJobEvent(BaseModel): + """Fine-tuning job event object""" + id: str """The object identifier.""" diff --git a/src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py index 4ac282eb54..0e33aa84c8 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +++ b/src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py @@ -8,6 +8,13 @@ class FineTuningJobWandbIntegration(BaseModel): + """The settings for your integration with Weights and Biases. + + This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + """ + project: str """The name of the project that the new run will be created under.""" diff --git a/src/openai/types/fine_tuning/job_create_params.py b/src/openai/types/fine_tuning/job_create_params.py index 351d4e0e1b..181bede2d9 100644 --- a/src/openai/types/fine_tuning/job_create_params.py +++ b/src/openai/types/fine_tuning/job_create_params.py @@ -100,6 +100,11 @@ class JobCreateParams(TypedDict, total=False): class Hyperparameters(TypedDict, total=False): + """ + The hyperparameters used for the fine-tuning job. + This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter. + """ + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. @@ -121,6 +126,13 @@ class Hyperparameters(TypedDict, total=False): class IntegrationWandb(TypedDict, total=False): + """The settings for your integration with Weights and Biases. + + This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + """ + project: Required[str] """The name of the project that the new run will be created under.""" @@ -163,6 +175,8 @@ class Integration(TypedDict, total=False): class Method(TypedDict, total=False): + """The method used for fine-tuning.""" + type: Required[Literal["supervised", "dpo", "reinforcement"]] """The type of method. Is either `supervised`, `dpo`, or `reinforcement`.""" diff --git a/src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py b/src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py index bd07317a3e..f8a04b6395 100644 --- a/src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +++ b/src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py @@ -9,6 +9,8 @@ class Metrics(BaseModel): + """Metrics at the step number during the fine-tuning job.""" + full_valid_loss: Optional[float] = None full_valid_mean_token_accuracy: Optional[float] = None @@ -25,6 +27,10 @@ class Metrics(BaseModel): class FineTuningJobCheckpoint(BaseModel): + """ + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. 
+ """ + id: str """The checkpoint identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/fine_tuning/reinforcement_hyperparameters.py b/src/openai/types/fine_tuning/reinforcement_hyperparameters.py index 7c1762d38c..4c289fd659 100644 --- a/src/openai/types/fine_tuning/reinforcement_hyperparameters.py +++ b/src/openai/types/fine_tuning/reinforcement_hyperparameters.py @@ -9,6 +9,8 @@ class ReinforcementHyperparameters(BaseModel): + """The hyperparameters used for the reinforcement fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. diff --git a/src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py b/src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py index 0cc12fcb17..7be716f143 100644 --- a/src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py +++ b/src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py @@ -9,6 +9,8 @@ class ReinforcementHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the reinforcement fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. diff --git a/src/openai/types/fine_tuning/reinforcement_method.py b/src/openai/types/fine_tuning/reinforcement_method.py index 9b65c41033..a8a3685148 100644 --- a/src/openai/types/fine_tuning/reinforcement_method.py +++ b/src/openai/types/fine_tuning/reinforcement_method.py @@ -17,6 +17,8 @@ class ReinforcementMethod(BaseModel): + """Configuration for the reinforcement fine-tuning method.""" + grader: Grader """The grader used for the fine-tuning job.""" diff --git a/src/openai/types/fine_tuning/reinforcement_method_param.py b/src/openai/types/fine_tuning/reinforcement_method_param.py index 00d5060536..ea75bfeb69 100644 --- a/src/openai/types/fine_tuning/reinforcement_method_param.py +++ b/src/openai/types/fine_tuning/reinforcement_method_param.py @@ -20,6 +20,8 @@ class ReinforcementMethodParam(TypedDict, total=False): + """Configuration for the reinforcement fine-tuning method.""" + grader: Required[Grader] """The grader used for the fine-tuning job.""" diff --git a/src/openai/types/fine_tuning/supervised_hyperparameters.py b/src/openai/types/fine_tuning/supervised_hyperparameters.py index 3955ecf437..1231bbdd80 100644 --- a/src/openai/types/fine_tuning/supervised_hyperparameters.py +++ b/src/openai/types/fine_tuning/supervised_hyperparameters.py @@ -9,6 +9,8 @@ class SupervisedHyperparameters(BaseModel): + """The hyperparameters used for the fine-tuning job.""" + batch_size: Union[Literal["auto"], int, None] = None """Number of examples in each batch. diff --git a/src/openai/types/fine_tuning/supervised_hyperparameters_param.py b/src/openai/types/fine_tuning/supervised_hyperparameters_param.py index bd37d9b239..de0e021dea 100644 --- a/src/openai/types/fine_tuning/supervised_hyperparameters_param.py +++ b/src/openai/types/fine_tuning/supervised_hyperparameters_param.py @@ -9,6 +9,8 @@ class SupervisedHyperparametersParam(TypedDict, total=False): + """The hyperparameters used for the fine-tuning job.""" + batch_size: Union[Literal["auto"], int] """Number of examples in each batch. 
diff --git a/src/openai/types/fine_tuning/supervised_method.py b/src/openai/types/fine_tuning/supervised_method.py index 3a32bf27a0..96e102582d 100644 --- a/src/openai/types/fine_tuning/supervised_method.py +++ b/src/openai/types/fine_tuning/supervised_method.py @@ -9,5 +9,7 @@ class SupervisedMethod(BaseModel): + """Configuration for the supervised fine-tuning method.""" + hyperparameters: Optional[SupervisedHyperparameters] = None """The hyperparameters used for the fine-tuning job.""" diff --git a/src/openai/types/fine_tuning/supervised_method_param.py b/src/openai/types/fine_tuning/supervised_method_param.py index ba277853d7..4381cd184b 100644 --- a/src/openai/types/fine_tuning/supervised_method_param.py +++ b/src/openai/types/fine_tuning/supervised_method_param.py @@ -10,5 +10,7 @@ class SupervisedMethodParam(TypedDict, total=False): + """Configuration for the supervised fine-tuning method.""" + hyperparameters: SupervisedHyperparametersParam """The hyperparameters used for the fine-tuning job.""" diff --git a/src/openai/types/graders/label_model_grader.py b/src/openai/types/graders/label_model_grader.py index 0929349c24..141306b510 100644 --- a/src/openai/types/graders/label_model_grader.py +++ b/src/openai/types/graders/label_model_grader.py @@ -11,6 +11,8 @@ class InputContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -19,6 +21,8 @@ class InputContentOutputText(BaseModel): class InputContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -38,6 +42,14 @@ class InputContentInputImage(BaseModel): class Input(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: InputContent """Inputs to the model - can contain template strings.""" @@ -52,6 +64,11 @@ class Input(BaseModel): class LabelModelGrader(BaseModel): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. + """ + input: List[Input] labels: List[str] diff --git a/src/openai/types/graders/label_model_grader_param.py b/src/openai/types/graders/label_model_grader_param.py index 7bd6fdb4a7..a23be2a236 100644 --- a/src/openai/types/graders/label_model_grader_param.py +++ b/src/openai/types/graders/label_model_grader_param.py @@ -13,6 +13,8 @@ class InputContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -21,6 +23,8 @@ class InputContentOutputText(TypedDict, total=False): class InputContentInputImage(TypedDict, total=False): + """An image input to the model.""" + image_url: Required[str] """The URL of the image input.""" @@ -45,6 +49,14 @@ class InputContentInputImage(TypedDict, total=False): class Input(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. 
+ """ + content: Required[InputContent] """Inputs to the model - can contain template strings.""" @@ -59,6 +71,11 @@ class Input(TypedDict, total=False): class LabelModelGraderParam(TypedDict, total=False): + """ + A LabelModelGrader object which uses a model to assign labels to each item + in the evaluation. + """ + input: Required[Iterable[Input]] labels: Required[SequenceNotStr[str]] diff --git a/src/openai/types/graders/multi_grader.py b/src/openai/types/graders/multi_grader.py index 7539c68ef5..022ddb406a 100644 --- a/src/openai/types/graders/multi_grader.py +++ b/src/openai/types/graders/multi_grader.py @@ -16,6 +16,10 @@ class MultiGrader(BaseModel): + """ + A MultiGrader object combines the output of multiple graders to produce a single score. + """ + calculate_output: str """A formula to calculate the output based on grader results.""" diff --git a/src/openai/types/graders/multi_grader_param.py b/src/openai/types/graders/multi_grader_param.py index 28a6705b81..064267a5aa 100644 --- a/src/openai/types/graders/multi_grader_param.py +++ b/src/openai/types/graders/multi_grader_param.py @@ -19,6 +19,10 @@ class MultiGraderParam(TypedDict, total=False): + """ + A MultiGrader object combines the output of multiple graders to produce a single score. + """ + calculate_output: Required[str] """A formula to calculate the output based on grader results.""" diff --git a/src/openai/types/graders/python_grader.py b/src/openai/types/graders/python_grader.py index faa10b1ef9..81aafdae0a 100644 --- a/src/openai/types/graders/python_grader.py +++ b/src/openai/types/graders/python_grader.py @@ -9,6 +9,8 @@ class PythonGrader(BaseModel): + """A PythonGrader object that runs a python script on the input.""" + name: str """The name of the grader.""" diff --git a/src/openai/types/graders/python_grader_param.py b/src/openai/types/graders/python_grader_param.py index efb923751e..3be7bab432 100644 --- a/src/openai/types/graders/python_grader_param.py +++ b/src/openai/types/graders/python_grader_param.py @@ -8,6 +8,8 @@ class PythonGraderParam(TypedDict, total=False): + """A PythonGrader object that runs a python script on the input.""" + name: Required[str] """The name of the grader.""" diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index b3ba6758bb..6dd5a0eee8 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -19,6 +19,8 @@ class InputContentOutputText(BaseModel): + """A text output from the model.""" + text: str """The text output from the model.""" @@ -27,6 +29,8 @@ class InputContentOutputText(BaseModel): class InputContentInputImage(BaseModel): + """An image input to the model.""" + image_url: str """The URL of the image input.""" @@ -46,6 +50,14 @@ class InputContentInputImage(BaseModel): class Input(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. 
+ """ + content: InputContent """Inputs to the model - can contain template strings.""" @@ -60,6 +72,8 @@ class Input(BaseModel): class SamplingParams(BaseModel): + """The sampling parameters for the model.""" + max_completions_tokens: Optional[int] = None """The maximum number of tokens the grader model may generate in its response.""" @@ -91,6 +105,8 @@ class SamplingParams(BaseModel): class ScoreModelGrader(BaseModel): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + input: List[Input] """The input text. This may include template strings.""" diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index eb1f6e03ac..33452e43f3 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -20,6 +20,8 @@ class InputContentOutputText(TypedDict, total=False): + """A text output from the model.""" + text: Required[str] """The text output from the model.""" @@ -28,6 +30,8 @@ class InputContentOutputText(TypedDict, total=False): class InputContentInputImage(TypedDict, total=False): + """An image input to the model.""" + image_url: Required[str] """The URL of the image input.""" @@ -52,6 +56,14 @@ class InputContentInputImage(TypedDict, total=False): class Input(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[InputContent] """Inputs to the model - can contain template strings.""" @@ -66,6 +78,8 @@ class Input(TypedDict, total=False): class SamplingParams(TypedDict, total=False): + """The sampling parameters for the model.""" + max_completions_tokens: Optional[int] """The maximum number of tokens the grader model may generate in its response.""" @@ -97,6 +111,8 @@ class SamplingParams(TypedDict, total=False): class ScoreModelGraderParam(TypedDict, total=False): + """A ScoreModelGrader object that uses a model to assign a score to the input.""" + input: Required[Iterable[Input]] """The input text. This may include template strings.""" diff --git a/src/openai/types/graders/string_check_grader.py b/src/openai/types/graders/string_check_grader.py index 3bf0b8c868..efd3679da9 100644 --- a/src/openai/types/graders/string_check_grader.py +++ b/src/openai/types/graders/string_check_grader.py @@ -8,6 +8,10 @@ class StringCheckGrader(BaseModel): + """ + A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + """ + input: str """The input text. This may include template strings.""" diff --git a/src/openai/types/graders/string_check_grader_param.py b/src/openai/types/graders/string_check_grader_param.py index 27b204cec0..da9e961568 100644 --- a/src/openai/types/graders/string_check_grader_param.py +++ b/src/openai/types/graders/string_check_grader_param.py @@ -8,6 +8,10 @@ class StringCheckGraderParam(TypedDict, total=False): + """ + A StringCheckGrader object that performs a string comparison between input and reference using a specified operation. + """ + input: Required[str] """The input text. 
This may include template strings.""" diff --git a/src/openai/types/graders/text_similarity_grader.py b/src/openai/types/graders/text_similarity_grader.py index 9082ac8969..a9d39a2fbd 100644 --- a/src/openai/types/graders/text_similarity_grader.py +++ b/src/openai/types/graders/text_similarity_grader.py @@ -8,6 +8,8 @@ class TextSimilarityGrader(BaseModel): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + evaluation_metric: Literal[ "cosine", "fuzzy_match", diff --git a/src/openai/types/graders/text_similarity_grader_param.py b/src/openai/types/graders/text_similarity_grader_param.py index 1646afc84b..0907c3c2a7 100644 --- a/src/openai/types/graders/text_similarity_grader_param.py +++ b/src/openai/types/graders/text_similarity_grader_param.py @@ -8,6 +8,8 @@ class TextSimilarityGraderParam(TypedDict, total=False): + """A TextSimilarityGrader object which grades text based on similarity metrics.""" + evaluation_metric: Required[ Literal[ "cosine", diff --git a/src/openai/types/image.py b/src/openai/types/image.py index ecaef3fd58..9e2a23fa40 100644 --- a/src/openai/types/image.py +++ b/src/openai/types/image.py @@ -8,6 +8,8 @@ class Image(BaseModel): + """Represents the content or the URL of an image generated by the OpenAI API.""" + b64_json: Optional[str] = None """The base64-encoded JSON of the generated image. diff --git a/src/openai/types/image_edit_completed_event.py b/src/openai/types/image_edit_completed_event.py index a40682da6a..5bd2986d2a 100644 --- a/src/openai/types/image_edit_completed_event.py +++ b/src/openai/types/image_edit_completed_event.py @@ -8,6 +8,8 @@ class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -16,6 +18,8 @@ class UsageInputTokensDetails(BaseModel): class Usage(BaseModel): + """For `gpt-image-1` only, the token usage information for the image generation.""" + input_tokens: int """The number of tokens (images and text) in the input prompt.""" @@ -30,6 +34,8 @@ class Usage(BaseModel): class ImageEditCompletedEvent(BaseModel): + """Emitted when image editing has completed and the final image is available.""" + b64_json: str """Base64-encoded final edited image data, suitable for rendering as an image.""" diff --git a/src/openai/types/image_edit_partial_image_event.py b/src/openai/types/image_edit_partial_image_event.py index 20da45efc3..7bbd8c9b13 100644 --- a/src/openai/types/image_edit_partial_image_event.py +++ b/src/openai/types/image_edit_partial_image_event.py @@ -8,6 +8,8 @@ class ImageEditPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image editing streaming.""" + b64_json: str """Base64-encoded partial image data, suitable for rendering as an image.""" diff --git a/src/openai/types/image_gen_completed_event.py b/src/openai/types/image_gen_completed_event.py index e78da842d4..dc9ccb8cfc 100644 --- a/src/openai/types/image_gen_completed_event.py +++ b/src/openai/types/image_gen_completed_event.py @@ -8,6 +8,8 @@ class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -16,6 +18,8 @@ class UsageInputTokensDetails(BaseModel): class Usage(BaseModel): + """For `gpt-image-1` only, the token usage information for the image generation.""" + input_tokens: int """The number of tokens (images and text) in the input 
prompt.""" @@ -30,6 +34,8 @@ class Usage(BaseModel): class ImageGenCompletedEvent(BaseModel): + """Emitted when image generation has completed and the final image is available.""" + b64_json: str """Base64-encoded image data, suitable for rendering as an image.""" diff --git a/src/openai/types/image_gen_partial_image_event.py b/src/openai/types/image_gen_partial_image_event.py index 965d450604..df29c00a63 100644 --- a/src/openai/types/image_gen_partial_image_event.py +++ b/src/openai/types/image_gen_partial_image_event.py @@ -8,6 +8,8 @@ class ImageGenPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image generation streaming.""" + b64_json: str """Base64-encoded partial image data, suitable for rendering as an image.""" diff --git a/src/openai/types/images_response.py b/src/openai/types/images_response.py index 89cc71df24..914017823e 100644 --- a/src/openai/types/images_response.py +++ b/src/openai/types/images_response.py @@ -10,6 +10,8 @@ class UsageInputTokensDetails(BaseModel): + """The input tokens detailed information for the image generation.""" + image_tokens: int """The number of image tokens in the input prompt.""" @@ -18,6 +20,8 @@ class UsageInputTokensDetails(BaseModel): class Usage(BaseModel): + """For `gpt-image-1` only, the token usage information for the image generation.""" + input_tokens: int """The number of tokens (images and text) in the input prompt.""" @@ -32,6 +36,8 @@ class Usage(BaseModel): class ImagesResponse(BaseModel): + """The response from the image generation endpoint.""" + created: int """The Unix timestamp (in seconds) of when the image was created.""" diff --git a/src/openai/types/model.py b/src/openai/types/model.py index 2631ee8d1a..6506224a20 100644 --- a/src/openai/types/model.py +++ b/src/openai/types/model.py @@ -8,6 +8,8 @@ class Model(BaseModel): + """Describes an OpenAI model offering that can be used with the API.""" + id: str """The model identifier, which can be referenced in the API endpoints.""" diff --git a/src/openai/types/moderation.py b/src/openai/types/moderation.py index 608f562218..a6acc26db4 100644 --- a/src/openai/types/moderation.py +++ b/src/openai/types/moderation.py @@ -11,6 +11,8 @@ class Categories(BaseModel): + """A list of the categories, and whether they are flagged or not.""" + harassment: bool """ Content that expresses, incites, or promotes harassing language towards any @@ -89,6 +91,10 @@ class Categories(BaseModel): class CategoryAppliedInputTypes(BaseModel): + """ + A list of the categories along with the input type(s) that the score applies to. 
+ """ + harassment: List[Literal["text"]] """The applied input type(s) for the category 'harassment'.""" @@ -130,6 +136,8 @@ class CategoryAppliedInputTypes(BaseModel): class CategoryScores(BaseModel): + """A list of the categories along with their scores as predicted by model.""" + harassment: float """The score for the category 'harassment'.""" diff --git a/src/openai/types/moderation_create_response.py b/src/openai/types/moderation_create_response.py index 79684f8a70..23c03875bf 100644 --- a/src/openai/types/moderation_create_response.py +++ b/src/openai/types/moderation_create_response.py @@ -9,6 +9,8 @@ class ModerationCreateResponse(BaseModel): + """Represents if a given text input is potentially harmful.""" + id: str """The unique identifier for the moderation request.""" diff --git a/src/openai/types/moderation_image_url_input_param.py b/src/openai/types/moderation_image_url_input_param.py index 9a69a6a257..9c0fe25685 100644 --- a/src/openai/types/moderation_image_url_input_param.py +++ b/src/openai/types/moderation_image_url_input_param.py @@ -8,11 +8,15 @@ class ImageURL(TypedDict, total=False): + """Contains either an image URL or a data URL for a base64 encoded image.""" + url: Required[str] """Either a URL of the image or the base64 encoded image data.""" class ModerationImageURLInputParam(TypedDict, total=False): + """An object describing an image to classify.""" + image_url: Required[ImageURL] """Contains either an image URL or a data URL for a base64 encoded image.""" diff --git a/src/openai/types/moderation_text_input_param.py b/src/openai/types/moderation_text_input_param.py index e5da53337b..786ecbe625 100644 --- a/src/openai/types/moderation_text_input_param.py +++ b/src/openai/types/moderation_text_input_param.py @@ -8,6 +8,8 @@ class ModerationTextInputParam(TypedDict, total=False): + """An object describing text to classify.""" + text: Required[str] """A string of text to classify.""" diff --git a/src/openai/types/other_file_chunking_strategy_object.py b/src/openai/types/other_file_chunking_strategy_object.py index e4cd61a8fc..a5371425d7 100644 --- a/src/openai/types/other_file_chunking_strategy_object.py +++ b/src/openai/types/other_file_chunking_strategy_object.py @@ -8,5 +8,10 @@ class OtherFileChunkingStrategyObject(BaseModel): + """This is returned when the chunking strategy is unknown. + + Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + """ + type: Literal["other"] """Always `other`.""" diff --git a/src/openai/types/realtime/client_secret_create_params.py b/src/openai/types/realtime/client_secret_create_params.py index 5f0b0d796f..2297f3f6d2 100644 --- a/src/openai/types/realtime/client_secret_create_params.py +++ b/src/openai/types/realtime/client_secret_create_params.py @@ -28,6 +28,14 @@ class ClientSecretCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """Configuration for the client secret expiration. + + Expiration refers to the time after which + a client secret will no longer be valid for creating sessions. The session itself may + continue after that time once started. A secret can be used to create multiple sessions + until it expires. 
+ """ + anchor: Literal["created_at"] """ The anchor point for the client secret expiration, meaning that `seconds` will diff --git a/src/openai/types/realtime/client_secret_create_response.py b/src/openai/types/realtime/client_secret_create_response.py index 2aed66a25b..3a30b10544 100644 --- a/src/openai/types/realtime/client_secret_create_response.py +++ b/src/openai/types/realtime/client_secret_create_response.py @@ -16,6 +16,8 @@ class ClientSecretCreateResponse(BaseModel): + """Response from creating a session and client secret for the Realtime API.""" + expires_at: int """Expiration timestamp for the client secret, in seconds since epoch.""" diff --git a/src/openai/types/realtime/conversation_created_event.py b/src/openai/types/realtime/conversation_created_event.py index 6ec1dc8c85..3026322e86 100644 --- a/src/openai/types/realtime/conversation_created_event.py +++ b/src/openai/types/realtime/conversation_created_event.py @@ -9,6 +9,8 @@ class Conversation(BaseModel): + """The conversation resource.""" + id: Optional[str] = None """The unique ID of the conversation.""" @@ -17,6 +19,8 @@ class Conversation(BaseModel): class ConversationCreatedEvent(BaseModel): + """Returned when a conversation is created. Emitted right after session creation.""" + conversation: Conversation """The conversation resource.""" diff --git a/src/openai/types/realtime/conversation_item_added.py b/src/openai/types/realtime/conversation_item_added.py index ae9f6803e4..0e336a9261 100644 --- a/src/openai/types/realtime/conversation_item_added.py +++ b/src/openai/types/realtime/conversation_item_added.py @@ -10,6 +10,16 @@ class ConversationItemAdded(BaseModel): + """Sent by the server when an Item is added to the default Conversation. + + This can happen in several cases: + - When the client sends a `conversation.item.create` event. + - When the input audio buffer is committed. In this case the item will be a user message containing the audio from the buffer. + - When the model is generating a Response. In this case the `conversation.item.added` event will be sent when the model starts generating a specific Item, and thus it will not yet have any content (and `status` will be `in_progress`). + + The event will include the full content of the Item (except when model is generating a Response) except for audio data, which can be retrieved separately with a `conversation.item.retrieve` event if necessary. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/conversation_item_create_event.py b/src/openai/types/realtime/conversation_item_create_event.py index 8fa2dfe08c..bf2d129744 100644 --- a/src/openai/types/realtime/conversation_item_create_event.py +++ b/src/openai/types/realtime/conversation_item_create_event.py @@ -10,6 +10,16 @@ class ConversationItemCreateEvent(BaseModel): + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. 
+ """ + item: ConversationItem """A single item within a Realtime conversation.""" diff --git a/src/openai/types/realtime/conversation_item_create_event_param.py b/src/openai/types/realtime/conversation_item_create_event_param.py index 8530dc72cd..be7f0ff011 100644 --- a/src/openai/types/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/realtime/conversation_item_create_event_param.py @@ -10,6 +10,16 @@ class ConversationItemCreateEventParam(TypedDict, total=False): + """ + Add a new Item to the Conversation's context, including messages, function + calls, and function call responses. This event can be used both to populate a + "history" of the conversation and to add new items mid-stream, but has the + current limitation that it cannot populate assistant audio messages. + + If successful, the server will respond with a `conversation.item.created` + event, otherwise an `error` event will be sent. + """ + item: Required[ConversationItemParam] """A single item within a Realtime conversation.""" diff --git a/src/openai/types/realtime/conversation_item_created_event.py b/src/openai/types/realtime/conversation_item_created_event.py index 13f24ad31a..6ae6f05ffe 100644 --- a/src/openai/types/realtime/conversation_item_created_event.py +++ b/src/openai/types/realtime/conversation_item_created_event.py @@ -10,6 +10,19 @@ class ConversationItemCreatedEvent(BaseModel): + """Returned when a conversation item is created. + + There are several scenarios that produce this event: + - The server is generating a Response, which if successful will produce + either one or two Items, which will be of type `message` + (role `assistant`) or type `function_call`. + - The input audio buffer has been committed, either by the client or the + server (in `server_vad` mode). The server will take the content of the + input audio buffer and add it to a new user message Item. + - The client has sent a `conversation.item.create` event to add a new Item + to the Conversation. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/conversation_item_delete_event.py b/src/openai/types/realtime/conversation_item_delete_event.py index 3734f72e9d..c662f386e3 100644 --- a/src/openai/types/realtime/conversation_item_delete_event.py +++ b/src/openai/types/realtime/conversation_item_delete_event.py @@ -9,6 +9,14 @@ class ConversationItemDeleteEvent(BaseModel): + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + item_id: str """The ID of the item to delete.""" diff --git a/src/openai/types/realtime/conversation_item_delete_event_param.py b/src/openai/types/realtime/conversation_item_delete_event_param.py index c3f88d6627..e79bb68c9a 100644 --- a/src/openai/types/realtime/conversation_item_delete_event_param.py +++ b/src/openai/types/realtime/conversation_item_delete_event_param.py @@ -8,6 +8,14 @@ class ConversationItemDeleteEventParam(TypedDict, total=False): + """Send this event when you want to remove any item from the conversation + history. + + The server will respond with a `conversation.item.deleted` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. 
+ """ + item_id: Required[str] """The ID of the item to delete.""" diff --git a/src/openai/types/realtime/conversation_item_deleted_event.py b/src/openai/types/realtime/conversation_item_deleted_event.py index cfe6fe85fc..9826289ebf 100644 --- a/src/openai/types/realtime/conversation_item_deleted_event.py +++ b/src/openai/types/realtime/conversation_item_deleted_event.py @@ -8,6 +8,12 @@ class ConversationItemDeletedEvent(BaseModel): + """ + Returned when an item in the conversation is deleted by the client with a + `conversation.item.delete` event. This event is used to synchronize the + server's understanding of the conversation history with the client's view. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/conversation_item_done.py b/src/openai/types/realtime/conversation_item_done.py index a4c9b8a840..6a823c65a8 100644 --- a/src/openai/types/realtime/conversation_item_done.py +++ b/src/openai/types/realtime/conversation_item_done.py @@ -10,6 +10,11 @@ class ConversationItemDone(BaseModel): + """Returned when a conversation item is finalized. + + The event will include the full content of the Item except for audio data, which can be retrieved separately with a `conversation.item.retrieve` event if needed. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py b/src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py index 09b20aa184..3304233f8f 100644 --- a/src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +++ b/src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -16,6 +16,8 @@ class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): + """Details about the input tokens billed for this request.""" + audio_tokens: Optional[int] = None """Number of audio tokens billed for this request.""" @@ -24,6 +26,8 @@ class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): class UsageTranscriptTextUsageTokens(BaseModel): + """Usage statistics for models billed by token usage.""" + input_tokens: int """Number of input tokens billed for this request.""" @@ -41,6 +45,8 @@ class UsageTranscriptTextUsageTokens(BaseModel): class UsageTranscriptTextUsageDuration(BaseModel): + """Usage statistics for models billed by audio input duration.""" + seconds: float """Duration of the input audio in seconds.""" @@ -52,6 +58,19 @@ class UsageTranscriptTextUsageDuration(BaseModel): class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): + """ + This event is the output of audio transcription for user audio written to the + user audio buffer. Transcription begins when the input audio buffer is + committed by the client or server (when VAD is enabled). Transcription runs + asynchronously with Response creation, so this event may come before or after + the Response events. + + Realtime API models accept audio natively, and thus input transcription is a + separate process run on a separate ASR (Automatic Speech Recognition) model. + The transcript may diverge somewhat from the model's interpretation, and + should be treated as a rough guide. 
+ """ + content_index: int """The index of the content part containing the audio.""" diff --git a/src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py b/src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py index f49e6f636f..5f3f54810f 100644 --- a/src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +++ b/src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py @@ -10,6 +10,10 @@ class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel): + """ + Returned when the text value of an input audio transcription content part is updated with incremental transcription results. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py b/src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py index edb97bbf6f..e8ad05e43c 100644 --- a/src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +++ b/src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py @@ -9,6 +9,8 @@ class Error(BaseModel): + """Details of the transcription error.""" + code: Optional[str] = None """Error code, if any.""" @@ -23,6 +25,12 @@ class Error(BaseModel): class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel): + """ + Returned when input audio transcription is configured, and a transcription + request for a user message failed. These events are separate from other + `error` events so that the client can identify the related Item. + """ + content_index: int """The index of the content part containing the audio.""" diff --git a/src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py b/src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py index e2cbc9d299..dcc4916580 100644 --- a/src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py +++ b/src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py @@ -8,6 +8,8 @@ class ConversationItemInputAudioTranscriptionSegment(BaseModel): + """Returned when an input audio transcription segment is identified for an item.""" + id: str """The segment identifier.""" diff --git a/src/openai/types/realtime/conversation_item_retrieve_event.py b/src/openai/types/realtime/conversation_item_retrieve_event.py index 018c2ccc59..e7d8eb6c49 100644 --- a/src/openai/types/realtime/conversation_item_retrieve_event.py +++ b/src/openai/types/realtime/conversation_item_retrieve_event.py @@ -9,6 +9,13 @@ class ConversationItemRetrieveEvent(BaseModel): + """ + Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. + The server will respond with a `conversation.item.retrieved` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. 
+ """ + item_id: str """The ID of the item to retrieve.""" diff --git a/src/openai/types/realtime/conversation_item_retrieve_event_param.py b/src/openai/types/realtime/conversation_item_retrieve_event_param.py index 71b3ffa499..59fdb6fb93 100644 --- a/src/openai/types/realtime/conversation_item_retrieve_event_param.py +++ b/src/openai/types/realtime/conversation_item_retrieve_event_param.py @@ -8,6 +8,13 @@ class ConversationItemRetrieveEventParam(TypedDict, total=False): + """ + Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD. + The server will respond with a `conversation.item.retrieved` event, + unless the item does not exist in the conversation history, in which case the + server will respond with an error. + """ + item_id: Required[str] """The ID of the item to retrieve.""" diff --git a/src/openai/types/realtime/conversation_item_truncate_event.py b/src/openai/types/realtime/conversation_item_truncate_event.py index d6c6779cc8..16c82183c4 100644 --- a/src/openai/types/realtime/conversation_item_truncate_event.py +++ b/src/openai/types/realtime/conversation_item_truncate_event.py @@ -9,6 +9,21 @@ class ConversationItemTruncateEvent(BaseModel): + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + audio_end_ms: int """Inclusive duration up to which audio is truncated, in milliseconds. diff --git a/src/openai/types/realtime/conversation_item_truncate_event_param.py b/src/openai/types/realtime/conversation_item_truncate_event_param.py index f5ab13a419..e9b41fc980 100644 --- a/src/openai/types/realtime/conversation_item_truncate_event_param.py +++ b/src/openai/types/realtime/conversation_item_truncate_event_param.py @@ -8,6 +8,21 @@ class ConversationItemTruncateEventParam(TypedDict, total=False): + """Send this event to truncate a previous assistant message’s audio. + + The server + will produce audio faster than realtime, so this event is useful when the user + interrupts to truncate audio that has already been sent to the client but not + yet played. This will synchronize the server's understanding of the audio with + the client's playback. + + Truncating audio will delete the server-side text transcript to ensure there + is not text in the context that hasn't been heard by the user. + + If successful, the server will respond with a `conversation.item.truncated` + event. + """ + audio_end_ms: Required[int] """Inclusive duration up to which audio is truncated, in milliseconds. 
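A minimal sketch of what a client-side `conversation.item.truncate` payload could look like, based on the docstrings added above. Only `audio_end_ms` and the event type appear in the hunks shown here; the `item_id` and `content_index` fields and the example values are assumptions for illustration, not part of this change.

import json

def truncate_event(item_id: str, played_ms: int) -> str:
    # Trim an assistant audio message to the portion the user actually heard;
    # per the docstring above, the server also drops the matching transcript
    # so unheard text does not linger in the context.
    event = {
        "type": "conversation.item.truncate",
        "item_id": item_id,         # assistant message item to truncate (assumed field)
        "content_index": 0,         # index of the audio content part (assumed field)
        "audio_end_ms": played_ms,  # inclusive duration the client actually played
    }
    return json.dumps(event)

# e.g. ws.send(truncate_event("item_abc123", played_ms=2350)) on an open Realtime WebSocket.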
diff --git a/src/openai/types/realtime/conversation_item_truncated_event.py b/src/openai/types/realtime/conversation_item_truncated_event.py index f56cabc3d9..c78a776d9b 100644 --- a/src/openai/types/realtime/conversation_item_truncated_event.py +++ b/src/openai/types/realtime/conversation_item_truncated_event.py @@ -8,6 +8,15 @@ class ConversationItemTruncatedEvent(BaseModel): + """ + Returned when an earlier assistant audio message item is truncated by the + client with a `conversation.item.truncate` event. This event is used to + synchronize the server's understanding of the audio with the client's playback. + + This action will truncate the audio and remove the server-side text transcript + to ensure there is no text in the context that hasn't been heard by the user. + """ + audio_end_ms: int """The duration up to which the audio was truncated, in milliseconds.""" diff --git a/src/openai/types/realtime/input_audio_buffer_append_event.py b/src/openai/types/realtime/input_audio_buffer_append_event.py index 8562cf0af4..4c9e9a544d 100644 --- a/src/openai/types/realtime/input_audio_buffer_append_event.py +++ b/src/openai/types/realtime/input_audio_buffer_append_event.py @@ -9,6 +9,23 @@ class InputAudioBufferAppendEvent(BaseModel): + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. A "commit" will create a new + user message item in the conversation history from the buffer content and clear the buffer. + Input audio transcription (if enabled) will be generated when the buffer is committed. + + If VAD is enabled the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. Input audio noise reduction operates on writes to the audio buffer. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike most other client events, the server will + not send a confirmation response to this event. + """ + audio: str """Base64-encoded audio bytes. diff --git a/src/openai/types/realtime/input_audio_buffer_append_event_param.py b/src/openai/types/realtime/input_audio_buffer_append_event_param.py index 3ad0bc737d..a0d308e4d9 100644 --- a/src/openai/types/realtime/input_audio_buffer_append_event_param.py +++ b/src/openai/types/realtime/input_audio_buffer_append_event_param.py @@ -8,6 +8,23 @@ class InputAudioBufferAppendEventParam(TypedDict, total=False): + """Send this event to append audio bytes to the input audio buffer. + + The audio + buffer is temporary storage you can write to and later commit. A "commit" will create a new + user message item in the conversation history from the buffer content and clear the buffer. + Input audio transcription (if enabled) will be generated when the buffer is committed. + + If VAD is enabled the audio buffer is used to detect speech and the server will decide + when to commit. When Server VAD is disabled, you must commit the audio buffer + manually. Input audio noise reduction operates on writes to the audio buffer. + + The client may choose how much audio to place in each event up to a maximum + of 15 MiB, for example streaming smaller chunks from the client may allow the + VAD to be more responsive. Unlike most other client events, the server will + not send a confirmation response to this event. 
+ """ + audio: Required[str] """Base64-encoded audio bytes. diff --git a/src/openai/types/realtime/input_audio_buffer_clear_event.py b/src/openai/types/realtime/input_audio_buffer_clear_event.py index 9922ff3b32..5526bcbfa9 100644 --- a/src/openai/types/realtime/input_audio_buffer_clear_event.py +++ b/src/openai/types/realtime/input_audio_buffer_clear_event.py @@ -9,6 +9,12 @@ class InputAudioBufferClearEvent(BaseModel): + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. + """ + type: Literal["input_audio_buffer.clear"] """The event type, must be `input_audio_buffer.clear`.""" diff --git a/src/openai/types/realtime/input_audio_buffer_clear_event_param.py b/src/openai/types/realtime/input_audio_buffer_clear_event_param.py index 2bd6bc5a02..8e0e9c55fa 100644 --- a/src/openai/types/realtime/input_audio_buffer_clear_event_param.py +++ b/src/openai/types/realtime/input_audio_buffer_clear_event_param.py @@ -8,6 +8,12 @@ class InputAudioBufferClearEventParam(TypedDict, total=False): + """Send this event to clear the audio bytes in the buffer. + + The server will + respond with an `input_audio_buffer.cleared` event. + """ + type: Required[Literal["input_audio_buffer.clear"]] """The event type, must be `input_audio_buffer.clear`.""" diff --git a/src/openai/types/realtime/input_audio_buffer_cleared_event.py b/src/openai/types/realtime/input_audio_buffer_cleared_event.py index af71844f2f..e4775567dc 100644 --- a/src/openai/types/realtime/input_audio_buffer_cleared_event.py +++ b/src/openai/types/realtime/input_audio_buffer_cleared_event.py @@ -8,6 +8,11 @@ class InputAudioBufferClearedEvent(BaseModel): + """ + Returned when the input audio buffer is cleared by the client with a + `input_audio_buffer.clear` event. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/input_audio_buffer_commit_event.py b/src/openai/types/realtime/input_audio_buffer_commit_event.py index 125c3ba1e8..fe2ec01783 100644 --- a/src/openai/types/realtime/input_audio_buffer_commit_event.py +++ b/src/openai/types/realtime/input_audio_buffer_commit_event.py @@ -9,6 +9,12 @@ class InputAudioBufferCommitEvent(BaseModel): + """ + Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. + + Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event. + """ + type: Literal["input_audio_buffer.commit"] """The event type, must be `input_audio_buffer.commit`.""" diff --git a/src/openai/types/realtime/input_audio_buffer_commit_event_param.py b/src/openai/types/realtime/input_audio_buffer_commit_event_param.py index c9c927ab98..20342795e8 100644 --- a/src/openai/types/realtime/input_audio_buffer_commit_event_param.py +++ b/src/openai/types/realtime/input_audio_buffer_commit_event_param.py @@ -8,6 +8,12 @@ class InputAudioBufferCommitEventParam(TypedDict, total=False): + """ + Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. 
When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically. + + Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event. + """ + type: Required[Literal["input_audio_buffer.commit"]] """The event type, must be `input_audio_buffer.commit`.""" diff --git a/src/openai/types/realtime/input_audio_buffer_committed_event.py b/src/openai/types/realtime/input_audio_buffer_committed_event.py index 5ed1b4ccc7..15dc8254f3 100644 --- a/src/openai/types/realtime/input_audio_buffer_committed_event.py +++ b/src/openai/types/realtime/input_audio_buffer_committed_event.py @@ -9,6 +9,13 @@ class InputAudioBufferCommittedEvent(BaseModel): + """ + Returned when an input audio buffer is committed, either by the client or + automatically in server VAD mode. The `item_id` property is the ID of the user + message item that will be created, thus a `conversation.item.created` event + will also be sent to the client. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py b/src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py index d61ed4bda7..c2623cc7b8 100644 --- a/src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py +++ b/src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py @@ -8,6 +8,14 @@ class InputAudioBufferDtmfEventReceivedEvent(BaseModel): + """**SIP Only:** Returned when a DTMF event is received. + + A DTMF event is a message that + represents a telephone keypad press (0–9, *, #, A–D). The `event` property + is the key that the user pressed. The `received_at` is the UTC Unix timestamp + at which the server received the event. + """ + event: str """The telephone keypad that was pressed by the user.""" diff --git a/src/openai/types/realtime/input_audio_buffer_speech_started_event.py b/src/openai/types/realtime/input_audio_buffer_speech_started_event.py index 865205d786..1bd4c74eb0 100644 --- a/src/openai/types/realtime/input_audio_buffer_speech_started_event.py +++ b/src/openai/types/realtime/input_audio_buffer_speech_started_event.py @@ -8,6 +8,19 @@ class InputAudioBufferSpeechStartedEvent(BaseModel): + """ + Sent by the server when in `server_vad` mode to indicate that speech has been + detected in the audio buffer. This can happen any time audio is added to the + buffer (unless speech is already detected). The client may want to use this + event to interrupt audio playback or provide visual feedback to the user. + + The client should expect to receive an `input_audio_buffer.speech_stopped` event + when speech stops. The `item_id` property is the ID of the user message item + that will be created when speech stops and will also be included in the + `input_audio_buffer.speech_stopped` event (unless the client manually commits + the audio buffer during VAD activation).
+ """ + audio_start_ms: int """ Milliseconds from the start of all audio written to the buffer during the diff --git a/src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py b/src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py index 6cb7845ff4..b3fb20929a 100644 --- a/src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py +++ b/src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py @@ -8,6 +8,12 @@ class InputAudioBufferSpeechStoppedEvent(BaseModel): + """ + Returned in `server_vad` mode when the server detects the end of speech in + the audio buffer. The server will also send an `conversation.item.created` + event with the user message item that is created from the audio buffer. + """ + audio_end_ms: int """Milliseconds since the session started when speech stopped. diff --git a/src/openai/types/realtime/input_audio_buffer_timeout_triggered.py b/src/openai/types/realtime/input_audio_buffer_timeout_triggered.py index 5c5dc5cfa6..72b107d56e 100644 --- a/src/openai/types/realtime/input_audio_buffer_timeout_triggered.py +++ b/src/openai/types/realtime/input_audio_buffer_timeout_triggered.py @@ -8,6 +8,23 @@ class InputAudioBufferTimeoutTriggered(BaseModel): + """Returned when the Server VAD timeout is triggered for the input audio buffer. + + This is configured + with `idle_timeout_ms` in the `turn_detection` settings of the session, and it indicates that + there hasn't been any speech detected for the configured duration. + + The `audio_start_ms` and `audio_end_ms` fields indicate the segment of audio after the last + model response up to the triggering time, as an offset from the beginning of audio written + to the input audio buffer. This means it demarcates the segment of audio that was silent and + the difference between the start and end values will roughly match the configured timeout. + + The empty audio will be committed to the conversation as an `input_audio` item (there will be a + `input_audio_buffer.committed` event) and a model response will be generated. There may be speech + that didn't trigger VAD but is still detected by the model, so the model may respond with + something relevant to the conversation or a prompt to continue speaking. 
+ """ + audio_end_ms: int """ Millisecond offset of audio written to the input audio buffer at the time the diff --git a/src/openai/types/realtime/log_prob_properties.py b/src/openai/types/realtime/log_prob_properties.py index 92477d67d0..423af1c492 100644 --- a/src/openai/types/realtime/log_prob_properties.py +++ b/src/openai/types/realtime/log_prob_properties.py @@ -8,6 +8,8 @@ class LogProbProperties(BaseModel): + """A log probability object.""" + token: str """The token that was used to generate the log probability.""" diff --git a/src/openai/types/realtime/mcp_list_tools_completed.py b/src/openai/types/realtime/mcp_list_tools_completed.py index 941280f01a..2fe64147d6 100644 --- a/src/openai/types/realtime/mcp_list_tools_completed.py +++ b/src/openai/types/realtime/mcp_list_tools_completed.py @@ -8,6 +8,8 @@ class McpListToolsCompleted(BaseModel): + """Returned when listing MCP tools has completed for an item.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/mcp_list_tools_failed.py b/src/openai/types/realtime/mcp_list_tools_failed.py index 892eda21bd..8cad7c0a12 100644 --- a/src/openai/types/realtime/mcp_list_tools_failed.py +++ b/src/openai/types/realtime/mcp_list_tools_failed.py @@ -8,6 +8,8 @@ class McpListToolsFailed(BaseModel): + """Returned when listing MCP tools has failed for an item.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/mcp_list_tools_in_progress.py b/src/openai/types/realtime/mcp_list_tools_in_progress.py index 4254b5fd33..823bb875a3 100644 --- a/src/openai/types/realtime/mcp_list_tools_in_progress.py +++ b/src/openai/types/realtime/mcp_list_tools_in_progress.py @@ -8,6 +8,8 @@ class McpListToolsInProgress(BaseModel): + """Returned when listing MCP tools is in progress for an item.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/output_audio_buffer_clear_event.py b/src/openai/types/realtime/output_audio_buffer_clear_event.py index b4c95039f3..b3fa7620ac 100644 --- a/src/openai/types/realtime/output_audio_buffer_clear_event.py +++ b/src/openai/types/realtime/output_audio_buffer_clear_event.py @@ -9,6 +9,15 @@ class OutputAudioBufferClearEvent(BaseModel): + """**WebRTC/SIP Only:** Emit to cut off the current audio response. + + This will trigger the server to + stop generating audio and emit a `output_audio_buffer.cleared` event. This + event should be preceded by a `response.cancel` client event to stop the + generation of the current response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + type: Literal["output_audio_buffer.clear"] """The event type, must be `output_audio_buffer.clear`.""" diff --git a/src/openai/types/realtime/output_audio_buffer_clear_event_param.py b/src/openai/types/realtime/output_audio_buffer_clear_event_param.py index a3205ebc6c..59f897a5c1 100644 --- a/src/openai/types/realtime/output_audio_buffer_clear_event_param.py +++ b/src/openai/types/realtime/output_audio_buffer_clear_event_param.py @@ -8,6 +8,15 @@ class OutputAudioBufferClearEventParam(TypedDict, total=False): + """**WebRTC/SIP Only:** Emit to cut off the current audio response. + + This will trigger the server to + stop generating audio and emit a `output_audio_buffer.cleared` event. This + event should be preceded by a `response.cancel` client event to stop the + generation of the current response. 
+ [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + type: Required[Literal["output_audio_buffer.clear"]] """The event type, must be `output_audio_buffer.clear`.""" diff --git a/src/openai/types/realtime/rate_limits_updated_event.py b/src/openai/types/realtime/rate_limits_updated_event.py index 048a4028a1..951de103af 100644 --- a/src/openai/types/realtime/rate_limits_updated_event.py +++ b/src/openai/types/realtime/rate_limits_updated_event.py @@ -23,6 +23,14 @@ class RateLimit(BaseModel): class RateLimitsUpdatedEvent(BaseModel): + """Emitted at the beginning of a Response to indicate the updated rate limits. + + + When a Response is created some tokens will be "reserved" for the output + tokens, the rate limits shown here reflect that reservation, which is then + adjusted accordingly once the Response is completed. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/realtime_audio_config.py b/src/openai/types/realtime/realtime_audio_config.py index 72d7cc59cc..daa50358a8 100644 --- a/src/openai/types/realtime/realtime_audio_config.py +++ b/src/openai/types/realtime/realtime_audio_config.py @@ -10,6 +10,8 @@ class RealtimeAudioConfig(BaseModel): + """Configuration for input and output audio.""" + input: Optional[RealtimeAudioConfigInput] = None output: Optional[RealtimeAudioConfigOutput] = None diff --git a/src/openai/types/realtime/realtime_audio_config_input.py b/src/openai/types/realtime/realtime_audio_config_input.py index cfcb7f22d4..08e1b14601 100644 --- a/src/openai/types/realtime/realtime_audio_config_input.py +++ b/src/openai/types/realtime/realtime_audio_config_input.py @@ -12,6 +12,13 @@ class NoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. diff --git a/src/openai/types/realtime/realtime_audio_config_input_param.py b/src/openai/types/realtime/realtime_audio_config_input_param.py index 730f46cfec..73495e6cd3 100644 --- a/src/openai/types/realtime/realtime_audio_config_input_param.py +++ b/src/openai/types/realtime/realtime_audio_config_input_param.py @@ -14,6 +14,13 @@ class NoiseReduction(TypedDict, total=False): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: NoiseReductionType """Type of noise reduction. 
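A rough sketch of how the input-audio configuration documented above might be assembled. Only the `input`/`output` keys and the noise-reduction `type` field appear in these hunks; the `noise_reduction` key name and the `near_field` value are assumptions drawn from the Realtime API reference, not from this diff.

# Hypothetical session audio block; key names outside this diff are assumptions.
audio_config = {
    "input": {
        # Noise reduction filters audio written to the input buffer before it
        # reaches VAD and the model; setting it to None turns it off.
        "noise_reduction": {"type": "near_field"},
    },
    "output": {},
}

Sent as part of a `session.update` event (not shown in this patch), a block like this would apply to subsequent input audio.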
diff --git a/src/openai/types/realtime/realtime_audio_config_param.py b/src/openai/types/realtime/realtime_audio_config_param.py index 2c41de35ae..7899fe359b 100644 --- a/src/openai/types/realtime/realtime_audio_config_param.py +++ b/src/openai/types/realtime/realtime_audio_config_param.py @@ -11,6 +11,8 @@ class RealtimeAudioConfigParam(TypedDict, total=False): + """Configuration for input and output audio.""" + input: RealtimeAudioConfigInputParam output: RealtimeAudioConfigOutputParam diff --git a/src/openai/types/realtime/realtime_audio_formats.py b/src/openai/types/realtime/realtime_audio_formats.py index 10f91883b6..fa10c9a7a4 100644 --- a/src/openai/types/realtime/realtime_audio_formats.py +++ b/src/openai/types/realtime/realtime_audio_formats.py @@ -10,6 +10,8 @@ class AudioPCM(BaseModel): + """The PCM audio format. Only a 24kHz sample rate is supported.""" + rate: Optional[Literal[24000]] = None """The sample rate of the audio. Always `24000`.""" @@ -18,11 +20,15 @@ class AudioPCM(BaseModel): class AudioPCMU(BaseModel): + """The G.711 μ-law format.""" + type: Optional[Literal["audio/pcmu"]] = None """The audio format. Always `audio/pcmu`.""" class AudioPCMA(BaseModel): + """The G.711 A-law format.""" + type: Optional[Literal["audio/pcma"]] = None """The audio format. Always `audio/pcma`.""" diff --git a/src/openai/types/realtime/realtime_audio_formats_param.py b/src/openai/types/realtime/realtime_audio_formats_param.py index cf58577f38..6392f632c3 100644 --- a/src/openai/types/realtime/realtime_audio_formats_param.py +++ b/src/openai/types/realtime/realtime_audio_formats_param.py @@ -9,6 +9,8 @@ class AudioPCM(TypedDict, total=False): + """The PCM audio format. Only a 24kHz sample rate is supported.""" + rate: Literal[24000] """The sample rate of the audio. Always `24000`.""" @@ -17,11 +19,15 @@ class AudioPCM(TypedDict, total=False): class AudioPCMU(TypedDict, total=False): + """The G.711 μ-law format.""" + type: Literal["audio/pcmu"] """The audio format. Always `audio/pcmu`.""" class AudioPCMA(TypedDict, total=False): + """The G.711 A-law format.""" + type: Literal["audio/pcma"] """The audio format. Always `audio/pcma`.""" diff --git a/src/openai/types/realtime/realtime_audio_input_turn_detection.py b/src/openai/types/realtime/realtime_audio_input_turn_detection.py index 9b55353884..8d9aff3563 100644 --- a/src/openai/types/realtime/realtime_audio_input_turn_detection.py +++ b/src/openai/types/realtime/realtime_audio_input_turn_detection.py @@ -10,6 +10,10 @@ class ServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" @@ -76,6 +80,10 @@ class ServerVad(BaseModel): class SemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. 
+ """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/src/openai/types/realtime/realtime_audio_input_turn_detection_param.py b/src/openai/types/realtime/realtime_audio_input_turn_detection_param.py index 4ce7640727..30522d74e1 100644 --- a/src/openai/types/realtime/realtime_audio_input_turn_detection_param.py +++ b/src/openai/types/realtime/realtime_audio_input_turn_detection_param.py @@ -9,6 +9,10 @@ class ServerVad(TypedDict, total=False): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Required[Literal["server_vad"]] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" @@ -75,6 +79,10 @@ class ServerVad(TypedDict, total=False): class SemanticVad(TypedDict, total=False): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Required[Literal["semantic_vad"]] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_assistant_message.py b/src/openai/types/realtime/realtime_conversation_item_assistant_message.py index 6b0f86ee32..207831a3c8 100644 --- a/src/openai/types/realtime/realtime_conversation_item_assistant_message.py +++ b/src/openai/types/realtime/realtime_conversation_item_assistant_message.py @@ -33,6 +33,8 @@ class Content(BaseModel): class RealtimeConversationItemAssistantMessage(BaseModel): + """An assistant message item in a Realtime conversation.""" + content: List[Content] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py b/src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py index 93699afba2..abc78e7d3f 100644 --- a/src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py +++ b/src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py @@ -33,6 +33,8 @@ class Content(TypedDict, total=False): class RealtimeConversationItemAssistantMessageParam(TypedDict, total=False): + """An assistant message item in a Realtime conversation.""" + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_function_call.py b/src/openai/types/realtime/realtime_conversation_item_function_call.py index 279a2fcdc5..4e40394883 100644 --- a/src/openai/types/realtime/realtime_conversation_item_function_call.py +++ b/src/openai/types/realtime/realtime_conversation_item_function_call.py @@ -9,6 +9,8 @@ class RealtimeConversationItemFunctionCall(BaseModel): + """A function call item in a Realtime conversation.""" + arguments: str """The arguments of the function call. 
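The two turn-detection variants documented above differ only in their `type` discriminator in the lines shown here; a small helper for picking between them might look like the following (tuning fields such as thresholds and timeouts are omitted because they are not part of these hunks).

def turn_detection(semantic: bool) -> dict:
    # `server_vad` flips on when speech is detected and off after a period of
    # silence; `semantic_vad` uses a model to decide when the user is done speaking.
    return {"type": "semantic_vad" if semantic else "server_vad"}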
diff --git a/src/openai/types/realtime/realtime_conversation_item_function_call_output.py b/src/openai/types/realtime/realtime_conversation_item_function_call_output.py index 4b6b15d0ad..cdbc352d85 100644 --- a/src/openai/types/realtime/realtime_conversation_item_function_call_output.py +++ b/src/openai/types/realtime/realtime_conversation_item_function_call_output.py @@ -9,6 +9,8 @@ class RealtimeConversationItemFunctionCallOutput(BaseModel): + """A function call output item in a Realtime conversation.""" + call_id: str """The ID of the function call this output is for.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py b/src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py index 56d62da563..2e56a81dc3 100644 --- a/src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py +++ b/src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py @@ -8,6 +8,8 @@ class RealtimeConversationItemFunctionCallOutputParam(TypedDict, total=False): + """A function call output item in a Realtime conversation.""" + call_id: Required[str] """The ID of the function call this output is for.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_function_call_param.py b/src/openai/types/realtime/realtime_conversation_item_function_call_param.py index 36a16a27b3..6467ce149e 100644 --- a/src/openai/types/realtime/realtime_conversation_item_function_call_param.py +++ b/src/openai/types/realtime/realtime_conversation_item_function_call_param.py @@ -8,6 +8,8 @@ class RealtimeConversationItemFunctionCallParam(TypedDict, total=False): + """A function call item in a Realtime conversation.""" + arguments: Required[str] """The arguments of the function call. diff --git a/src/openai/types/realtime/realtime_conversation_item_system_message.py b/src/openai/types/realtime/realtime_conversation_item_system_message.py index 7dac5c9fe2..f69bc03937 100644 --- a/src/openai/types/realtime/realtime_conversation_item_system_message.py +++ b/src/openai/types/realtime/realtime_conversation_item_system_message.py @@ -17,6 +17,10 @@ class Content(BaseModel): class RealtimeConversationItemSystemMessage(BaseModel): + """ + A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. "the user is now asking about a different topic"), use system messages. + """ + content: List[Content] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_system_message_param.py b/src/openai/types/realtime/realtime_conversation_item_system_message_param.py index a2790fcf67..93880198fa 100644 --- a/src/openai/types/realtime/realtime_conversation_item_system_message_param.py +++ b/src/openai/types/realtime/realtime_conversation_item_system_message_param.py @@ -17,6 +17,10 @@ class Content(TypedDict, total=False): class RealtimeConversationItemSystemMessageParam(TypedDict, total=False): + """ + A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. 
For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. "the user is now asking about a different topic"), use system messages. + """ + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_user_message.py b/src/openai/types/realtime/realtime_conversation_item_user_message.py index 30d9bb10e3..20e9614eb6 100644 --- a/src/openai/types/realtime/realtime_conversation_item_user_message.py +++ b/src/openai/types/realtime/realtime_conversation_item_user_message.py @@ -44,6 +44,8 @@ class Content(BaseModel): class RealtimeConversationItemUserMessage(BaseModel): + """A user message item in a Realtime conversation.""" + content: List[Content] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_conversation_item_user_message_param.py b/src/openai/types/realtime/realtime_conversation_item_user_message_param.py index 7d3b9bc137..69a24692e8 100644 --- a/src/openai/types/realtime/realtime_conversation_item_user_message_param.py +++ b/src/openai/types/realtime/realtime_conversation_item_user_message_param.py @@ -44,6 +44,8 @@ class Content(TypedDict, total=False): class RealtimeConversationItemUserMessageParam(TypedDict, total=False): + """A user message item in a Realtime conversation.""" + content: Required[Iterable[Content]] """The content of the message.""" diff --git a/src/openai/types/realtime/realtime_error.py b/src/openai/types/realtime/realtime_error.py index f1017d09e4..2aa5bc9425 100644 --- a/src/openai/types/realtime/realtime_error.py +++ b/src/openai/types/realtime/realtime_error.py @@ -8,6 +8,8 @@ class RealtimeError(BaseModel): + """Details of the error.""" + message: str """A human-readable error message.""" diff --git a/src/openai/types/realtime/realtime_error_event.py b/src/openai/types/realtime/realtime_error_event.py index 8b501d6b21..574464b29e 100644 --- a/src/openai/types/realtime/realtime_error_event.py +++ b/src/openai/types/realtime/realtime_error_event.py @@ -9,6 +9,12 @@ class RealtimeErrorEvent(BaseModel): + """ + Returned when an error occurs, which could be a client problem or a server + problem. Most errors are recoverable and the session will stay open; we + recommend that implementors monitor and log error messages by default.
+ """ + error: RealtimeError """Details of the error.""" diff --git a/src/openai/types/realtime/realtime_mcp_approval_request.py b/src/openai/types/realtime/realtime_mcp_approval_request.py index bafc8d89d4..1744c90070 100644 --- a/src/openai/types/realtime/realtime_mcp_approval_request.py +++ b/src/openai/types/realtime/realtime_mcp_approval_request.py @@ -8,6 +8,8 @@ class RealtimeMcpApprovalRequest(BaseModel): + """A Realtime item requesting human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" diff --git a/src/openai/types/realtime/realtime_mcp_approval_request_param.py b/src/openai/types/realtime/realtime_mcp_approval_request_param.py index 57c21a487f..f7cb68d67e 100644 --- a/src/openai/types/realtime/realtime_mcp_approval_request_param.py +++ b/src/openai/types/realtime/realtime_mcp_approval_request_param.py @@ -8,6 +8,8 @@ class RealtimeMcpApprovalRequestParam(TypedDict, total=False): + """A Realtime item requesting human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" diff --git a/src/openai/types/realtime/realtime_mcp_approval_response.py b/src/openai/types/realtime/realtime_mcp_approval_response.py index 2cb03bc61a..f8525a12fc 100644 --- a/src/openai/types/realtime/realtime_mcp_approval_response.py +++ b/src/openai/types/realtime/realtime_mcp_approval_response.py @@ -9,6 +9,8 @@ class RealtimeMcpApprovalResponse(BaseModel): + """A Realtime item responding to an MCP approval request.""" + id: str """The unique ID of the approval response.""" diff --git a/src/openai/types/realtime/realtime_mcp_approval_response_param.py b/src/openai/types/realtime/realtime_mcp_approval_response_param.py index 19b6337004..6a65f7ce38 100644 --- a/src/openai/types/realtime/realtime_mcp_approval_response_param.py +++ b/src/openai/types/realtime/realtime_mcp_approval_response_param.py @@ -9,6 +9,8 @@ class RealtimeMcpApprovalResponseParam(TypedDict, total=False): + """A Realtime item responding to an MCP approval request.""" + id: Required[str] """The unique ID of the approval response.""" diff --git a/src/openai/types/realtime/realtime_mcp_list_tools.py b/src/openai/types/realtime/realtime_mcp_list_tools.py index aeb58a1faf..669d1fb43b 100644 --- a/src/openai/types/realtime/realtime_mcp_list_tools.py +++ b/src/openai/types/realtime/realtime_mcp_list_tools.py @@ -9,6 +9,8 @@ class Tool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -23,6 +25,8 @@ class Tool(BaseModel): class RealtimeMcpListTools(BaseModel): + """A Realtime item listing tools available on an MCP server.""" + server_label: str """The label of the MCP server.""" diff --git a/src/openai/types/realtime/realtime_mcp_list_tools_param.py b/src/openai/types/realtime/realtime_mcp_list_tools_param.py index eb8605a061..614fa53347 100644 --- a/src/openai/types/realtime/realtime_mcp_list_tools_param.py +++ b/src/openai/types/realtime/realtime_mcp_list_tools_param.py @@ -9,6 +9,8 @@ class Tool(TypedDict, total=False): + """A tool available on an MCP server.""" + input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -23,6 +25,8 @@ class Tool(TypedDict, total=False): class RealtimeMcpListToolsParam(TypedDict, total=False): + """A Realtime item listing tools available on an MCP server.""" + server_label: Required[str] """The label of the MCP server.""" diff --git a/src/openai/types/realtime/realtime_mcp_tool_call.py 
b/src/openai/types/realtime/realtime_mcp_tool_call.py index 019aee25c0..f53ad0eaa9 100644 --- a/src/openai/types/realtime/realtime_mcp_tool_call.py +++ b/src/openai/types/realtime/realtime_mcp_tool_call.py @@ -18,6 +18,8 @@ class RealtimeMcpToolCall(BaseModel): + """A Realtime item representing an invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" diff --git a/src/openai/types/realtime/realtime_mcp_tool_call_param.py b/src/openai/types/realtime/realtime_mcp_tool_call_param.py index 0ba16d3dc1..8ccb5efc8a 100644 --- a/src/openai/types/realtime/realtime_mcp_tool_call_param.py +++ b/src/openai/types/realtime/realtime_mcp_tool_call_param.py @@ -15,6 +15,8 @@ class RealtimeMcpToolCallParam(TypedDict, total=False): + """A Realtime item representing an invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" diff --git a/src/openai/types/realtime/realtime_response.py b/src/openai/types/realtime/realtime_response.py index 92d75491c0..a23edc48ab 100644 --- a/src/openai/types/realtime/realtime_response.py +++ b/src/openai/types/realtime/realtime_response.py @@ -30,10 +30,14 @@ class AudioOutput(BaseModel): class Audio(BaseModel): + """Configuration for audio output.""" + output: Optional[AudioOutput] = None class RealtimeResponse(BaseModel): + """The response resource.""" + id: Optional[str] = None """The unique ID of the response, will look like `resp_1234`.""" diff --git a/src/openai/types/realtime/realtime_response_create_audio_output.py b/src/openai/types/realtime/realtime_response_create_audio_output.py index 48a5d67e20..b8f4d284d5 100644 --- a/src/openai/types/realtime/realtime_response_create_audio_output.py +++ b/src/openai/types/realtime/realtime_response_create_audio_output.py @@ -26,4 +26,6 @@ class Output(BaseModel): class RealtimeResponseCreateAudioOutput(BaseModel): + """Configuration for audio input and output.""" + output: Optional[Output] = None diff --git a/src/openai/types/realtime/realtime_response_create_audio_output_param.py b/src/openai/types/realtime/realtime_response_create_audio_output_param.py index 9aa6d28835..30a4633698 100644 --- a/src/openai/types/realtime/realtime_response_create_audio_output_param.py +++ b/src/openai/types/realtime/realtime_response_create_audio_output_param.py @@ -25,4 +25,6 @@ class Output(TypedDict, total=False): class RealtimeResponseCreateAudioOutputParam(TypedDict, total=False): + """Configuration for audio input and output.""" + output: Output diff --git a/src/openai/types/realtime/realtime_response_create_mcp_tool.py b/src/openai/types/realtime/realtime_response_create_mcp_tool.py index 119b4a455d..72189e10e6 100644 --- a/src/openai/types/realtime/realtime_response_create_mcp_tool.py +++ b/src/openai/types/realtime/realtime_response_create_mcp_tool.py @@ -17,6 +17,8 @@ class AllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -33,6 +35,8 @@ class AllowedToolsMcpToolFilter(BaseModel): class RequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. 
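To make the MCP tool shape above concrete, here is a minimal sketch of a single tool entry as it might appear in a session or response configuration. The filter shapes (`allowed_tools`, `require_approval`, `read_only`) follow the classes documented above, while `server_url` and the `"mcp"` type value are assumptions not shown in this diff.

```python
# Illustrative MCP tool entry; field names follow RealtimeResponseCreateMcpTool,
# but `server_url` and the "mcp" type value are assumptions for this sketch.
mcp_tool = {
    "type": "mcp",
    "server_label": "docs-server",            # identifies this server in tool calls
    "server_url": "https://example.com/mcp",  # hypothetical MCP endpoint
    "allowed_tools": {"read_only": True},     # only expose tools that do not modify data
    "require_approval": "never",              # or "always", or a per-tool filter object
}
```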
@@ -46,6 +50,8 @@ class RequireApprovalMcpToolApprovalFilterAlways(BaseModel): class RequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -59,6 +65,13 @@ class RequireApprovalMcpToolApprovalFilterNever(BaseModel): class RequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[RequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -70,6 +83,11 @@ class RequireApprovalMcpToolApprovalFilter(BaseModel): class RealtimeResponseCreateMcpTool(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" diff --git a/src/openai/types/realtime/realtime_response_create_mcp_tool_param.py b/src/openai/types/realtime/realtime_response_create_mcp_tool_param.py index 3b9cf047c1..68dd6bdb5c 100644 --- a/src/openai/types/realtime/realtime_response_create_mcp_tool_param.py +++ b/src/openai/types/realtime/realtime_response_create_mcp_tool_param.py @@ -19,6 +19,8 @@ class AllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -35,6 +37,8 @@ class AllowedToolsMcpToolFilter(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -48,6 +52,8 @@ class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -61,6 +67,13 @@ class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: RequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -72,6 +85,11 @@ class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class RealtimeResponseCreateMcpToolParam(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/src/openai/types/realtime/realtime_response_create_params.py b/src/openai/types/realtime/realtime_response_create_params.py index e8486220bf..deec8c9280 100644 --- a/src/openai/types/realtime/realtime_response_create_params.py +++ b/src/openai/types/realtime/realtime_response_create_params.py @@ -22,6 +22,8 @@ class RealtimeResponseCreateParams(BaseModel): + """Create a new Realtime response with these parameters""" + audio: Optional[RealtimeResponseCreateAudioOutput] = None """Configuration for audio input and output.""" diff --git a/src/openai/types/realtime/realtime_response_create_params_param.py b/src/openai/types/realtime/realtime_response_create_params_param.py index 116384bd82..caad5bc900 100644 --- a/src/openai/types/realtime/realtime_response_create_params_param.py +++ b/src/openai/types/realtime/realtime_response_create_params_param.py @@ -23,6 +23,8 @@ class RealtimeResponseCreateParamsParam(TypedDict, total=False): + """Create a new Realtime response with these parameters""" + audio: RealtimeResponseCreateAudioOutputParam """Configuration for audio input and output.""" diff --git a/src/openai/types/realtime/realtime_response_status.py b/src/openai/types/realtime/realtime_response_status.py index 12999f61a1..26b272ae5a 100644 --- a/src/openai/types/realtime/realtime_response_status.py +++ b/src/openai/types/realtime/realtime_response_status.py @@ -9,6 +9,11 @@ class Error(BaseModel): + """ + A description of the error that caused the response to fail, + populated when the `status` is `failed`. + """ + code: Optional[str] = None """Error code, if any.""" @@ -17,6 +22,8 @@ class Error(BaseModel): class RealtimeResponseStatus(BaseModel): + """Additional details about the status.""" + error: Optional[Error] = None """ A description of the error that caused the response to fail, populated when the diff --git a/src/openai/types/realtime/realtime_response_usage.py b/src/openai/types/realtime/realtime_response_usage.py index fb8893b346..a5985d8a7b 100644 --- a/src/openai/types/realtime/realtime_response_usage.py +++ b/src/openai/types/realtime/realtime_response_usage.py @@ -10,6 +10,14 @@ class RealtimeResponseUsage(BaseModel): + """Usage statistics for the Response, this will correspond to billing. + + A + Realtime API session will maintain a conversation context and append new + Items to the Conversation, thus output from previous turns (text and + audio tokens) will become the input for later turns. + """ + input_token_details: Optional[RealtimeResponseUsageInputTokenDetails] = None """Details about the input tokens used in the Response. diff --git a/src/openai/types/realtime/realtime_response_usage_input_token_details.py b/src/openai/types/realtime/realtime_response_usage_input_token_details.py index e14a74a84e..0fc71749e9 100644 --- a/src/openai/types/realtime/realtime_response_usage_input_token_details.py +++ b/src/openai/types/realtime/realtime_response_usage_input_token_details.py @@ -8,6 +8,8 @@ class CachedTokensDetails(BaseModel): + """Details about the cached tokens used as input for the Response.""" + audio_tokens: Optional[int] = None """The number of cached audio tokens used as input for the Response.""" @@ -19,6 +21,11 @@ class CachedTokensDetails(BaseModel): class RealtimeResponseUsageInputTokenDetails(BaseModel): + """Details about the input tokens used in the Response. 
+ + Cached tokens are tokens from previous turns in the conversation that are included as context for the current response. Cached tokens here are counted as a subset of input tokens, meaning input tokens will include cached and uncached tokens. + """ + audio_tokens: Optional[int] = None """The number of audio tokens used as input for the Response.""" diff --git a/src/openai/types/realtime/realtime_response_usage_output_token_details.py b/src/openai/types/realtime/realtime_response_usage_output_token_details.py index dfa97a1f47..2154c77d5d 100644 --- a/src/openai/types/realtime/realtime_response_usage_output_token_details.py +++ b/src/openai/types/realtime/realtime_response_usage_output_token_details.py @@ -8,6 +8,8 @@ class RealtimeResponseUsageOutputTokenDetails(BaseModel): + """Details about the output tokens used in the Response.""" + audio_tokens: Optional[int] = None """The number of audio tokens used in the Response.""" diff --git a/src/openai/types/realtime/realtime_server_event.py b/src/openai/types/realtime/realtime_server_event.py index ead98f1a54..5de53d053e 100644 --- a/src/openai/types/realtime/realtime_server_event.py +++ b/src/openai/types/realtime/realtime_server_event.py @@ -61,6 +61,11 @@ class ConversationItemRetrieved(BaseModel): + """Returned when a conversation item is retrieved with `conversation.item.retrieve`. + + This is provided as a way to fetch the server's representation of an item, for example to get access to the post-processed audio data after noise cancellation and VAD. It includes the full content of the Item, including audio data. + """ + event_id: str """The unique ID of the server event.""" @@ -72,6 +77,13 @@ class ConversationItemRetrieved(BaseModel): class OutputAudioBufferStarted(BaseModel): + """ + **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event is + emitted after an audio content part has been added (`response.content_part.added`) + to the response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + event_id: str """The unique ID of the server event.""" @@ -83,6 +95,13 @@ class OutputAudioBufferStarted(BaseModel): class OutputAudioBufferStopped(BaseModel): + """ + **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the server, + and no more audio is forthcoming. This event is emitted after the full response + data has been sent to the client (`response.done`). + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). + """ + event_id: str """The unique ID of the server event.""" @@ -94,6 +113,15 @@ class OutputAudioBufferStopped(BaseModel): class OutputAudioBufferCleared(BaseModel): + """**WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. + + This happens either in VAD + mode when the user has interrupted (`input_audio_buffer.speech_started`), + or when the client has emitted the `output_audio_buffer.clear` event to manually + cut off the current audio response. + [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc). 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/realtime_session_client_secret.py b/src/openai/types/realtime/realtime_session_client_secret.py index a4998802bb..13a12f5502 100644 --- a/src/openai/types/realtime/realtime_session_client_secret.py +++ b/src/openai/types/realtime/realtime_session_client_secret.py @@ -6,6 +6,8 @@ class RealtimeSessionClientSecret(BaseModel): + """Ephemeral key returned by the API.""" + expires_at: int """Timestamp for when the token expires. diff --git a/src/openai/types/realtime/realtime_session_create_request.py b/src/openai/types/realtime/realtime_session_create_request.py index 80cf468dc8..76738816a0 100644 --- a/src/openai/types/realtime/realtime_session_create_request.py +++ b/src/openai/types/realtime/realtime_session_create_request.py @@ -15,6 +15,8 @@ class RealtimeSessionCreateRequest(BaseModel): + """Realtime session object configuration.""" + type: Literal["realtime"] """The type of session to create. Always `realtime` for the Realtime API.""" diff --git a/src/openai/types/realtime/realtime_session_create_request_param.py b/src/openai/types/realtime/realtime_session_create_request_param.py index 578d5a502d..cc5806fe11 100644 --- a/src/openai/types/realtime/realtime_session_create_request_param.py +++ b/src/openai/types/realtime/realtime_session_create_request_param.py @@ -16,6 +16,8 @@ class RealtimeSessionCreateRequestParam(TypedDict, total=False): + """Realtime session object configuration.""" + type: Required[Literal["realtime"]] """The type of session to create. Always `realtime` for the Realtime API.""" diff --git a/src/openai/types/realtime/realtime_session_create_response.py b/src/openai/types/realtime/realtime_session_create_response.py index df69dd7bdb..46d32e8571 100644 --- a/src/openai/types/realtime/realtime_session_create_response.py +++ b/src/openai/types/realtime/realtime_session_create_response.py @@ -40,6 +40,13 @@ class AudioInputNoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. @@ -49,6 +56,10 @@ class AudioInputNoiseReduction(BaseModel): class AudioInputTurnDetectionServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" @@ -115,6 +126,10 @@ class AudioInputTurnDetectionServerVad(BaseModel): class AudioInputTurnDetectionSemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. 
+ """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" @@ -218,6 +233,8 @@ class AudioOutput(BaseModel): class Audio(BaseModel): + """Configuration for input and output audio.""" + input: Optional[AudioInput] = None output: Optional[AudioOutput] = None @@ -227,6 +244,8 @@ class Audio(BaseModel): class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -243,6 +262,8 @@ class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -256,6 +277,8 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -269,6 +292,13 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel): class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -282,6 +312,11 @@ class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel): class ToolMcpTool(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" @@ -351,6 +386,8 @@ class ToolMcpTool(BaseModel): class TracingTracingConfiguration(BaseModel): + """Granular configuration for tracing.""" + group_id: Optional[str] = None """ The group id to attach to this trace to enable filtering and grouping in the @@ -374,6 +411,12 @@ class TracingTracingConfiguration(BaseModel): class RealtimeSessionCreateResponse(BaseModel): + """A new Realtime session configuration, with an ephemeral key. + + Default TTL + for keys is one minute. + """ + client_secret: RealtimeSessionClientSecret """Ephemeral key returned by the API.""" diff --git a/src/openai/types/realtime/realtime_tools_config_param.py b/src/openai/types/realtime/realtime_tools_config_param.py index 630fc74691..3cc404feef 100644 --- a/src/openai/types/realtime/realtime_tools_config_param.py +++ b/src/openai/types/realtime/realtime_tools_config_param.py @@ -22,6 +22,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -38,6 +40,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. 
@@ -51,6 +55,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -64,6 +70,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -75,6 +88,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). + """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/src/openai/types/realtime/realtime_tools_config_union.py b/src/openai/types/realtime/realtime_tools_config_union.py index e7126ed60d..92aaee7f26 100644 --- a/src/openai/types/realtime/realtime_tools_config_union.py +++ b/src/openai/types/realtime/realtime_tools_config_union.py @@ -20,6 +20,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -36,6 +38,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -49,6 +53,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -62,6 +68,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -73,6 +86,11 @@ class McpRequireApprovalMcpToolApprovalFilter(BaseModel): class Mcp(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" diff --git a/src/openai/types/realtime/realtime_tools_config_union_param.py b/src/openai/types/realtime/realtime_tools_config_union_param.py index 9ee58fdbe6..6889b4c304 100644 --- a/src/openai/types/realtime/realtime_tools_config_union_param.py +++ b/src/openai/types/realtime/realtime_tools_config_union_param.py @@ -21,6 +21,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -37,6 +39,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -50,6 +54,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -63,6 +69,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -74,6 +87,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" diff --git a/src/openai/types/realtime/realtime_tracing_config.py b/src/openai/types/realtime/realtime_tracing_config.py index 1c46de7928..37e3ce8945 100644 --- a/src/openai/types/realtime/realtime_tracing_config.py +++ b/src/openai/types/realtime/realtime_tracing_config.py @@ -9,6 +9,8 @@ class TracingConfiguration(BaseModel): + """Granular configuration for tracing.""" + group_id: Optional[str] = None """ The group id to attach to this trace to enable filtering and grouping in the diff --git a/src/openai/types/realtime/realtime_tracing_config_param.py b/src/openai/types/realtime/realtime_tracing_config_param.py index fd9e266244..742412897f 100644 --- a/src/openai/types/realtime/realtime_tracing_config_param.py +++ b/src/openai/types/realtime/realtime_tracing_config_param.py @@ -9,6 +9,8 @@ class TracingConfiguration(TypedDict, total=False): + """Granular configuration for tracing.""" + group_id: str """ The group id to attach to this trace to enable filtering and grouping in the diff --git a/src/openai/types/realtime/realtime_transcription_session_audio.py b/src/openai/types/realtime/realtime_transcription_session_audio.py index a5506947f1..7ec29afb79 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio.py @@ -9,4 +9,6 @@ class RealtimeTranscriptionSessionAudio(BaseModel): + """Configuration for input and output audio.""" + input: Optional[RealtimeTranscriptionSessionAudioInput] = None diff --git a/src/openai/types/realtime/realtime_transcription_session_audio_input.py b/src/openai/types/realtime/realtime_transcription_session_audio_input.py index efc321cbeb..80ff223590 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio_input.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio_input.py @@ -14,6 +14,13 @@ class NoiseReduction(BaseModel): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: Optional[NoiseReductionType] = None """Type of noise reduction. diff --git a/src/openai/types/realtime/realtime_transcription_session_audio_input_param.py b/src/openai/types/realtime/realtime_transcription_session_audio_input_param.py index c9153b68a4..dd908c72f6 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio_input_param.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio_input_param.py @@ -16,6 +16,13 @@ class NoiseReduction(TypedDict, total=False): + """Configuration for input audio noise reduction. + + This can be set to `null` to turn off. + Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. + Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. + """ + type: NoiseReductionType """Type of noise reduction. 
diff --git a/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py b/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py index e21844f48f..3d4ee779f4 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py @@ -10,6 +10,10 @@ class ServerVad(BaseModel): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Literal["server_vad"] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" @@ -76,6 +80,10 @@ class ServerVad(BaseModel): class SemanticVad(BaseModel): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Literal["semantic_vad"] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py b/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py index 507c43141e..0aca59ce11 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py @@ -9,6 +9,10 @@ class ServerVad(TypedDict, total=False): + """ + Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence. + """ + type: Required[Literal["server_vad"]] """Type of turn detection, `server_vad` to turn on simple Server VAD.""" @@ -75,6 +79,10 @@ class ServerVad(TypedDict, total=False): class SemanticVad(TypedDict, total=False): + """ + Server-side semantic turn detection which uses a model to determine when the user has finished speaking. + """ + type: Required[Literal["semantic_vad"]] """Type of turn detection, `semantic_vad` to turn on Semantic VAD.""" diff --git a/src/openai/types/realtime/realtime_transcription_session_audio_param.py b/src/openai/types/realtime/realtime_transcription_session_audio_param.py index 1503a606d3..6bf1117917 100644 --- a/src/openai/types/realtime/realtime_transcription_session_audio_param.py +++ b/src/openai/types/realtime/realtime_transcription_session_audio_param.py @@ -10,4 +10,6 @@ class RealtimeTranscriptionSessionAudioParam(TypedDict, total=False): + """Configuration for input and output audio.""" + input: RealtimeTranscriptionSessionAudioInputParam diff --git a/src/openai/types/realtime/realtime_transcription_session_create_request.py b/src/openai/types/realtime/realtime_transcription_session_create_request.py index 102f2b14fb..f72a4ad93f 100644 --- a/src/openai/types/realtime/realtime_transcription_session_create_request.py +++ b/src/openai/types/realtime/realtime_transcription_session_create_request.py @@ -10,6 +10,8 @@ class RealtimeTranscriptionSessionCreateRequest(BaseModel): + """Realtime transcription session object configuration.""" + type: Literal["transcription"] """The type of session to create. 
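Putting the pieces above together, a transcription session configuration could be sketched as below; the `silence_duration_ms` field and its value are assumptions used only to show where turn detection sits.

```python
# Illustrative transcription session configuration (not a recommended preset).
transcription_session = {
    "type": "transcription",             # always "transcription" for these sessions
    "audio": {
        "input": {
            "turn_detection": {
                "type": "server_vad",    # or "semantic_vad" for model-based end-of-turn detection
                "silence_duration_ms": 500,  # assumed field: silence length that ends a turn
            },
        },
    },
}
```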
diff --git a/src/openai/types/realtime/realtime_transcription_session_create_request_param.py b/src/openai/types/realtime/realtime_transcription_session_create_request_param.py index 80cbe2d414..9b4d8ead79 100644 --- a/src/openai/types/realtime/realtime_transcription_session_create_request_param.py +++ b/src/openai/types/realtime/realtime_transcription_session_create_request_param.py @@ -11,6 +11,8 @@ class RealtimeTranscriptionSessionCreateRequestParam(TypedDict, total=False): + """Realtime transcription session object configuration.""" + type: Required[Literal["transcription"]] """The type of session to create. diff --git a/src/openai/types/realtime/realtime_transcription_session_create_response.py b/src/openai/types/realtime/realtime_transcription_session_create_response.py index 301af1ac3f..6ca6c3808b 100644 --- a/src/openai/types/realtime/realtime_transcription_session_create_response.py +++ b/src/openai/types/realtime/realtime_transcription_session_create_response.py @@ -13,6 +13,8 @@ class AudioInputNoiseReduction(BaseModel): + """Configuration for input audio noise reduction.""" + type: Optional[NoiseReductionType] = None """Type of noise reduction. @@ -41,10 +43,14 @@ class AudioInput(BaseModel): class Audio(BaseModel): + """Configuration for input audio for the session.""" + input: Optional[AudioInput] = None class RealtimeTranscriptionSessionCreateResponse(BaseModel): + """A Realtime transcription session configuration object.""" + id: str """Unique identifier for the session that looks like `sess_1234567890abcdef`.""" diff --git a/src/openai/types/realtime/realtime_transcription_session_turn_detection.py b/src/openai/types/realtime/realtime_transcription_session_turn_detection.py index f5da31ce77..8dacd60a07 100644 --- a/src/openai/types/realtime/realtime_transcription_session_turn_detection.py +++ b/src/openai/types/realtime/realtime_transcription_session_turn_detection.py @@ -8,6 +8,13 @@ class RealtimeTranscriptionSessionTurnDetection(BaseModel): + """Configuration for turn detection. + + Can be set to `null` to turn off. Server + VAD means that the model will detect the start and end of speech based on + audio volume and respond at the end of user speech. + """ + prefix_padding_ms: Optional[int] = None """Amount of audio to include before the VAD detected speech (in milliseconds). diff --git a/src/openai/types/realtime/realtime_truncation_retention_ratio.py b/src/openai/types/realtime/realtime_truncation_retention_ratio.py index e19ed64831..72a93a5654 100644 --- a/src/openai/types/realtime/realtime_truncation_retention_ratio.py +++ b/src/openai/types/realtime/realtime_truncation_retention_ratio.py @@ -9,6 +9,11 @@ class TokenLimits(BaseModel): + """Optional custom token limits for this truncation strategy. + + If not provided, the model's default token limits will be used. + """ + post_instructions: Optional[int] = None """ Maximum tokens allowed in the conversation after instructions (which including @@ -20,6 +25,10 @@ class TokenLimits(BaseModel): class RealtimeTruncationRetentionRatio(BaseModel): + """ + Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage. 
+ """ + retention_ratio: float """ Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when diff --git a/src/openai/types/realtime/realtime_truncation_retention_ratio_param.py b/src/openai/types/realtime/realtime_truncation_retention_ratio_param.py index 4ea80fe4ce..4648fa66b0 100644 --- a/src/openai/types/realtime/realtime_truncation_retention_ratio_param.py +++ b/src/openai/types/realtime/realtime_truncation_retention_ratio_param.py @@ -8,6 +8,11 @@ class TokenLimits(TypedDict, total=False): + """Optional custom token limits for this truncation strategy. + + If not provided, the model's default token limits will be used. + """ + post_instructions: int """ Maximum tokens allowed in the conversation after instructions (which including @@ -19,6 +24,10 @@ class TokenLimits(TypedDict, total=False): class RealtimeTruncationRetentionRatioParam(TypedDict, total=False): + """ + Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage. + """ + retention_ratio: Required[float] """ Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when diff --git a/src/openai/types/realtime/response_audio_delta_event.py b/src/openai/types/realtime/response_audio_delta_event.py index d92c5462d0..ae87014053 100644 --- a/src/openai/types/realtime/response_audio_delta_event.py +++ b/src/openai/types/realtime/response_audio_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioDeltaEvent(BaseModel): + """Returned when the model-generated audio is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_audio_done_event.py b/src/openai/types/realtime/response_audio_done_event.py index 5ea0f07e36..98715aba13 100644 --- a/src/openai/types/realtime/response_audio_done_event.py +++ b/src/openai/types/realtime/response_audio_done_event.py @@ -8,6 +8,12 @@ class ResponseAudioDoneEvent(BaseModel): + """Returned when the model-generated audio is done. + + Also emitted when a Response + is interrupted, incomplete, or cancelled. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_audio_transcript_delta_event.py b/src/openai/types/realtime/response_audio_transcript_delta_event.py index 4dd5fecac0..4ec1a820ba 100644 --- a/src/openai/types/realtime/response_audio_transcript_delta_event.py +++ b/src/openai/types/realtime/response_audio_transcript_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel): + """Returned when the model-generated transcription of audio output is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_audio_transcript_done_event.py b/src/openai/types/realtime/response_audio_transcript_done_event.py index 2de913d277..c2a2416355 100644 --- a/src/openai/types/realtime/response_audio_transcript_done_event.py +++ b/src/openai/types/realtime/response_audio_transcript_done_event.py @@ -8,6 +8,12 @@ class ResponseAudioTranscriptDoneEvent(BaseModel): + """ + Returned when the model-generated transcription of audio output is done + streaming. Also emitted when a Response is interrupted, incomplete, or + cancelled. 
+ """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_cancel_event.py b/src/openai/types/realtime/response_cancel_event.py index 15dc141cbf..9c6113998f 100644 --- a/src/openai/types/realtime/response_cancel_event.py +++ b/src/openai/types/realtime/response_cancel_event.py @@ -9,6 +9,15 @@ class ResponseCancelEvent(BaseModel): + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. It's safe + to call `response.cancel` even if no response is in progress, an error will be + returned the session will remain unaffected. + """ + type: Literal["response.cancel"] """The event type, must be `response.cancel`.""" diff --git a/src/openai/types/realtime/response_cancel_event_param.py b/src/openai/types/realtime/response_cancel_event_param.py index f33740730a..b233b407f9 100644 --- a/src/openai/types/realtime/response_cancel_event_param.py +++ b/src/openai/types/realtime/response_cancel_event_param.py @@ -8,6 +8,15 @@ class ResponseCancelEventParam(TypedDict, total=False): + """Send this event to cancel an in-progress response. + + The server will respond + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. It's safe + to call `response.cancel` even if no response is in progress, an error will be + returned the session will remain unaffected. + """ + type: Required[Literal["response.cancel"]] """The event type, must be `response.cancel`.""" diff --git a/src/openai/types/realtime/response_content_part_added_event.py b/src/openai/types/realtime/response_content_part_added_event.py index aca965c3d8..e47c84af20 100644 --- a/src/openai/types/realtime/response_content_part_added_event.py +++ b/src/openai/types/realtime/response_content_part_added_event.py @@ -9,6 +9,8 @@ class Part(BaseModel): + """The content part that was added.""" + audio: Optional[str] = None """Base64-encoded audio data (if type is "audio").""" @@ -23,6 +25,11 @@ class Part(BaseModel): class ResponseContentPartAddedEvent(BaseModel): + """ + Returned when a new content part is added to an assistant message item during + response generation. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_content_part_done_event.py b/src/openai/types/realtime/response_content_part_done_event.py index 59af808a90..a6cb8559b9 100644 --- a/src/openai/types/realtime/response_content_part_done_event.py +++ b/src/openai/types/realtime/response_content_part_done_event.py @@ -9,6 +9,8 @@ class Part(BaseModel): + """The content part that is done.""" + audio: Optional[str] = None """Base64-encoded audio data (if type is "audio").""" @@ -23,6 +25,11 @@ class Part(BaseModel): class ResponseContentPartDoneEvent(BaseModel): + """ + Returned when a content part is done streaming in an assistant message item. + Also emitted when a Response is interrupted, incomplete, or cancelled. 
+ """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_create_event.py b/src/openai/types/realtime/response_create_event.py index 75a08ee460..3e98a8d858 100644 --- a/src/openai/types/realtime/response_create_event.py +++ b/src/openai/types/realtime/response_create_event.py @@ -10,6 +10,34 @@ class ResponseCreateEvent(BaseModel): + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history by default. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions` and `tools`. If these are set, they will override the Session's + configuration for this Response only. + + Responses can be created out-of-band of the default Conversation, meaning that they can + have arbitrary input, and it's possible to disable writing the output to the Conversation. + Only one Response can write to the default Conversation at a time, but otherwise multiple + Responses can be created in parallel. The `metadata` field is a good way to disambiguate + multiple simultaneous Responses. + + Clients can set `conversation` to `none` to create a Response that does not write to the default + Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting + raw Items and references to existing Items. + """ + type: Literal["response.create"] """The event type, must be `response.create`.""" diff --git a/src/openai/types/realtime/response_create_event_param.py b/src/openai/types/realtime/response_create_event_param.py index e5dd46d9b6..9da89e14ee 100644 --- a/src/openai/types/realtime/response_create_event_param.py +++ b/src/openai/types/realtime/response_create_event_param.py @@ -10,6 +10,34 @@ class ResponseCreateEventParam(TypedDict, total=False): + """ + This event instructs the server to create a Response, which means triggering + model inference. When in Server VAD mode, the server will create Responses + automatically. + + A Response will include at least one Item, and may have two, in which case + the second will be a function call. These Items will be appended to the + conversation history by default. + + The server will respond with a `response.created` event, events for Items + and content created, and finally a `response.done` event to indicate the + Response is complete. + + The `response.create` event includes inference configuration like + `instructions` and `tools`. If these are set, they will override the Session's + configuration for this Response only. + + Responses can be created out-of-band of the default Conversation, meaning that they can + have arbitrary input, and it's possible to disable writing the output to the Conversation. + Only one Response can write to the default Conversation at a time, but otherwise multiple + Responses can be created in parallel. The `metadata` field is a good way to disambiguate + multiple simultaneous Responses. + + Clients can set `conversation` to `none` to create a Response that does not write to the default + Conversation. 
Arbitrary input can be provided with the `input` field, which is an array accepting + raw Items and references to existing Items. + """ + type: Required[Literal["response.create"]] """The event type, must be `response.create`.""" diff --git a/src/openai/types/realtime/response_created_event.py b/src/openai/types/realtime/response_created_event.py index 996bf26f75..dc5941262d 100644 --- a/src/openai/types/realtime/response_created_event.py +++ b/src/openai/types/realtime/response_created_event.py @@ -9,6 +9,12 @@ class ResponseCreatedEvent(BaseModel): + """Returned when a new Response is created. + + The first event of response creation, + where the response is in an initial state of `in_progress`. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_done_event.py b/src/openai/types/realtime/response_done_event.py index ce9a4b9f1d..9c31a2aa03 100644 --- a/src/openai/types/realtime/response_done_event.py +++ b/src/openai/types/realtime/response_done_event.py @@ -9,6 +9,19 @@ class ResponseDoneEvent(BaseModel): + """Returned when a Response is done streaming. + + Always emitted, no matter the + final state. The Response object included in the `response.done` event will + include all output Items in the Response but will omit the raw audio data. + + Clients should check the `status` field of the Response to determine if it was successful + (`completed`) or if there was another outcome: `cancelled`, `failed`, or `incomplete`. + + A response will contain all output items that were generated during the response, excluding + any audio content. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_function_call_arguments_delta_event.py b/src/openai/types/realtime/response_function_call_arguments_delta_event.py index 6d96e78b24..a426c3f211 100644 --- a/src/openai/types/realtime/response_function_call_arguments_delta_event.py +++ b/src/openai/types/realtime/response_function_call_arguments_delta_event.py @@ -8,6 +8,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + """Returned when the model-generated function call arguments are updated.""" + call_id: str """The ID of the function call.""" diff --git a/src/openai/types/realtime/response_function_call_arguments_done_event.py b/src/openai/types/realtime/response_function_call_arguments_done_event.py index be7fae9a1b..504f91d558 100644 --- a/src/openai/types/realtime/response_function_call_arguments_done_event.py +++ b/src/openai/types/realtime/response_function_call_arguments_done_event.py @@ -8,6 +8,11 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + """ + Returned when the model-generated function call arguments are done streaming. + Also emitted when a Response is interrupted, incomplete, or cancelled. 
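The out-of-band behaviour described in the `response.create` docstring above can be sketched as a raw client event; the metadata and instructions values are illustrative.

```python
# Create a response that does not write to the default conversation.
out_of_band_response = {
    "type": "response.create",
    "response": {
        "conversation": "none",                    # keep output out of the default conversation
        "metadata": {"purpose": "topic_summary"},  # disambiguates concurrent responses
        "instructions": "Summarize the conversation so far in one sentence.",
    },
}
```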
+ """ + arguments: str """The final arguments as a JSON string.""" diff --git a/src/openai/types/realtime/response_mcp_call_arguments_delta.py b/src/openai/types/realtime/response_mcp_call_arguments_delta.py index 0a02a1a578..d890de0575 100644 --- a/src/openai/types/realtime/response_mcp_call_arguments_delta.py +++ b/src/openai/types/realtime/response_mcp_call_arguments_delta.py @@ -9,6 +9,8 @@ class ResponseMcpCallArgumentsDelta(BaseModel): + """Returned when MCP tool call arguments are updated during response generation.""" + delta: str """The JSON-encoded arguments delta.""" diff --git a/src/openai/types/realtime/response_mcp_call_arguments_done.py b/src/openai/types/realtime/response_mcp_call_arguments_done.py index 5ec95f1728..a7cb2d1958 100644 --- a/src/openai/types/realtime/response_mcp_call_arguments_done.py +++ b/src/openai/types/realtime/response_mcp_call_arguments_done.py @@ -8,6 +8,8 @@ class ResponseMcpCallArgumentsDone(BaseModel): + """Returned when MCP tool call arguments are finalized during response generation.""" + arguments: str """The final JSON-encoded arguments string.""" diff --git a/src/openai/types/realtime/response_mcp_call_completed.py b/src/openai/types/realtime/response_mcp_call_completed.py index e3fcec21f0..130260539a 100644 --- a/src/openai/types/realtime/response_mcp_call_completed.py +++ b/src/openai/types/realtime/response_mcp_call_completed.py @@ -8,6 +8,8 @@ class ResponseMcpCallCompleted(BaseModel): + """Returned when an MCP tool call has completed successfully.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_mcp_call_failed.py b/src/openai/types/realtime/response_mcp_call_failed.py index b7adc8c2a7..1c08d1d4b7 100644 --- a/src/openai/types/realtime/response_mcp_call_failed.py +++ b/src/openai/types/realtime/response_mcp_call_failed.py @@ -8,6 +8,8 @@ class ResponseMcpCallFailed(BaseModel): + """Returned when an MCP tool call has failed.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_mcp_call_in_progress.py b/src/openai/types/realtime/response_mcp_call_in_progress.py index d0fcc7615c..4c0ad149e5 100644 --- a/src/openai/types/realtime/response_mcp_call_in_progress.py +++ b/src/openai/types/realtime/response_mcp_call_in_progress.py @@ -8,6 +8,8 @@ class ResponseMcpCallInProgress(BaseModel): + """Returned when an MCP tool call has started and is in progress.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_output_item_added_event.py b/src/openai/types/realtime/response_output_item_added_event.py index 509dfcaeaf..abec0d18f1 100644 --- a/src/openai/types/realtime/response_output_item_added_event.py +++ b/src/openai/types/realtime/response_output_item_added_event.py @@ -9,6 +9,8 @@ class ResponseOutputItemAddedEvent(BaseModel): + """Returned when a new Item is created during Response generation.""" + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_output_item_done_event.py b/src/openai/types/realtime/response_output_item_done_event.py index 800e4ae8ee..63936b97d5 100644 --- a/src/openai/types/realtime/response_output_item_done_event.py +++ b/src/openai/types/realtime/response_output_item_done_event.py @@ -9,6 +9,12 @@ class ResponseOutputItemDoneEvent(BaseModel): + """Returned when an Item is done streaming. + + Also emitted when a Response is + interrupted, incomplete, or cancelled. 
+ """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/response_text_delta_event.py b/src/openai/types/realtime/response_text_delta_event.py index 493348aa22..b251b7639c 100644 --- a/src/openai/types/realtime/response_text_delta_event.py +++ b/src/openai/types/realtime/response_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseTextDeltaEvent(BaseModel): + """Returned when the text value of an "output_text" content part is updated.""" + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/response_text_done_event.py b/src/openai/types/realtime/response_text_done_event.py index 83c6cf0694..046e520222 100644 --- a/src/openai/types/realtime/response_text_done_event.py +++ b/src/openai/types/realtime/response_text_done_event.py @@ -8,6 +8,12 @@ class ResponseTextDoneEvent(BaseModel): + """Returned when the text value of an "output_text" content part is done streaming. + + Also + emitted when a Response is interrupted, incomplete, or cancelled. + """ + content_index: int """The index of the content part in the item's content array.""" diff --git a/src/openai/types/realtime/session_created_event.py b/src/openai/types/realtime/session_created_event.py index b5caad35d7..1b8d4a4d81 100644 --- a/src/openai/types/realtime/session_created_event.py +++ b/src/openai/types/realtime/session_created_event.py @@ -13,6 +13,13 @@ class SessionCreatedEvent(BaseModel): + """Returned when a Session is created. + + Emitted automatically when a new + connection is established as the first server event. This event will contain + the default Session configuration. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/realtime/session_update_event.py b/src/openai/types/realtime/session_update_event.py index 2e226162c4..a8422e4e89 100644 --- a/src/openai/types/realtime/session_update_event.py +++ b/src/openai/types/realtime/session_update_event.py @@ -13,6 +13,18 @@ class SessionUpdateEvent(BaseModel): + """ + Send this event to update the session’s configuration. + The client may send this event at any time to update any field + except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present in the `session.update` are updated. To clear a field like + `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array. + To clear a field like `turn_detection`, pass `null`. + """ + session: Session """Update the Realtime session. diff --git a/src/openai/types/realtime/session_update_event_param.py b/src/openai/types/realtime/session_update_event_param.py index 5962361431..910e89ca34 100644 --- a/src/openai/types/realtime/session_update_event_param.py +++ b/src/openai/types/realtime/session_update_event_param.py @@ -14,6 +14,18 @@ class SessionUpdateEventParam(TypedDict, total=False): + """ + Send this event to update the session’s configuration. + The client may send this event at any time to update any field + except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet. + + When the server receives a `session.update`, it will respond + with a `session.updated` event showing the full, effective configuration. + Only the fields that are present in the `session.update` are updated. 
To clear a field like + `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array. + To clear a field like `turn_detection`, pass `null`. + """ + session: Required[Session] """Update the Realtime session. diff --git a/src/openai/types/realtime/session_updated_event.py b/src/openai/types/realtime/session_updated_event.py index eb7ee0332d..e68a08d6cc 100644 --- a/src/openai/types/realtime/session_updated_event.py +++ b/src/openai/types/realtime/session_updated_event.py @@ -13,6 +13,11 @@ class SessionUpdatedEvent(BaseModel): + """ + Returned when a session is updated with a `session.update` event, unless + there is an error. + """ + event_id: str """The unique ID of the server event.""" diff --git a/src/openai/types/responses/apply_patch_tool.py b/src/openai/types/responses/apply_patch_tool.py index 07706ce239..f2ed245d10 100644 --- a/src/openai/types/responses/apply_patch_tool.py +++ b/src/openai/types/responses/apply_patch_tool.py @@ -8,5 +8,7 @@ class ApplyPatchTool(BaseModel): + """Allows the assistant to create, delete, or update files using unified diffs.""" + type: Literal["apply_patch"] """The type of the tool. Always `apply_patch`.""" diff --git a/src/openai/types/responses/apply_patch_tool_param.py b/src/openai/types/responses/apply_patch_tool_param.py index 93d15f0b1f..2e0a809099 100644 --- a/src/openai/types/responses/apply_patch_tool_param.py +++ b/src/openai/types/responses/apply_patch_tool_param.py @@ -8,5 +8,7 @@ class ApplyPatchToolParam(TypedDict, total=False): + """Allows the assistant to create, delete, or update files using unified diffs.""" + type: Required[Literal["apply_patch"]] """The type of the tool. Always `apply_patch`.""" diff --git a/src/openai/types/responses/computer_tool.py b/src/openai/types/responses/computer_tool.py index 5b844f5bf4..22871c841c 100644 --- a/src/openai/types/responses/computer_tool.py +++ b/src/openai/types/responses/computer_tool.py @@ -8,6 +8,11 @@ class ComputerTool(BaseModel): + """A tool that controls a virtual computer. + + Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + """ + display_height: int """The height of the computer display.""" diff --git a/src/openai/types/responses/computer_tool_param.py b/src/openai/types/responses/computer_tool_param.py index 06a5c132ec..cdf75a43f2 100644 --- a/src/openai/types/responses/computer_tool_param.py +++ b/src/openai/types/responses/computer_tool_param.py @@ -8,6 +8,11 @@ class ComputerToolParam(TypedDict, total=False): + """A tool that controls a virtual computer. + + Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + """ + display_height: Required[int] """The height of the computer display.""" diff --git a/src/openai/types/responses/custom_tool.py b/src/openai/types/responses/custom_tool.py index c16ae715eb..1ca401a486 100644 --- a/src/openai/types/responses/custom_tool.py +++ b/src/openai/types/responses/custom_tool.py @@ -10,6 +10,11 @@ class CustomTool(BaseModel): + """A custom tool that processes input using a specified format. 
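The clearing rules in the `session.update` docstring above translate to a payload like the following sketch; the nesting of `turn_detection` under `audio.input` is an assumption about the session shape.

```python
# Illustrative session.update: only fields present in the event are changed.
session_update = {
    "type": "session.update",
    "session": {
        "type": "realtime",
        "instructions": "",   # empty string clears instructions
        "tools": [],          # empty array removes all tools
        "audio": {"input": {"turn_detection": None}},  # null disables turn detection (assumed nesting)
    },
}
```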
+ + Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + """ + name: str """The name of the custom tool, used to identify it in tool calls.""" diff --git a/src/openai/types/responses/custom_tool_param.py b/src/openai/types/responses/custom_tool_param.py index 2afc8b19b8..4ce43cdfdb 100644 --- a/src/openai/types/responses/custom_tool_param.py +++ b/src/openai/types/responses/custom_tool_param.py @@ -10,6 +10,11 @@ class CustomToolParam(TypedDict, total=False): + """A custom tool that processes input using a specified format. + + Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + """ + name: Required[str] """The name of the custom tool, used to identify it in tool calls.""" diff --git a/src/openai/types/responses/easy_input_message.py b/src/openai/types/responses/easy_input_message.py index 4ed0194f9f..9a36a6b084 100644 --- a/src/openai/types/responses/easy_input_message.py +++ b/src/openai/types/responses/easy_input_message.py @@ -10,6 +10,14 @@ class EasyInputMessage(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Union[str, ResponseInputMessageContentList] """ Text, image, or audio input to the model, used to generate a response. Can also diff --git a/src/openai/types/responses/easy_input_message_param.py b/src/openai/types/responses/easy_input_message_param.py index ef2f1c5f37..0a382bddee 100644 --- a/src/openai/types/responses/easy_input_message_param.py +++ b/src/openai/types/responses/easy_input_message_param.py @@ -11,6 +11,14 @@ class EasyInputMessageParam(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. Messages with the + `assistant` role are presumed to have been generated by the model in previous + interactions. + """ + content: Required[Union[str, ResponseInputMessageContentListParam]] """ Text, image, or audio input to the model, used to generate a response. Can also diff --git a/src/openai/types/responses/file_search_tool.py b/src/openai/types/responses/file_search_tool.py index d0d08a323f..09c12876ca 100644 --- a/src/openai/types/responses/file_search_tool.py +++ b/src/openai/types/responses/file_search_tool.py @@ -13,6 +13,10 @@ class RankingOptionsHybridSearch(BaseModel): + """ + Weights that control how reciprocal rank fusion balances semantic embedding matches versus sparse keyword matches when hybrid search is enabled. + """ + embedding_weight: float """The weight of the embedding in the reciprocal ranking fusion.""" @@ -21,6 +25,8 @@ class RankingOptionsHybridSearch(BaseModel): class RankingOptions(BaseModel): + """Ranking options for search.""" + hybrid_search: Optional[RankingOptionsHybridSearch] = None """ Weights that control how reciprocal rank fusion balances semantic embedding @@ -39,6 +45,11 @@ class RankingOptions(BaseModel): class FileSearchTool(BaseModel): + """A tool that searches for relevant content from uploaded files. + + Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). 
+ """ + type: Literal["file_search"] """The type of the file search tool. Always `file_search`.""" diff --git a/src/openai/types/responses/file_search_tool_param.py b/src/openai/types/responses/file_search_tool_param.py index b37a669ebd..82831d0dc0 100644 --- a/src/openai/types/responses/file_search_tool_param.py +++ b/src/openai/types/responses/file_search_tool_param.py @@ -15,6 +15,10 @@ class RankingOptionsHybridSearch(TypedDict, total=False): + """ + Weights that control how reciprocal rank fusion balances semantic embedding matches versus sparse keyword matches when hybrid search is enabled. + """ + embedding_weight: Required[float] """The weight of the embedding in the reciprocal ranking fusion.""" @@ -23,6 +27,8 @@ class RankingOptionsHybridSearch(TypedDict, total=False): class RankingOptions(TypedDict, total=False): + """Ranking options for search.""" + hybrid_search: RankingOptionsHybridSearch """ Weights that control how reciprocal rank fusion balances semantic embedding @@ -41,6 +47,11 @@ class RankingOptions(TypedDict, total=False): class FileSearchToolParam(TypedDict, total=False): + """A tool that searches for relevant content from uploaded files. + + Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + """ + type: Required[Literal["file_search"]] """The type of the file search tool. Always `file_search`.""" diff --git a/src/openai/types/responses/function_shell_tool.py b/src/openai/types/responses/function_shell_tool.py index 1784b6c2f1..5b237aa705 100644 --- a/src/openai/types/responses/function_shell_tool.py +++ b/src/openai/types/responses/function_shell_tool.py @@ -8,5 +8,7 @@ class FunctionShellTool(BaseModel): + """A tool that allows the model to execute shell commands.""" + type: Literal["shell"] """The type of the shell tool. Always `shell`.""" diff --git a/src/openai/types/responses/function_shell_tool_param.py b/src/openai/types/responses/function_shell_tool_param.py index cee7ba23c9..c640ddab99 100644 --- a/src/openai/types/responses/function_shell_tool_param.py +++ b/src/openai/types/responses/function_shell_tool_param.py @@ -8,5 +8,7 @@ class FunctionShellToolParam(TypedDict, total=False): + """A tool that allows the model to execute shell commands.""" + type: Required[Literal["shell"]] """The type of the shell tool. Always `shell`.""" diff --git a/src/openai/types/responses/function_tool.py b/src/openai/types/responses/function_tool.py index d881565356..b0827a9fa7 100644 --- a/src/openai/types/responses/function_tool.py +++ b/src/openai/types/responses/function_tool.py @@ -9,6 +9,11 @@ class FunctionTool(BaseModel): + """Defines a function in your own code the model can choose to call. + + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + name: str """The name of the function to call.""" diff --git a/src/openai/types/responses/function_tool_param.py b/src/openai/types/responses/function_tool_param.py index 56bab36f47..ba0a3168c4 100644 --- a/src/openai/types/responses/function_tool_param.py +++ b/src/openai/types/responses/function_tool_param.py @@ -9,6 +9,11 @@ class FunctionToolParam(TypedDict, total=False): + """Defines a function in your own code the model can choose to call. + + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ """ + name: Required[str] """The name of the function to call.""" diff --git a/src/openai/types/responses/input_token_count_params.py b/src/openai/types/responses/input_token_count_params.py index 296d0718d8..50cc950e41 100644 --- a/src/openai/types/responses/input_token_count_params.py +++ b/src/openai/types/responses/input_token_count_params.py @@ -105,6 +105,14 @@ class InputTokenCountParams(TypedDict, total=False): class Text(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index cdd143f1cb..00c38c064e 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -28,6 +28,8 @@ class IncompleteDetails(BaseModel): + """Details about why the response is incomplete.""" + reason: Optional[Literal["max_output_tokens", "content_filter"]] = None """The reason why the response is incomplete.""" @@ -45,6 +47,11 @@ class IncompleteDetails(BaseModel): class Conversation(BaseModel): + """The conversation that this response belongs to. + + Input items and output items from this response are automatically added to this conversation. + """ + id: str """The unique ID of the conversation.""" diff --git a/src/openai/types/responses/response_apply_patch_tool_call.py b/src/openai/types/responses/response_apply_patch_tool_call.py index 7dc2a3c2b5..7af1300265 100644 --- a/src/openai/types/responses/response_apply_patch_tool_call.py +++ b/src/openai/types/responses/response_apply_patch_tool_call.py @@ -16,6 +16,8 @@ class OperationCreateFile(BaseModel): + """Instruction describing how to create a file via the apply_patch tool.""" + diff: str """Diff to apply.""" @@ -27,6 +29,8 @@ class OperationCreateFile(BaseModel): class OperationDeleteFile(BaseModel): + """Instruction describing how to delete a file via the apply_patch tool.""" + path: str """Path of the file to delete.""" @@ -35,6 +39,8 @@ class OperationDeleteFile(BaseModel): class OperationUpdateFile(BaseModel): + """Instruction describing how to update a file via the apply_patch tool.""" + diff: str """Diff to apply.""" @@ -51,6 +57,8 @@ class OperationUpdateFile(BaseModel): class ResponseApplyPatchToolCall(BaseModel): + """A tool call that applies file diffs by creating, deleting, or updating files.""" + id: str """The unique ID of the apply patch tool call. diff --git a/src/openai/types/responses/response_apply_patch_tool_call_output.py b/src/openai/types/responses/response_apply_patch_tool_call_output.py index cf0bcfeecc..de63c6e2ee 100644 --- a/src/openai/types/responses/response_apply_patch_tool_call_output.py +++ b/src/openai/types/responses/response_apply_patch_tool_call_output.py @@ -9,6 +9,8 @@ class ResponseApplyPatchToolCallOutput(BaseModel): + """The output emitted by an apply patch tool call.""" + id: str """The unique ID of the apply patch tool call output. 
diff --git a/src/openai/types/responses/response_audio_delta_event.py b/src/openai/types/responses/response_audio_delta_event.py index 6fb7887b80..e577d65d04 100644 --- a/src/openai/types/responses/response_audio_delta_event.py +++ b/src/openai/types/responses/response_audio_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioDeltaEvent(BaseModel): + """Emitted when there is a partial audio response.""" + delta: str """A chunk of Base64 encoded response audio bytes.""" diff --git a/src/openai/types/responses/response_audio_done_event.py b/src/openai/types/responses/response_audio_done_event.py index 2592ae8dcd..f5f0401c86 100644 --- a/src/openai/types/responses/response_audio_done_event.py +++ b/src/openai/types/responses/response_audio_done_event.py @@ -8,6 +8,8 @@ class ResponseAudioDoneEvent(BaseModel): + """Emitted when the audio response is complete.""" + sequence_number: int """The sequence number of the delta.""" diff --git a/src/openai/types/responses/response_audio_transcript_delta_event.py b/src/openai/types/responses/response_audio_transcript_delta_event.py index 830c133d61..03be59a29f 100644 --- a/src/openai/types/responses/response_audio_transcript_delta_event.py +++ b/src/openai/types/responses/response_audio_transcript_delta_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel): + """Emitted when there is a partial transcript of audio.""" + delta: str """The partial transcript of the audio response.""" diff --git a/src/openai/types/responses/response_audio_transcript_done_event.py b/src/openai/types/responses/response_audio_transcript_done_event.py index e39f501cf0..87219e4844 100644 --- a/src/openai/types/responses/response_audio_transcript_done_event.py +++ b/src/openai/types/responses/response_audio_transcript_done_event.py @@ -8,6 +8,8 @@ class ResponseAudioTranscriptDoneEvent(BaseModel): + """Emitted when the full audio transcript is completed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py index c5fef939b1..c6bc8b73ea 100644 --- a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel): + """Emitted when a partial code snippet is streamed by the code interpreter.""" + delta: str """The partial code snippet being streamed by the code interpreter.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py index 5201a02d36..186c03711a 100644 --- a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel): + """Emitted when the code snippet is finalized by the code interpreter.""" + code: str """The final code snippet output by the code interpreter.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_completed_event.py b/src/openai/types/responses/response_code_interpreter_call_completed_event.py index bb9563a16b..197e39e7e9 100644 --- a/src/openai/types/responses/response_code_interpreter_call_completed_event.py +++ 
b/src/openai/types/responses/response_code_interpreter_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallCompletedEvent(BaseModel): + """Emitted when the code interpreter call is completed.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py index 9c6b221004..c775f1b864 100644 --- a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallInProgressEvent(BaseModel): + """Emitted when a code interpreter call is in progress.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py index f6191e4165..85e9c87f08 100644 --- a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py +++ b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py @@ -8,6 +8,8 @@ class ResponseCodeInterpreterCallInterpretingEvent(BaseModel): + """Emitted when the code interpreter is actively interpreting the code snippet.""" + item_id: str """The unique identifier of the code interpreter tool call item.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index b651581520..d7e30f4920 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -10,6 +10,8 @@ class OutputLogs(BaseModel): + """The logs output from the code interpreter.""" + logs: str """The logs output from the code interpreter.""" @@ -18,6 +20,8 @@ class OutputLogs(BaseModel): class OutputImage(BaseModel): + """The image output from the code interpreter.""" + type: Literal["image"] """The type of the output. Always `image`.""" @@ -29,6 +33,8 @@ class OutputImage(BaseModel): class ResponseCodeInterpreterToolCall(BaseModel): + """A tool call to run code.""" + id: str """The unique ID of the code interpreter tool call.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py index d402b872a4..fc03a3fe48 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -9,6 +9,8 @@ class OutputLogs(TypedDict, total=False): + """The logs output from the code interpreter.""" + logs: Required[str] """The logs output from the code interpreter.""" @@ -17,6 +19,8 @@ class OutputLogs(TypedDict, total=False): class OutputImage(TypedDict, total=False): + """The image output from the code interpreter.""" + type: Required[Literal["image"]] """The type of the output. 
Always `image`.""" @@ -28,6 +32,8 @@ class OutputImage(TypedDict, total=False): class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): + """A tool call to run code.""" + id: Required[str] """The unique ID of the code interpreter tool call.""" diff --git a/src/openai/types/responses/response_compaction_item.py b/src/openai/types/responses/response_compaction_item.py index dc5f839bb8..f5f8b97f4e 100644 --- a/src/openai/types/responses/response_compaction_item.py +++ b/src/openai/types/responses/response_compaction_item.py @@ -9,6 +9,10 @@ class ResponseCompactionItem(BaseModel): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). + """ + id: str """The unique ID of the compaction item.""" diff --git a/src/openai/types/responses/response_compaction_item_param.py b/src/openai/types/responses/response_compaction_item_param.py index 8fdc2a561a..5dcc921d67 100644 --- a/src/openai/types/responses/response_compaction_item_param.py +++ b/src/openai/types/responses/response_compaction_item_param.py @@ -9,6 +9,10 @@ class ResponseCompactionItemParam(BaseModel): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). + """ + encrypted_content: str type: Literal["compaction"] diff --git a/src/openai/types/responses/response_compaction_item_param_param.py b/src/openai/types/responses/response_compaction_item_param_param.py index 0d12296589..b9b5ab031c 100644 --- a/src/openai/types/responses/response_compaction_item_param_param.py +++ b/src/openai/types/responses/response_compaction_item_param_param.py @@ -9,6 +9,10 @@ class ResponseCompactionItemParamParam(TypedDict, total=False): + """ + A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact). + """ + encrypted_content: Required[str] type: Required[Literal["compaction"]] diff --git a/src/openai/types/responses/response_completed_event.py b/src/openai/types/responses/response_completed_event.py index 8a2bd51f75..6dc958101c 100644 --- a/src/openai/types/responses/response_completed_event.py +++ b/src/openai/types/responses/response_completed_event.py @@ -9,6 +9,8 @@ class ResponseCompletedEvent(BaseModel): + """Emitted when the model response is complete.""" + response: Response """Properties of the completed response.""" diff --git a/src/openai/types/responses/response_computer_tool_call.py b/src/openai/types/responses/response_computer_tool_call.py index f1476fa0fb..4e1b3cf7fd 100644 --- a/src/openai/types/responses/response_computer_tool_call.py +++ b/src/openai/types/responses/response_computer_tool_call.py @@ -24,6 +24,8 @@ class ActionClick(BaseModel): + """A click action.""" + button: Literal["left", "right", "wheel", "back", "forward"] """Indicates which mouse button was pressed during the click. @@ -41,6 +43,8 @@ class ActionClick(BaseModel): class ActionDoubleClick(BaseModel): + """A double click action.""" + type: Literal["double_click"] """Specifies the event type. @@ -55,6 +59,8 @@ class ActionDoubleClick(BaseModel): class ActionDragPath(BaseModel): + """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.""" + x: int """The x-coordinate.""" @@ -63,6 +69,8 @@ class ActionDragPath(BaseModel): class ActionDrag(BaseModel): + """A drag action.""" + path: List[ActionDragPath] """An array of coordinates representing the path of the drag action. 
@@ -84,6 +92,8 @@ class ActionDrag(BaseModel): class ActionKeypress(BaseModel): + """A collection of keypresses the model would like to perform.""" + keys: List[str] """The combination of keys the model is requesting to be pressed. @@ -98,6 +108,8 @@ class ActionKeypress(BaseModel): class ActionMove(BaseModel): + """A mouse move action.""" + type: Literal["move"] """Specifies the event type. @@ -112,6 +124,8 @@ class ActionMove(BaseModel): class ActionScreenshot(BaseModel): + """A screenshot action.""" + type: Literal["screenshot"] """Specifies the event type. @@ -120,6 +134,8 @@ class ActionScreenshot(BaseModel): class ActionScroll(BaseModel): + """A scroll action.""" + scroll_x: int """The horizontal scroll distance.""" @@ -140,6 +156,8 @@ class ActionScroll(BaseModel): class ActionType(BaseModel): + """An action to type in text.""" + text: str """The text to type.""" @@ -151,6 +169,8 @@ class ActionType(BaseModel): class ActionWait(BaseModel): + """A wait action.""" + type: Literal["wait"] """Specifies the event type. @@ -175,6 +195,8 @@ class ActionWait(BaseModel): class PendingSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" @@ -186,6 +208,12 @@ class PendingSafetyCheck(BaseModel): class ResponseComputerToolCall(BaseModel): + """A tool call to a computer use tool. + + See the + [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. + """ + id: str """The unique ID of the computer call.""" diff --git a/src/openai/types/responses/response_computer_tool_call_output_item.py b/src/openai/types/responses/response_computer_tool_call_output_item.py index e1ac358cc6..90e935c3bd 100644 --- a/src/openai/types/responses/response_computer_tool_call_output_item.py +++ b/src/openai/types/responses/response_computer_tool_call_output_item.py @@ -10,6 +10,8 @@ class AcknowledgedSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" diff --git a/src/openai/types/responses/response_computer_tool_call_output_screenshot.py b/src/openai/types/responses/response_computer_tool_call_output_screenshot.py index a500da85c1..2c16f215eb 100644 --- a/src/openai/types/responses/response_computer_tool_call_output_screenshot.py +++ b/src/openai/types/responses/response_computer_tool_call_output_screenshot.py @@ -9,6 +9,8 @@ class ResponseComputerToolCallOutputScreenshot(BaseModel): + """A computer screenshot image used with the computer use tool.""" + type: Literal["computer_screenshot"] """Specifies the event type. diff --git a/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py b/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py index efc2028aa4..857ccf9fb9 100644 --- a/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py +++ b/src/openai/types/responses/response_computer_tool_call_output_screenshot_param.py @@ -8,6 +8,8 @@ class ResponseComputerToolCallOutputScreenshotParam(TypedDict, total=False): + """A computer screenshot image used with the computer use tool.""" + type: Required[Literal["computer_screenshot"]] """Specifies the event type. 
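The computer tool call models above enumerate the actions a model can request (click, drag, keypress, move, scroll, type, screenshot, wait) and the `computer_screenshot` output that is sent back. A rough sketch of one iteration of that loop follows; `perform`, `take_screenshot`, the `computer_use_preview` tool type, and the model name are assumptions standing in for your own environment and the computer use guide.

import base64

from openai import OpenAI

client = OpenAI()


def take_screenshot() -> bytes:
    """Placeholder: capture the current screen in your environment."""
    raise NotImplementedError


def perform(action) -> None:
    """Placeholder: dispatch on action.type ('click', 'scroll', 'type', 'wait', ...)."""
    raise NotImplementedError


def continue_computer_turn(previous_response):
    # Handle the first computer_call item, then send back a computer_call_output
    # containing a computer_screenshot, mirroring the models documented above.
    for item in previous_response.output:
        if item.type != "computer_call":
            continue
        perform(item.action)
        screenshot = base64.b64encode(take_screenshot()).decode()
        return client.responses.create(
            model="computer-use-preview",  # assumed model name
            previous_response_id=previous_response.id,
            tools=[{
                "type": "computer_use_preview",  # assumed tool type literal
                "display_width": 1024,
                "display_height": 768,
                "environment": "browser",
            }],
            input=[{
                "type": "computer_call_output",
                "call_id": item.call_id,
                "output": {
                    "type": "computer_screenshot",
                    "image_url": f"data:image/png;base64,{screenshot}",
                },
            }],
        )
    return previous_response
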
diff --git a/src/openai/types/responses/response_computer_tool_call_param.py b/src/openai/types/responses/response_computer_tool_call_param.py index 228f76bac9..550ba599cd 100644 --- a/src/openai/types/responses/response_computer_tool_call_param.py +++ b/src/openai/types/responses/response_computer_tool_call_param.py @@ -25,6 +25,8 @@ class ActionClick(TypedDict, total=False): + """A click action.""" + button: Required[Literal["left", "right", "wheel", "back", "forward"]] """Indicates which mouse button was pressed during the click. @@ -42,6 +44,8 @@ class ActionClick(TypedDict, total=False): class ActionDoubleClick(TypedDict, total=False): + """A double click action.""" + type: Required[Literal["double_click"]] """Specifies the event type. @@ -56,6 +60,8 @@ class ActionDoubleClick(TypedDict, total=False): class ActionDragPath(TypedDict, total=False): + """An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.""" + x: Required[int] """The x-coordinate.""" @@ -64,6 +70,8 @@ class ActionDragPath(TypedDict, total=False): class ActionDrag(TypedDict, total=False): + """A drag action.""" + path: Required[Iterable[ActionDragPath]] """An array of coordinates representing the path of the drag action. @@ -85,6 +93,8 @@ class ActionDrag(TypedDict, total=False): class ActionKeypress(TypedDict, total=False): + """A collection of keypresses the model would like to perform.""" + keys: Required[SequenceNotStr[str]] """The combination of keys the model is requesting to be pressed. @@ -99,6 +109,8 @@ class ActionKeypress(TypedDict, total=False): class ActionMove(TypedDict, total=False): + """A mouse move action.""" + type: Required[Literal["move"]] """Specifies the event type. @@ -113,6 +125,8 @@ class ActionMove(TypedDict, total=False): class ActionScreenshot(TypedDict, total=False): + """A screenshot action.""" + type: Required[Literal["screenshot"]] """Specifies the event type. @@ -121,6 +135,8 @@ class ActionScreenshot(TypedDict, total=False): class ActionScroll(TypedDict, total=False): + """A scroll action.""" + scroll_x: Required[int] """The horizontal scroll distance.""" @@ -141,6 +157,8 @@ class ActionScroll(TypedDict, total=False): class ActionType(TypedDict, total=False): + """An action to type in text.""" + text: Required[str] """The text to type.""" @@ -152,6 +170,8 @@ class ActionType(TypedDict, total=False): class ActionWait(TypedDict, total=False): + """A wait action.""" + type: Required[Literal["wait"]] """Specifies the event type. @@ -173,6 +193,8 @@ class ActionWait(TypedDict, total=False): class PendingSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -184,6 +206,12 @@ class PendingSafetyCheck(TypedDict, total=False): class ResponseComputerToolCallParam(TypedDict, total=False): + """A tool call to a computer use tool. + + See the + [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information. 
+ """ + id: Required[str] """The unique ID of the computer call.""" diff --git a/src/openai/types/responses/response_content_part_added_event.py b/src/openai/types/responses/response_content_part_added_event.py index c78e80d1c4..ec9893159d 100644 --- a/src/openai/types/responses/response_content_part_added_event.py +++ b/src/openai/types/responses/response_content_part_added_event.py @@ -12,6 +12,8 @@ class PartReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +27,8 @@ class PartReasoningText(BaseModel): class ResponseContentPartAddedEvent(BaseModel): + """Emitted when a new content part is added.""" + content_index: int """The index of the content part that was added.""" diff --git a/src/openai/types/responses/response_content_part_done_event.py b/src/openai/types/responses/response_content_part_done_event.py index 732f2303ef..f896ad8743 100644 --- a/src/openai/types/responses/response_content_part_done_event.py +++ b/src/openai/types/responses/response_content_part_done_event.py @@ -12,6 +12,8 @@ class PartReasoningText(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +27,8 @@ class PartReasoningText(BaseModel): class ResponseContentPartDoneEvent(BaseModel): + """Emitted when a content part is done.""" + content_index: int """The index of the content part that is done.""" diff --git a/src/openai/types/responses/response_conversation_param.py b/src/openai/types/responses/response_conversation_param.py index 067bdc7a31..d1587fe68a 100644 --- a/src/openai/types/responses/response_conversation_param.py +++ b/src/openai/types/responses/response_conversation_param.py @@ -8,5 +8,7 @@ class ResponseConversationParam(TypedDict, total=False): + """The conversation that this response belongs to.""" + id: Required[str] """The unique ID of the conversation.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 64888ac62d..15844c6597 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -283,6 +283,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): class StreamOptions(TypedDict, total=False): + """Options for streaming responses. Only set this when you set `stream: true`.""" + include_obfuscation: bool """When true, stream obfuscation will be enabled. 
diff --git a/src/openai/types/responses/response_created_event.py b/src/openai/types/responses/response_created_event.py index 73a9d700d4..308b2f4916 100644 --- a/src/openai/types/responses/response_created_event.py +++ b/src/openai/types/responses/response_created_event.py @@ -9,6 +9,8 @@ class ResponseCreatedEvent(BaseModel): + """An event that is emitted when a response is created.""" + response: Response """The response that was created.""" diff --git a/src/openai/types/responses/response_custom_tool_call.py b/src/openai/types/responses/response_custom_tool_call.py index 38c650e662..f05743966e 100644 --- a/src/openai/types/responses/response_custom_tool_call.py +++ b/src/openai/types/responses/response_custom_tool_call.py @@ -9,6 +9,8 @@ class ResponseCustomToolCall(BaseModel): + """A call to a custom tool created by the model.""" + call_id: str """An identifier used to map this custom tool call to a tool call output.""" diff --git a/src/openai/types/responses/response_custom_tool_call_input_delta_event.py b/src/openai/types/responses/response_custom_tool_call_input_delta_event.py index 6c33102d75..7473d33d9a 100644 --- a/src/openai/types/responses/response_custom_tool_call_input_delta_event.py +++ b/src/openai/types/responses/response_custom_tool_call_input_delta_event.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallInputDeltaEvent(BaseModel): + """Event representing a delta (partial update) to the input of a custom tool call.""" + delta: str """The incremental input data (delta) for the custom tool call.""" diff --git a/src/openai/types/responses/response_custom_tool_call_input_done_event.py b/src/openai/types/responses/response_custom_tool_call_input_done_event.py index 35a2fee22b..be47ae8e96 100644 --- a/src/openai/types/responses/response_custom_tool_call_input_done_event.py +++ b/src/openai/types/responses/response_custom_tool_call_input_done_event.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallInputDoneEvent(BaseModel): + """Event indicating that input for a custom tool call is complete.""" + input: str """The complete input data for the custom tool call.""" diff --git a/src/openai/types/responses/response_custom_tool_call_output.py b/src/openai/types/responses/response_custom_tool_call_output.py index 9db9e7e5cf..833956493b 100644 --- a/src/openai/types/responses/response_custom_tool_call_output.py +++ b/src/openai/types/responses/response_custom_tool_call_output.py @@ -17,6 +17,8 @@ class ResponseCustomToolCallOutput(BaseModel): + """The output of a custom tool call from your code, being sent back to the model.""" + call_id: str """The call ID, used to map this custom tool call output to a custom tool call.""" diff --git a/src/openai/types/responses/response_custom_tool_call_output_param.py b/src/openai/types/responses/response_custom_tool_call_output_param.py index e967a37cff..db0034216a 100644 --- a/src/openai/types/responses/response_custom_tool_call_output_param.py +++ b/src/openai/types/responses/response_custom_tool_call_output_param.py @@ -15,6 +15,8 @@ class ResponseCustomToolCallOutputParam(TypedDict, total=False): + """The output of a custom tool call from your code, being sent back to the model.""" + call_id: Required[str] """The call ID, used to map this custom tool call output to a custom tool call.""" diff --git a/src/openai/types/responses/response_custom_tool_call_param.py b/src/openai/types/responses/response_custom_tool_call_param.py index e15beac29f..5d4ce3376c 100644 --- a/src/openai/types/responses/response_custom_tool_call_param.py +++ 
b/src/openai/types/responses/response_custom_tool_call_param.py @@ -8,6 +8,8 @@ class ResponseCustomToolCallParam(TypedDict, total=False): + """A call to a custom tool created by the model.""" + call_id: Required[str] """An identifier used to map this custom tool call to a tool call output.""" diff --git a/src/openai/types/responses/response_error.py b/src/openai/types/responses/response_error.py index 90f1fcf5da..90958d1c13 100644 --- a/src/openai/types/responses/response_error.py +++ b/src/openai/types/responses/response_error.py @@ -8,6 +8,8 @@ class ResponseError(BaseModel): + """An error object returned when the model fails to generate a Response.""" + code: Literal[ "server_error", "rate_limit_exceeded", diff --git a/src/openai/types/responses/response_error_event.py b/src/openai/types/responses/response_error_event.py index 826c395125..1789f731b4 100644 --- a/src/openai/types/responses/response_error_event.py +++ b/src/openai/types/responses/response_error_event.py @@ -9,6 +9,8 @@ class ResponseErrorEvent(BaseModel): + """Emitted when an error occurs.""" + code: Optional[str] = None """The error code.""" diff --git a/src/openai/types/responses/response_failed_event.py b/src/openai/types/responses/response_failed_event.py index cdd3d7d808..2232c9678d 100644 --- a/src/openai/types/responses/response_failed_event.py +++ b/src/openai/types/responses/response_failed_event.py @@ -9,6 +9,8 @@ class ResponseFailedEvent(BaseModel): + """An event that is emitted when a response fails.""" + response: Response """The response that failed.""" diff --git a/src/openai/types/responses/response_file_search_call_completed_event.py b/src/openai/types/responses/response_file_search_call_completed_event.py index 08e51b2d3f..88ffa5ac56 100644 --- a/src/openai/types/responses/response_file_search_call_completed_event.py +++ b/src/openai/types/responses/response_file_search_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallCompletedEvent(BaseModel): + """Emitted when a file search call is completed (results found).""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git a/src/openai/types/responses/response_file_search_call_in_progress_event.py b/src/openai/types/responses/response_file_search_call_in_progress_event.py index 63840a649f..4f3504fda4 100644 --- a/src/openai/types/responses/response_file_search_call_in_progress_event.py +++ b/src/openai/types/responses/response_file_search_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallInProgressEvent(BaseModel): + """Emitted when a file search call is initiated.""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git a/src/openai/types/responses/response_file_search_call_searching_event.py b/src/openai/types/responses/response_file_search_call_searching_event.py index 706c8c57ad..5bf1a076dd 100644 --- a/src/openai/types/responses/response_file_search_call_searching_event.py +++ b/src/openai/types/responses/response_file_search_call_searching_event.py @@ -8,6 +8,8 @@ class ResponseFileSearchCallSearchingEvent(BaseModel): + """Emitted when a file search is currently searching.""" + item_id: str """The ID of the output item that the file search call is initiated.""" diff --git a/src/openai/types/responses/response_file_search_tool_call.py b/src/openai/types/responses/response_file_search_tool_call.py index ef1c6a5608..fa45631345 100644 --- a/src/openai/types/responses/response_file_search_tool_call.py +++ 
b/src/openai/types/responses/response_file_search_tool_call.py @@ -32,6 +32,12 @@ class Result(BaseModel): class ResponseFileSearchToolCall(BaseModel): + """The results of a file search tool call. + + See the + [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + """ + id: str """The unique ID of the file search tool call.""" diff --git a/src/openai/types/responses/response_file_search_tool_call_param.py b/src/openai/types/responses/response_file_search_tool_call_param.py index 4903dca4fb..45a5bbb486 100644 --- a/src/openai/types/responses/response_file_search_tool_call_param.py +++ b/src/openai/types/responses/response_file_search_tool_call_param.py @@ -34,6 +34,12 @@ class Result(TypedDict, total=False): class ResponseFileSearchToolCallParam(TypedDict, total=False): + """The results of a file search tool call. + + See the + [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information. + """ + id: Required[str] """The unique ID of the file search tool call.""" diff --git a/src/openai/types/responses/response_format_text_json_schema_config.py b/src/openai/types/responses/response_format_text_json_schema_config.py index 001fcf5bab..b953112621 100644 --- a/src/openai/types/responses/response_format_text_json_schema_config.py +++ b/src/openai/types/responses/response_format_text_json_schema_config.py @@ -11,6 +11,12 @@ class ResponseFormatTextJSONSchemaConfig(BaseModel): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + name: str """The name of the response format. diff --git a/src/openai/types/responses/response_format_text_json_schema_config_param.py b/src/openai/types/responses/response_format_text_json_schema_config_param.py index f293a80c5a..6f5c633106 100644 --- a/src/openai/types/responses/response_format_text_json_schema_config_param.py +++ b/src/openai/types/responses/response_format_text_json_schema_config_param.py @@ -9,6 +9,12 @@ class ResponseFormatTextJSONSchemaConfigParam(TypedDict, total=False): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + name: Required[str] """The name of the response format. 
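The JSON Schema response format documented above is what drives Structured Outputs in the Responses API. A minimal sketch follows; the example schema and model name are illustrative, and the `schema`/`strict` fields not shown in this hunk are assumed from the Structured Outputs guide.

import json

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5.1",  # placeholder model name
    input="Extract the origin and destination from: 'She flew from Lima to Osaka.'",
    text={
        "format": {
            "type": "json_schema",
            "name": "route",  # illustrative name
            "schema": {  # `schema` and `strict` assumed from the Structured Outputs guide
                "type": "object",
                "properties": {
                    "origin": {"type": "string"},
                    "destination": {"type": "string"},
                },
                "required": ["origin", "destination"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    },
)

print(json.loads(response.output_text))
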
diff --git a/src/openai/types/responses/response_function_call_arguments_delta_event.py b/src/openai/types/responses/response_function_call_arguments_delta_event.py index c6bc5dfad7..0798c2e123 100644 --- a/src/openai/types/responses/response_function_call_arguments_delta_event.py +++ b/src/openai/types/responses/response_function_call_arguments_delta_event.py @@ -8,6 +8,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel): + """Emitted when there is a partial function-call arguments delta.""" + delta: str """The function-call arguments delta that is added.""" diff --git a/src/openai/types/responses/response_function_call_arguments_done_event.py b/src/openai/types/responses/response_function_call_arguments_done_event.py index 4ee5ed7fe1..543cd073a2 100644 --- a/src/openai/types/responses/response_function_call_arguments_done_event.py +++ b/src/openai/types/responses/response_function_call_arguments_done_event.py @@ -8,6 +8,8 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel): + """Emitted when function-call arguments are finalized.""" + arguments: str """The function-call arguments.""" diff --git a/src/openai/types/responses/response_function_shell_call_output_content.py b/src/openai/types/responses/response_function_shell_call_output_content.py index e0e2c09ad1..dae48f14da 100644 --- a/src/openai/types/responses/response_function_shell_call_output_content.py +++ b/src/openai/types/responses/response_function_shell_call_output_content.py @@ -10,11 +10,15 @@ class OutcomeTimeout(BaseModel): + """Indicates that the shell call exceeded its configured time limit.""" + type: Literal["timeout"] """The outcome type. Always `timeout`.""" class OutcomeExit(BaseModel): + """Indicates that the shell commands finished and returned an exit code.""" + exit_code: int """The exit code returned by the shell process.""" @@ -26,6 +30,8 @@ class OutcomeExit(BaseModel): class ResponseFunctionShellCallOutputContent(BaseModel): + """Captured stdout and stderr for a portion of a shell tool call output.""" + outcome: Outcome """The exit or timeout outcome associated with this shell call.""" diff --git a/src/openai/types/responses/response_function_shell_call_output_content_param.py b/src/openai/types/responses/response_function_shell_call_output_content_param.py index fa065bd4b5..4d8ea70d08 100644 --- a/src/openai/types/responses/response_function_shell_call_output_content_param.py +++ b/src/openai/types/responses/response_function_shell_call_output_content_param.py @@ -9,11 +9,15 @@ class OutcomeTimeout(TypedDict, total=False): + """Indicates that the shell call exceeded its configured time limit.""" + type: Required[Literal["timeout"]] """The outcome type. 
Always `timeout`.""" class OutcomeExit(TypedDict, total=False): + """Indicates that the shell commands finished and returned an exit code.""" + exit_code: Required[int] """The exit code returned by the shell process.""" @@ -25,6 +29,8 @@ class OutcomeExit(TypedDict, total=False): class ResponseFunctionShellCallOutputContentParam(TypedDict, total=False): + """Captured stdout and stderr for a portion of a shell tool call output.""" + outcome: Required[Outcome] """The exit or timeout outcome associated with this shell call.""" diff --git a/src/openai/types/responses/response_function_shell_tool_call.py b/src/openai/types/responses/response_function_shell_tool_call.py index de42cb0640..7c6a184ed4 100644 --- a/src/openai/types/responses/response_function_shell_tool_call.py +++ b/src/openai/types/responses/response_function_shell_tool_call.py @@ -9,6 +9,8 @@ class Action(BaseModel): + """The shell commands and limits that describe how to run the tool call.""" + commands: List[str] max_output_length: Optional[int] = None @@ -19,6 +21,8 @@ class Action(BaseModel): class ResponseFunctionShellToolCall(BaseModel): + """A tool call that executes one or more shell commands in a managed environment.""" + id: str """The unique ID of the shell tool call. diff --git a/src/openai/types/responses/response_function_shell_tool_call_output.py b/src/openai/types/responses/response_function_shell_tool_call_output.py index e74927df41..7885ee2f83 100644 --- a/src/openai/types/responses/response_function_shell_tool_call_output.py +++ b/src/openai/types/responses/response_function_shell_tool_call_output.py @@ -16,11 +16,15 @@ class OutputOutcomeTimeout(BaseModel): + """Indicates that the shell call exceeded its configured time limit.""" + type: Literal["timeout"] """The outcome type. Always `timeout`.""" class OutputOutcomeExit(BaseModel): + """Indicates that the shell commands finished and returned an exit code.""" + exit_code: int """Exit code from the shell process.""" @@ -32,6 +36,8 @@ class OutputOutcomeExit(BaseModel): class Output(BaseModel): + """The content of a shell call output.""" + outcome: OutputOutcome """ Represents either an exit outcome (with an exit code) or a timeout outcome for a @@ -46,6 +52,8 @@ class Output(BaseModel): class ResponseFunctionShellToolCallOutput(BaseModel): + """The output of a shell tool call.""" + id: str """The unique ID of the shell call output. diff --git a/src/openai/types/responses/response_function_tool_call.py b/src/openai/types/responses/response_function_tool_call.py index 2a8482204e..194e3f7d6a 100644 --- a/src/openai/types/responses/response_function_tool_call.py +++ b/src/openai/types/responses/response_function_tool_call.py @@ -9,6 +9,12 @@ class ResponseFunctionToolCall(BaseModel): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + """ + arguments: str """A JSON string of the arguments to pass to the function.""" diff --git a/src/openai/types/responses/response_function_tool_call_item.py b/src/openai/types/responses/response_function_tool_call_item.py index 762015a4b1..3df299e512 100644 --- a/src/openai/types/responses/response_function_tool_call_item.py +++ b/src/openai/types/responses/response_function_tool_call_item.py @@ -6,5 +6,11 @@ class ResponseFunctionToolCallItem(ResponseFunctionToolCall): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. 
+ """ + id: str # type: ignore """The unique ID of the function tool call.""" diff --git a/src/openai/types/responses/response_function_tool_call_param.py b/src/openai/types/responses/response_function_tool_call_param.py index eaa263cf67..4e8dd3d629 100644 --- a/src/openai/types/responses/response_function_tool_call_param.py +++ b/src/openai/types/responses/response_function_tool_call_param.py @@ -8,6 +8,12 @@ class ResponseFunctionToolCallParam(TypedDict, total=False): + """A tool call to run a function. + + See the + [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information. + """ + arguments: Required[str] """A JSON string of the arguments to pass to the function.""" diff --git a/src/openai/types/responses/response_function_web_search.py b/src/openai/types/responses/response_function_web_search.py index f3e80e6a8f..1450fba4d1 100644 --- a/src/openai/types/responses/response_function_web_search.py +++ b/src/openai/types/responses/response_function_web_search.py @@ -10,6 +10,8 @@ class ActionSearchSource(BaseModel): + """A source used in the search.""" + type: Literal["url"] """The type of source. Always `url`.""" @@ -18,6 +20,8 @@ class ActionSearchSource(BaseModel): class ActionSearch(BaseModel): + """Action type "search" - Performs a web search query.""" + query: str """The search query.""" @@ -29,6 +33,8 @@ class ActionSearch(BaseModel): class ActionOpenPage(BaseModel): + """Action type "open_page" - Opens a specific URL from search results.""" + type: Literal["open_page"] """The action type.""" @@ -37,6 +43,8 @@ class ActionOpenPage(BaseModel): class ActionFind(BaseModel): + """Action type "find": Searches for a pattern within a loaded page.""" + pattern: str """The pattern or text to search for within the page.""" @@ -51,6 +59,12 @@ class ActionFind(BaseModel): class ResponseFunctionWebSearch(BaseModel): + """The results of a web search tool call. + + See the + [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + """ + id: str """The unique ID of the web search tool call.""" diff --git a/src/openai/types/responses/response_function_web_search_param.py b/src/openai/types/responses/response_function_web_search_param.py index fc019d3eb7..8d0b60334d 100644 --- a/src/openai/types/responses/response_function_web_search_param.py +++ b/src/openai/types/responses/response_function_web_search_param.py @@ -16,6 +16,8 @@ class ActionSearchSource(TypedDict, total=False): + """A source used in the search.""" + type: Required[Literal["url"]] """The type of source. Always `url`.""" @@ -24,6 +26,8 @@ class ActionSearchSource(TypedDict, total=False): class ActionSearch(TypedDict, total=False): + """Action type "search" - Performs a web search query.""" + query: Required[str] """The search query.""" @@ -35,6 +39,8 @@ class ActionSearch(TypedDict, total=False): class ActionOpenPage(TypedDict, total=False): + """Action type "open_page" - Opens a specific URL from search results.""" + type: Required[Literal["open_page"]] """The action type.""" @@ -43,6 +49,8 @@ class ActionOpenPage(TypedDict, total=False): class ActionFind(TypedDict, total=False): + """Action type "find": Searches for a pattern within a loaded page.""" + pattern: Required[str] """The pattern or text to search for within the page.""" @@ -57,6 +65,12 @@ class ActionFind(TypedDict, total=False): class ResponseFunctionWebSearchParam(TypedDict, total=False): + """The results of a web search tool call. 
+ + See the + [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information. + """ + id: Required[str] """The unique ID of the web search tool call.""" diff --git a/src/openai/types/responses/response_image_gen_call_completed_event.py b/src/openai/types/responses/response_image_gen_call_completed_event.py index a554273ed0..f6ce9d0fd8 100644 --- a/src/openai/types/responses/response_image_gen_call_completed_event.py +++ b/src/openai/types/responses/response_image_gen_call_completed_event.py @@ -8,6 +8,10 @@ class ResponseImageGenCallCompletedEvent(BaseModel): + """ + Emitted when an image generation tool call has completed and the final image is available. + """ + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/src/openai/types/responses/response_image_gen_call_generating_event.py b/src/openai/types/responses/response_image_gen_call_generating_event.py index 74b4f57333..8e3026d0dc 100644 --- a/src/openai/types/responses/response_image_gen_call_generating_event.py +++ b/src/openai/types/responses/response_image_gen_call_generating_event.py @@ -8,6 +8,10 @@ class ResponseImageGenCallGeneratingEvent(BaseModel): + """ + Emitted when an image generation tool call is actively generating an image (intermediate state). + """ + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/src/openai/types/responses/response_image_gen_call_in_progress_event.py b/src/openai/types/responses/response_image_gen_call_in_progress_event.py index b36ff5fa47..60726a22b4 100644 --- a/src/openai/types/responses/response_image_gen_call_in_progress_event.py +++ b/src/openai/types/responses/response_image_gen_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseImageGenCallInProgressEvent(BaseModel): + """Emitted when an image generation tool call is in progress.""" + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/src/openai/types/responses/response_image_gen_call_partial_image_event.py b/src/openai/types/responses/response_image_gen_call_partial_image_event.py index e69c95fb33..289d5d44c0 100644 --- a/src/openai/types/responses/response_image_gen_call_partial_image_event.py +++ b/src/openai/types/responses/response_image_gen_call_partial_image_event.py @@ -8,6 +8,8 @@ class ResponseImageGenCallPartialImageEvent(BaseModel): + """Emitted when a partial image is available during image generation streaming.""" + item_id: str """The unique identifier of the image generation item being processed.""" diff --git a/src/openai/types/responses/response_in_progress_event.py b/src/openai/types/responses/response_in_progress_event.py index b82e10b357..9d9bbd94b0 100644 --- a/src/openai/types/responses/response_in_progress_event.py +++ b/src/openai/types/responses/response_in_progress_event.py @@ -9,6 +9,8 @@ class ResponseInProgressEvent(BaseModel): + """Emitted when the response is in progress.""" + response: Response """The response that is in progress.""" diff --git a/src/openai/types/responses/response_incomplete_event.py b/src/openai/types/responses/response_incomplete_event.py index 63c969a428..ef99c5f0b2 100644 --- a/src/openai/types/responses/response_incomplete_event.py +++ b/src/openai/types/responses/response_incomplete_event.py @@ -9,6 +9,8 @@ class ResponseIncompleteEvent(BaseModel): + """An event that is emitted when a response finishes as incomplete.""" + response: Response """The response that was incomplete.""" diff --git 
a/src/openai/types/responses/response_input_audio.py b/src/openai/types/responses/response_input_audio.py index 9fef6de0fd..f362ba4133 100644 --- a/src/openai/types/responses/response_input_audio.py +++ b/src/openai/types/responses/response_input_audio.py @@ -16,6 +16,8 @@ class InputAudio(BaseModel): class ResponseInputAudio(BaseModel): + """An audio input to the model.""" + input_audio: InputAudio type: Literal["input_audio"] diff --git a/src/openai/types/responses/response_input_audio_param.py b/src/openai/types/responses/response_input_audio_param.py index f3fc913cca..0be935c54d 100644 --- a/src/openai/types/responses/response_input_audio_param.py +++ b/src/openai/types/responses/response_input_audio_param.py @@ -16,6 +16,8 @@ class InputAudio(TypedDict, total=False): class ResponseInputAudioParam(TypedDict, total=False): + """An audio input to the model.""" + input_audio: Required[InputAudio] type: Required[Literal["input_audio"]] diff --git a/src/openai/types/responses/response_input_file.py b/src/openai/types/responses/response_input_file.py index 1eecd6a2b6..3e5fb70c5f 100644 --- a/src/openai/types/responses/response_input_file.py +++ b/src/openai/types/responses/response_input_file.py @@ -9,6 +9,8 @@ class ResponseInputFile(BaseModel): + """A file input to the model.""" + type: Literal["input_file"] """The type of the input item. Always `input_file`.""" diff --git a/src/openai/types/responses/response_input_file_content.py b/src/openai/types/responses/response_input_file_content.py index d832bb0e26..f0dfef55d0 100644 --- a/src/openai/types/responses/response_input_file_content.py +++ b/src/openai/types/responses/response_input_file_content.py @@ -9,6 +9,8 @@ class ResponseInputFileContent(BaseModel): + """A file input to the model.""" + type: Literal["input_file"] """The type of the input item. Always `input_file`.""" diff --git a/src/openai/types/responses/response_input_file_content_param.py b/src/openai/types/responses/response_input_file_content_param.py index 71f7b3a281..376f6c7a45 100644 --- a/src/openai/types/responses/response_input_file_content_param.py +++ b/src/openai/types/responses/response_input_file_content_param.py @@ -9,6 +9,8 @@ class ResponseInputFileContentParam(TypedDict, total=False): + """A file input to the model.""" + type: Required[Literal["input_file"]] """The type of the input item. Always `input_file`.""" diff --git a/src/openai/types/responses/response_input_file_param.py b/src/openai/types/responses/response_input_file_param.py index 0b5f513ec6..8b5da20245 100644 --- a/src/openai/types/responses/response_input_file_param.py +++ b/src/openai/types/responses/response_input_file_param.py @@ -9,6 +9,8 @@ class ResponseInputFileParam(TypedDict, total=False): + """A file input to the model.""" + type: Required[Literal["input_file"]] """The type of the input item. Always `input_file`.""" diff --git a/src/openai/types/responses/response_input_image.py b/src/openai/types/responses/response_input_image.py index f2d760b25e..500bc4b346 100644 --- a/src/openai/types/responses/response_input_image.py +++ b/src/openai/types/responses/response_input_image.py @@ -9,6 +9,11 @@ class ResponseInputImage(BaseModel): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + """ + detail: Literal["low", "high", "auto"] """The detail level of the image to be sent to the model. 
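The input content models above (`input_file`, `input_audio`, and `input_image` with its `detail` levels) are combined inside a message's `content` list. A short sketch of a vision request; the image URL and model name are placeholders.

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5.1",  # placeholder model name
    input=[
        {
            "role": "user",
            "content": [
                {"type": "input_text", "text": "What is shown in this photo?"},
                {
                    "type": "input_image",
                    "image_url": "https://example.com/photo.jpg",  # placeholder URL
                    "detail": "auto",  # one of "low", "high", "auto" per ResponseInputImage
                },
            ],
        }
    ],
)

print(response.output_text)
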
diff --git a/src/openai/types/responses/response_input_image_content.py b/src/openai/types/responses/response_input_image_content.py index fb90cb57eb..e38bc28d5e 100644 --- a/src/openai/types/responses/response_input_image_content.py +++ b/src/openai/types/responses/response_input_image_content.py @@ -9,6 +9,11 @@ class ResponseInputImageContent(BaseModel): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision) + """ + type: Literal["input_image"] """The type of the input item. Always `input_image`.""" diff --git a/src/openai/types/responses/response_input_image_content_param.py b/src/openai/types/responses/response_input_image_content_param.py index c51509a3f3..c21f46d736 100644 --- a/src/openai/types/responses/response_input_image_content_param.py +++ b/src/openai/types/responses/response_input_image_content_param.py @@ -9,6 +9,11 @@ class ResponseInputImageContentParam(TypedDict, total=False): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision) + """ + type: Required[Literal["input_image"]] """The type of the input item. Always `input_image`.""" diff --git a/src/openai/types/responses/response_input_image_param.py b/src/openai/types/responses/response_input_image_param.py index bc17e4f1c2..fd8c1bd070 100644 --- a/src/openai/types/responses/response_input_image_param.py +++ b/src/openai/types/responses/response_input_image_param.py @@ -9,6 +9,11 @@ class ResponseInputImageParam(TypedDict, total=False): + """An image input to the model. + + Learn about [image inputs](https://platform.openai.com/docs/guides/vision). + """ + detail: Required[Literal["low", "high", "auto"]] """The detail level of the image to be sent to the model. diff --git a/src/openai/types/responses/response_input_item.py b/src/openai/types/responses/response_input_item.py index 103c8634ce..23eb2c8950 100644 --- a/src/openai/types/responses/response_input_item.py +++ b/src/openai/types/responses/response_input_item.py @@ -50,6 +50,12 @@ class Message(BaseModel): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
+ """ + content: ResponseInputMessageContentList """ A list of one or many input items to the model, containing different content @@ -71,6 +77,8 @@ class Message(BaseModel): class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): + """A pending safety check for the computer call.""" + id: str """The ID of the pending safety check.""" @@ -82,6 +90,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel): class ComputerCallOutput(BaseModel): + """The output of a computer tool call.""" + call_id: str """The ID of the computer tool call that produced the output.""" @@ -109,6 +119,8 @@ class ComputerCallOutput(BaseModel): class FunctionCallOutput(BaseModel): + """The output of a function tool call.""" + call_id: str """The unique ID of the function tool call generated by the model.""" @@ -133,6 +145,8 @@ class FunctionCallOutput(BaseModel): class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -147,6 +161,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -167,6 +183,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -184,6 +202,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -198,6 +218,8 @@ class LocalShellCallOutput(BaseModel): class ShellCallAction(BaseModel): + """The shell commands and limits that describe how to run the tool call.""" + commands: List[str] """Ordered shell commands for the execution environment to run.""" @@ -212,6 +234,8 @@ class ShellCallAction(BaseModel): class ShellCall(BaseModel): + """A tool representing a request to execute one or more shell commands.""" + action: ShellCallAction """The shell commands and limits that describe how to run the tool call.""" @@ -235,6 +259,8 @@ class ShellCall(BaseModel): class ShellCallOutput(BaseModel): + """The streamed output items emitted by a shell tool call.""" + call_id: str """The unique ID of the shell tool call generated by the model.""" @@ -261,6 +287,8 @@ class ShellCallOutput(BaseModel): class ApplyPatchCallOperationCreateFile(BaseModel): + """Instruction for creating a new file via the apply_patch tool.""" + diff: str """Unified diff content to apply when creating the file.""" @@ -272,6 +300,8 @@ class ApplyPatchCallOperationCreateFile(BaseModel): class ApplyPatchCallOperationDeleteFile(BaseModel): + """Instruction for deleting an existing file via the apply_patch tool.""" + path: str """Path of the file to delete relative to the workspace root.""" @@ -280,6 +310,8 @@ class ApplyPatchCallOperationDeleteFile(BaseModel): class ApplyPatchCallOperationUpdateFile(BaseModel): + """Instruction for updating an existing file via the apply_patch tool.""" + diff: str """Unified diff content to apply to the existing file.""" @@ -297,6 +329,10 @@ class ApplyPatchCallOperationUpdateFile(BaseModel): class ApplyPatchCall(BaseModel): + """ + A tool call representing a request to create, delete, or update files using diff patches. 
+ """ + call_id: str """The unique ID of the apply patch tool call generated by the model.""" @@ -320,6 +356,8 @@ class ApplyPatchCall(BaseModel): class ApplyPatchCallOutput(BaseModel): + """The streamed output emitted by an apply patch tool call.""" + call_id: str """The unique ID of the apply patch tool call generated by the model.""" @@ -343,6 +381,8 @@ class ApplyPatchCallOutput(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -357,6 +397,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -374,6 +416,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -391,6 +435,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + approval_request_id: str """The ID of the approval request being answered.""" @@ -408,6 +454,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -444,6 +492,8 @@ class McpCall(BaseModel): class ItemReference(BaseModel): + """An internal identifier for an item to reference.""" + id: str """The ID of the item to reference.""" diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 85d9f92b23..2c42b93021 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -51,6 +51,12 @@ class Message(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
+ """ + content: Required[ResponseInputMessageContentListParam] """ A list of one or many input items to the model, containing different content @@ -72,6 +78,8 @@ class Message(TypedDict, total=False): class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -83,6 +91,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): class ComputerCallOutput(TypedDict, total=False): + """The output of a computer tool call.""" + call_id: Required[str] """The ID of the computer tool call that produced the output.""" @@ -110,6 +120,8 @@ class ComputerCallOutput(TypedDict, total=False): class FunctionCallOutput(TypedDict, total=False): + """The output of a function tool call.""" + call_id: Required[str] """The unique ID of the function tool call generated by the model.""" @@ -134,6 +146,8 @@ class FunctionCallOutput(TypedDict, total=False): class ImageGenerationCall(TypedDict, total=False): + """An image generation request made by the model.""" + id: Required[str] """The unique ID of the image generation call.""" @@ -148,6 +162,8 @@ class ImageGenerationCall(TypedDict, total=False): class LocalShellCallAction(TypedDict, total=False): + """Execute a shell command on the server.""" + command: Required[SequenceNotStr[str]] """The command to run.""" @@ -168,6 +184,8 @@ class LocalShellCallAction(TypedDict, total=False): class LocalShellCall(TypedDict, total=False): + """A tool call to run a command on the local shell.""" + id: Required[str] """The unique ID of the local shell call.""" @@ -185,6 +203,8 @@ class LocalShellCall(TypedDict, total=False): class LocalShellCallOutput(TypedDict, total=False): + """The output of a local shell tool call.""" + id: Required[str] """The unique ID of the local shell tool call generated by the model.""" @@ -199,6 +219,8 @@ class LocalShellCallOutput(TypedDict, total=False): class ShellCallAction(TypedDict, total=False): + """The shell commands and limits that describe how to run the tool call.""" + commands: Required[SequenceNotStr[str]] """Ordered shell commands for the execution environment to run.""" @@ -213,6 +235,8 @@ class ShellCallAction(TypedDict, total=False): class ShellCall(TypedDict, total=False): + """A tool representing a request to execute one or more shell commands.""" + action: Required[ShellCallAction] """The shell commands and limits that describe how to run the tool call.""" @@ -236,6 +260,8 @@ class ShellCall(TypedDict, total=False): class ShellCallOutput(TypedDict, total=False): + """The streamed output items emitted by a shell tool call.""" + call_id: Required[str] """The unique ID of the shell tool call generated by the model.""" @@ -262,6 +288,8 @@ class ShellCallOutput(TypedDict, total=False): class ApplyPatchCallOperationCreateFile(TypedDict, total=False): + """Instruction for creating a new file via the apply_patch tool.""" + diff: Required[str] """Unified diff content to apply when creating the file.""" @@ -273,6 +301,8 @@ class ApplyPatchCallOperationCreateFile(TypedDict, total=False): class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): + """Instruction for deleting an existing file via the apply_patch tool.""" + path: Required[str] """Path of the file to delete relative to the workspace root.""" @@ -281,6 +311,8 @@ class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): + """Instruction for updating an 
existing file via the apply_patch tool.""" + diff: Required[str] """Unified diff content to apply to the existing file.""" @@ -297,6 +329,10 @@ class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): class ApplyPatchCall(TypedDict, total=False): + """ + A tool call representing a request to create, delete, or update files using diff patches. + """ + call_id: Required[str] """The unique ID of the apply patch tool call generated by the model.""" @@ -320,6 +356,8 @@ class ApplyPatchCall(TypedDict, total=False): class ApplyPatchCallOutput(TypedDict, total=False): + """The streamed output emitted by an apply patch tool call.""" + call_id: Required[str] """The unique ID of the apply patch tool call generated by the model.""" @@ -343,6 +381,8 @@ class ApplyPatchCallOutput(TypedDict, total=False): class McpListToolsTool(TypedDict, total=False): + """A tool available on an MCP server.""" + input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -357,6 +397,8 @@ class McpListToolsTool(TypedDict, total=False): class McpListTools(TypedDict, total=False): + """A list of tools available on an MCP server.""" + id: Required[str] """The unique ID of the list.""" @@ -374,6 +416,8 @@ class McpListTools(TypedDict, total=False): class McpApprovalRequest(TypedDict, total=False): + """A request for human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" @@ -391,6 +435,8 @@ class McpApprovalRequest(TypedDict, total=False): class McpApprovalResponse(TypedDict, total=False): + """A response to an MCP approval request.""" + approval_request_id: Required[str] """The ID of the approval request being answered.""" @@ -408,6 +454,8 @@ class McpApprovalResponse(TypedDict, total=False): class McpCall(TypedDict, total=False): + """An invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" @@ -444,6 +492,8 @@ class McpCall(TypedDict, total=False): class ItemReference(TypedDict, total=False): + """An internal identifier for an item to reference.""" + id: Required[str] """The ID of the item to reference.""" diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py index bbd8e6af79..c2d12c0ab4 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -52,6 +52,12 @@ class Message(TypedDict, total=False): + """ + A message input to the model with a role indicating instruction following + hierarchy. Instructions given with the `developer` or `system` role take + precedence over instructions given with the `user` role. 
+ """ + content: Required[ResponseInputMessageContentListParam] """ A list of one or many input items to the model, containing different content @@ -73,6 +79,8 @@ class Message(TypedDict, total=False): class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): + """A pending safety check for the computer call.""" + id: Required[str] """The ID of the pending safety check.""" @@ -84,6 +92,8 @@ class ComputerCallOutputAcknowledgedSafetyCheck(TypedDict, total=False): class ComputerCallOutput(TypedDict, total=False): + """The output of a computer tool call.""" + call_id: Required[str] """The ID of the computer tool call that produced the output.""" @@ -111,6 +121,8 @@ class ComputerCallOutput(TypedDict, total=False): class FunctionCallOutput(TypedDict, total=False): + """The output of a function tool call.""" + call_id: Required[str] """The unique ID of the function tool call generated by the model.""" @@ -135,6 +147,8 @@ class FunctionCallOutput(TypedDict, total=False): class ImageGenerationCall(TypedDict, total=False): + """An image generation request made by the model.""" + id: Required[str] """The unique ID of the image generation call.""" @@ -149,6 +163,8 @@ class ImageGenerationCall(TypedDict, total=False): class LocalShellCallAction(TypedDict, total=False): + """Execute a shell command on the server.""" + command: Required[SequenceNotStr[str]] """The command to run.""" @@ -169,6 +185,8 @@ class LocalShellCallAction(TypedDict, total=False): class LocalShellCall(TypedDict, total=False): + """A tool call to run a command on the local shell.""" + id: Required[str] """The unique ID of the local shell call.""" @@ -186,6 +204,8 @@ class LocalShellCall(TypedDict, total=False): class LocalShellCallOutput(TypedDict, total=False): + """The output of a local shell tool call.""" + id: Required[str] """The unique ID of the local shell tool call generated by the model.""" @@ -200,6 +220,8 @@ class LocalShellCallOutput(TypedDict, total=False): class ShellCallAction(TypedDict, total=False): + """The shell commands and limits that describe how to run the tool call.""" + commands: Required[SequenceNotStr[str]] """Ordered shell commands for the execution environment to run.""" @@ -214,6 +236,8 @@ class ShellCallAction(TypedDict, total=False): class ShellCall(TypedDict, total=False): + """A tool representing a request to execute one or more shell commands.""" + action: Required[ShellCallAction] """The shell commands and limits that describe how to run the tool call.""" @@ -237,6 +261,8 @@ class ShellCall(TypedDict, total=False): class ShellCallOutput(TypedDict, total=False): + """The streamed output items emitted by a shell tool call.""" + call_id: Required[str] """The unique ID of the shell tool call generated by the model.""" @@ -263,6 +289,8 @@ class ShellCallOutput(TypedDict, total=False): class ApplyPatchCallOperationCreateFile(TypedDict, total=False): + """Instruction for creating a new file via the apply_patch tool.""" + diff: Required[str] """Unified diff content to apply when creating the file.""" @@ -274,6 +302,8 @@ class ApplyPatchCallOperationCreateFile(TypedDict, total=False): class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): + """Instruction for deleting an existing file via the apply_patch tool.""" + path: Required[str] """Path of the file to delete relative to the workspace root.""" @@ -282,6 +312,8 @@ class ApplyPatchCallOperationDeleteFile(TypedDict, total=False): class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): + """Instruction for updating an 
existing file via the apply_patch tool.""" + diff: Required[str] """Unified diff content to apply to the existing file.""" @@ -298,6 +330,10 @@ class ApplyPatchCallOperationUpdateFile(TypedDict, total=False): class ApplyPatchCall(TypedDict, total=False): + """ + A tool call representing a request to create, delete, or update files using diff patches. + """ + call_id: Required[str] """The unique ID of the apply patch tool call generated by the model.""" @@ -321,6 +357,8 @@ class ApplyPatchCall(TypedDict, total=False): class ApplyPatchCallOutput(TypedDict, total=False): + """The streamed output emitted by an apply patch tool call.""" + call_id: Required[str] """The unique ID of the apply patch tool call generated by the model.""" @@ -344,6 +382,8 @@ class ApplyPatchCallOutput(TypedDict, total=False): class McpListToolsTool(TypedDict, total=False): + """A tool available on an MCP server.""" + input_schema: Required[object] """The JSON schema describing the tool's input.""" @@ -358,6 +398,8 @@ class McpListToolsTool(TypedDict, total=False): class McpListTools(TypedDict, total=False): + """A list of tools available on an MCP server.""" + id: Required[str] """The unique ID of the list.""" @@ -375,6 +417,8 @@ class McpListTools(TypedDict, total=False): class McpApprovalRequest(TypedDict, total=False): + """A request for human approval of a tool invocation.""" + id: Required[str] """The unique ID of the approval request.""" @@ -392,6 +436,8 @@ class McpApprovalRequest(TypedDict, total=False): class McpApprovalResponse(TypedDict, total=False): + """A response to an MCP approval request.""" + approval_request_id: Required[str] """The ID of the approval request being answered.""" @@ -409,6 +455,8 @@ class McpApprovalResponse(TypedDict, total=False): class McpCall(TypedDict, total=False): + """An invocation of a tool on an MCP server.""" + id: Required[str] """The unique ID of the tool call.""" @@ -445,6 +493,8 @@ class McpCall(TypedDict, total=False): class ItemReference(TypedDict, total=False): + """An internal identifier for an item to reference.""" + id: Required[str] """The ID of the item to reference.""" diff --git a/src/openai/types/responses/response_input_text.py b/src/openai/types/responses/response_input_text.py index ba8d1ea18b..1e06ba71f3 100644 --- a/src/openai/types/responses/response_input_text.py +++ b/src/openai/types/responses/response_input_text.py @@ -8,6 +8,8 @@ class ResponseInputText(BaseModel): + """A text input to the model.""" + text: str """The text input to the model.""" diff --git a/src/openai/types/responses/response_input_text_content.py b/src/openai/types/responses/response_input_text_content.py index 2cce849855..66dbb8b0d0 100644 --- a/src/openai/types/responses/response_input_text_content.py +++ b/src/openai/types/responses/response_input_text_content.py @@ -8,6 +8,8 @@ class ResponseInputTextContent(BaseModel): + """A text input to the model.""" + text: str """The text input to the model.""" diff --git a/src/openai/types/responses/response_input_text_content_param.py b/src/openai/types/responses/response_input_text_content_param.py index 85b57df2bd..013f22d0df 100644 --- a/src/openai/types/responses/response_input_text_content_param.py +++ b/src/openai/types/responses/response_input_text_content_param.py @@ -8,6 +8,8 @@ class ResponseInputTextContentParam(TypedDict, total=False): + """A text input to the model.""" + text: Required[str] """The text input to the model.""" diff --git a/src/openai/types/responses/response_input_text_param.py 
b/src/openai/types/responses/response_input_text_param.py index f2ba834082..e1a2976e2e 100644 --- a/src/openai/types/responses/response_input_text_param.py +++ b/src/openai/types/responses/response_input_text_param.py @@ -8,6 +8,8 @@ class ResponseInputTextParam(TypedDict, total=False): + """A text input to the model.""" + text: Required[str] """The text input to the model.""" diff --git a/src/openai/types/responses/response_item.py b/src/openai/types/responses/response_item.py index 5ae2405988..3dba681d53 100644 --- a/src/openai/types/responses/response_item.py +++ b/src/openai/types/responses/response_item.py @@ -34,6 +34,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -48,6 +50,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -68,6 +72,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -85,6 +91,8 @@ class LocalShellCall(BaseModel): class LocalShellCallOutput(BaseModel): + """The output of a local shell tool call.""" + id: str """The unique ID of the local shell tool call generated by the model.""" @@ -99,6 +107,8 @@ class LocalShellCallOutput(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -113,6 +123,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -130,6 +142,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" @@ -147,6 +161,8 @@ class McpApprovalRequest(BaseModel): class McpApprovalResponse(BaseModel): + """A response to an MCP approval request.""" + id: str """The unique ID of the approval response""" @@ -164,6 +180,8 @@ class McpApprovalResponse(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" diff --git a/src/openai/types/responses/response_item_list.py b/src/openai/types/responses/response_item_list.py index b43eacdb51..e2b5a1a961 100644 --- a/src/openai/types/responses/response_item_list.py +++ b/src/openai/types/responses/response_item_list.py @@ -10,6 +10,8 @@ class ResponseItemList(BaseModel): + """A list of Response items.""" + data: List[ResponseItem] """A list of items used to generate this response.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py index 54eff38373..303ef494a3 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -8,6 +8,10 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel): + """ + Emitted when there is a delta (partial update) to the arguments of an MCP tool call. 
+ """ + delta: str """ A JSON string containing the partial update to the arguments for the MCP tool diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py index 59ce9bc944..59e71be77c 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_done_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel): + """Emitted when the arguments for an MCP tool call are finalized.""" + arguments: str """A JSON string containing the finalized arguments for the MCP tool call.""" diff --git a/src/openai/types/responses/response_mcp_call_completed_event.py b/src/openai/types/responses/response_mcp_call_completed_event.py index 2fee5dff81..bee54d4039 100644 --- a/src/openai/types/responses/response_mcp_call_completed_event.py +++ b/src/openai/types/responses/response_mcp_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallCompletedEvent(BaseModel): + """Emitted when an MCP tool call has completed successfully.""" + item_id: str """The ID of the MCP tool call item that completed.""" diff --git a/src/openai/types/responses/response_mcp_call_failed_event.py b/src/openai/types/responses/response_mcp_call_failed_event.py index ca41ab7159..cb3130b155 100644 --- a/src/openai/types/responses/response_mcp_call_failed_event.py +++ b/src/openai/types/responses/response_mcp_call_failed_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallFailedEvent(BaseModel): + """Emitted when an MCP tool call has failed.""" + item_id: str """The ID of the MCP tool call item that failed.""" diff --git a/src/openai/types/responses/response_mcp_call_in_progress_event.py b/src/openai/types/responses/response_mcp_call_in_progress_event.py index 401c316851..7cf6a1decf 100644 --- a/src/openai/types/responses/response_mcp_call_in_progress_event.py +++ b/src/openai/types/responses/response_mcp_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseMcpCallInProgressEvent(BaseModel): + """Emitted when an MCP tool call is in progress.""" + item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_completed_event.py b/src/openai/types/responses/response_mcp_list_tools_completed_event.py index c60ad88ee5..685ba59c4d 100644 --- a/src/openai/types/responses/response_mcp_list_tools_completed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_completed_event.py @@ -8,6 +8,8 @@ class ResponseMcpListToolsCompletedEvent(BaseModel): + """Emitted when the list of available MCP tools has been successfully retrieved.""" + item_id: str """The ID of the MCP tool call item that produced this output.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_failed_event.py b/src/openai/types/responses/response_mcp_list_tools_failed_event.py index 0c966c447a..c5fa54d231 100644 --- a/src/openai/types/responses/response_mcp_list_tools_failed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_failed_event.py @@ -8,6 +8,8 @@ class ResponseMcpListToolsFailedEvent(BaseModel): + """Emitted when the attempt to list available MCP tools has failed.""" + item_id: str """The ID of the MCP tool call item that failed.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py index f451db1ed5..403fdbdeb3 100644 --- 
a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py @@ -8,6 +8,10 @@ class ResponseMcpListToolsInProgressEvent(BaseModel): + """ + Emitted when the system is in the process of retrieving the list of available MCP tools. + """ + item_id: str """The ID of the MCP tool call item that is being processed.""" diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py index f0a66e1836..990f947b90 100644 --- a/src/openai/types/responses/response_output_item.py +++ b/src/openai/types/responses/response_output_item.py @@ -32,6 +32,8 @@ class ImageGenerationCall(BaseModel): + """An image generation request made by the model.""" + id: str """The unique ID of the image generation call.""" @@ -46,6 +48,8 @@ class ImageGenerationCall(BaseModel): class LocalShellCallAction(BaseModel): + """Execute a shell command on the server.""" + command: List[str] """The command to run.""" @@ -66,6 +70,8 @@ class LocalShellCallAction(BaseModel): class LocalShellCall(BaseModel): + """A tool call to run a command on the local shell.""" + id: str """The unique ID of the local shell call.""" @@ -83,6 +89,8 @@ class LocalShellCall(BaseModel): class McpCall(BaseModel): + """An invocation of a tool on an MCP server.""" + id: str """The unique ID of the tool call.""" @@ -119,6 +127,8 @@ class McpCall(BaseModel): class McpListToolsTool(BaseModel): + """A tool available on an MCP server.""" + input_schema: object """The JSON schema describing the tool's input.""" @@ -133,6 +143,8 @@ class McpListToolsTool(BaseModel): class McpListTools(BaseModel): + """A list of tools available on an MCP server.""" + id: str """The unique ID of the list.""" @@ -150,6 +162,8 @@ class McpListTools(BaseModel): class McpApprovalRequest(BaseModel): + """A request for human approval of a tool invocation.""" + id: str """The unique ID of the approval request.""" diff --git a/src/openai/types/responses/response_output_item_added_event.py b/src/openai/types/responses/response_output_item_added_event.py index 7cd2a3946d..a42f6281e3 100644 --- a/src/openai/types/responses/response_output_item_added_event.py +++ b/src/openai/types/responses/response_output_item_added_event.py @@ -9,6 +9,8 @@ class ResponseOutputItemAddedEvent(BaseModel): + """Emitted when a new output item is added.""" + item: ResponseOutputItem """The output item that was added.""" diff --git a/src/openai/types/responses/response_output_item_done_event.py b/src/openai/types/responses/response_output_item_done_event.py index 37d3694cf7..50b99da569 100644 --- a/src/openai/types/responses/response_output_item_done_event.py +++ b/src/openai/types/responses/response_output_item_done_event.py @@ -9,6 +9,8 @@ class ResponseOutputItemDoneEvent(BaseModel): + """Emitted when an output item is marked done.""" + item: ResponseOutputItem """The output item that was marked done.""" diff --git a/src/openai/types/responses/response_output_message.py b/src/openai/types/responses/response_output_message.py index 3864aa2111..9c1d1f97fc 100644 --- a/src/openai/types/responses/response_output_message.py +++ b/src/openai/types/responses/response_output_message.py @@ -14,6 +14,8 @@ class ResponseOutputMessage(BaseModel): + """An output message from the model.""" + id: str """The unique ID of the output message.""" diff --git a/src/openai/types/responses/response_output_message_param.py b/src/openai/types/responses/response_output_message_param.py index 
46cbbd20de..9c2f5246a1 100644 --- a/src/openai/types/responses/response_output_message_param.py +++ b/src/openai/types/responses/response_output_message_param.py @@ -14,6 +14,8 @@ class ResponseOutputMessageParam(TypedDict, total=False): + """An output message from the model.""" + id: Required[str] """The unique ID of the output message.""" diff --git a/src/openai/types/responses/response_output_refusal.py b/src/openai/types/responses/response_output_refusal.py index 685c8722a6..6bce26af74 100644 --- a/src/openai/types/responses/response_output_refusal.py +++ b/src/openai/types/responses/response_output_refusal.py @@ -8,6 +8,8 @@ class ResponseOutputRefusal(BaseModel): + """A refusal from the model.""" + refusal: str """The refusal explanation from the model.""" diff --git a/src/openai/types/responses/response_output_refusal_param.py b/src/openai/types/responses/response_output_refusal_param.py index 54cfaf0791..02bdfdcf4f 100644 --- a/src/openai/types/responses/response_output_refusal_param.py +++ b/src/openai/types/responses/response_output_refusal_param.py @@ -8,6 +8,8 @@ class ResponseOutputRefusalParam(TypedDict, total=False): + """A refusal from the model.""" + refusal: Required[str] """The refusal explanation from the model.""" diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py index aa97b629f0..2386fcb3c0 100644 --- a/src/openai/types/responses/response_output_text.py +++ b/src/openai/types/responses/response_output_text.py @@ -19,6 +19,8 @@ class AnnotationFileCitation(BaseModel): + """A citation to a file.""" + file_id: str """The ID of the file.""" @@ -33,6 +35,8 @@ class AnnotationFileCitation(BaseModel): class AnnotationURLCitation(BaseModel): + """A citation for a web resource used to generate a model response.""" + end_index: int """The index of the last character of the URL citation in the message.""" @@ -50,6 +54,8 @@ class AnnotationURLCitation(BaseModel): class AnnotationContainerFileCitation(BaseModel): + """A citation for a container file used to generate a model response.""" + container_id: str """The ID of the container file.""" @@ -70,6 +76,8 @@ class AnnotationContainerFileCitation(BaseModel): class AnnotationFilePath(BaseModel): + """A path to a file.""" + file_id: str """The ID of the file.""" @@ -87,6 +95,8 @@ class AnnotationFilePath(BaseModel): class LogprobTopLogprob(BaseModel): + """The top log probability of a token.""" + token: str bytes: List[int] @@ -95,6 +105,8 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """The log probability of a token.""" + token: str bytes: List[int] @@ -105,6 +117,8 @@ class Logprob(BaseModel): class ResponseOutputText(BaseModel): + """A text output from the model.""" + annotations: List[Annotation] """The annotations of the text output.""" diff --git a/src/openai/types/responses/response_output_text_annotation_added_event.py b/src/openai/types/responses/response_output_text_annotation_added_event.py index 62d8f72863..b9dc262150 100644 --- a/src/openai/types/responses/response_output_text_annotation_added_event.py +++ b/src/openai/types/responses/response_output_text_annotation_added_event.py @@ -8,6 +8,8 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel): + """Emitted when an annotation is added to output text content.""" + annotation: object """The annotation object being added. 
(See annotation schema for details.)""" diff --git a/src/openai/types/responses/response_output_text_param.py b/src/openai/types/responses/response_output_text_param.py index 63d2d394a8..bc30fbcd8e 100644 --- a/src/openai/types/responses/response_output_text_param.py +++ b/src/openai/types/responses/response_output_text_param.py @@ -18,6 +18,8 @@ class AnnotationFileCitation(TypedDict, total=False): + """A citation to a file.""" + file_id: Required[str] """The ID of the file.""" @@ -32,6 +34,8 @@ class AnnotationFileCitation(TypedDict, total=False): class AnnotationURLCitation(TypedDict, total=False): + """A citation for a web resource used to generate a model response.""" + end_index: Required[int] """The index of the last character of the URL citation in the message.""" @@ -49,6 +53,8 @@ class AnnotationURLCitation(TypedDict, total=False): class AnnotationContainerFileCitation(TypedDict, total=False): + """A citation for a container file used to generate a model response.""" + container_id: Required[str] """The ID of the container file.""" @@ -69,6 +75,8 @@ class AnnotationContainerFileCitation(TypedDict, total=False): class AnnotationFilePath(TypedDict, total=False): + """A path to a file.""" + file_id: Required[str] """The ID of the file.""" @@ -85,6 +93,8 @@ class AnnotationFilePath(TypedDict, total=False): class LogprobTopLogprob(TypedDict, total=False): + """The top log probability of a token.""" + token: Required[str] bytes: Required[Iterable[int]] @@ -93,6 +103,8 @@ class LogprobTopLogprob(TypedDict, total=False): class Logprob(TypedDict, total=False): + """The log probability of a token.""" + token: Required[str] bytes: Required[Iterable[int]] @@ -103,6 +115,8 @@ class Logprob(TypedDict, total=False): class ResponseOutputTextParam(TypedDict, total=False): + """A text output from the model.""" + annotations: Required[Iterable[Annotation]] """The annotations of the text output.""" diff --git a/src/openai/types/responses/response_prompt.py b/src/openai/types/responses/response_prompt.py index 537c2f8fbc..e3acacf63a 100644 --- a/src/openai/types/responses/response_prompt.py +++ b/src/openai/types/responses/response_prompt.py @@ -14,6 +14,11 @@ class ResponsePrompt(BaseModel): + """ + Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + """ + id: str """The unique identifier of the prompt template to use.""" diff --git a/src/openai/types/responses/response_prompt_param.py b/src/openai/types/responses/response_prompt_param.py index d935fa5191..f9a28b62a2 100644 --- a/src/openai/types/responses/response_prompt_param.py +++ b/src/openai/types/responses/response_prompt_param.py @@ -15,6 +15,11 @@ class ResponsePromptParam(TypedDict, total=False): + """ + Reference to a prompt template and its variables. + [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
+ """ + id: Required[str] """The unique identifier of the prompt template to use.""" diff --git a/src/openai/types/responses/response_queued_event.py b/src/openai/types/responses/response_queued_event.py index 40257408a4..a554215275 100644 --- a/src/openai/types/responses/response_queued_event.py +++ b/src/openai/types/responses/response_queued_event.py @@ -9,6 +9,8 @@ class ResponseQueuedEvent(BaseModel): + """Emitted when a response is queued and waiting to be processed.""" + response: Response """The full response object that is queued.""" diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py index fc582cf7c5..1a22eb60cc 100644 --- a/src/openai/types/responses/response_reasoning_item.py +++ b/src/openai/types/responses/response_reasoning_item.py @@ -9,6 +9,8 @@ class Summary(BaseModel): + """A summary text from the model.""" + text: str """A summary of the reasoning output from the model so far.""" @@ -17,6 +19,8 @@ class Summary(BaseModel): class Content(BaseModel): + """Reasoning text from the model.""" + text: str """The reasoning text from the model.""" @@ -25,6 +29,13 @@ class Content(BaseModel): class ResponseReasoningItem(BaseModel): + """ + A description of the chain of thought used by a reasoning model while generating + a response. Be sure to include these items in your `input` to the Responses API + for subsequent turns of a conversation if you are manually + [managing context](https://platform.openai.com/docs/guides/conversation-state). + """ + id: str """The unique identifier of the reasoning content.""" diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py index 56e88ba28d..40320b72e1 100644 --- a/src/openai/types/responses/response_reasoning_item_param.py +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -9,6 +9,8 @@ class Summary(TypedDict, total=False): + """A summary text from the model.""" + text: Required[str] """A summary of the reasoning output from the model so far.""" @@ -17,6 +19,8 @@ class Summary(TypedDict, total=False): class Content(TypedDict, total=False): + """Reasoning text from the model.""" + text: Required[str] """The reasoning text from the model.""" @@ -25,6 +29,13 @@ class Content(TypedDict, total=False): class ResponseReasoningItemParam(TypedDict, total=False): + """ + A description of the chain of thought used by a reasoning model while generating + a response. Be sure to include these items in your `input` to the Responses API + for subsequent turns of a conversation if you are manually + [managing context](https://platform.openai.com/docs/guides/conversation-state). 
+ """ + id: Required[str] """The unique identifier of the reasoning content.""" diff --git a/src/openai/types/responses/response_reasoning_summary_part_added_event.py b/src/openai/types/responses/response_reasoning_summary_part_added_event.py index dc755b253a..e4b0f34231 100644 --- a/src/openai/types/responses/response_reasoning_summary_part_added_event.py +++ b/src/openai/types/responses/response_reasoning_summary_part_added_event.py @@ -8,6 +8,8 @@ class Part(BaseModel): + """The summary part that was added.""" + text: str """The text of the summary part.""" @@ -16,6 +18,8 @@ class Part(BaseModel): class ResponseReasoningSummaryPartAddedEvent(BaseModel): + """Emitted when a new reasoning summary part is added.""" + item_id: str """The ID of the item this summary part is associated with.""" diff --git a/src/openai/types/responses/response_reasoning_summary_part_done_event.py b/src/openai/types/responses/response_reasoning_summary_part_done_event.py index 7cc0b56d66..48f3f684e8 100644 --- a/src/openai/types/responses/response_reasoning_summary_part_done_event.py +++ b/src/openai/types/responses/response_reasoning_summary_part_done_event.py @@ -8,6 +8,8 @@ class Part(BaseModel): + """The completed summary part.""" + text: str """The text of the summary part.""" @@ -16,6 +18,8 @@ class Part(BaseModel): class ResponseReasoningSummaryPartDoneEvent(BaseModel): + """Emitted when a reasoning summary part is completed.""" + item_id: str """The ID of the item this summary part is associated with.""" diff --git a/src/openai/types/responses/response_reasoning_summary_text_delta_event.py b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py index 96652991b6..84bcf039c4 100644 --- a/src/openai/types/responses/response_reasoning_summary_text_delta_event.py +++ b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseReasoningSummaryTextDeltaEvent(BaseModel): + """Emitted when a delta is added to a reasoning summary text.""" + delta: str """The text delta that was added to the summary.""" diff --git a/src/openai/types/responses/response_reasoning_summary_text_done_event.py b/src/openai/types/responses/response_reasoning_summary_text_done_event.py index b35b82316a..244d001b75 100644 --- a/src/openai/types/responses/response_reasoning_summary_text_done_event.py +++ b/src/openai/types/responses/response_reasoning_summary_text_done_event.py @@ -8,6 +8,8 @@ class ResponseReasoningSummaryTextDoneEvent(BaseModel): + """Emitted when a reasoning summary text is completed.""" + item_id: str """The ID of the item this summary text is associated with.""" diff --git a/src/openai/types/responses/response_reasoning_text_delta_event.py b/src/openai/types/responses/response_reasoning_text_delta_event.py index e1df893bac..0e05226c94 100644 --- a/src/openai/types/responses/response_reasoning_text_delta_event.py +++ b/src/openai/types/responses/response_reasoning_text_delta_event.py @@ -8,6 +8,8 @@ class ResponseReasoningTextDeltaEvent(BaseModel): + """Emitted when a delta is added to a reasoning text.""" + content_index: int """The index of the reasoning content part this delta is associated with.""" diff --git a/src/openai/types/responses/response_reasoning_text_done_event.py b/src/openai/types/responses/response_reasoning_text_done_event.py index d22d984e47..40e3f4701c 100644 --- a/src/openai/types/responses/response_reasoning_text_done_event.py +++ b/src/openai/types/responses/response_reasoning_text_done_event.py @@ -8,6 +8,8 @@ class 
ResponseReasoningTextDoneEvent(BaseModel): + """Emitted when a reasoning text is completed.""" + content_index: int """The index of the reasoning content part.""" diff --git a/src/openai/types/responses/response_refusal_delta_event.py b/src/openai/types/responses/response_refusal_delta_event.py index 03c903ed28..e3933b7dda 100644 --- a/src/openai/types/responses/response_refusal_delta_event.py +++ b/src/openai/types/responses/response_refusal_delta_event.py @@ -8,6 +8,8 @@ class ResponseRefusalDeltaEvent(BaseModel): + """Emitted when there is a partial refusal text.""" + content_index: int """The index of the content part that the refusal text is added to.""" diff --git a/src/openai/types/responses/response_refusal_done_event.py b/src/openai/types/responses/response_refusal_done_event.py index 61fd51aab0..91adeb6331 100644 --- a/src/openai/types/responses/response_refusal_done_event.py +++ b/src/openai/types/responses/response_refusal_done_event.py @@ -8,6 +8,8 @@ class ResponseRefusalDoneEvent(BaseModel): + """Emitted when refusal text is finalized.""" + content_index: int """The index of the content part that the refusal text is finalized.""" diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py index c53546da6d..fbf4da0b03 100644 --- a/src/openai/types/responses/response_text_config.py +++ b/src/openai/types/responses/response_text_config.py @@ -10,6 +10,14 @@ class ResponseTextConfig(BaseModel): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: Optional[ResponseFormatTextConfig] = None """An object specifying the format that the model must output. diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py index 1229fce35b..9cd54765b0 100644 --- a/src/openai/types/responses/response_text_config_param.py +++ b/src/openai/types/responses/response_text_config_param.py @@ -11,6 +11,14 @@ class ResponseTextConfigParam(TypedDict, total=False): + """Configuration options for a text response from the model. + + Can be plain + text or structured JSON data. Learn more: + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + format: ResponseFormatTextConfigParam """An object specifying the format that the model must output. diff --git a/src/openai/types/responses/response_text_delta_event.py b/src/openai/types/responses/response_text_delta_event.py index b5379b7ac3..4f802abfd2 100644 --- a/src/openai/types/responses/response_text_delta_event.py +++ b/src/openai/types/responses/response_text_delta_event.py @@ -17,6 +17,12 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """ + A logprob is the logarithmic probability that the model assigns to producing + a particular token at a given position in the sequence. Less-negative (higher) + logprob values indicate greater model confidence in that token choice. 
+ """ + token: str """A possible text token.""" @@ -28,6 +34,8 @@ class Logprob(BaseModel): class ResponseTextDeltaEvent(BaseModel): + """Emitted when there is an additional text delta.""" + content_index: int """The index of the content part that the text delta was added to.""" diff --git a/src/openai/types/responses/response_text_done_event.py b/src/openai/types/responses/response_text_done_event.py index d9776a1844..75bd479870 100644 --- a/src/openai/types/responses/response_text_done_event.py +++ b/src/openai/types/responses/response_text_done_event.py @@ -17,6 +17,12 @@ class LogprobTopLogprob(BaseModel): class Logprob(BaseModel): + """ + A logprob is the logarithmic probability that the model assigns to producing + a particular token at a given position in the sequence. Less-negative (higher) + logprob values indicate greater model confidence in that token choice. + """ + token: str """A possible text token.""" @@ -28,6 +34,8 @@ class Logprob(BaseModel): class ResponseTextDoneEvent(BaseModel): + """Emitted when text content is finalized.""" + content_index: int """The index of the content part that the text content is finalized.""" diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py index 52b93ac578..d4b739c598 100644 --- a/src/openai/types/responses/response_usage.py +++ b/src/openai/types/responses/response_usage.py @@ -6,6 +6,8 @@ class InputTokensDetails(BaseModel): + """A detailed breakdown of the input tokens.""" + cached_tokens: int """The number of tokens that were retrieved from the cache. @@ -14,11 +16,18 @@ class InputTokensDetails(BaseModel): class OutputTokensDetails(BaseModel): + """A detailed breakdown of the output tokens.""" + reasoning_tokens: int """The number of reasoning tokens.""" class ResponseUsage(BaseModel): + """ + Represents token usage details including input tokens, output tokens, + a breakdown of output tokens, and the total tokens used. 
+ """ + input_tokens: int """The number of input tokens.""" diff --git a/src/openai/types/responses/response_web_search_call_completed_event.py b/src/openai/types/responses/response_web_search_call_completed_event.py index 497f7bfe35..5aa7afe609 100644 --- a/src/openai/types/responses/response_web_search_call_completed_event.py +++ b/src/openai/types/responses/response_web_search_call_completed_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallCompletedEvent(BaseModel): + """Emitted when a web search call is completed.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/src/openai/types/responses/response_web_search_call_in_progress_event.py b/src/openai/types/responses/response_web_search_call_in_progress_event.py index da8b3fe404..73b30ff5c0 100644 --- a/src/openai/types/responses/response_web_search_call_in_progress_event.py +++ b/src/openai/types/responses/response_web_search_call_in_progress_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallInProgressEvent(BaseModel): + """Emitted when a web search call is initiated.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/src/openai/types/responses/response_web_search_call_searching_event.py b/src/openai/types/responses/response_web_search_call_searching_event.py index 42df9cb298..959c095187 100644 --- a/src/openai/types/responses/response_web_search_call_searching_event.py +++ b/src/openai/types/responses/response_web_search_call_searching_event.py @@ -8,6 +8,8 @@ class ResponseWebSearchCallSearchingEvent(BaseModel): + """Emitted when a web search call is executing.""" + item_id: str """Unique ID for the output item associated with the web search call.""" diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index bb32d4e1ec..1f1ef12358 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -38,6 +38,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -54,6 +56,8 @@ class McpAllowedToolsMcpToolFilter(BaseModel): class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -67,6 +71,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + """A filter object to specify which tools are allowed.""" + read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -80,6 +86,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None """A filter object to specify which tools are allowed.""" @@ -91,6 +104,11 @@ class McpRequireApprovalMcpToolApprovalFilter(BaseModel): class Mcp(BaseModel): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: str """A label for this MCP server, used to identify it in tool calls.""" @@ -157,6 +175,11 @@ class Mcp(BaseModel): class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): + """Configuration for a code interpreter container. + + Optionally specify the IDs of the files to run the code on. + """ + type: Literal["auto"] """Always `auto`.""" @@ -170,6 +193,8 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): class CodeInterpreter(BaseModel): + """A tool that runs Python code to help generate a response to a prompt.""" + container: CodeInterpreterContainer """The code interpreter container. @@ -182,6 +207,12 @@ class CodeInterpreter(BaseModel): class ImageGenerationInputImageMask(BaseModel): + """Optional mask for inpainting. + + Contains `image_url` + (string, optional) and `file_id` (string, optional). + """ + file_id: Optional[str] = None """File ID for the mask image.""" @@ -190,6 +221,8 @@ class ImageGenerationInputImageMask(BaseModel): class ImageGeneration(BaseModel): + """A tool that generates images using a model like `gpt-image-1`.""" + type: Literal["image_generation"] """The type of the image generation tool. Always `image_generation`.""" @@ -248,6 +281,8 @@ class ImageGeneration(BaseModel): class LocalShell(BaseModel): + """A tool that allows the model to execute shell commands in a local environment.""" + type: Literal["local_shell"] """The type of the local shell tool. Always `local_shell`.""" diff --git a/src/openai/types/responses/tool_choice_allowed.py b/src/openai/types/responses/tool_choice_allowed.py index d7921dcb2a..400e170a57 100644 --- a/src/openai/types/responses/tool_choice_allowed.py +++ b/src/openai/types/responses/tool_choice_allowed.py @@ -9,6 +9,8 @@ class ToolChoiceAllowed(BaseModel): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Literal["auto", "required"] """Constrains the tools available to the model to a pre-defined set. diff --git a/src/openai/types/responses/tool_choice_allowed_param.py b/src/openai/types/responses/tool_choice_allowed_param.py index 0712cab43b..cb316c1560 100644 --- a/src/openai/types/responses/tool_choice_allowed_param.py +++ b/src/openai/types/responses/tool_choice_allowed_param.py @@ -9,6 +9,8 @@ class ToolChoiceAllowedParam(TypedDict, total=False): + """Constrains the tools available to the model to a pre-defined set.""" + mode: Required[Literal["auto", "required"]] """Constrains the tools available to the model to a pre-defined set. diff --git a/src/openai/types/responses/tool_choice_apply_patch.py b/src/openai/types/responses/tool_choice_apply_patch.py index 7f815aa1a1..ef5a5e8bfa 100644 --- a/src/openai/types/responses/tool_choice_apply_patch.py +++ b/src/openai/types/responses/tool_choice_apply_patch.py @@ -8,5 +8,7 @@ class ToolChoiceApplyPatch(BaseModel): + """Forces the model to call the apply_patch tool when executing a tool call.""" + type: Literal["apply_patch"] """The tool to call. Always `apply_patch`.""" diff --git a/src/openai/types/responses/tool_choice_apply_patch_param.py b/src/openai/types/responses/tool_choice_apply_patch_param.py index 00d4b25f0e..193c99328a 100644 --- a/src/openai/types/responses/tool_choice_apply_patch_param.py +++ b/src/openai/types/responses/tool_choice_apply_patch_param.py @@ -8,5 +8,7 @@ class ToolChoiceApplyPatchParam(TypedDict, total=False): + """Forces the model to call the apply_patch tool when executing a tool call.""" + type: Required[Literal["apply_patch"]] """The tool to call. 
Always `apply_patch`.""" diff --git a/src/openai/types/responses/tool_choice_custom.py b/src/openai/types/responses/tool_choice_custom.py index d600e53616..dec85ef78c 100644 --- a/src/openai/types/responses/tool_choice_custom.py +++ b/src/openai/types/responses/tool_choice_custom.py @@ -8,6 +8,8 @@ class ToolChoiceCustom(BaseModel): + """Use this option to force the model to call a specific custom tool.""" + name: str """The name of the custom tool to call.""" diff --git a/src/openai/types/responses/tool_choice_custom_param.py b/src/openai/types/responses/tool_choice_custom_param.py index 55bc53b730..ccdbab568a 100644 --- a/src/openai/types/responses/tool_choice_custom_param.py +++ b/src/openai/types/responses/tool_choice_custom_param.py @@ -8,6 +8,8 @@ class ToolChoiceCustomParam(TypedDict, total=False): + """Use this option to force the model to call a specific custom tool.""" + name: Required[str] """The name of the custom tool to call.""" diff --git a/src/openai/types/responses/tool_choice_function.py b/src/openai/types/responses/tool_choice_function.py index 8d2a4f2822..b2aab24aca 100644 --- a/src/openai/types/responses/tool_choice_function.py +++ b/src/openai/types/responses/tool_choice_function.py @@ -8,6 +8,8 @@ class ToolChoiceFunction(BaseModel): + """Use this option to force the model to call a specific function.""" + name: str """The name of the function to call.""" diff --git a/src/openai/types/responses/tool_choice_function_param.py b/src/openai/types/responses/tool_choice_function_param.py index 910537fd97..837465ebd7 100644 --- a/src/openai/types/responses/tool_choice_function_param.py +++ b/src/openai/types/responses/tool_choice_function_param.py @@ -8,6 +8,8 @@ class ToolChoiceFunctionParam(TypedDict, total=False): + """Use this option to force the model to call a specific function.""" + name: Required[str] """The name of the function to call.""" diff --git a/src/openai/types/responses/tool_choice_mcp.py b/src/openai/types/responses/tool_choice_mcp.py index 8763d81635..a2c8049c2d 100644 --- a/src/openai/types/responses/tool_choice_mcp.py +++ b/src/openai/types/responses/tool_choice_mcp.py @@ -9,6 +9,10 @@ class ToolChoiceMcp(BaseModel): + """ + Use this option to force the model to call a specific tool on a remote MCP server. + """ + server_label: str """The label of the MCP server to use.""" diff --git a/src/openai/types/responses/tool_choice_mcp_param.py b/src/openai/types/responses/tool_choice_mcp_param.py index afcceb8cc5..9726e47a47 100644 --- a/src/openai/types/responses/tool_choice_mcp_param.py +++ b/src/openai/types/responses/tool_choice_mcp_param.py @@ -9,6 +9,10 @@ class ToolChoiceMcpParam(TypedDict, total=False): + """ + Use this option to force the model to call a specific tool on a remote MCP server. + """ + server_label: Required[str] """The label of the MCP server to use.""" diff --git a/src/openai/types/responses/tool_choice_shell.py b/src/openai/types/responses/tool_choice_shell.py index 1ad21c58f3..a78eccc387 100644 --- a/src/openai/types/responses/tool_choice_shell.py +++ b/src/openai/types/responses/tool_choice_shell.py @@ -8,5 +8,7 @@ class ToolChoiceShell(BaseModel): + """Forces the model to call the shell tool when a tool call is required.""" + type: Literal["shell"] """The tool to call. 
Always `shell`.""" diff --git a/src/openai/types/responses/tool_choice_shell_param.py b/src/openai/types/responses/tool_choice_shell_param.py index 2b04c00d56..0dbcc90f39 100644 --- a/src/openai/types/responses/tool_choice_shell_param.py +++ b/src/openai/types/responses/tool_choice_shell_param.py @@ -8,5 +8,7 @@ class ToolChoiceShellParam(TypedDict, total=False): + """Forces the model to call the shell tool when a tool call is required.""" + type: Required[Literal["shell"]] """The tool to call. Always `shell`.""" diff --git a/src/openai/types/responses/tool_choice_types.py b/src/openai/types/responses/tool_choice_types.py index b31a826051..044c014b19 100644 --- a/src/openai/types/responses/tool_choice_types.py +++ b/src/openai/types/responses/tool_choice_types.py @@ -8,6 +8,11 @@ class ToolChoiceTypes(BaseModel): + """ + Indicates that the model should use a built-in tool to generate a response. + [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + """ + type: Literal[ "file_search", "web_search_preview", diff --git a/src/openai/types/responses/tool_choice_types_param.py b/src/openai/types/responses/tool_choice_types_param.py index 15e0357471..9bf02dbfcc 100644 --- a/src/openai/types/responses/tool_choice_types_param.py +++ b/src/openai/types/responses/tool_choice_types_param.py @@ -8,6 +8,11 @@ class ToolChoiceTypesParam(TypedDict, total=False): + """ + Indicates that the model should use a built-in tool to generate a response. + [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools). + """ + type: Required[ Literal[ "file_search", diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index 779acf0a53..c6ffa39192 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -40,6 +40,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -56,6 +58,8 @@ class McpAllowedToolsMcpToolFilter(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -69,6 +73,8 @@ class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + """A filter object to specify which tools are allowed.""" + read_only: bool """Indicates whether or not a tool modifies data or is read-only. @@ -82,6 +88,13 @@ class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): + """Specify which of the MCP server's tools require approval. + + Can be + `always`, `never`, or a filter object associated with tools + that require approval. + """ + always: McpRequireApprovalMcpToolApprovalFilterAlways """A filter object to specify which tools are allowed.""" @@ -93,6 +106,11 @@ class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): class Mcp(TypedDict, total=False): + """ + Give the model access to additional tools via remote Model Context Protocol + (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp). 
+ """ + server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" @@ -157,6 +175,11 @@ class Mcp(TypedDict, total=False): class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): + """Configuration for a code interpreter container. + + Optionally specify the IDs of the files to run the code on. + """ + type: Required[Literal["auto"]] """Always `auto`.""" @@ -170,6 +193,8 @@ class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): class CodeInterpreter(TypedDict, total=False): + """A tool that runs Python code to help generate a response to a prompt.""" + container: Required[CodeInterpreterContainer] """The code interpreter container. @@ -182,6 +207,12 @@ class CodeInterpreter(TypedDict, total=False): class ImageGenerationInputImageMask(TypedDict, total=False): + """Optional mask for inpainting. + + Contains `image_url` + (string, optional) and `file_id` (string, optional). + """ + file_id: str """File ID for the mask image.""" @@ -190,6 +221,8 @@ class ImageGenerationInputImageMask(TypedDict, total=False): class ImageGeneration(TypedDict, total=False): + """A tool that generates images using a model like `gpt-image-1`.""" + type: Required[Literal["image_generation"]] """The type of the image generation tool. Always `image_generation`.""" @@ -248,6 +281,8 @@ class ImageGeneration(TypedDict, total=False): class LocalShell(TypedDict, total=False): + """A tool that allows the model to execute shell commands in a local environment.""" + type: Required[Literal["local_shell"]] """The type of the local shell tool. Always `local_shell`.""" diff --git a/src/openai/types/responses/web_search_preview_tool.py b/src/openai/types/responses/web_search_preview_tool.py index 66d6a24679..12478e896d 100644 --- a/src/openai/types/responses/web_search_preview_tool.py +++ b/src/openai/types/responses/web_search_preview_tool.py @@ -9,6 +9,8 @@ class UserLocation(BaseModel): + """The user's location.""" + type: Literal["approximate"] """The type of location approximation. Always `approximate`.""" @@ -32,6 +34,11 @@ class UserLocation(BaseModel): class WebSearchPreviewTool(BaseModel): + """This tool searches the web for relevant results to use in a response. + + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Literal["web_search_preview", "web_search_preview_2025_03_11"] """The type of the web search tool. diff --git a/src/openai/types/responses/web_search_preview_tool_param.py b/src/openai/types/responses/web_search_preview_tool_param.py index ec2173f8e8..09619a3394 100644 --- a/src/openai/types/responses/web_search_preview_tool_param.py +++ b/src/openai/types/responses/web_search_preview_tool_param.py @@ -9,6 +9,8 @@ class UserLocation(TypedDict, total=False): + """The user's location.""" + type: Required[Literal["approximate"]] """The type of location approximation. Always `approximate`.""" @@ -32,6 +34,11 @@ class UserLocation(TypedDict, total=False): class WebSearchPreviewToolParam(TypedDict, total=False): + """This tool searches the web for relevant results to use in a response. + + Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]] """The type of the web search tool. 
diff --git a/src/openai/types/responses/web_search_tool.py b/src/openai/types/responses/web_search_tool.py index bde9600c87..769f5c93a4 100644 --- a/src/openai/types/responses/web_search_tool.py +++ b/src/openai/types/responses/web_search_tool.py @@ -9,6 +9,8 @@ class Filters(BaseModel): + """Filters for the search.""" + allowed_domains: Optional[List[str]] = None """Allowed domains for the search. @@ -20,6 +22,8 @@ class Filters(BaseModel): class UserLocation(BaseModel): + """The approximate location of the user.""" + city: Optional[str] = None """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -43,6 +47,12 @@ class UserLocation(BaseModel): class WebSearchTool(BaseModel): + """Search the Internet for sources related to the prompt. + + Learn more about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Literal["web_search", "web_search_2025_08_26"] """The type of the web search tool. diff --git a/src/openai/types/responses/web_search_tool_param.py b/src/openai/types/responses/web_search_tool_param.py index 7fa19e9c23..a4531a9304 100644 --- a/src/openai/types/responses/web_search_tool_param.py +++ b/src/openai/types/responses/web_search_tool_param.py @@ -11,6 +11,8 @@ class Filters(TypedDict, total=False): + """Filters for the search.""" + allowed_domains: Optional[SequenceNotStr[str]] """Allowed domains for the search. @@ -22,6 +24,8 @@ class Filters(TypedDict, total=False): class UserLocation(TypedDict, total=False): + """The approximate location of the user.""" + city: Optional[str] """Free text input for the city of the user, e.g. `San Francisco`.""" @@ -45,6 +49,12 @@ class UserLocation(TypedDict, total=False): class WebSearchToolParam(TypedDict, total=False): + """Search the Internet for sources related to the prompt. + + Learn more about the + [web search tool](https://platform.openai.com/docs/guides/tools-web-search). + """ + type: Required[Literal["web_search", "web_search_2025_08_26"]] """The type of the web search tool. diff --git a/src/openai/types/shared/comparison_filter.py b/src/openai/types/shared/comparison_filter.py index 33415ca4f9..852cac1738 100644 --- a/src/openai/types/shared/comparison_filter.py +++ b/src/openai/types/shared/comparison_filter.py @@ -9,6 +9,10 @@ class ComparisonFilter(BaseModel): + """ + A filter used to compare a specified attribute key to a given value using a defined comparison operation. + """ + key: str """The key to compare against the value.""" diff --git a/src/openai/types/shared/compound_filter.py b/src/openai/types/shared/compound_filter.py index 3aefa43647..4801aaac1a 100644 --- a/src/openai/types/shared/compound_filter.py +++ b/src/openai/types/shared/compound_filter.py @@ -12,6 +12,8 @@ class CompoundFilter(BaseModel): + """Combine multiple filters using `and` or `or`.""" + filters: List[Filter] """Array of filters to combine. diff --git a/src/openai/types/shared/custom_tool_input_format.py b/src/openai/types/shared/custom_tool_input_format.py index 53c8323ed2..9391692b7b 100644 --- a/src/openai/types/shared/custom_tool_input_format.py +++ b/src/openai/types/shared/custom_tool_input_format.py @@ -10,11 +10,15 @@ class Text(BaseModel): + """Unconstrained free-form text.""" + type: Literal["text"] """Unconstrained text format. 
Always `text`.""" class Grammar(BaseModel): + """A grammar defined by the user.""" + definition: str """The grammar definition.""" diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index b19476bcb5..1220a045b1 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -10,6 +10,12 @@ class Reasoning(BaseModel): + """**gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + effort: Optional[ReasoningEffort] = None """ Constrains effort on reasoning for diff --git a/src/openai/types/shared/response_format_json_object.py b/src/openai/types/shared/response_format_json_object.py index 2aaa5dbdfe..98e0da6a2c 100644 --- a/src/openai/types/shared/response_format_json_object.py +++ b/src/openai/types/shared/response_format_json_object.py @@ -8,5 +8,13 @@ class ResponseFormatJSONObject(BaseModel): + """JSON object response format. + + An older method of generating JSON responses. + Using `json_schema` is recommended for models that support it. Note that the + model will not generate JSON without a system or user message instructing it + to do so. + """ + type: Literal["json_object"] """The type of response format being defined. Always `json_object`.""" diff --git a/src/openai/types/shared/response_format_json_schema.py b/src/openai/types/shared/response_format_json_schema.py index c7924446f4..9b2adb66cd 100644 --- a/src/openai/types/shared/response_format_json_schema.py +++ b/src/openai/types/shared/response_format_json_schema.py @@ -11,6 +11,8 @@ class JSONSchema(BaseModel): + """Structured Outputs configuration options, including a JSON Schema.""" + name: str """The name of the response format. @@ -41,6 +43,12 @@ class JSONSchema(BaseModel): class ResponseFormatJSONSchema(BaseModel): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + json_schema: JSONSchema """Structured Outputs configuration options, including a JSON Schema.""" diff --git a/src/openai/types/shared/response_format_text.py b/src/openai/types/shared/response_format_text.py index f0c8cfb700..9f4bc0d13e 100644 --- a/src/openai/types/shared/response_format_text.py +++ b/src/openai/types/shared/response_format_text.py @@ -8,5 +8,7 @@ class ResponseFormatText(BaseModel): + """Default response format. Used to generate text responses.""" + type: Literal["text"] """The type of response format being defined. Always `text`.""" diff --git a/src/openai/types/shared/response_format_text_grammar.py b/src/openai/types/shared/response_format_text_grammar.py index b02f99c1b8..84cd141278 100644 --- a/src/openai/types/shared/response_format_text_grammar.py +++ b/src/openai/types/shared/response_format_text_grammar.py @@ -8,6 +8,11 @@ class ResponseFormatTextGrammar(BaseModel): + """ + A custom grammar for the model to follow when generating text. + Learn more in the [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars). 
+ """ + grammar: str """The custom grammar for the model to follow.""" diff --git a/src/openai/types/shared/response_format_text_python.py b/src/openai/types/shared/response_format_text_python.py index 4cd18d46fa..1b04cb62ba 100644 --- a/src/openai/types/shared/response_format_text_python.py +++ b/src/openai/types/shared/response_format_text_python.py @@ -8,5 +8,11 @@ class ResponseFormatTextPython(BaseModel): + """Configure the model to generate valid Python code. + + See the + [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) for more details. + """ + type: Literal["python"] """The type of response format being defined. Always `python`.""" diff --git a/src/openai/types/shared_params/comparison_filter.py b/src/openai/types/shared_params/comparison_filter.py index 1c40729c19..363688e467 100644 --- a/src/openai/types/shared_params/comparison_filter.py +++ b/src/openai/types/shared_params/comparison_filter.py @@ -11,6 +11,10 @@ class ComparisonFilter(TypedDict, total=False): + """ + A filter used to compare a specified attribute key to a given value using a defined comparison operation. + """ + key: Required[str] """The key to compare against the value.""" diff --git a/src/openai/types/shared_params/compound_filter.py b/src/openai/types/shared_params/compound_filter.py index d12e9b1bda..9358e46083 100644 --- a/src/openai/types/shared_params/compound_filter.py +++ b/src/openai/types/shared_params/compound_filter.py @@ -13,6 +13,8 @@ class CompoundFilter(TypedDict, total=False): + """Combine multiple filters using `and` or `or`.""" + filters: Required[Iterable[Filter]] """Array of filters to combine. diff --git a/src/openai/types/shared_params/custom_tool_input_format.py b/src/openai/types/shared_params/custom_tool_input_format.py index 37df393e39..ddc71cacb4 100644 --- a/src/openai/types/shared_params/custom_tool_input_format.py +++ b/src/openai/types/shared_params/custom_tool_input_format.py @@ -9,11 +9,15 @@ class Text(TypedDict, total=False): + """Unconstrained free-form text.""" + type: Required[Literal["text"]] """Unconstrained text format. Always `text`.""" class Grammar(TypedDict, total=False): + """A grammar defined by the user.""" + definition: Required[str] """The grammar definition.""" diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index 71cb37c65e..68d9c374b8 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -11,6 +11,12 @@ class Reasoning(TypedDict, total=False): + """**gpt-5 and o-series models only** + + Configuration options for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). + """ + effort: Optional[ReasoningEffort] """ Constrains effort on reasoning for diff --git a/src/openai/types/shared_params/response_format_json_object.py b/src/openai/types/shared_params/response_format_json_object.py index d4d1deaae5..ef5d43be2e 100644 --- a/src/openai/types/shared_params/response_format_json_object.py +++ b/src/openai/types/shared_params/response_format_json_object.py @@ -8,5 +8,13 @@ class ResponseFormatJSONObject(TypedDict, total=False): + """JSON object response format. + + An older method of generating JSON responses. + Using `json_schema` is recommended for models that support it. Note that the + model will not generate JSON without a system or user message instructing it + to do so. + """ + type: Required[Literal["json_object"]] """The type of response format being defined. 
Always `json_object`.""" diff --git a/src/openai/types/shared_params/response_format_json_schema.py b/src/openai/types/shared_params/response_format_json_schema.py index 5b0a13ee06..0a0e846873 100644 --- a/src/openai/types/shared_params/response_format_json_schema.py +++ b/src/openai/types/shared_params/response_format_json_schema.py @@ -9,6 +9,8 @@ class JSONSchema(TypedDict, total=False): + """Structured Outputs configuration options, including a JSON Schema.""" + name: Required[str] """The name of the response format. @@ -39,6 +41,12 @@ class JSONSchema(TypedDict, total=False): class ResponseFormatJSONSchema(TypedDict, total=False): + """JSON Schema response format. + + Used to generate structured JSON responses. + Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + """ + json_schema: Required[JSONSchema] """Structured Outputs configuration options, including a JSON Schema.""" diff --git a/src/openai/types/shared_params/response_format_text.py b/src/openai/types/shared_params/response_format_text.py index c3ef2b0816..c195036f95 100644 --- a/src/openai/types/shared_params/response_format_text.py +++ b/src/openai/types/shared_params/response_format_text.py @@ -8,5 +8,7 @@ class ResponseFormatText(TypedDict, total=False): + """Default response format. Used to generate text responses.""" + type: Required[Literal["text"]] """The type of response format being defined. Always `text`.""" diff --git a/src/openai/types/static_file_chunking_strategy_object_param.py b/src/openai/types/static_file_chunking_strategy_object_param.py index 0cdf35c0df..40188a41d5 100644 --- a/src/openai/types/static_file_chunking_strategy_object_param.py +++ b/src/openai/types/static_file_chunking_strategy_object_param.py @@ -10,6 +10,8 @@ class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + """Customize your own chunking strategy by setting chunk size and chunk overlap.""" + static: Required[StaticFileChunkingStrategyParam] type: Required[Literal["static"]] diff --git a/src/openai/types/upload.py b/src/openai/types/upload.py index 914b69a863..d248da6ee3 100644 --- a/src/openai/types/upload.py +++ b/src/openai/types/upload.py @@ -10,6 +10,8 @@ class Upload(BaseModel): + """The Upload object can accept byte chunks in the form of Parts.""" + id: str """The Upload unique identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/upload_create_params.py b/src/openai/types/upload_create_params.py index ab4cded81d..c25d65bedd 100644 --- a/src/openai/types/upload_create_params.py +++ b/src/openai/types/upload_create_params.py @@ -39,6 +39,11 @@ class UploadCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. + """ + anchor: Required[Literal["created_at"]] """Anchor timestamp after which the expiration policy applies. 
diff --git a/src/openai/types/uploads/upload_part.py b/src/openai/types/uploads/upload_part.py index e09621d8f9..e585b1a227 100644 --- a/src/openai/types/uploads/upload_part.py +++ b/src/openai/types/uploads/upload_part.py @@ -8,6 +8,8 @@ class UploadPart(BaseModel): + """The upload Part represents a chunk of bytes we can add to an Upload object.""" + id: str """The upload Part unique identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/vector_store.py b/src/openai/types/vector_store.py index 2473a442d2..82899ecd1b 100644 --- a/src/openai/types/vector_store.py +++ b/src/openai/types/vector_store.py @@ -27,6 +27,8 @@ class FileCounts(BaseModel): class ExpiresAfter(BaseModel): + """The expiration policy for a vector store.""" + anchor: Literal["last_active_at"] """Anchor timestamp after which the expiration policy applies. @@ -38,6 +40,10 @@ class ExpiresAfter(BaseModel): class VectorStore(BaseModel): + """ + A vector store is a collection of processed files can be used by the `file_search` tool. + """ + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/vector_store_create_params.py b/src/openai/types/vector_store_create_params.py index f373a6ed28..2b72562984 100644 --- a/src/openai/types/vector_store_create_params.py +++ b/src/openai/types/vector_store_create_params.py @@ -51,6 +51,8 @@ class VectorStoreCreateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a vector store.""" + anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/vector_store_search_params.py b/src/openai/types/vector_store_search_params.py index 8b7b13c4a1..851d63c5d1 100644 --- a/src/openai/types/vector_store_search_params.py +++ b/src/openai/types/vector_store_search_params.py @@ -36,6 +36,8 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): + """Ranking options for search.""" + ranker: Literal["none", "auto", "default-2024-11-15"] """Enable re-ranking; set to `none` to disable, which can help reduce latency.""" diff --git a/src/openai/types/vector_store_update_params.py b/src/openai/types/vector_store_update_params.py index 4f6ac63963..7c6f891170 100644 --- a/src/openai/types/vector_store_update_params.py +++ b/src/openai/types/vector_store_update_params.py @@ -29,6 +29,8 @@ class VectorStoreUpdateParams(TypedDict, total=False): class ExpiresAfter(TypedDict, total=False): + """The expiration policy for a vector store.""" + anchor: Required[Literal["last_active_at"]] """Anchor timestamp after which the expiration policy applies. diff --git a/src/openai/types/vector_stores/vector_store_file.py b/src/openai/types/vector_stores/vector_store_file.py index 001584dfd7..c1ea02227f 100644 --- a/src/openai/types/vector_stores/vector_store_file.py +++ b/src/openai/types/vector_stores/vector_store_file.py @@ -10,6 +10,11 @@ class LastError(BaseModel): + """The last error associated with this vector store file. + + Will be `null` if there are no errors. 
+ """ + code: Literal["server_error", "unsupported_file", "invalid_file"] """One of `server_error`, `unsupported_file`, or `invalid_file`.""" @@ -18,6 +23,8 @@ class LastError(BaseModel): class VectorStoreFile(BaseModel): + """A list of files attached to a vector store.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/vector_stores/vector_store_file_batch.py b/src/openai/types/vector_stores/vector_store_file_batch.py index 57dbfbd809..b07eb25da5 100644 --- a/src/openai/types/vector_stores/vector_store_file_batch.py +++ b/src/openai/types/vector_stores/vector_store_file_batch.py @@ -25,6 +25,8 @@ class FileCounts(BaseModel): class VectorStoreFileBatch(BaseModel): + """A batch of files attached to a vector store.""" + id: str """The identifier, which can be referenced in API endpoints.""" diff --git a/src/openai/types/video.py b/src/openai/types/video.py index 22ee3a11f7..e732ea54ec 100644 --- a/src/openai/types/video.py +++ b/src/openai/types/video.py @@ -13,6 +13,8 @@ class Video(BaseModel): + """Structured information describing a generated video job.""" + id: str """Unique identifier for the video job.""" diff --git a/src/openai/types/video_delete_response.py b/src/openai/types/video_delete_response.py index e2673ffe2b..1ed543aec8 100644 --- a/src/openai/types/video_delete_response.py +++ b/src/openai/types/video_delete_response.py @@ -8,6 +8,8 @@ class VideoDeleteResponse(BaseModel): + """Confirmation payload returned after deleting a video.""" + id: str """Identifier of the deleted video.""" diff --git a/src/openai/types/webhooks/batch_cancelled_webhook_event.py b/src/openai/types/webhooks/batch_cancelled_webhook_event.py index 4bbd7307a5..9d1c485f5e 100644 --- a/src/openai/types/webhooks/batch_cancelled_webhook_event.py +++ b/src/openai/types/webhooks/batch_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchCancelledWebhookEvent(BaseModel): + """Sent when a batch API request has been cancelled.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/batch_completed_webhook_event.py b/src/openai/types/webhooks/batch_completed_webhook_event.py index a47ca156fa..5ae8191789 100644 --- a/src/openai/types/webhooks/batch_completed_webhook_event.py +++ b/src/openai/types/webhooks/batch_completed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchCompletedWebhookEvent(BaseModel): + """Sent when a batch API request has been completed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/batch_expired_webhook_event.py b/src/openai/types/webhooks/batch_expired_webhook_event.py index e91001e8d8..2f08a7f579 100644 --- a/src/openai/types/webhooks/batch_expired_webhook_event.py +++ b/src/openai/types/webhooks/batch_expired_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchExpiredWebhookEvent(BaseModel): + """Sent when a batch API request has expired.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/batch_failed_webhook_event.py b/src/openai/types/webhooks/batch_failed_webhook_event.py index ef80863edb..7166616588 100644 --- a/src/openai/types/webhooks/batch_failed_webhook_event.py +++ 
b/src/openai/types/webhooks/batch_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the batch API request.""" class BatchFailedWebhookEvent(BaseModel): + """Sent when a batch API request has failed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/eval_run_canceled_webhook_event.py b/src/openai/types/webhooks/eval_run_canceled_webhook_event.py index 855359f743..1948f8933b 100644 --- a/src/openai/types/webhooks/eval_run_canceled_webhook_event.py +++ b/src/openai/types/webhooks/eval_run_canceled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunCanceledWebhookEvent(BaseModel): + """Sent when an eval run has been canceled.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/eval_run_failed_webhook_event.py b/src/openai/types/webhooks/eval_run_failed_webhook_event.py index 7671680720..4e4c860abc 100644 --- a/src/openai/types/webhooks/eval_run_failed_webhook_event.py +++ b/src/openai/types/webhooks/eval_run_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunFailedWebhookEvent(BaseModel): + """Sent when an eval run has failed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/eval_run_succeeded_webhook_event.py b/src/openai/types/webhooks/eval_run_succeeded_webhook_event.py index d0d1fc2b04..c20f22eeb9 100644 --- a/src/openai/types/webhooks/eval_run_succeeded_webhook_event.py +++ b/src/openai/types/webhooks/eval_run_succeeded_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the eval run.""" class EvalRunSucceededWebhookEvent(BaseModel): + """Sent when an eval run has succeeded.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py b/src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py index 1fe3c06096..0cfff85dad 100644 --- a/src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +++ b/src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobCancelledWebhookEvent(BaseModel): + """Sent when a fine-tuning job has been cancelled.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py b/src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py index 71d899c8ef..0eb6bf954f 100644 --- a/src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +++ b/src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobFailedWebhookEvent(BaseModel): + """Sent when a fine-tuning job has failed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py b/src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py index 470f1fcfaa..26b5ea8955 100644 --- a/src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +++ b/src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py @@ -9,11 +9,15 @@ 
class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the fine-tuning job.""" class FineTuningJobSucceededWebhookEvent(BaseModel): + """Sent when a fine-tuning job has succeeded.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/realtime_call_incoming_webhook_event.py b/src/openai/types/webhooks/realtime_call_incoming_webhook_event.py index a166a3471b..4647a2e2ba 100644 --- a/src/openai/types/webhooks/realtime_call_incoming_webhook_event.py +++ b/src/openai/types/webhooks/realtime_call_incoming_webhook_event.py @@ -9,6 +9,8 @@ class DataSipHeader(BaseModel): + """A header from the SIP Invite.""" + name: str """Name of the SIP Header.""" @@ -17,6 +19,8 @@ class DataSipHeader(BaseModel): class Data(BaseModel): + """Event data payload.""" + call_id: str """The unique ID of this call.""" @@ -25,6 +29,8 @@ class Data(BaseModel): class RealtimeCallIncomingWebhookEvent(BaseModel): + """Sent when Realtime API Receives a incoming SIP call.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/response_cancelled_webhook_event.py b/src/openai/types/webhooks/response_cancelled_webhook_event.py index 443e360e90..cd791b3314 100644 --- a/src/openai/types/webhooks/response_cancelled_webhook_event.py +++ b/src/openai/types/webhooks/response_cancelled_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseCancelledWebhookEvent(BaseModel): + """Sent when a background response has been cancelled.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/response_completed_webhook_event.py b/src/openai/types/webhooks/response_completed_webhook_event.py index ac1feff32b..cf07f0c2c0 100644 --- a/src/openai/types/webhooks/response_completed_webhook_event.py +++ b/src/openai/types/webhooks/response_completed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseCompletedWebhookEvent(BaseModel): + """Sent when a background response has been completed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/response_failed_webhook_event.py b/src/openai/types/webhooks/response_failed_webhook_event.py index 5b4ba65e18..aecb1b8f47 100644 --- a/src/openai/types/webhooks/response_failed_webhook_event.py +++ b/src/openai/types/webhooks/response_failed_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseFailedWebhookEvent(BaseModel): + """Sent when a background response has failed.""" + id: str """The unique ID of the event.""" diff --git a/src/openai/types/webhooks/response_incomplete_webhook_event.py b/src/openai/types/webhooks/response_incomplete_webhook_event.py index 01609314e0..2367731e85 100644 --- a/src/openai/types/webhooks/response_incomplete_webhook_event.py +++ b/src/openai/types/webhooks/response_incomplete_webhook_event.py @@ -9,11 +9,15 @@ class Data(BaseModel): + """Event data payload.""" + id: str """The unique ID of the model response.""" class ResponseIncompleteWebhookEvent(BaseModel): + """Sent when a background response has been interrupted.""" + id: str """The unique ID of the event.""" From a12936b18cf19009d4e6d586c9b1958359636dbe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 
22:42:59 +0000 Subject: [PATCH 4/5] feat(api): make model required for the responses/compact endpoint --- .stats.yml | 6 +- src/openai/resources/responses/responses.py | 42 ++-- .../responses/response_compact_params.py | 194 +++++++++--------- tests/api_resources/test_responses.py | 28 ++- 4 files changed, 141 insertions(+), 129 deletions(-) diff --git a/.stats.yml b/.stats.yml index 7adb61ca2e..0aa25fb4a4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 137 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8a79e6fd407e6c9afec60971f03076b65f711ccd6ea16457933b0e24fb1f6d.yml -openapi_spec_hash: 38c0a73f4e08843732c5f8002a809104 -config_hash: 2c350086d87a4b4532077363087840e7 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-88d85ff87ad8983262af2b729762a6e05fd509468bb691529bc2f81e4ce27c69.yml +openapi_spec_hash: 46a55acbccd0147534017b92c1f4dd99 +config_hash: 141b101c9f13b90e21af74e1686f1f41 diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index c532fc0bb0..81e8980faf 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -1526,8 +1526,6 @@ def cancel( def compact( self, *, - input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, - instructions: Optional[str] | Omit = omit, model: Union[ Literal[ "gpt-5.1", @@ -1614,8 +1612,9 @@ def compact( ], str, None, - ] - | Omit = omit, + ], + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, previous_response_id: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1628,6 +1627,12 @@ def compact( Compact conversation Args: + model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + input: Text, image, or file inputs to the model, used to generate a response instructions: A system (or developer) message inserted into the model's context. When used @@ -1635,12 +1640,6 @@ def compact( will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. - model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). 
@@ -1658,9 +1657,9 @@ def compact( "/responses/compact", body=maybe_transform( { + "model": model, "input": input, "instructions": instructions, - "model": model, "previous_response_id": previous_response_id, }, response_compact_params.ResponseCompactParams, @@ -3140,8 +3139,6 @@ async def cancel( async def compact( self, *, - input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, - instructions: Optional[str] | Omit = omit, model: Union[ Literal[ "gpt-5.1", @@ -3228,8 +3225,9 @@ async def compact( ], str, None, - ] - | Omit = omit, + ], + input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit, + instructions: Optional[str] | Omit = omit, previous_response_id: Optional[str] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -3242,6 +3240,12 @@ async def compact( Compact conversation Args: + model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a + wide range of models with different capabilities, performance characteristics, + and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + input: Text, image, or file inputs to the model, used to generate a response instructions: A system (or developer) message inserted into the model's context. When used @@ -3249,12 +3253,6 @@ async def compact( will not be carried over to the next response. This makes it simple to swap out system (or developer) messages in new responses. - model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a - wide range of models with different capabilities, performance characteristics, - and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). 
@@ -3272,9 +3270,9 @@ async def compact( "/responses/compact", body=await async_maybe_transform( { + "model": model, "input": input, "instructions": instructions, - "model": model, "previous_response_id": previous_response_id, }, response_compact_params.ResponseCompactParams, diff --git a/src/openai/types/responses/response_compact_params.py b/src/openai/types/responses/response_compact_params.py index fe38b15a9d..35a390f807 100644 --- a/src/openai/types/responses/response_compact_params.py +++ b/src/openai/types/responses/response_compact_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import Union, Iterable, Optional -from typing_extensions import Literal, TypedDict +from typing_extensions import Literal, Required, TypedDict from .response_input_item_param import ResponseInputItemParam @@ -11,6 +11,103 @@ class ResponseCompactParams(TypedDict, total=False): + model: Required[ + Union[ + Literal[ + "gpt-5.1", + "gpt-5.1-2025-11-13", + "gpt-5.1-codex", + "gpt-5.1-mini", + "gpt-5.1-chat-latest", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4.1-2025-04-14", + "gpt-4.1-mini-2025-04-14", + "gpt-4.1-nano-2025-04-14", + "o4-mini", + "o4-mini-2025-04-16", + "o3", + "o3-2025-04-16", + "o3-mini", + "o3-mini-2025-01-31", + "o1", + "o1-2024-12-17", + "o1-preview", + "o1-preview-2024-09-12", + "o1-mini", + "o1-mini-2024-09-12", + "gpt-4o", + "gpt-4o-2024-11-20", + "gpt-4o-2024-08-06", + "gpt-4o-2024-05-13", + "gpt-4o-audio-preview", + "gpt-4o-audio-preview-2024-10-01", + "gpt-4o-audio-preview-2024-12-17", + "gpt-4o-audio-preview-2025-06-03", + "gpt-4o-mini-audio-preview", + "gpt-4o-mini-audio-preview-2024-12-17", + "gpt-4o-search-preview", + "gpt-4o-mini-search-preview", + "gpt-4o-search-preview-2025-03-11", + "gpt-4o-mini-search-preview-2025-03-11", + "chatgpt-4o-latest", + "codex-mini-latest", + "gpt-4o-mini", + "gpt-4o-mini-2024-07-18", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + "o1-pro", + "o1-pro-2025-03-19", + "o3-pro", + "o3-pro-2025-06-10", + "o3-deep-research", + "o3-deep-research-2025-06-26", + "o4-mini-deep-research", + "o4-mini-deep-research-2025-06-26", + "computer-use-preview", + "computer-use-preview-2025-03-11", + "gpt-5-codex", + "gpt-5-pro", + "gpt-5-pro-2025-10-06", + "gpt-5.1-codex-max", + ], + str, + None, + ] + ] + """Model ID used to generate the response, like `gpt-5` or `o3`. + + OpenAI offers a wide range of models with different capabilities, performance + characteristics, and price points. Refer to the + [model guide](https://platform.openai.com/docs/models) to browse and compare + available models. + """ + input: Union[str, Iterable[ResponseInputItemParam], None] """Text, image, or file inputs to the model, used to generate a response""" @@ -22,101 +119,6 @@ class ResponseCompactParams(TypedDict, total=False): system (or developer) messages in new responses. 
""" - model: Union[ - Literal[ - "gpt-5.1", - "gpt-5.1-2025-11-13", - "gpt-5.1-codex", - "gpt-5.1-mini", - "gpt-5.1-chat-latest", - "gpt-5", - "gpt-5-mini", - "gpt-5-nano", - "gpt-5-2025-08-07", - "gpt-5-mini-2025-08-07", - "gpt-5-nano-2025-08-07", - "gpt-5-chat-latest", - "gpt-4.1", - "gpt-4.1-mini", - "gpt-4.1-nano", - "gpt-4.1-2025-04-14", - "gpt-4.1-mini-2025-04-14", - "gpt-4.1-nano-2025-04-14", - "o4-mini", - "o4-mini-2025-04-16", - "o3", - "o3-2025-04-16", - "o3-mini", - "o3-mini-2025-01-31", - "o1", - "o1-2024-12-17", - "o1-preview", - "o1-preview-2024-09-12", - "o1-mini", - "o1-mini-2024-09-12", - "gpt-4o", - "gpt-4o-2024-11-20", - "gpt-4o-2024-08-06", - "gpt-4o-2024-05-13", - "gpt-4o-audio-preview", - "gpt-4o-audio-preview-2024-10-01", - "gpt-4o-audio-preview-2024-12-17", - "gpt-4o-audio-preview-2025-06-03", - "gpt-4o-mini-audio-preview", - "gpt-4o-mini-audio-preview-2024-12-17", - "gpt-4o-search-preview", - "gpt-4o-mini-search-preview", - "gpt-4o-search-preview-2025-03-11", - "gpt-4o-mini-search-preview-2025-03-11", - "chatgpt-4o-latest", - "codex-mini-latest", - "gpt-4o-mini", - "gpt-4o-mini-2024-07-18", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-0125-preview", - "gpt-4-turbo-preview", - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k-0613", - "o1-pro", - "o1-pro-2025-03-19", - "o3-pro", - "o3-pro-2025-06-10", - "o3-deep-research", - "o3-deep-research-2025-06-26", - "o4-mini-deep-research", - "o4-mini-deep-research-2025-06-26", - "computer-use-preview", - "computer-use-preview-2025-03-11", - "gpt-5-codex", - "gpt-5-pro", - "gpt-5-pro-2025-10-06", - "gpt-5.1-codex-max", - ], - str, - None, - ] - """Model ID used to generate the response, like `gpt-5` or `o3`. - - OpenAI offers a wide range of models with different capabilities, performance - characteristics, and price points. Refer to the - [model guide](https://platform.openai.com/docs/models) to browse and compare - available models. - """ - previous_response_id: Optional[str] """The unique ID of the previous response to the model. 
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 14e2d911ef..d330eed134 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -361,22 +361,26 @@ def test_path_params_cancel(self, client: OpenAI) -> None: @parametrize def test_method_compact(self, client: OpenAI) -> None: - response = client.responses.compact() + response = client.responses.compact( + model="gpt-5.1", + ) assert_matches_type(CompactedResponse, response, path=["response"]) @parametrize def test_method_compact_with_all_params(self, client: OpenAI) -> None: response = client.responses.compact( + model="gpt-5.1", input="string", instructions="instructions", - model="gpt-5.1", previous_response_id="resp_123", ) assert_matches_type(CompactedResponse, response, path=["response"]) @parametrize def test_raw_response_compact(self, client: OpenAI) -> None: - http_response = client.responses.with_raw_response.compact() + http_response = client.responses.with_raw_response.compact( + model="gpt-5.1", + ) assert http_response.is_closed is True assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -385,7 +389,9 @@ def test_raw_response_compact(self, client: OpenAI) -> None: @parametrize def test_streaming_response_compact(self, client: OpenAI) -> None: - with client.responses.with_streaming_response.compact() as http_response: + with client.responses.with_streaming_response.compact( + model="gpt-5.1", + ) as http_response: assert not http_response.is_closed assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -751,22 +757,26 @@ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_method_compact(self, async_client: AsyncOpenAI) -> None: - response = await async_client.responses.compact() + response = await async_client.responses.compact( + model="gpt-5.1", + ) assert_matches_type(CompactedResponse, response, path=["response"]) @parametrize async def test_method_compact_with_all_params(self, async_client: AsyncOpenAI) -> None: response = await async_client.responses.compact( + model="gpt-5.1", input="string", instructions="instructions", - model="gpt-5.1", previous_response_id="resp_123", ) assert_matches_type(CompactedResponse, response, path=["response"]) @parametrize async def test_raw_response_compact(self, async_client: AsyncOpenAI) -> None: - http_response = await async_client.responses.with_raw_response.compact() + http_response = await async_client.responses.with_raw_response.compact( + model="gpt-5.1", + ) assert http_response.is_closed is True assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -775,7 +785,9 @@ async def test_raw_response_compact(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_compact(self, async_client: AsyncOpenAI) -> None: - async with async_client.responses.with_streaming_response.compact() as http_response: + async with async_client.responses.with_streaming_response.compact( + model="gpt-5.1", + ) as http_response: assert not http_response.is_closed assert http_response.http_request.headers.get("X-Stainless-Lang") == "python" From 061bfa1ee756e01687f41d38a68b1c0cc348dc8e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 10 Dec 2025 05:05:02 +0000 Subject: [PATCH 5/5] release: 2.10.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ pyproject.toml | 2 
+- src/openai/_version.py | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 427b8ec423..21f60560ae 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.9.0" + ".": "2.10.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 6de78290fc..58a092665e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 2.10.0 (2025-12-10) + +Full Changelog: [v2.9.0...v2.10.0](https://github.com/openai/openai-python/compare/v2.9.0...v2.10.0) + +### Features + +* **api:** make model required for the responses/compact endpoint ([a12936b](https://github.com/openai/openai-python/commit/a12936b18cf19009d4e6d586c9b1958359636dbe)) + + +### Bug Fixes + +* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([8f0d230](https://github.com/openai/openai-python/commit/8f0d23066c1edc38a6e9858b054dceaf92ae001b)) + + +### Chores + +* add missing docstrings ([f20a9a1](https://github.com/openai/openai-python/commit/f20a9a18a421ba69622c77ab539509d218e774eb)) +* **internal:** update docstring ([9a993f2](https://github.com/openai/openai-python/commit/9a993f2261b6524aa30b955e006c7ea89f086968)) + ## 2.9.0 (2025-12-04) Full Changelog: [v2.8.1...v2.9.0](https://github.com/openai/openai-python/compare/v2.8.1...v2.9.0) diff --git a/pyproject.toml b/pyproject.toml index 4735412341..e7d181d007 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "2.9.0" +version = "2.10.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index e5ddb8f4eb..c7a4f08f04 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "2.9.0" # x-release-please-version +__version__ = "2.10.0" # x-release-please-version
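
For context, a minimal usage sketch of the compact endpoint after PATCH 4/5, where `model` becomes a required keyword argument. The call shape mirrors the updated tests above; the `previous_response_id` value is illustrative only.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# `model` is now required for responses.compact(); `input`, `instructions`,
# and `previous_response_id` remain optional keyword arguments.
compacted = client.responses.compact(
    model="gpt-5.1",
    previous_response_id="resp_123",  # illustrative ID, as in the tests above
)
print(compacted)
```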