diff --git a/async-openai/src/chat.rs b/async-openai/src/chat.rs index 28c89f9d..e826e4f5 100644 --- a/async-openai/src/chat.rs +++ b/async-openai/src/chat.rs @@ -1,15 +1,19 @@ +use serde::Serialize; + use crate::{ config::Config, error::OpenAIError, - types::{ + types::chat::{ + ChatCompletionDeleted, ChatCompletionList, ChatCompletionMessageList, ChatCompletionResponseStream, CreateChatCompletionRequest, CreateChatCompletionResponse, + UpdateChatCompletionRequest, }, Client, }; /// Given a list of messages comprising a conversation, the model will return a response. /// -/// Related guide: [Chat completions](https://platform.openai.com//docs/guides/text-generation) +/// Related guide: [Chat Completions](https://platform.openai.com/docs/guides/text-generation) pub struct Chat<'c, C: Config> { client: &'c Client, } @@ -19,21 +23,13 @@ impl<'c, C: Config> Chat<'c, C> { Self { client } } - /// Creates a model response for the given chat conversation. Learn more in - /// the - /// - /// [text generation](https://platform.openai.com/docs/guides/text-generation), - /// [vision](https://platform.openai.com/docs/guides/vision), - /// - /// and [audio](https://platform.openai.com/docs/guides/audio) guides. + /// Creates a model response for the given chat conversation. /// + /// Returns a [chat completion](https://platform.openai.com/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](https://platform.openai.com/docs/api-reference/chat/streaming) objects if the request is streamed. /// - /// Parameter support can differ depending on the model used to generate the - /// response, particularly for newer reasoning models. Parameters that are - /// only supported for reasoning models are noted below. For the current state - /// of unsupported parameters in reasoning models, + /// Learn more in the [text generation](https://platform.openai.com/docs/guides/text-generation), [vision](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio) guides. /// - /// [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). + /// Parameter support can differ depending on the model used to generate the response, particularly for newer reasoning models. Parameters that are only supported for reasoning models are noted below. For the current state of unsupported parameters in reasoning models, [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning). /// /// byot: You must ensure "stream: false" in serialized `request` #[crate::byot( @@ -55,9 +51,11 @@ impl<'c, C: Config> Chat<'c, C> { self.client.post("/chat/completions", request).await } - /// Creates a completion for the chat message + /// Creates a completion for the chat message. /// - /// partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. + /// If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
+ /// + /// See the [Streaming section](https://platform.openai.com/docs/api-reference/chat/streaming) for more information, along with the [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) guide for more information on how to handle the streaming events. /// /// [ChatCompletionResponseStream] is a parsed SSE stream until a \[DONE\] is received from server. /// @@ -85,4 +83,73 @@ impl<'c, C: Config> Chat<'c, C> { } Ok(self.client.post_stream("/chat/completions", request).await) } + + /// List stored Chat Completions. Only Chat Completions that have been stored + /// with the `store` parameter set to `true` will be returned. + #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn list(&self, query: &Q) -> Result + where + Q: Serialize + ?Sized, + { + self.client + .get_with_query("/chat/completions", &query) + .await + } + + /// Get a stored chat completion. Only Chat Completions that have been created + /// with the `store` parameter set to `true` will be returned. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn retrieve( + &self, + completion_id: &str, + ) -> Result { + self.client + .get(&format!("/chat/completions/{completion_id}")) + .await + } + + /// Modify a stored chat completion. Only Chat Completions that have been + /// created with the `store` parameter set to `true` can be modified. Currently, + /// the only supported modification is to update the `metadata` field. + #[crate::byot( + T0 = std::fmt::Display, + T1 = serde::Serialize, + R = serde::de::DeserializeOwned + )] + pub async fn update( + &self, + completion_id: &str, + request: UpdateChatCompletionRequest, + ) -> Result { + self.client + .post(&format!("/chat/completions/{completion_id}"), request) + .await + } + + /// Delete a stored chat completion. Only Chat Completions that have been + /// created with the `store` parameter set to `true` can be deleted. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn delete(&self, completion_id: &str) -> Result { + self.client + .delete(&format!("/chat/completions/{completion_id}")) + .await + } + + /// Get a list of messages for the specified chat completion. 
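The new stored-completion endpoints above (list, retrieve, update, delete, plus the message listing that follows) could be exercised roughly as in this minimal sketch. It assumes a configured `Client<OpenAIConfig>`, that `serde_json` is available in the caller's crate, and that the completions were created with `store: true`; the query parameters and metadata values are placeholders.

```rust
use async_openai::{config::OpenAIConfig, types::chat::UpdateChatCompletionRequestArgs, Client};

async fn manage_stored_completions(
    client: &Client<OpenAIConfig>,
) -> Result<(), Box<dyn std::error::Error>> {
    // List stored completions; any `Serialize`-able query works with the generic signature.
    let page = client.chat().list(&[("limit", "5")]).await?;
    println!("fetched {} stored completions", page.data.len());

    if let Some(first) = page.data.first() {
        // Fetch the full completion object and its stored messages.
        let completion = client.chat().retrieve(&first.id).await?;
        let messages = client.chat().messages(&first.id, &[("limit", "10")]).await?;
        println!("{} has {} stored messages", completion.id, messages.data.len());

        // Update the only mutable field, `metadata` (now backed by `serde_json::Value`).
        let update = UpdateChatCompletionRequestArgs::default()
            .metadata(serde_json::json!({ "reviewed": "true" }))
            .build()?;
        client.chat().update(&first.id, update).await?;

        // Delete the stored completion once it is no longer needed.
        let deleted = client.chat().delete(&first.id).await?;
        assert!(deleted.deleted);
    }
    Ok(())
}
```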
+ #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn messages( + &self, + completion_id: &str, + query: &Q, + ) -> Result + where + Q: Serialize + ?Sized, + { + self.client + .get_with_query( + &format!("/chat/completions/{completion_id}/messages"), + &query, + ) + .await + } } diff --git a/async-openai/src/config.rs b/async-openai/src/config.rs index a55e007c..294c8780 100644 --- a/async-openai/src/config.rs +++ b/async-openai/src/config.rs @@ -241,7 +241,7 @@ impl Config for AzureConfig { #[cfg(test)] mod test { use super::*; - use crate::types::{ + use crate::types::chat::{ ChatCompletionRequestMessage, ChatCompletionRequestUserMessage, CreateChatCompletionRequest, }; use crate::Client; diff --git a/async-openai/src/container_files.rs b/async-openai/src/container_files.rs index 0cd17407..633f873c 100644 --- a/async-openai/src/container_files.rs +++ b/async-openai/src/container_files.rs @@ -4,7 +4,7 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{ + types::containers::{ ContainerFileListResource, ContainerFileResource, CreateContainerFileRequest, DeleteContainerFileResponse, }, diff --git a/async-openai/src/containers.rs b/async-openai/src/containers.rs index 57a88a20..7254bccf 100644 --- a/async-openai/src/containers.rs +++ b/async-openai/src/containers.rs @@ -4,7 +4,7 @@ use crate::{ config::Config, container_files::ContainerFiles, error::OpenAIError, - types::{ + types::containers::{ ContainerListResource, ContainerResource, CreateContainerRequest, DeleteContainerResponse, }, Client, diff --git a/async-openai/src/types/assistant.rs b/async-openai/src/types/assistant.rs index cd0aba47..9e53bbe1 100644 --- a/async-openai/src/types/assistant.rs +++ b/async-openai/src/types/assistant.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use super::{FunctionName, FunctionObject, ResponseFormat}; +use crate::types::chat::{FunctionName, FunctionObject, ResponseFormat}; #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] pub struct AssistantToolCodeInterpreterResources { diff --git a/async-openai/src/types/assistant_impls.rs b/async-openai/src/types/assistant_impls.rs index bd8d4bf7..f1bc03dc 100644 --- a/async-openai/src/types/assistant_impls.rs +++ b/async-openai/src/types/assistant_impls.rs @@ -1,7 +1,7 @@ -use super::{ - AssistantToolCodeInterpreterResources, AssistantToolFileSearchResources, +use crate::types::{ + chat::FunctionObject, AssistantToolCodeInterpreterResources, AssistantToolFileSearchResources, AssistantToolResources, AssistantTools, AssistantToolsFileSearch, AssistantToolsFunction, - CreateAssistantToolFileSearchResources, CreateAssistantToolResources, FunctionObject, + CreateAssistantToolFileSearchResources, CreateAssistantToolResources, }; impl From for AssistantTools { diff --git a/async-openai/src/types/chat.rs b/async-openai/src/types/chat/chat.rs similarity index 69% rename from async-openai/src/types/chat.rs rename to async-openai/src/types/chat/chat.rs index e519286d..c6409473 100644 --- a/async-openai/src/types/chat.rs +++ b/async-openai/src/types/chat/chat.rs @@ -4,7 +4,10 @@ use derive_builder::Builder; use futures::Stream; use serde::{Deserialize, Serialize}; -use crate::error::OpenAIError; +use crate::{ + error::OpenAIError, + types::{responses::CustomGrammarFormatParam, Metadata}, +}; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] @@ -18,7 +21,7 @@ pub enum Prompt { #[derive(Debug, 
Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] -pub enum Stop { +pub enum StopConfiguration { String(String), // nullable: true StringArray(Vec), // minItems: 1; maxItems: 4 } @@ -78,7 +81,7 @@ pub enum Role { } /// The name and arguments of a function that should be called, as generated by the model. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] pub struct FunctionCall { /// The name of the function to call. pub name: String, @@ -147,7 +150,14 @@ pub struct ChatCompletionRequestDeveloperMessage { #[serde(untagged)] pub enum ChatCompletionRequestDeveloperMessageContent { Text(String), - Array(Vec), + Array(Vec), +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type")] +#[serde(rename_all = "snake_case")] +pub enum ChatCompletionRequestDeveloperMessageContentPart { + Text(ChatCompletionRequestMessageContentPartText), } #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] @@ -239,6 +249,26 @@ pub struct ChatCompletionRequestMessageContentPartAudio { pub input_audio: InputAudio, } +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +pub struct FileObject { + /// The base64 encoded file data, used when passing the file to the model + /// as a string. + #[serde(skip_serializing_if = "Option::is_none")] + file_data: Option, + /// The ID of an uploaded file to use as input. + #[serde(skip_serializing_if = "Option::is_none")] + file_id: Option, + /// The name of the file, used when passing the file to the model as a + /// string. + #[serde(skip_serializing_if = "Option::is_none")] + filename: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +pub struct ChatCompletionRequestMessageContentPartFile { + pub file: FileObject, +} + #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(tag = "type")] #[serde(rename_all = "snake_case")] @@ -246,6 +276,7 @@ pub enum ChatCompletionRequestUserMessageContentPart { Text(ChatCompletionRequestMessageContentPartText), ImageUrl(ChatCompletionRequestMessageContentPartImage), InputAudio(ChatCompletionRequestMessageContentPartAudio), + File(ChatCompletionRequestMessageContentPartFile), } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] @@ -347,7 +378,7 @@ pub struct ChatCompletionRequestAssistantMessage { #[serde(skip_serializing_if = "Option::is_none")] pub audio: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub tool_calls: Option>, + pub tool_calls: Option>, /// Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] @@ -393,15 +424,37 @@ pub enum ChatCompletionRequestMessage { } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type")] +#[serde(rename_all = "snake_case")] +pub enum ChatCompletionMessageToolCalls { + Function(ChatCompletionMessageToolCall), + Custom(ChatCompletionMessageCustomToolCall), +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] pub struct ChatCompletionMessageToolCall { /// The ID of the tool call. pub id: String, - /// The type of the tool. Currently, only `function` is supported. - pub r#type: ChatCompletionToolType, /// The function that the model called. 
pub function: FunctionCall, } +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +pub struct ChatCompletionMessageCustomToolCall { + /// The ID of the tool call. + pub id: String, + /// The custom tool that the model called. + pub custom_tool: CustomTool, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +pub struct CustomTool { + /// The name of the custom tool to call. + pub name: String, + /// The input for the custom tool call generated by the model. + pub input: String, +} + #[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] pub struct ChatCompletionResponseMessageAudio { /// Unique identifier for this audio response. @@ -414,6 +467,24 @@ pub struct ChatCompletionResponseMessageAudio { pub transcript: String, } +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ChatCompletionResponseMessageAnnotation { + UrlCitation { url_citation: UrlCitation }, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +pub struct UrlCitation { + /// The index of the last character of the URL citation in the message. + pub end_index: u32, + /// The index of the first character of the URL citation in the message. + pub start_index: u32, + /// The title of the web resource. + pub title: String, + /// The URL of the web resource. + pub url: String, +} + /// A chat completion message generated by the model. #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct ChatCompletionResponseMessage { @@ -425,7 +496,10 @@ pub struct ChatCompletionResponseMessage { pub refusal: Option, /// The tool calls generated by the model, such as function calls. #[serde(skip_serializing_if = "Option::is_none")] - pub tool_calls: Option>, + pub tool_calls: Option>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub annotations: Option>, /// The role of the author of this message. pub role: Role, @@ -516,25 +590,47 @@ pub struct ResponseFormatJsonSchema { pub strict: Option, } -#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ChatCompletionToolType { - #[default] - Function, +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ChatCompletionTools { + /// A function tool that can be used to generate a response. + Function(ChatCompletionTool), + /// A custom tool that processes input using a specified format. + Custom(CustomToolChatCompletions), } -#[derive(Clone, Serialize, Default, Debug, Builder, Deserialize, PartialEq)] -#[builder(name = "ChatCompletionToolArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] pub struct ChatCompletionTool { - #[builder(default = "ChatCompletionToolType::Function")] - pub r#type: ChatCompletionToolType, pub function: FunctionObject, } +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct CustomToolChatCompletions { + pub custom: CustomToolProperties, +} + +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct CustomToolProperties { + /// The name of the custom tool, used to identify it in tool calls. + pub name: String, + + /// Optional description of the custom tool, used to provide more context. + pub description: Option, + + /// The input format for the custom tool. 
Default is unconstrained text. + pub format: CustomToolPropertiesFormat, +} + +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CustomToolPropertiesFormat { + /// Unconstrained free-form text. + #[default] + Text, + /// A grammar defined by the user. + Grammar { grammar: CustomGrammarFormatParam }, +} + #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] pub struct FunctionName { /// The name of the function to call. @@ -544,12 +640,20 @@ pub struct FunctionName { /// Specifies a tool the model should use. Use to force the model to call a specific function. #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] pub struct ChatCompletionNamedToolChoice { - /// The type of the tool. Currently, only `function` is supported. - pub r#type: ChatCompletionToolType, - pub function: FunctionName, } +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct ChatCompletionNamedToolChoiceCustom { + pub custom: CustomName, +} + +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct CustomName { + /// The name of the custom tool to call. + pub name: String, +} + /// Controls which (if any) tool is called by the model. /// `none` means the model will not call any tool and instead generates a message. /// `auto` means the model can pick between generating a message or calling one or more tools. @@ -557,15 +661,57 @@ pub struct ChatCompletionNamedToolChoice { /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. /// /// `none` is the default when no tools are present. `auto` is the default if tools are present. +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ChatCompletionToolChoiceOption { + AllowedTools(ChatCompletionAllowedToolsChoice), + Function(ChatCompletionNamedToolChoice), + Custom(ChatCompletionNamedToolChoiceCustom), + + #[serde(untagged)] + Mode(ToolChoiceOptions), +} + #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct ChatCompletionAllowedToolsChoice { + pub allowed_tools: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(rename_all = "lowercase")] -pub enum ChatCompletionToolChoiceOption { +pub enum ToolChoiceAllowedMode { + Auto, + Required, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct ChatCompletionAllowedTools { + /// Constrains the tools available to the model to a pre-defined set. + /// + /// `auto` allows the model to pick from among the allowed tools and generate a + /// message. + /// + /// `required` requires the model to call one or more of the allowed tools. + pub mode: ToolChoiceAllowedMode, + /// A list of tool definitions that the model should be allowed to call. 
+ /// + /// For the Chat Completions API, the list of tool definitions might look like: + /// ```json + /// [ + /// { "type": "function", "function": { "name": "get_weather" } }, + /// { "type": "function", "function": { "name": "get_time" } } + /// ] + /// ``` + pub tools: Vec, +} + +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum ToolChoiceOptions { #[default] None, Auto, Required, - #[serde(untagged)] - Named(ChatCompletionNamedToolChoice), } #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] @@ -625,20 +771,22 @@ pub enum ServiceTier { Priority, } -#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] #[serde(rename_all = "lowercase")] -pub enum ServiceTierResponse { - Scale, - Default, - Flex, - Priority, +pub enum ReasoningEffort { + Minimal, + Low, + #[default] + Medium, + High, } -#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] +/// Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. Currently supported values are `low`, `medium`, and `high`. +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] #[serde(rename_all = "lowercase")] -pub enum ReasoningEffort { - Minimal, +pub enum Verbosity { Low, + #[default] Medium, High, } @@ -651,7 +799,7 @@ pub enum ReasoningEffort { /// audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `["text", "audio"]` #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] -pub enum ChatCompletionModalities { +pub enum ResponseModalities { Text, Audio, } @@ -683,15 +831,20 @@ pub enum ChatCompletionAudioVoice { Ballad, Coral, Echo, + Fable, + Nova, + Onyx, Sage, Shimmer, - Verse, + #[serde(untagged)] + Other(String), } #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum ChatCompletionAudioFormat { Wav, + Aac, Mp3, Flac, Opus, @@ -700,9 +853,10 @@ pub enum ChatCompletionAudioFormat { #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] pub struct ChatCompletionAudio { - /// The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive). + /// The voice the model uses to respond. Supported voices are + /// `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. pub voice: ChatCompletionAudioVoice, - /// Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. + /// Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`, `flac`, `opus`, or `pcm16`. pub format: ChatCompletionAudioFormat, } @@ -713,126 +867,173 @@ pub struct ChatCompletionAudio { #[builder(derive(Debug))] #[builder(build_fn(error = "OpenAIError"))] pub struct CreateChatCompletionRequest { - /// A list of messages comprising the conversation so far. Depending on the [model](https://platform.openai.com/docs/models) you use, different message types (modalities) are supported, like [text](https://platform.openai.com/docs/guides/text-generation), [images](https://platform.openai.com/docs/guides/vision), and [audio](https://platform.openai.com/docs/guides/audio). 
+ /// A list of messages comprising the conversation so far. Depending on the + /// [model](https://platform.openai.com/docs/models) you use, different message types (modalities) + /// are supported, like [text](https://platform.openai.com/docs/guides/text-generation), + /// [images](https://platform.openai.com/docs/guides/vision), and + /// [audio](https://platform.openai.com/docs/guides/audio). pub messages: Vec, // min: 1 - /// ID of the model to use. - /// See the [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) table for details on which models work with the Chat API. + /// Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI + /// offers a wide range of models with different capabilities, performance + /// characteristics, and price points. Refer to the + /// [model guide](https://platform.openai.com/docs/models) + /// to browse and compare available models. pub model: String, - /// Whether or not to store the output of this chat completion request + /// Output types that you would like the model to generate. Most models are capable of generating + /// text, which is the default: + /// + /// `["text"]` + /// The `gpt-4o-audio-preview` model can also be used to + /// [generate audio](https://platform.openai.com/docs/guides/audio). To request that this model + /// generate both text and audio responses, you can use: /// - /// for use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. + /// `["text", "audio"]` #[serde(skip_serializing_if = "Option::is_none")] - pub store: Option, // nullable: true, default: false + pub modalities: Option>, + + /// Constrains the verbosity of the model's response. Lower values will result in + /// more concise responses, while higher values will result in more verbose responses. + /// Currently supported values are `low`, `medium`, and `high`. + #[serde(skip_serializing_if = "Option::is_none")] + pub verbosity: Option, - /// **o1 models only** - /// /// Constrains effort on reasoning for /// [reasoning models](https://platform.openai.com/docs/guides/reasoning). - /// - /// Currently supported values are `low`, `medium`, and `high`. Reducing - /// - /// reasoning effort can result in faster responses and fewer tokens - /// used on reasoning in a response. + /// Currently supported values are `minimal`, `low`, `medium`, and `high`. Reducing + /// reasoning effort can result in faster responses and fewer tokens used + /// on reasoning in a response. + /// Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort. #[serde(skip_serializing_if = "Option::is_none")] pub reasoning_effort: Option, - /// Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions). + /// An upper bound for the number of tokens that can be generated for a completion, including + /// visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). #[serde(skip_serializing_if = "Option::is_none")] - pub metadata: Option, // nullable: true + pub max_completion_tokens: Option, - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// Number between -2.0 and 2.0. 
Positive values penalize new tokens based on + /// their existing frequency in the text so far, decreasing the model's + /// likelihood to repeat the same line verbatim. #[serde(skip_serializing_if = "Option::is_none")] pub frequency_penalty: Option, // min: -2.0, max: 2.0, default: 0 - /// Modify the likelihood of specified tokens appearing in the completion. - /// - /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. - /// Mathematically, the bias is added to the logits generated by the model prior to sampling. - /// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; - /// values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on + /// whether they appear in the text so far, increasing the model's likelihood + /// to talk about new topics. #[serde(skip_serializing_if = "Option::is_none")] - pub logit_bias: Option>, // default: null + pub presence_penalty: Option, // min: -2.0, max: 2.0, default 0 - /// Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. + /// This tool searches the web for relevant results to use in a response. + /// Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). #[serde(skip_serializing_if = "Option::is_none")] - pub logprobs: Option, + pub web_search_options: Option, - /// An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + /// An integer between 0 and 20 specifying the number of most likely tokens to + /// return at each token position, each with an associated log probability. + /// `logprobs` must be set to `true` if this parameter is used. #[serde(skip_serializing_if = "Option::is_none")] pub top_logprobs: Option, - /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the chat completion. + /// An object specifying the format that the model must output. /// - /// This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. - /// This value is now deprecated in favor of `max_completion_tokens`, and is - /// not compatible with [o1 series models](https://platform.openai.com/docs/guides/reasoning). - #[deprecated] + /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables + /// Structured Outputs which ensures the model will match your supplied JSON + /// schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + /// + /// Setting to `{ "type": "json_object" }` enables the older JSON mode, which + /// ensures the message the model generates is valid JSON. Using `json_schema` + /// is preferred for models that support it. #[serde(skip_serializing_if = "Option::is_none")] - pub max_tokens: Option, + pub response_format: Option, - /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + /// Parameters for audio output. 
Required when audio output is requested with + /// `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). #[serde(skip_serializing_if = "Option::is_none")] - pub max_completion_tokens: Option, + pub audio: Option, - /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + /// Whether or not to store the output of this chat completion request for + /// use in our [model distillation](https://platform.openai.com/docs/guides/distillation) or + /// [evals](https://platform.openai.com/docs/guides/evals) products. + /// + /// Supports text and image inputs. Note: image inputs over 8MB will be dropped. #[serde(skip_serializing_if = "Option::is_none")] - pub n: Option, // min:1, max: 128, default: 1 + pub store: Option, // nullable: true, default: false + /// If set to true, the model response data will be streamed to the client + /// as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + /// See the [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + /// for more information, along with the [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + /// guide for more information on how to handle the streaming events. #[serde(skip_serializing_if = "Option::is_none")] - pub modalities: Option>, + pub stream: Option, - /// Configuration for a [Predicted Output](https://platform.openai.com/docs/guides/predicted-outputs),which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. + /// Not supported with latest reasoning models `o3` and `o4-mini`. + /// + /// Up to 4 sequences where the API will stop generating further tokens. The + /// returned text will not contain the stop sequence. #[serde(skip_serializing_if = "Option::is_none")] - pub prediction: Option, + pub stop: Option, - /// Parameters for audio output. Required when audio output is requested with `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. + /// Mathematically, the bias is added to the logits generated by the model prior to sampling. + /// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; + /// values like -100 or 100 should result in a ban or exclusive selection of the relevant token. #[serde(skip_serializing_if = "Option::is_none")] - pub audio: Option, + pub logit_bias: Option>, // default: null - /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// Whether to return log probabilities of the output tokens or not. If true, + /// returns the log probabilities of each output token returned in the `content` of `message`. 
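A hedged sketch of how the renamed and newly added request fields (`modalities`, `audio`, `verbosity`, `reasoning_effort`, `store`) might be set through the derived builder. It assumes the existing `CreateChatCompletionRequestArgs` and `ChatCompletionRequestUserMessageArgs` builders and the crate's `From` conversions for message content; the model name and prompt are placeholders.

```rust
use async_openai::{
    error::OpenAIError,
    types::chat::{
        ChatCompletionAudio, ChatCompletionAudioFormat, ChatCompletionAudioVoice,
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequest,
        CreateChatCompletionRequestArgs, ReasoningEffort, ResponseModalities, Verbosity,
    },
};

fn build_request() -> Result<CreateChatCompletionRequest, OpenAIError> {
    CreateChatCompletionRequestArgs::default()
        .model("gpt-4o-audio-preview")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Say hello in one short sentence.")
            .build()?
            .into()])
        // `modalities` now uses the renamed `ResponseModalities` enum.
        .modalities([ResponseModalities::Text, ResponseModalities::Audio])
        .audio(ChatCompletionAudio {
            voice: ChatCompletionAudioVoice::Ash,
            format: ChatCompletionAudioFormat::Mp3,
        })
        .verbosity(Verbosity::Low)
        .reasoning_effort(ReasoningEffort::Minimal)
        .store(true)
        .build()
}
```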
#[serde(skip_serializing_if = "Option::is_none")] - pub presence_penalty: Option, // min: -2.0, max: 2.0, default 0 + pub logprobs: Option, - /// An object specifying the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models/gpt-4o), [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini), [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. - /// - /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - /// - /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. - /// - /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in + /// the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. + /// This value is now deprecated in favor of `max_completion_tokens`, and is + /// not compatible with [o-series models](https://platform.openai.com/docs/guides/reasoning). + #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] - pub response_format: Option, + pub max_tokens: Option, - /// This feature is in Beta. - /// If specified, our system will make a best effort to sample deterministically, such that repeated requests - /// with the same `seed` and parameters should return the same result. - /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + /// How many chat completion choices to generate for each input message. Note that you will be + /// charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to + /// minimize costs. #[serde(skip_serializing_if = "Option::is_none")] - pub seed: Option, + pub n: Option, // min:1, max: 128, default: 1 - /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - /// - If set to 'auto', the system will utilize scale tier credits until they are exhausted. - /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - /// - When not set, the default behavior is 'auto'. - /// - /// When this parameter is set, the response body will include the `service_tier` utilized. + /// Configuration for a [Predicted Output](https://platform.openai.com/docs/guides/predicted-outputs), + /// which can greatly improve response times when large parts of the model + /// response are known ahead of time. This is most common when you are + /// regenerating a file with only minor changes to most of the content. 
#[serde(skip_serializing_if = "Option::is_none")] - pub service_tier: Option, + pub prediction: Option, - /// Up to 4 sequences where the API will stop generating further tokens. + /// This feature is in Beta. + /// + /// If specified, our system will make a best effort to sample deterministically, such that + /// repeated requests with the same `seed` and parameters should return the same result. + /// + /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + /// parameter to monitor changes in the backend. + #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] - pub stop: Option, + pub seed: Option, - /// If set, partial message deltas will be sent, like in ChatGPT. - /// Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - /// as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). #[serde(skip_serializing_if = "Option::is_none")] - pub stream: Option, + pub stream_options: Option, + /// Specifies the processing type used for serving the request. + /// - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. + /// - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. + /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or '[priority](https://openai.com/api-priority-processing/)', then the request will be processed with the corresponding service tier. + /// - When not set, the default behavior is 'auto'. + /// + /// When the `service_tier` parameter is set, the response body will include the `service_tier` value based on the processing mode actually used to serve the request. This response value may be different from the value set in the parameter. #[serde(skip_serializing_if = "Option::is_none")] - pub stream_options: Option, + pub service_tier: Option, /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, /// while lower values like 0.2 will make it more focused and deterministic. @@ -849,26 +1050,49 @@ pub struct CreateChatCompletionRequest { #[serde(skip_serializing_if = "Option::is_none")] pub top_p: Option, // min: 0, max: 1, default: 1 - /// A list of tools the model may call. Currently, only functions are supported as a tool. - /// Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + /// A list of tools the model may call. You can provide either + /// [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) or + /// [function tools](https://platform.openai.com/docs/guides/function-calling). #[serde(skip_serializing_if = "Option::is_none")] - pub tools: Option>, - + pub tools: Option>, + + /// Controls which (if any) tool is called by the model. + /// `none` means the model will not call any tool and instead generates a message. + /// `auto` means the model can pick between generating a message or calling one or more tools. + /// `required` means the model must call one or more tools. + /// Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces + /// the model to call that tool. 
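To illustrate the reshaped tool types, here is a hypothetical construction of a function tool, a custom tool, and a forced tool choice. The tool names and JSON schema are made up, `FunctionObject`'s fields are taken from the existing crate rather than this hunk, and `serde_json` is assumed as a dependency of the caller.

```rust
use async_openai::types::chat::{
    ChatCompletionNamedToolChoice, ChatCompletionTool, ChatCompletionToolChoiceOption,
    ChatCompletionTools, CustomToolChatCompletions, CustomToolProperties,
    CustomToolPropertiesFormat, FunctionName, FunctionObject,
};

fn example_tools() -> (Vec<ChatCompletionTools>, ChatCompletionToolChoiceOption) {
    let tools = vec![
        // Function tool: the old `type: function` field is gone; the enum variant carries the tag.
        ChatCompletionTools::Function(ChatCompletionTool {
            function: FunctionObject {
                name: "get_weather".into(),
                description: Some("Look up the current weather for a city.".into()),
                parameters: Some(serde_json::json!({
                    "type": "object",
                    "properties": { "city": { "type": "string" } },
                    "required": ["city"]
                })),
                strict: None,
            },
        }),
        // Custom tool: free-form text input by default.
        ChatCompletionTools::Custom(CustomToolChatCompletions {
            custom: CustomToolProperties {
                name: "lint_code".into(),
                description: Some("Runs a project-specific linter over the reply.".into()),
                format: CustomToolPropertiesFormat::Text,
            },
        }),
    ];

    // Force the model to call the named function tool.
    let tool_choice = ChatCompletionToolChoiceOption::Function(ChatCompletionNamedToolChoice {
        function: FunctionName { name: "get_weather".into() },
    });

    (tools, tool_choice)
}
```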
+ /// `none` is the default when no tools are present. `auto` is the default if tools are present. #[serde(skip_serializing_if = "Option::is_none")] pub tool_choice: Option, - /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) during tool use. + /// Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + /// during tool use. #[serde(skip_serializing_if = "Option::is_none")] pub parallel_tool_calls: Option, - /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + /// This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` + /// instead to maintain caching optimizations. + /// A stable identifier for your end-users. + /// Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and + /// prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] pub user: Option, - /// This tool searches the web for relevant results to use in a response. - /// Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + /// A stable identifier used to help detect users of your application that may be violating OpenAI's + /// usage policies. + /// + /// The IDs should be a string that uniquely identifies each user. We recommend hashing their username + /// or email address, in order to avoid sending us any identifying information. [Learn + /// more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). #[serde(skip_serializing_if = "Option::is_none")] - pub web_search_options: Option, + pub safety_identifier: Option, + + /// Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces + /// the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + #[serde(skip_serializing_if = "Option::is_none")] + pub prompt_cache_key: Option, /// Deprecated in favor of `tool_choice`. /// @@ -888,13 +1112,33 @@ pub struct CreateChatCompletionRequest { #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] pub functions: Option>, + + /// Developer-defined tags and values used for filtering completions in the [dashboard](https://platform.openai.com/chat-completions). + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, // nullable: true } /// Options for streaming response. Only set this when you set `stream: true`. #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] pub struct ChatCompletionStreamOptions { - /// If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. - pub include_usage: bool, + /// If set, an additional chunk will be streamed before the `data: [DONE]` + /// message. The `usage` field on this chunk shows the token usage statistics + /// for the entire request, and the `choices` field will always be an empty + /// array. 
+ /// + /// All other chunks will also include a `usage` field, but with a null + /// value. **NOTE:** If the stream is interrupted, you may not receive the + /// final usage chunk which contains the total token usage for the request. + pub include_usage: Option, + + /// When true, stream obfuscation will be enabled. Stream obfuscation adds + /// random characters to an `obfuscation` field on streaming delta events to + /// normalize payload sizes as a mitigation to certain side-channel attacks. + /// These obfuscation fields are included by default, but add a small amount + /// of overhead to the data stream. You can set `include_obfuscation` to + /// false to optimize for bandwidth if you trust the network links between + /// your application and the OpenAI API. + pub include_obfuscation: Option, } #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] @@ -965,10 +1209,11 @@ pub struct CreateChatCompletionResponse { pub model: String, /// The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. #[serde(skip_serializing_if = "Option::is_none")] - pub service_tier: Option, + pub service_tier: Option, /// This fingerprint represents the backend configuration that the model runs with. /// /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + #[deprecated] #[serde(skip_serializing_if = "Option::is_none")] pub system_fingerprint: Option, @@ -998,10 +1243,16 @@ pub struct ChatCompletionMessageToolCallChunk { /// The ID of the tool call. pub id: Option, /// The type of the tool. Currently, only `function` is supported. - pub r#type: Option, + pub r#type: Option, pub function: Option, } +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum FunctionType { + Function, +} + /// A chat completion delta generated by streamed model responses. #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct ChatCompletionStreamResponseDelta { @@ -1040,8 +1291,8 @@ pub struct ChatChoiceStream { pub logprobs: Option, } +/// Represents a streamed chunk of a chat completion response returned by the model, based on the provided input. [Learn more](https://platform.openai.com/docs/guides/streaming-responses). #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] -/// Represents a streamed chunk of a chat completion response returned by model, based on the provided input. pub struct CreateChatCompletionStreamResponse { /// A unique identifier for the chat completion. Each chunk has the same ID. pub id: String, @@ -1053,9 +1304,10 @@ pub struct CreateChatCompletionStreamResponse { /// The model to generate the completion. pub model: String, /// The service tier used for processing the request. This field is only included if the `service_tier` parameter is specified in the request. - pub service_tier: Option, + pub service_tier: Option, /// This fingerprint represents the backend configuration that the model runs with. /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + #[deprecated] pub system_fingerprint: Option, /// The object type, which is always `chat.completion.chunk`. 
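A minimal streaming sketch against the updated `ChatCompletionStreamOptions`, whose fields are now both optional. It assumes `futures::StreamExt` is available and reuses the existing request and message builders; the model and prompt are placeholders.

```rust
use async_openai::{
    config::OpenAIConfig,
    types::chat::{
        ChatCompletionRequestUserMessageArgs, ChatCompletionStreamOptions,
        CreateChatCompletionRequestArgs,
    },
    Client,
};
use futures::StreamExt;

async fn stream_with_usage(client: &Client<OpenAIConfig>) -> Result<(), Box<dyn std::error::Error>> {
    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-4o-mini")
        .messages([ChatCompletionRequestUserMessageArgs::default()
            .content("Stream a short haiku.")
            .build()?
            .into()])
        // Ask for the final usage chunk; leave obfuscation at its default.
        .stream_options(ChatCompletionStreamOptions {
            include_usage: Some(true),
            include_obfuscation: None,
        })
        .build()?;

    let mut stream = client.chat().create_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        for choice in &chunk.choices {
            if let Some(delta) = &choice.delta.content {
                print!("{delta}");
            }
        }
        // The last chunk carries the aggregate token usage when `include_usage` is set.
        if let Some(usage) = &chunk.usage {
            println!("\ntotal tokens: {}", usage.total_tokens);
        }
    }
    Ok(())
}
```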
pub object: String, @@ -1064,3 +1316,78 @@ pub struct CreateChatCompletionStreamResponse { /// When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. pub usage: Option, } + +/// An object representing a list of Chat Completions. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ChatCompletionList { + /// The type of this object. It is always set to "list". + pub object: String, + /// An array of chat completion objects. + pub data: Vec, + /// The identifier of the first chat completion in the data array. + pub first_id: String, + /// The identifier of the last chat completion in the data array. + pub last_id: String, + /// Indicates whether there are more Chat Completions available. + pub has_more: bool, +} + +/// Response when deleting a chat completion. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ChatCompletionDeleted { + /// The type of object being deleted. + pub object: String, + /// The ID of the chat completion that was deleted. + pub id: String, + /// Whether the chat completion was deleted. + pub deleted: bool, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type")] +#[serde(rename_all = "snake_case")] + +pub enum ContentPart { + Text(ChatCompletionRequestMessageContentPartText), + ImageUrl(ChatCompletionRequestMessageContentPartImage), +} + +/// A chat completion message with additional fields for listing. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ChatCompletionMessageListItem { + /// The identifier of the chat message. + pub id: String, + /// If a content parts array was provided, this is an array of `text` and `image_url` parts. Otherwise, null. + #[serde(skip_serializing_if = "Option::is_none")] + pub content_parts: Option>, + + #[serde(flatten)] + pub message: ChatCompletionResponseMessage, +} + +/// An object representing a list of chat completion messages. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ChatCompletionMessageList { + /// The type of this object. It is always set to "list". + pub object: String, + /// An array of chat completion message objects. + pub data: Vec, + /// The identifier of the first chat message in the data array. + pub first_id: String, + /// The identifier of the last chat message in the data array. + pub last_id: String, + /// Indicates whether there are more chat messages available. + pub has_more: bool, +} + +/// Request to update a chat completion. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)] +#[builder(name = "UpdateChatCompletionRequestArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct UpdateChatCompletionRequest { + /// Set of 16 key-value pairs that can be attached to an object. 
+ pub metadata: Metadata, +} diff --git a/async-openai/src/types/chat/mod.rs b/async-openai/src/types/chat/mod.rs new file mode 100644 index 00000000..5d0ed72d --- /dev/null +++ b/async-openai/src/types/chat/mod.rs @@ -0,0 +1,3 @@ +mod chat; + +pub use chat::*; diff --git a/async-openai/src/types/common.rs b/async-openai/src/types/common.rs index a2050567..6155e46b 100644 --- a/async-openai/src/types/common.rs +++ b/async-openai/src/types/common.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, path::PathBuf}; +use std::path::PathBuf; use bytes::Bytes; use serde::{Deserialize, Serialize}; @@ -25,10 +25,10 @@ pub enum OrganizationRole { /// characters. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] #[serde(transparent)] -pub struct Metadata(HashMap); +pub struct Metadata(serde_json::Value); -impl From> for Metadata { - fn from(value: HashMap) -> Self { +impl From for Metadata { + fn from(value: serde_json::Value) -> Self { Self(value) } } diff --git a/async-openai/src/types/completion.rs b/async-openai/src/types/completion.rs index 7e1e8fec..d3c22d88 100644 --- a/async-openai/src/types/completion.rs +++ b/async-openai/src/types/completion.rs @@ -6,7 +6,9 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use super::{ChatCompletionStreamOptions, Choice, CompletionUsage, Prompt, Stop}; +use crate::types::chat::{ + ChatCompletionStreamOptions, Choice, CompletionUsage, Prompt, StopConfiguration, +}; #[derive(Clone, Serialize, Deserialize, Default, Debug, Builder, PartialEq)] #[builder(name = "CreateCompletionRequestArgs")] @@ -74,7 +76,7 @@ pub struct CreateCompletionRequest { /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. #[serde(skip_serializing_if = "Option::is_none")] - pub stop: Option, + pub stop: Option, /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
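For the legacy completions endpoint, the `Stop` → `StopConfiguration` rename is the only caller-visible change; a small sketch, assuming the existing `CreateCompletionRequestArgs` builder and `From<&str>` conversions, with a placeholder model and prompt:

```rust
use async_openai::{
    error::OpenAIError,
    types::{chat::StopConfiguration, CreateCompletionRequest, CreateCompletionRequestArgs},
};

fn legacy_completion_request() -> Result<CreateCompletionRequest, OpenAIError> {
    // `Stop` is now `StopConfiguration`; everything else about the field is unchanged.
    CreateCompletionRequestArgs::default()
        .model("gpt-3.5-turbo-instruct")
        .prompt("Say this is a test")
        .stop(StopConfiguration::StringArray(vec!["\n".into(), "END".into()]))
        .build()
}
```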
/// diff --git a/async-openai/src/types/containers.rs b/async-openai/src/types/containers/containers.rs similarity index 99% rename from async-openai/src/types/containers.rs rename to async-openai/src/types/containers/containers.rs index b5d6201e..b970a73e 100644 --- a/async-openai/src/types/containers.rs +++ b/async-openai/src/types/containers/containers.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use super::InputSource; +use crate::types::InputSource; #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct ContainerResource { diff --git a/async-openai/src/types/containers/mod.rs b/async-openai/src/types/containers/mod.rs new file mode 100644 index 00000000..f8f3c54e --- /dev/null +++ b/async-openai/src/types/containers/mod.rs @@ -0,0 +1,3 @@ +mod containers; + +pub use containers::*; diff --git a/async-openai/src/types/evals/eval.rs b/async-openai/src/types/evals/eval.rs index 425424ed..96e96d5f 100644 --- a/async-openai/src/types/evals/eval.rs +++ b/async-openai/src/types/evals/eval.rs @@ -2,11 +2,12 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; +use crate::types::chat::{ChatCompletionTool, ImageDetail, InputAudio, ResponseFormat}; use crate::types::graders::{ GraderLabelModel, GraderPython, GraderScoreModel, GraderStringCheck, GraderTextSimilarity, }; use crate::types::responses::{ResponseTextParam, Tool}; -use crate::types::{ChatCompletionTool, ImageDetail, InputAudio, Metadata, ResponseFormat}; +use crate::types::Metadata; // Re-export commonly used types pub use crate::types::responses::{EasyInputMessage, InputTextContent, ReasoningEffort}; diff --git a/async-openai/src/types/graders/grader.rs b/async-openai/src/types/graders/grader.rs index 3671ad49..88915390 100644 --- a/async-openai/src/types/graders/grader.rs +++ b/async-openai/src/types/graders/grader.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::types::{evals::EvalItem, ReasoningEffort}; +use crate::types::{chat::ReasoningEffort, evals::EvalItem}; /// String check operation. 
#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] diff --git a/async-openai/src/types/impls.rs b/async-openai/src/types/impls.rs index 52990d48..d9bbaa54 100644 --- a/async-openai/src/types/impls.rs +++ b/async-openai/src/types/impls.rs @@ -8,42 +8,46 @@ use crate::{ error::OpenAIError, traits::AsyncTryFrom, types::{ + audio::{ + AudioInput, AudioResponseFormat, CreateSpeechResponse, CreateTranscriptionRequest, + CreateTranslationRequest, TimestampGranularity, TranscriptionInclude, + }, audio::{TranscriptionChunkingStrategy, TranslationResponseFormat}, + chat::{ + ChatCompletionFunctionCall, ChatCompletionFunctions, ChatCompletionNamedToolChoice, + }, + chat::{ + ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent, + ChatCompletionRequestDeveloperMessage, ChatCompletionRequestDeveloperMessageContent, + ChatCompletionRequestFunctionMessage, ChatCompletionRequestMessage, + ChatCompletionRequestMessageContentPartAudio, + ChatCompletionRequestMessageContentPartImage, + ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage, + ChatCompletionRequestSystemMessageContent, ChatCompletionRequestToolMessage, + ChatCompletionRequestToolMessageContent, ChatCompletionRequestUserMessage, + ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart, + ChatCompletionToolChoiceOption, FunctionName, ImageUrl, Prompt, Role, + StopConfiguration, + }, + containers::CreateContainerFileRequest, + embeddings::EmbeddingInput, + files::{CreateFileRequest, FileExpirationAfterAnchor, FileInput, FilePurpose}, + images::{ + CreateImageEditRequest, CreateImageVariationRequest, DallE2ImageSize, Image, + ImageInput, ImageModel, ImageResponseFormat, ImageSize, ImagesResponse, + }, images::{ImageBackground, ImageEditInput, ImageOutputFormat, ImageQuality, InputFidelity}, - InputSource, VideoSize, + moderations::ModerationInput, + responses::{EasyInputContent, Role as ResponsesRole}, + uploads::AddUploadPartRequest, + videos::{CreateVideoRequest, VideoSize}, + CreateMessageRequestContent, InputSource, }, util::{create_all_dir, create_file_part}, }; use bytes::Bytes; -use super::{ - audio::{ - AudioInput, AudioResponseFormat, CreateSpeechResponse, CreateTranscriptionRequest, - CreateTranslationRequest, TimestampGranularity, TranscriptionInclude, - }, - embeddings::EmbeddingInput, - files::{CreateFileRequest, FileExpirationAfterAnchor, FileInput, FilePurpose}, - images::{ - CreateImageEditRequest, CreateImageVariationRequest, DallE2ImageSize, Image, ImageInput, - ImageModel, ImageResponseFormat, ImageSize, ImagesResponse, - }, - moderations::ModerationInput, - responses::{EasyInputContent, Role as ResponsesRole}, - uploads::AddUploadPartRequest, - ChatCompletionFunctionCall, ChatCompletionFunctions, ChatCompletionNamedToolChoice, - ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent, - ChatCompletionRequestDeveloperMessage, ChatCompletionRequestDeveloperMessageContent, - ChatCompletionRequestFunctionMessage, ChatCompletionRequestMessage, - ChatCompletionRequestMessageContentPartAudio, ChatCompletionRequestMessageContentPartImage, - ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage, - ChatCompletionRequestSystemMessageContent, ChatCompletionRequestToolMessage, - ChatCompletionRequestToolMessageContent, ChatCompletionRequestUserMessage, - ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart, - ChatCompletionToolChoiceOption, 
CreateContainerFileRequest, CreateMessageRequestContent, - CreateVideoRequest, FunctionName, ImageUrl, Prompt, Role, Stop, -}; - /// for `impl_from!(T, Enum)`, implements /// - `From` /// - `From>` @@ -96,10 +100,10 @@ impl_from!(&str, Prompt); impl_from!(String, Prompt); impl_from!(&String, Prompt); -// From String "family" to Stop -impl_from!(&str, Stop); -impl_from!(String, Stop); -impl_from!(&String, Stop); +// From String "family" to StopConfiguration +impl_from!(&str, StopConfiguration); +impl_from!(String, StopConfiguration); +impl_from!(&String, StopConfiguration); // From String "family" to ModerationInput impl_from!(&str, ModerationInput); @@ -722,7 +726,6 @@ impl From for FunctionName { impl From<&str> for ChatCompletionNamedToolChoice { fn from(value: &str) -> Self { Self { - r#type: super::ChatCompletionToolType::Function, function: value.into(), } } @@ -731,32 +734,11 @@ impl From<&str> for ChatCompletionNamedToolChoice { impl From for ChatCompletionNamedToolChoice { fn from(value: String) -> Self { Self { - r#type: super::ChatCompletionToolType::Function, function: value.into(), } } } -impl From<&str> for ChatCompletionToolChoiceOption { - fn from(value: &str) -> Self { - match value { - "auto" => Self::Auto, - "none" => Self::None, - _ => Self::Named(value.into()), - } - } -} - -impl From for ChatCompletionToolChoiceOption { - fn from(value: String) -> Self { - match value.as_str() { - "auto" => Self::Auto, - "none" => Self::None, - _ => Self::Named(value.into()), - } - } -} - impl From<(String, serde_json::Value)> for ChatCompletionFunctions { fn from(value: (String, serde_json::Value)) -> Self { Self { diff --git a/async-openai/src/types/message.rs b/async-openai/src/types/message.rs index af79ccc1..e3f00ea9 100644 --- a/async-openai/src/types/message.rs +++ b/async-openai/src/types/message.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use super::{ImageDetail, ImageUrl}; +use crate::types::chat::{ImageDetail, ImageUrl}; #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] #[serde(rename_all = "lowercase")] diff --git a/async-openai/src/types/mod.rs b/async-openai/src/types/mod.rs index 25fd7f5c..f28556e5 100644 --- a/async-openai/src/types/mod.rs +++ b/async-openai/src/types/mod.rs @@ -6,10 +6,10 @@ mod assistant_stream; pub mod audio; mod audit_log; pub mod batches; -mod chat; +pub mod chat; mod common; mod completion; -mod containers; +pub mod containers; pub mod embeddings; pub mod evals; pub mod files; @@ -35,8 +35,8 @@ mod step; mod thread; pub mod uploads; mod users; -mod vector_store; -mod video; +pub mod vectorstores; +pub mod videos; #[cfg_attr(docsrs, doc(cfg(feature = "webhook")))] #[cfg(feature = "webhook")] pub mod webhooks; @@ -44,10 +44,8 @@ pub mod webhooks; pub use assistant::*; pub use assistant_stream::*; pub use audit_log::*; -pub use chat::*; pub use common::*; pub use completion::*; -pub use containers::*; pub use invites::*; pub use logprob::*; pub use mcp::*; @@ -60,8 +58,6 @@ pub use run::*; pub use step::*; pub use thread::*; pub use users::*; -pub use vector_store::*; -pub use video::*; mod impls; use derive_builder::UninitializedFieldError; diff --git a/async-openai/src/types/responses/response.rs b/async-openai/src/types/responses/response.rs index d4684b5c..f1a91387 100644 --- a/async-openai/src/types/responses/response.rs +++ b/async-openai/src/types/responses/response.rs @@ -1,5 +1,5 @@ use crate::error::OpenAIError; -pub use crate::types::{ +pub use crate::types::chat::{ 
CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort, ResponseFormatJsonSchema, }; @@ -1394,7 +1394,7 @@ pub struct ToolChoiceAllowed { /// message. /// /// `required` requires the model to call one or more of the allowed tools. - mode: ToolChoiceAllowedMode, + pub mode: ToolChoiceAllowedMode, /// A list of tool definitions that the model should be allowed to call. /// /// For the Responses API, the list of tool definitions might look like: @@ -1405,7 +1405,7 @@ pub struct ToolChoiceAllowed { /// { "type": "image_generation" } /// ] /// ``` - tools: Vec, + pub tools: Vec, } /// The type of hosted tool the model should to use. Learn more about diff --git a/async-openai/src/types/run.rs b/async-openai/src/types/run.rs index 8be4ad99..e5355841 100644 --- a/async-openai/src/types/run.rs +++ b/async-openai/src/types/run.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use crate::{error::OpenAIError, types::FunctionCall}; +use crate::{error::OpenAIError, types::chat::FunctionCall}; use super::{ AssistantTools, AssistantsApiResponseFormatOption, AssistantsApiToolChoiceOption, diff --git a/async-openai/src/types/vectorstores/mod.rs b/async-openai/src/types/vectorstores/mod.rs new file mode 100644 index 00000000..e38e391f --- /dev/null +++ b/async-openai/src/types/vectorstores/mod.rs @@ -0,0 +1,3 @@ +mod vector_store; + +pub use vector_store::*; diff --git a/async-openai/src/types/vector_store.rs b/async-openai/src/types/vectorstores/vector_store.rs similarity index 80% rename from async-openai/src/types/vector_store.rs rename to async-openai/src/types/vectorstores/vector_store.rs index b1682633..978892ab 100644 --- a/async-openai/src/types/vector_store.rs +++ b/async-openai/src/types/vectorstores/vector_store.rs @@ -3,9 +3,12 @@ use std::collections::HashMap; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use crate::error::OpenAIError; +use crate::{ + error::OpenAIError, + types::{responses::Filter, Metadata}, +}; -use super::StaticChunkingStrategy; +use crate::types::StaticChunkingStrategy; #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] #[builder(name = "CreateVectorStoreRequestArgs")] @@ -20,23 +23,27 @@ pub struct CreateVectorStoreRequest { /// The name of the vector store. #[serde(skip_serializing_if = "Option::is_none")] pub name: Option, + /// A description for the vector store. Can be used to describe the vector store's purpose. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, /// The expiration policy for a vector store. #[serde(skip_serializing_if = "Option::is_none")] pub expires_after: Option, - /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty. + /// The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only + /// applicable if `file_ids` is non-empty. #[serde(skip_serializing_if = "Option::is_none")] - pub chunking_strategy: Option, + pub chunking_strategy: Option, /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. 
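With `description` added and `Default` still derived on `CreateVectorStoreRequest`, a minimal request only needs the fields it actually sets. A small sketch using only fields visible in this hunk (the string values are placeholders):

```rust
use async_openai::types::vectorstores::CreateVectorStoreRequest;

fn main() {
    // Set the new `description` alongside `name`; every other optional field
    // falls back to the derived Default (`None`) and is skipped on serialize.
    let request = CreateVectorStoreRequest {
        name: Some("support-docs".to_string()),
        description: Some("Articles used by the file_search tool".to_string()),
        ..Default::default()
    };
    assert!(request.file_ids.is_none());
}
```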
#[serde(skip_serializing_if = "Option::is_none")] - pub metadata: Option>, + pub metadata: Option, } #[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] #[serde(tag = "type")] -pub enum VectorStoreChunkingStrategy { +pub enum ChunkingStrategyRequestParam { /// The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. #[default] #[serde(rename = "auto")] @@ -80,7 +87,7 @@ pub struct VectorStoreObject { pub last_active_at: Option, /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. - pub metadata: Option>, + pub metadata: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -133,7 +140,7 @@ pub struct UpdateVectorStoreRequest { #[serde(skip_serializing_if = "Option::is_none")] pub expires_after: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub metadata: Option>, + pub metadata: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -162,7 +169,9 @@ pub struct VectorStoreFileObject { /// The last error associated with this vector store file. Will be `null` if there are no errors. pub last_error: Option, /// The strategy used to chunk the file. - pub chunking_strategy: Option, + pub chunking_strategy: Option, + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. + pub attributes: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -192,12 +201,22 @@ pub enum VectorStoreFileErrorCode { #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] #[serde(tag = "type")] #[serde(rename_all = "lowercase")] -pub enum VectorStoreFileObjectChunkingStrategy { +pub enum ChunkingStrategyResponse { /// This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API. + #[serde(rename = "other")] Other, - Static { - r#static: StaticChunkingStrategy, - }, + #[serde(rename = "static")] + Static { r#static: StaticChunkingStrategy }, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +#[serde(transparent)] +pub struct VectorStoreFileAttributes(pub HashMap); + +impl From> for VectorStoreFileAttributes { + fn from(attributes: HashMap) -> Self { + Self(attributes) + } } #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] @@ -210,9 +229,9 @@ pub struct CreateVectorStoreFileRequest { /// A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files. 
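The rename to `ChunkingStrategyResponse` keeps the previous wire format: an internally tagged `type` field with `other` and `static` values, so objects indexed before this change still deserialize. A minimal sketch (the JSON literal is an assumption based on the serde attributes above):

```rust
use async_openai::types::vectorstores::ChunkingStrategyResponse;

fn main() -> serde_json::Result<()> {
    // Files indexed before chunking strategies existed come back as `other`.
    let legacy: ChunkingStrategyResponse = serde_json::from_str(r#"{ "type": "other" }"#)?;
    assert_eq!(legacy, ChunkingStrategyResponse::Other);
    // A "static" payload instead carries its parameters under a `static` key,
    // matching the renamed `Static { r#static: StaticChunkingStrategy }` variant.
    Ok(())
}
```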
pub file_id: String, #[serde(skip_serializing_if = "Option::is_none")] - pub chunking_strategy: Option, + pub chunking_strategy: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub attributes: Option>, + pub attributes: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -229,9 +248,23 @@ pub struct DeleteVectorStoreFileResponse { #[builder(derive(Debug))] #[builder(build_fn(error = "OpenAIError"))] pub struct CreateVectorStoreFileBatchRequest { - /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. - pub file_ids: Vec, // minItems: 1, maxItems: 500 - pub chunking_strategy: Option, + /// A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store + /// should use. Useful for tools like `file_search` that can access files. If `attributes` or + /// `chunking_strategy` are provided, they will be applied to all files in the batch. Mutually + /// exclusive with `files`. + #[serde(skip_serializing_if = "Option::is_none")] + pub file_ids: Option>, // minItems: 1, maxItems: 500 + /// A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. + /// Use this when you need to override metadata for specific files. The global `attributes` or + /// `chunking_strategy` will be ignored and must be specified for each file. Mutually exclusive + /// with `file_ids`. + #[serde(skip_serializing_if = "Option::is_none")] + pub files: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub chunking_strategy: Option, + /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters, booleans, or numbers. + #[serde(skip_serializing_if = "Option::is_none")] + pub attributes: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -247,7 +280,7 @@ pub enum VectorStoreFileBatchStatus { pub struct VectorStoreFileBatchCounts { /// The number of files that are currently being processed. pub in_progress: u32, - /// The number of files that have been successfully processed. + /// The number of files that have been processed. pub completed: u32, /// The number of files that have failed to process. pub failed: u32, @@ -257,12 +290,12 @@ pub struct VectorStoreFileBatchCounts { pub total: u32, } -/// A batch of files attached to a vector store. +/// A batch of files attached to a vector store. #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] pub struct VectorStoreFileBatchObject { /// The identifier, which can be referenced in API endpoints. pub id: String, - /// The object type, which is always `vector_store.file_batch`. + /// The object type, which is always `vector_store.files_batch`. pub object: String, /// The Unix timestamp (in seconds) for when the vector store files batch was created. pub created_at: u32, @@ -319,7 +352,7 @@ pub struct VectorStoreSearchRequest { /// A filter to apply based on file attributes. #[serde(skip_serializing_if = "Option::is_none")] - pub filters: Option, + pub filters: Option, /// Ranking options for search. 
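`CreateVectorStoreFileBatchRequest` now accepts either a flat `file_ids` list or a `files` list with per-file overrides, and the two are mutually exclusive. A hedged sketch of the two request bodies; the JSON is illustrative (IDs and attribute values are made up), with the per-file object layout taken from the doc comments above:

```rust
use serde_json::json;

fn main() {
    // Shape 1: flat list of file IDs; the global chunking strategy and
    // attributes apply to every file in the batch.
    let by_ids = json!({
        "file_ids": ["file-abc", "file-def"],
        "chunking_strategy": { "type": "auto" },
        "attributes": { "source": "handbook" }
    });

    // Shape 2: per-file overrides; global `attributes`/`chunking_strategy`
    // are ignored and must be given on each entry instead.
    let by_files = json!({
        "files": [
            { "file_id": "file-abc" },
            { "file_id": "file-def", "attributes": { "lang": "en" } }
        ]
    });

    println!("{by_ids}\n{by_files}");
}
```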
#[serde(skip_serializing_if = "Option::is_none")] @@ -359,50 +392,6 @@ impl From> for VectorStoreSearchQuery { } } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -#[serde(untagged)] -pub enum VectorStoreSearchFilter { - Comparison(ComparisonFilter), - Compound(CompoundFilter), -} - -impl From for VectorStoreSearchFilter { - fn from(filter: ComparisonFilter) -> Self { - Self::Comparison(filter) - } -} - -impl From for VectorStoreSearchFilter { - fn from(filter: CompoundFilter) -> Self { - Self::Compound(filter) - } -} - -/// A filter used to compare a specified attribute key to a given value using a defined comparison operation. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct ComparisonFilter { - /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. - pub r#type: ComparisonType, - - /// The key to compare against the value. - pub key: String, - - /// The value to compare against the attribute key; supports string, number, or boolean types. - pub value: AttributeValue, -} - -/// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`. -#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ComparisonType { - Eq, - Ne, - Gt, - Gte, - Lt, - Lte, -} - /// The value to compare against the attribute key; supports string, number, or boolean types. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] @@ -448,30 +437,15 @@ pub struct RankingOptions { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum Ranker { + /// Enable re-ranking; set to `none` to disable, which can help reduce latency. + #[serde(rename = "none")] + None, #[serde(rename = "auto")] Auto, #[serde(rename = "default-2024-11-15")] Default20241115, } -/// Combine multiple filters using `and` or `or`. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct CompoundFilter { - /// Type of operation: `and` or `or`. - pub r#type: CompoundFilterType, - - /// Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter` - pub filters: Vec, -} - -/// Type of operation: `and` or `or`. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum CompoundFilterType { - And, - Or, -} - #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] pub struct VectorStoreSearchResultsPage { /// The object type, which is always `vector_store.search_results.page`. @@ -502,7 +476,7 @@ pub struct VectorStoreSearchResultItem { pub score: f32, // minimum: 0, maximum: 1 /// Attributes of the vector store file. - pub attributes: HashMap, + pub attributes: VectorStoreFileAttributes, /// Content chunks from the file. pub content: Vec, @@ -516,3 +490,8 @@ pub struct VectorStoreSearchResultContentObject { /// The text content returned from search. 
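The `Ranker` enum gains a `None` variant that serializes to the literal string `none`, which per its doc comment disables re-ranking. A quick check of the wire values:

```rust
use async_openai::types::vectorstores::Ranker;

fn main() {
    // `none` disables re-ranking (useful to cut latency); the existing
    // variants keep their previous string values.
    assert_eq!(serde_json::to_string(&Ranker::None).unwrap(), r#""none""#);
    assert_eq!(serde_json::to_string(&Ranker::Auto).unwrap(), r#""auto""#);
}
```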
pub text: String, } + +#[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] +pub struct UpdateVectorStoreFileAttributesRequest { + pub attributes: VectorStoreFileAttributes, +} diff --git a/async-openai/src/types/videos/mod.rs b/async-openai/src/types/videos/mod.rs new file mode 100644 index 00000000..a361cb4e --- /dev/null +++ b/async-openai/src/types/videos/mod.rs @@ -0,0 +1,3 @@ +mod video; + +pub use video::*; diff --git a/async-openai/src/types/video.rs b/async-openai/src/types/videos/video.rs similarity index 100% rename from async-openai/src/types/video.rs rename to async-openai/src/types/videos/video.rs diff --git a/async-openai/src/vector_store_file_batches.rs b/async-openai/src/vector_store_file_batches.rs index 8e1384a9..e7a34a33 100644 --- a/async-openai/src/vector_store_file_batches.rs +++ b/async-openai/src/vector_store_file_batches.rs @@ -3,7 +3,7 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{ + types::vectorstores::{ CreateVectorStoreFileBatchRequest, ListVectorStoreFilesResponse, VectorStoreFileBatchObject, }, Client, @@ -69,7 +69,7 @@ impl<'c, C: Config> VectorStoreFileBatches<'c, C> { /// Returns a list of vector store files in a batch. #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] - pub async fn list( + pub async fn list_files( &self, batch_id: &str, query: &Q, diff --git a/async-openai/src/vector_store_files.rs b/async-openai/src/vector_store_files.rs index ed5b40e0..1b72dc2c 100644 --- a/async-openai/src/vector_store_files.rs +++ b/async-openai/src/vector_store_files.rs @@ -3,9 +3,10 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{ + types::vectorstores::{ CreateVectorStoreFileRequest, DeleteVectorStoreFileResponse, ListVectorStoreFilesResponse, - VectorStoreFileContentResponse, VectorStoreFileObject, + UpdateVectorStoreFileAttributesRequest, VectorStoreFileContentResponse, + VectorStoreFileObject, }, Client, }; @@ -79,6 +80,21 @@ impl<'c, C: Config> VectorStoreFiles<'c, C> { .await } + /// Update attributes on a vector store file. + #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn update( + &self, + file_id: &str, + request: UpdateVectorStoreFileAttributesRequest, + ) -> Result { + self.client + .post( + &format!("/vector_stores/{}/files/{file_id}", &self.vector_store_id), + request, + ) + .await + } + /// Retrieve the parsed contents of a vector store file. 
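The new `update` method on `VectorStoreFiles` posts an `UpdateVectorStoreFileAttributesRequest` to `/vector_stores/{vector_store_id}/files/{file_id}`. A sketch of the serialized body only; the attribute names and values are placeholders, while the value constraints (strings, booleans, or numbers, up to 16 pairs) come from the doc comments above:

```rust
use serde_json::json;

fn main() {
    // `VectorStoreFileAttributes` is a transparent map, so the body is a
    // single `attributes` object.
    let body = json!({
        "attributes": {
            "author": "jane",
            "reviewed": true,
            "page_count": 12
        }
    });
    // POSTed to /vector_stores/{vector_store_id}/files/{file_id}
    println!("{body}");
}
```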
#[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] pub async fn retrieve_file_content( @@ -97,7 +113,7 @@ impl<'c, C: Config> VectorStoreFiles<'c, C> { #[cfg(test)] mod tests { use crate::types::files::{CreateFileRequest, FileInput, FilePurpose}; - use crate::types::CreateVectorStoreRequest; + use crate::types::vectorstores::CreateVectorStoreRequest; use crate::Client; #[tokio::test] @@ -124,6 +140,7 @@ mod tests { .create(CreateVectorStoreRequest { file_ids: Some(vec![file_handle.id.clone()]), name: None, + description: None, expires_after: None, chunking_strategy: None, metadata: None, diff --git a/async-openai/src/vector_stores.rs b/async-openai/src/vector_stores.rs index de459e8c..53764edd 100644 --- a/async-openai/src/vector_stores.rs +++ b/async-openai/src/vector_stores.rs @@ -3,7 +3,7 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{ + types::vectorstores::{ CreateVectorStoreRequest, DeleteVectorStoreResponse, ListVectorStoresResponse, UpdateVectorStoreRequest, VectorStoreObject, VectorStoreSearchRequest, VectorStoreSearchResultsPage, diff --git a/async-openai/src/video.rs b/async-openai/src/video.rs index 8d73fd01..cef03822 100644 --- a/async-openai/src/video.rs +++ b/async-openai/src/video.rs @@ -1,7 +1,7 @@ use crate::{ config::Config, error::OpenAIError, - types::{ + types::videos::{ CreateVideoRequest, ListVideosResponse, RemixVideoRequest, VideoJob, VideoJobMetadata, VideoVariant, }, diff --git a/async-openai/tests/bring-your-own-type.rs b/async-openai/tests/bring-your-own-type.rs index 175a8cd2..ba22d51e 100644 --- a/async-openai/tests/bring-your-own-type.rs +++ b/async-openai/tests/bring-your-own-type.rs @@ -88,15 +88,25 @@ async fn test_byot_completions() { async fn test_byot_audio() { let client = Client::new(); - let _r: Result = client.audio().transcribe_byot(MyJson(json!({}))).await; let _r: Result = client .audio() - .transcribe_verbose_json_byot(MyJson(json!({}))) + .transcription() + .create_byot(MyJson(json!({}))) .await; - let _r: Result = client.audio().translate_byot(MyJson(json!({}))).await; let _r: Result = client .audio() - .translate_verbose_json_byot(MyJson(json!({}))) + .transcription() + .create_verbose_json_byot(MyJson(json!({}))) + .await; + let _r: Result = client + .audio() + .translation() + .create_byot(MyJson(json!({}))) + .await; + let _r: Result = client + .audio() + .translation() + .create_verbose_json_byot(MyJson(json!({}))) .await; } diff --git a/examples/assistants-file-search/src/main.rs b/examples/assistants-file-search/src/main.rs index fdfa32b3..cff4a039 100644 --- a/examples/assistants-file-search/src/main.rs +++ b/examples/assistants-file-search/src/main.rs @@ -3,10 +3,10 @@ use std::error::Error; use async_openai::{ types::files::{CreateFileRequest, FilePurpose}, types::{ - AssistantToolFileSearchResources, AssistantToolsFileSearch, CreateAssistantRequestArgs, - CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, CreateVectorStoreRequest, - MessageAttachment, MessageAttachmentTool, MessageContent, MessageRole, - ModifyAssistantRequest, RunStatus, + vectorstores::CreateVectorStoreRequest, AssistantToolFileSearchResources, + AssistantToolsFileSearch, CreateAssistantRequestArgs, CreateMessageRequestArgs, + CreateRunRequest, CreateThreadRequest, MessageAttachment, MessageAttachmentTool, + MessageContent, MessageRole, ModifyAssistantRequest, RunStatus, }, Client, }; diff --git a/examples/assistants-func-call-stream/src/main.rs 
b/examples/assistants-func-call-stream/src/main.rs index a745292f..df8739ee 100644 --- a/examples/assistants-func-call-stream/src/main.rs +++ b/examples/assistants-func-call-stream/src/main.rs @@ -3,9 +3,9 @@ use std::error::Error; use async_openai::{ config::OpenAIConfig, types::{ - AssistantStreamEvent, CreateAssistantRequestArgs, CreateMessageRequest, CreateRunRequest, - CreateThreadRequest, FunctionObject, MessageDeltaContent, MessageRole, RunObject, - SubmitToolOutputsRunRequest, ToolsOutputs, + chat::FunctionObject, AssistantStreamEvent, CreateAssistantRequestArgs, + CreateMessageRequest, CreateRunRequest, CreateThreadRequest, MessageDeltaContent, + MessageRole, RunObject, SubmitToolOutputsRunRequest, ToolsOutputs, }, Client, }; diff --git a/examples/azure-openai-service/src/main.rs b/examples/azure-openai-service/src/main.rs index fc4d026c..ba9acda1 100644 --- a/examples/azure-openai-service/src/main.rs +++ b/examples/azure-openai-service/src/main.rs @@ -3,8 +3,8 @@ use std::error::Error; use async_openai::{ config::AzureConfig, types::{ - embeddings::CreateEmbeddingRequestArgs, ChatCompletionRequestSystemMessageArgs, - ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, + chat::ChatCompletionRequestSystemMessageArgs, chat::ChatCompletionRequestUserMessageArgs, + chat::CreateChatCompletionRequestArgs, embeddings::CreateEmbeddingRequestArgs, }, Client, }; diff --git a/examples/chat-store/src/main.rs b/examples/chat-store/src/main.rs index 0b611b3e..ea6e21f6 100644 --- a/examples/chat-store/src/main.rs +++ b/examples/chat-store/src/main.rs @@ -1,5 +1,5 @@ use async_openai::{ - types::{ + types::chat::{ ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, }, @@ -37,13 +37,40 @@ async fn main() -> Result<(), Box> { let response = client.chat().create(request).await?; - println!("\nResponse:\n"); - for choice in response.choices { - println!( - "{}: Role: {} Content: {:?}", - choice.index, choice.message.role, choice.message.content - ); - } + println!("Chat Completion Response:\n"); + println!("{:#?}", response); + + // api doesnt return the chat completion immediately, so retrieval doesnt work immediately, sleep + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + // get chat completion object + let chat_completion = client.chat().retrieve(&response.id).await?; + + println!("--------------------------------"); + println!("Retrieved chat completion:\n"); + println!("{:#?}", chat_completion); + + let chat_completion_messages = client + .chat() + .messages(&response.id, &[("limit", 10)]) + .await?; + + println!("--------------------------------"); + println!("Retrieved chat completion messages:\n"); + println!("{:#?}", chat_completion_messages); + + // list all chat completions + let chat_completions = client.chat().list(&[("limit", 10)]).await?; + + println!("--------------------------------"); + println!("Retrieved chat completions:\n"); + println!("{:#?}", chat_completions); + + let deleted = client.chat().delete(&response.id).await?; + + println!("--------------------------------"); + println!("Deleted chat completion:\n"); + println!("{:#?}", deleted); Ok(()) } diff --git a/examples/chat-stream/src/main.rs b/examples/chat-stream/src/main.rs index f3d22cde..517e8a7c 100644 --- a/examples/chat-stream/src/main.rs +++ b/examples/chat-stream/src/main.rs @@ -1,8 +1,8 @@ use std::error::Error; use std::io::{stdout, Write}; -use async_openai::types::ChatCompletionRequestUserMessageArgs; -use 
async_openai::{types::CreateChatCompletionRequestArgs, Client}; +use async_openai::types::chat::ChatCompletionRequestUserMessageArgs; +use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; use futures::StreamExt; #[tokio::main] diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index 4ca389ef..0c529bfc 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -1,7 +1,7 @@ use std::error::Error; use async_openai::{ - types::{ + types::chat::{ ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, }, diff --git a/examples/completions-web-search/src/main.rs b/examples/completions-web-search/src/main.rs index 6839895e..103212e8 100644 --- a/examples/completions-web-search/src/main.rs +++ b/examples/completions-web-search/src/main.rs @@ -1,8 +1,8 @@ -use async_openai::types::{ +use async_openai::types::chat::{ ChatCompletionRequestUserMessageArgs, WebSearchContextSize, WebSearchLocation, WebSearchOptions, WebSearchUserLocation, WebSearchUserLocationType, }; -use async_openai::{types::CreateChatCompletionRequestArgs, Client}; +use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; #[tokio::main] async fn main() -> Result<(), Box> { diff --git a/examples/containers/src/main.rs b/examples/containers/src/main.rs index 8b7b74f0..f3c54e81 100644 --- a/examples/containers/src/main.rs +++ b/examples/containers/src/main.rs @@ -1,8 +1,9 @@ use async_openai::{ - types::{ + types::containers::{ ContainerExpiresAfter, ContainerExpiresAfterAnchor, CreateContainerFileRequest, - CreateContainerRequestArgs, InputSource, + CreateContainerRequestArgs, }, + types::InputSource, Client, }; diff --git a/examples/conversations/src/main.rs b/examples/conversations/src/main.rs index 0fad7452..c62e27f3 100644 --- a/examples/conversations/src/main.rs +++ b/examples/conversations/src/main.rs @@ -6,7 +6,7 @@ use async_openai::{ }, Client, }; -use std::collections::HashMap; +use serde_json::json; #[tokio::main] async fn main() -> Result<(), Box> { @@ -21,11 +21,9 @@ async fn main() -> Result<(), Box> { .conversations() .create( CreateConversationRequestArgs::default() - .metadata({ - let mut metadata = HashMap::new(); - metadata.insert("topic".to_string(), "demo".to_string()); - metadata - }) + .metadata(json!({ + "topic": "demo", + })) .items(vec![InputItem::from_easy_message(EasyInputMessage { r#type: MessageType::Message, role: Role::User, @@ -108,12 +106,9 @@ async fn main() -> Result<(), Box> { .update( &conversation.id, UpdateConversationRequestArgs::default() - .metadata({ - let mut metadata = HashMap::new(); - metadata.insert("topic".to_string(), "updated-demo".into()); - metadata.insert("status".to_string(), "active".into()); - metadata - }) + .metadata(json!({ + "topic": "updated-demo", + })) .build()?, ) .await?; diff --git a/examples/function-call-stream/src/main.rs b/examples/function-call-stream/src/main.rs index ba70eb9e..376d93fc 100644 --- a/examples/function-call-stream/src/main.rs +++ b/examples/function-call-stream/src/main.rs @@ -2,11 +2,11 @@ use std::collections::HashMap; use std::error::Error; use std::io::{stdout, Write}; -use async_openai::types::{ +use async_openai::types::chat::{ ChatCompletionRequestFunctionMessageArgs, ChatCompletionRequestUserMessageArgs, FinishReason, }; use async_openai::{ - types::{ChatCompletionFunctionsArgs, CreateChatCompletionRequestArgs}, + types::chat::{ChatCompletionFunctionsArgs, 
CreateChatCompletionRequestArgs}, Client, }; diff --git a/examples/function-call/src/main.rs b/examples/function-call/src/main.rs index d85aa3a8..3ddd8a88 100644 --- a/examples/function-call/src/main.rs +++ b/examples/function-call/src/main.rs @@ -1,5 +1,5 @@ use async_openai::{ - types::{ + types::chat::{ ChatCompletionFunctionsArgs, ChatCompletionRequestFunctionMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, }, diff --git a/examples/gemini-openai-compatibility/src/gemini_types.rs b/examples/gemini-openai-compatibility/src/gemini_types.rs index 51ada220..70e34a8f 100644 --- a/examples/gemini-openai-compatibility/src/gemini_types.rs +++ b/examples/gemini-openai-compatibility/src/gemini_types.rs @@ -3,7 +3,10 @@ use std::pin::Pin; /// Gemini types (Generally user defined types) for Gemini API use async_openai::{ error::OpenAIError, - types::{images::Image, ChatChoice, ChatChoiceStream, CompletionUsage}, + types::{ + chat::{ChatChoice, ChatChoiceStream, CompletionUsage}, + images::Image, + }, }; use futures::Stream; use serde::{Deserialize, Serialize}; diff --git a/examples/gemini-openai-compatibility/src/main.rs b/examples/gemini-openai-compatibility/src/main.rs index 7ef2de77..eb17b845 100644 --- a/examples/gemini-openai-compatibility/src/main.rs +++ b/examples/gemini-openai-compatibility/src/main.rs @@ -1,11 +1,13 @@ use async_openai::{ config::OpenAIConfig, types::{ + chat::{ + ChatCompletionRequestMessage, ChatCompletionRequestUserMessage, + ChatCompletionRequestUserMessageContentPart, CreateChatCompletionRequestArgs, + InputAudio, ResponseFormat, ResponseFormatJsonSchema, + }, embeddings::CreateEmbeddingRequestArgs, images::{CreateImageRequestArgs, Image, ImageModel, ImageResponseFormat}, - ChatCompletionRequestMessage, ChatCompletionRequestUserMessage, - ChatCompletionRequestUserMessageContentPart, CreateChatCompletionRequestArgs, InputAudio, - ResponseFormat, ResponseFormatJsonSchema, }, Client, }; @@ -63,7 +65,7 @@ async fn stream_chat() -> Result<(), Box> { .model("gemini-2.0-flash") .messages(vec![ChatCompletionRequestMessage::User( ChatCompletionRequestUserMessage { - content: async_openai::types::ChatCompletionRequestUserMessageContent::Text( + content: async_openai::types::chat::ChatCompletionRequestUserMessageContent::Text( "What is the meaning of life?".to_string(), ), ..Default::default() @@ -169,14 +171,14 @@ async fn image_understanding() -> Result<(), Box> { .messages([ ChatCompletionRequestMessage::User("What do you see in this image?".into()), ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage { - content: async_openai::types::ChatCompletionRequestUserMessageContent::Array(vec![ - ChatCompletionRequestUserMessageContentPart::ImageUrl( - async_openai::types::ChatCompletionRequestMessageContentPartImage { + content: async_openai::types::chat::ChatCompletionRequestUserMessageContent::Array( + vec![ChatCompletionRequestUserMessageContentPart::ImageUrl( + async_openai::types::chat::ChatCompletionRequestMessageContentPartImage { image_url: ("data:image/jpg;base64,".to_string() + &image_base64) .into(), }, - ), - ]), + )], + ), ..Default::default() }), ]) @@ -240,16 +242,16 @@ async fn audio_understanding() -> Result<(), Box> { .messages([ ChatCompletionRequestMessage::User("Transcribe this audio file.".into()), ChatCompletionRequestMessage::User(ChatCompletionRequestUserMessage { - content: async_openai::types::ChatCompletionRequestUserMessageContent::Array(vec![ - ChatCompletionRequestUserMessageContentPart::InputAudio( - 
async_openai::types::ChatCompletionRequestMessageContentPartAudio { + content: async_openai::types::chat::ChatCompletionRequestUserMessageContent::Array( + vec![ChatCompletionRequestUserMessageContentPart::InputAudio( + async_openai::types::chat::ChatCompletionRequestMessageContentPartAudio { input_audio: InputAudio { data: audio_base64, - format: async_openai::types::InputAudioFormat::Mp3, + format: async_openai::types::chat::InputAudioFormat::Mp3, }, }, - ), - ]), + )], + ), ..Default::default() }), ]) @@ -294,7 +296,7 @@ async fn structured_output() -> Result<(), Box> { .model("gemini-2.0-flash") .messages([ChatCompletionRequestMessage::User( ChatCompletionRequestUserMessage { - content: async_openai::types::ChatCompletionRequestUserMessageContent::Text( + content: async_openai::types::chat::ChatCompletionRequestUserMessageContent::Text( "How can I solve 8x + 7 = -23?".to_string(), ), ..Default::default() diff --git a/examples/ollama-chat/src/main.rs b/examples/ollama-chat/src/main.rs index 831fddac..c0973573 100644 --- a/examples/ollama-chat/src/main.rs +++ b/examples/ollama-chat/src/main.rs @@ -2,7 +2,7 @@ use std::error::Error; use async_openai::{ config::OpenAIConfig, - types::{ + types::chat::{ ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, }, diff --git a/examples/structured-outputs-schemars/src/main.rs b/examples/structured-outputs-schemars/src/main.rs index f1e77db4..25c6a5d3 100644 --- a/examples/structured-outputs-schemars/src/main.rs +++ b/examples/structured-outputs-schemars/src/main.rs @@ -1,7 +1,7 @@ use std::error::Error; use async_openai::{ - types::{ + types::chat::{ ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage, ChatCompletionRequestUserMessage, CreateChatCompletionRequestArgs, ResponseFormat, ResponseFormatJsonSchema, diff --git a/examples/structured-outputs/src/main.rs b/examples/structured-outputs/src/main.rs index 3948308d..aab5e139 100644 --- a/examples/structured-outputs/src/main.rs +++ b/examples/structured-outputs/src/main.rs @@ -1,7 +1,7 @@ use std::error::Error; use async_openai::{ - types::{ + types::chat::{ ChatCompletionRequestSystemMessage, ChatCompletionRequestUserMessage, CreateChatCompletionRequestArgs, ResponseFormat, ResponseFormatJsonSchema, }, diff --git a/examples/tool-call-stream/src/main.rs b/examples/tool-call-stream/src/main.rs index fd1ce77e..fd10be1e 100644 --- a/examples/tool-call-stream/src/main.rs +++ b/examples/tool-call-stream/src/main.rs @@ -3,13 +3,13 @@ use std::error::Error; use std::io::{stdout, Write}; use std::sync::Arc; -use async_openai::types::{ - ChatCompletionMessageToolCall, ChatCompletionRequestAssistantMessageArgs, - ChatCompletionRequestMessage, ChatCompletionRequestToolMessageArgs, - ChatCompletionRequestUserMessageArgs, ChatCompletionToolArgs, ChatCompletionToolType, - FinishReason, FunctionCall, FunctionObjectArgs, +use async_openai::types::chat::{ + ChatCompletionMessageToolCall, ChatCompletionMessageToolCalls, + ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestMessage, + ChatCompletionRequestToolMessageArgs, ChatCompletionRequestUserMessageArgs, ChatCompletionTool, + ChatCompletionTools, FinishReason, FunctionCall, FunctionObjectArgs, }; -use async_openai::{types::CreateChatCompletionRequestArgs, Client}; +use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; use futures::StreamExt; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; @@ -28,26 +28,23 @@ 
async fn main() -> Result<(), Box> { .content(user_prompt) .build()? .into()]) - .tools(vec![ChatCompletionToolArgs::default() - .r#type(ChatCompletionToolType::Function) - .function( - FunctionObjectArgs::default() - .name("get_current_weather") - .description("Get the current weather in a given location") - .parameters(json!({ - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }, + .tools(vec![ChatCompletionTools::Function(ChatCompletionTool { + function: FunctionObjectArgs::default() + .name("get_current_weather") + .description("Get the current weather in a given location") + .parameters(json!({ + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", }, - "required": ["location"], - })) - .build()?, - ) - .build()?]) + "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }, + }, + "required": ["location"], + })) + .build()?, + })]) .build()?; let mut stream = client.chat().create_stream(request).await?; @@ -72,7 +69,6 @@ async fn main() -> Result<(), Box> { let state = states_lock.entry(key).or_insert_with(|| { ChatCompletionMessageToolCall { id: tool_call_data.id.clone().unwrap_or_default(), - r#type: ChatCompletionToolType::Function, function: FunctionCall { name: tool_call_data .function @@ -138,10 +134,12 @@ async fn main() -> Result<(), Box> { .build()? .into()]; - let tool_calls: Vec = + let tool_calls: Vec = function_responses_lock .iter() - .map(|tc| tc.0.clone()) + .map(|tc| { + ChatCompletionMessageToolCalls::Function(tc.0.clone()) + }) .collect(); let assistant_messages: ChatCompletionRequestMessage = diff --git a/examples/tool-call/src/main.rs b/examples/tool-call/src/main.rs index c88fa2fa..01a2fe5f 100644 --- a/examples/tool-call/src/main.rs +++ b/examples/tool-call/src/main.rs @@ -1,13 +1,13 @@ use std::collections::HashMap; use std::io::{stdout, Write}; -use async_openai::types::{ - ChatCompletionMessageToolCall, ChatCompletionRequestAssistantMessageArgs, +use async_openai::types::chat::{ + ChatCompletionMessageToolCalls, ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestMessage, ChatCompletionRequestToolMessageArgs, - ChatCompletionRequestUserMessageArgs, ChatCompletionToolArgs, ChatCompletionToolType, + ChatCompletionRequestUserMessageArgs, ChatCompletionTool, ChatCompletionTools, FunctionObjectArgs, }; -use async_openai::{types::CreateChatCompletionRequestArgs, Client}; +use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; use futures::StreamExt; use rand::seq::SliceRandom; use rand::{thread_rng, Rng}; @@ -25,26 +25,23 @@ async fn main() -> Result<(), Box> { .content(user_prompt) .build()? .into()]) - .tools(vec![ChatCompletionToolArgs::default() - .r#type(ChatCompletionToolType::Function) - .function( - FunctionObjectArgs::default() - .name("get_current_weather") - .description("Get the current weather in a given location") - .parameters(json!({ - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. 
San Francisco, CA", - }, - "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }, + .tools(vec![ChatCompletionTools::Function(ChatCompletionTool { + function: FunctionObjectArgs::default() + .name("get_current_weather") + .description("Get the current weather in a given location") + .parameters(json!({ + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", }, - "required": ["location"], - })) - .build()?, - ) - .build()?]) + "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }, + }, + "required": ["location"], + })) + .build()?, + })]) .build()?; let response_message = client @@ -59,14 +56,17 @@ async fn main() -> Result<(), Box> { if let Some(tool_calls) = response_message.tool_calls { let mut handles = Vec::new(); - for tool_call in tool_calls { - let name = tool_call.function.name.clone(); - let args = tool_call.function.arguments.clone(); - let tool_call_clone = tool_call.clone(); - - let handle = - tokio::spawn(async move { call_fn(&name, &args).await.unwrap_or_default() }); - handles.push((handle, tool_call_clone)); + for tool_call_enum in tool_calls { + // Extract the function tool call from the enum + if let ChatCompletionMessageToolCalls::Function(tool_call) = tool_call_enum { + let name = tool_call.function.name.clone(); + let args = tool_call.function.arguments.clone(); + let tool_call_clone = tool_call.clone(); + + let handle = + tokio::spawn(async move { call_fn(&name, &args).await.unwrap_or_default() }); + handles.push((handle, tool_call_clone)); + } } let mut function_responses = Vec::new(); @@ -83,9 +83,12 @@ async fn main() -> Result<(), Box> { .build()? .into()]; - let tool_calls: Vec = function_responses + // Convert ChatCompletionMessageToolCall to ChatCompletionMessageToolCalls enum + let tool_calls: Vec = function_responses .iter() - .map(|(tool_call, _response_content)| tool_call.clone()) + .map(|(tool_call, _response_content)| { + ChatCompletionMessageToolCalls::Function(tool_call.clone()) + }) .collect(); let assistant_messages: ChatCompletionRequestMessage = diff --git a/examples/vector-store-retrieval/src/main.rs b/examples/vector-store-retrieval/src/main.rs index 32bfdfbb..74fe7aff 100644 --- a/examples/vector-store-retrieval/src/main.rs +++ b/examples/vector-store-retrieval/src/main.rs @@ -3,7 +3,7 @@ use std::error::Error; use async_openai::{ types::{ files::{CreateFileRequest, FilePurpose}, - CreateVectorStoreRequest, VectorStoreSearchRequest, VectorStoreStatus, + vectorstores::{CreateVectorStoreRequest, VectorStoreSearchRequest, VectorStoreStatus}, }, Client, }; diff --git a/examples/video/src/main.rs b/examples/video/src/main.rs index d9e553c4..5f66248e 100644 --- a/examples/video/src/main.rs +++ b/examples/video/src/main.rs @@ -1,6 +1,6 @@ use async_openai::{ config::OpenAIConfig, - types::{CreateVideoRequestArgs, VideoJob, VideoSize, VideoVariant}, + types::videos::{CreateVideoRequestArgs, VideoJob, VideoSize, VideoVariant}, Client, }; use bytes::Bytes; @@ -69,7 +69,7 @@ async fn create_video(client: &Client) -> Result Result<(), Box> { let client = Client::new(); - let _video = create_video(&client).await?; + let video = create_video(&client).await?; // wait for above video to be "completed" tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; let videos = client.videos().list(&[("limit", "100")]).await?; @@ -92,12 +92,10 @@ async fn main() -> Result<(), Box> { } } - for video in videos.data { - println!( - "\nVideo deleted: {:?}", - 
client.videos().delete(&video.id).await? - ); - } + println!( + "\nVideo deleted: {:?}", + client.videos().delete(&video.id).await? + ); Ok(()) } diff --git a/examples/vision-chat/src/main.rs b/examples/vision-chat/src/main.rs index a28538a4..94dfc996 100644 --- a/examples/vision-chat/src/main.rs +++ b/examples/vision-chat/src/main.rs @@ -1,7 +1,7 @@ use std::error::Error; use async_openai::{ - types::{ + types::chat::{ ChatCompletionRequestMessageContentPartImageArgs, ChatCompletionRequestMessageContentPartTextArgs, ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, ImageDetail, ImageUrlArgs,