diff --git a/async-openai/src/client.rs b/async-openai/src/client.rs
index fe558259..be35142f 100644
--- a/async-openai/src/client.rs
+++ b/async-openai/src/client.rs
@@ -544,23 +544,18 @@ impl<C: Config> Client<C> {
     }
 
     /// Make HTTP GET request to receive SSE
-    pub(crate) async fn _get_stream<Q, O>(
+    pub(crate) async fn get_stream<O>(
         &self,
         path: &str,
-        query: &Q,
+        request_options: &RequestOptions,
     ) -> Pin<Box<dyn Stream<Item = Result<O, OpenAIError>> + Send>>
     where
-        Q: Serialize + ?Sized,
         O: DeserializeOwned + std::marker::Send + 'static,
     {
-        let event_source = self
-            .http_client
-            .get(self.config.url(path))
-            .query(query)
-            .query(&self.config.query())
-            .headers(self.config.headers())
-            .eventsource()
-            .unwrap();
+        let request_builder =
+            self.build_request_builder(reqwest::Method::GET, path, request_options);
+
+        let event_source = request_builder.eventsource().unwrap();
 
         stream(event_source).await
     }
diff --git a/async-openai/src/responses.rs b/async-openai/src/responses.rs
index 73933db2..52ee4f8a 100644
--- a/async-openai/src/responses.rs
+++ b/async-openai/src/responses.rs
@@ -81,6 +81,25 @@ impl<'c, C: Config> Responses<'c, C> {
             .await
     }
 
+    /// Retrieves a model response with the given ID with streaming.
+    ///
+    /// Response events will be sent as server-sent events as they become available.
+    #[crate::byot(
+        T0 = std::fmt::Display,
+        R = serde::de::DeserializeOwned,
+        stream = "true",
+        where_clause = "R: std::marker::Send + 'static"
+    )]
+    pub async fn retrieve_stream(&self, response_id: &str) -> Result<ResponseStream, OpenAIError> {
+        let mut request_options = self.request_options.clone();
+        request_options.with_query(&[("stream", "true")])?;
+
+        Ok(self
+            .client
+            .get_stream(&format!("/responses/{}", response_id), &request_options)
+            .await)
+    }
+
     /// Deletes a model response with the given ID.
     #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
     pub async fn delete(&self, response_id: &str) -> Result<DeleteResponse, OpenAIError> {
diff --git a/async-openai/src/types/audio/audio_types.rs b/async-openai/src/types/audio/audio_types.rs
index 253c7009..56d9b898 100644
--- a/async-openai/src/types/audio/audio_types.rs
+++ b/async-openai/src/types/audio/audio_types.rs
@@ -22,6 +22,8 @@ pub enum Voice {
     Sage,
     Shimmer,
     Verse,
+    #[serde(untagged)]
+    Other(String),
 }
 
 #[derive(Debug, Default, Clone, PartialEq)]
diff --git a/async-openai/src/types/chat/impls.rs b/async-openai/src/types/chat/impls.rs
index 8869f41a..5ddadc81 100644
--- a/async-openai/src/types/chat/impls.rs
+++ b/async-openai/src/types/chat/impls.rs
@@ -1,7 +1,8 @@
 use std::fmt::Display;
 
 use crate::types::chat::{
-    ChatCompletionFunctionCall, ChatCompletionNamedToolChoice,
+    ChatCompletionFunctionCall, ChatCompletionMessageCustomToolCall, ChatCompletionMessageToolCall,
+    ChatCompletionMessageToolCalls, ChatCompletionNamedToolChoice,
     ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent,
     ChatCompletionRequestDeveloperMessage, ChatCompletionRequestDeveloperMessageContent,
     ChatCompletionRequestFunctionMessage, ChatCompletionRequestMessage,
@@ -10,7 +11,8 @@ use crate::types::chat::{
     ChatCompletionRequestSystemMessageContent, ChatCompletionRequestToolMessage,
     ChatCompletionRequestToolMessageContent, ChatCompletionRequestUserMessage,
     ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,
-    FunctionName, ImageUrl, Role,
+    ChatCompletionTool, ChatCompletionTools, CustomToolChatCompletions, FunctionName, ImageUrl,
+    Role,
 };
 
 impl From<ChatCompletionRequestUserMessage> for ChatCompletionRequestMessage {
@@ -332,3 +334,69 @@ impl Display for Role {
         )
     }
 }
+
+impl From<ChatCompletionTool> for Vec<ChatCompletionTools> {
+    fn from(value: ChatCompletionTool) -> Self {
+        vec![ChatCompletionTools::Function(value)]
+    }
+}
+
+impl From<CustomToolChatCompletions> for Vec<ChatCompletionTools> {
+    fn from(value: CustomToolChatCompletions) -> Self {
+        vec![ChatCompletionTools::Custom(value)]
+    }
+}
+
+impl From<ChatCompletionRequestUserMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestUserMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionRequestSystemMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestSystemMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionRequestDeveloperMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestDeveloperMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionRequestAssistantMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestAssistantMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionRequestFunctionMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestFunctionMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionRequestToolMessage> for Vec<ChatCompletionRequestMessage> {
+    fn from(value: ChatCompletionRequestToolMessage) -> Self {
+        vec![value.into()]
+    }
+}
+
+impl From<ChatCompletionMessageToolCall> for ChatCompletionMessageToolCalls {
+    fn from(value: ChatCompletionMessageToolCall) -> Self {
+        ChatCompletionMessageToolCalls::Function(value)
+    }
+}
+
+impl From<ChatCompletionMessageCustomToolCall> for ChatCompletionMessageToolCalls {
+    fn from(value: ChatCompletionMessageCustomToolCall) -> Self {
+        ChatCompletionMessageToolCalls::Custom(value)
+    }
+}
+
+impl From<ImageUrl> for ChatCompletionRequestMessageContentPartImage {
+    fn from(value: ImageUrl) -> Self {
+        ChatCompletionRequestMessageContentPartImage { image_url: value }
+    }
+}
diff --git a/async-openai/src/types/realtime/session.rs b/async-openai/src/types/realtime/session.rs
index 1b2a3017..98a9c64b 100644
--- a/async-openai/src/types/realtime/session.rs
+++ b/async-openai/src/types/realtime/session.rs
@@ -150,6 +150,8 @@ pub enum RealtimeVoice {
     Verse,
     Marin,
     Cedar,
+    #[serde(untagged)]
+    Other(String),
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone)]
diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs
index 07b3b63c..23f0799c 100644
--- a/examples/chat/src/main.rs
+++ b/examples/chat/src/main.rs
@@ -3,8 +3,8 @@ use std::error::Error;
 use async_openai::{
     traits::RequestOptionsBuilder,
     types::chat::{
-        ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs,
-        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
+        ChatCompletionRequestAssistantMessage, ChatCompletionRequestSystemMessage,
+        ChatCompletionRequestUserMessage, CreateChatCompletionRequestArgs,
     },
     Client,
 };
@@ -17,22 +17,14 @@ async fn main() -> Result<(), Box<dyn Error>> {
         .max_tokens(512u32)
         .model("gpt-3.5-turbo")
         .messages([
-            ChatCompletionRequestSystemMessageArgs::default()
-                .content("You are a helpful assistant.")
-                .build()?
-                .into(),
-            ChatCompletionRequestUserMessageArgs::default()
-                .content("Who won the world series in 2020?")
-                .build()?
-                .into(),
-            ChatCompletionRequestAssistantMessageArgs::default()
-                .content("The Los Angeles Dodgers won the World Series in 2020.")
-                .build()?
-                .into(),
-            ChatCompletionRequestUserMessageArgs::default()
-                .content("Where was it played?")
-                .build()?
-                .into(),
+            // Can also use ChatCompletionRequestMessageArgs for builder pattern
+            ChatCompletionRequestSystemMessage::from("You are a helpful assistant.").into(),
+            ChatCompletionRequestUserMessage::from("Who won the world series in 2020?").into(),
+            ChatCompletionRequestAssistantMessage::from(
+                "The Los Angeles Dodgers won the World Series in 2020.",
+            )
+            .into(),
+            ChatCompletionRequestUserMessage::from("Where was it played?").into(),
         ])
         .build()?;
 
diff --git a/examples/responses-retrieve-stream/Cargo.toml b/examples/responses-retrieve-stream/Cargo.toml
new file mode 100644
index 00000000..8f57a2a8
--- /dev/null
+++ b/examples/responses-retrieve-stream/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "responses-retrieve-stream"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[dependencies]
+async-openai = { path = "../../async-openai" }
+tokio = { version = "1.0", features = ["full"] }
+futures = "0.3"
+serde_json = "1.0"
+
diff --git a/examples/responses-retrieve-stream/src/main.rs b/examples/responses-retrieve-stream/src/main.rs
new file mode 100644
index 00000000..64c88a91
--- /dev/null
+++ b/examples/responses-retrieve-stream/src/main.rs
@@ -0,0 +1,45 @@
+use async_openai::{
+    types::responses::{CreateResponseArgs, ResponseStreamEvent},
+    Client,
+};
+use futures::StreamExt;
+use std::error::Error;
+use std::io::{stdout, Write};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn Error>> {
+    let client = Client::new();
+
+    // First, create a response with background=true and stream=true
+    println!("Creating a response with background=true and stream=true ...");
+    let create_request = CreateResponseArgs::default()
+        .model("gpt-4.1")
+        .background(true)
+        .stream(true)
+        .input("Write a function in Rust that adds two u32 and returns u64")
+        .build()?;
+
+    let mut response = client.responses().create_stream(create_request).await?;
+    let mut response_id = None;
+    let mut lock = stdout().lock();
+
+    while let Some(result) = response.next().await {
+        if let Ok(ResponseStreamEvent::ResponseCreated(event)) = result {
+            writeln!(lock, "Response created with ID: {}", event.response.id).unwrap();
+            response_id = Some(event.response.id.clone());
+            break;
+        }
+    }
+
+    if let Some(response_id) = response_id {
+        writeln!(lock, "\nRetrieving {} with streaming...\n", &response_id).unwrap();
+        let mut retrieve_stream = client.responses().retrieve_stream(&response_id).await?;
+        while let Some(result) = retrieve_stream.next().await {
+            if let Ok(ResponseStreamEvent::ResponseOutputTextDelta(delta)) = result {
+                write!(lock, "{}", delta.delta).unwrap();
+            }
+        }
+    }
+
+    Ok(())
+}
diff --git a/examples/tool-call-stream/src/main.rs b/examples/tool-call-stream/src/main.rs
index fd10be1e..a3de2c5d 100644
--- a/examples/tool-call-stream/src/main.rs
+++ b/examples/tool-call-stream/src/main.rs
@@ -1,34 +1,28 @@
-use std::collections::HashMap;
-use std::error::Error;
 use std::io::{stdout, Write};
-use std::sync::Arc;
 
 use async_openai::types::chat::{
     ChatCompletionMessageToolCall, ChatCompletionMessageToolCalls,
-    ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestMessage,
-    ChatCompletionRequestToolMessageArgs, ChatCompletionRequestUserMessageArgs, ChatCompletionTool,
-    ChatCompletionTools, FinishReason, FunctionCall, FunctionObjectArgs,
+    ChatCompletionRequestAssistantMessage, ChatCompletionRequestMessage,
+    ChatCompletionRequestToolMessage, ChatCompletionRequestUserMessage, ChatCompletionTool,
+    FinishReason, FunctionObjectArgs,
 };
 use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client};
 use futures::StreamExt;
 use rand::seq::SliceRandom;
 use rand::{thread_rng, Rng};
-use serde_json::{json, Value};
-use tokio::sync::Mutex;
+use serde_json::json;
 
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let client = Client::new();
 
     let user_prompt = "What's the weather like in Boston and Atlanta?";
 
+    // Create the initial request using ergonomic From traits
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u32)
-        .model("gpt-4-1106-preview")
-        .messages([ChatCompletionRequestUserMessageArgs::default()
-            .content(user_prompt)
-            .build()?
-            .into()])
-        .tools(vec![ChatCompletionTools::Function(ChatCompletionTool {
+        .max_completion_tokens(512u32)
+        .model("gpt-5-mini")
+        .messages(ChatCompletionRequestUserMessage::from(user_prompt))
+        .tools(ChatCompletionTool {
             function: FunctionObjectArgs::default()
                 .name("get_current_weather")
                 .description("Get the current weather in a given location")
@@ -44,208 +38,159 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
                     "required": ["location"],
                 }))
                 .build()?,
-        })])
+        })
        .build()?;
 
+    // Stream the initial response and collect tool calls
     let mut stream = client.chat().create_stream(request).await?;
+    let mut tool_calls = Vec::new();
+    let mut execution_handles = Vec::new();
+    let mut stdout_lock = stdout().lock();
 
-    let tool_call_states: Arc<Mutex<HashMap<(u32, u32), ChatCompletionMessageToolCall>>> =
-        Arc::new(Mutex::new(HashMap::new()));
-
+    // First stream: collect tool calls, print content, and start executing tool calls as soon as they're complete
     while let Some(result) = stream.next().await {
-        match result {
-            Ok(response) => {
-                for chat_choice in response.choices {
-                    let function_responses: Arc<
-                        Mutex<Vec<(ChatCompletionMessageToolCall, serde_json::Value)>>,
-                    > = Arc::new(Mutex::new(Vec::new()));
-                    if let Some(tool_calls) = chat_choice.delta.tool_calls {
-                        for tool_call_chunk in tool_calls.into_iter() {
-                            let key = (chat_choice.index, tool_call_chunk.index);
-                            let states = tool_call_states.clone();
-                            let tool_call_data = tool_call_chunk.clone();
-
-                            let mut states_lock = states.lock().await;
-                            let state = states_lock.entry(key).or_insert_with(|| {
-                                ChatCompletionMessageToolCall {
-                                    id: tool_call_data.id.clone().unwrap_or_default(),
-                                    function: FunctionCall {
-                                        name: tool_call_data
-                                            .function
-                                            .as_ref()
-                                            .and_then(|f| f.name.clone())
-                                            .unwrap_or_default(),
-                                        arguments: tool_call_data
-                                            .function
-                                            .as_ref()
-                                            .and_then(|f| f.arguments.clone())
-                                            .unwrap_or_default(),
-                                    },
-                                }
-                            });
-                            if let Some(arguments) = tool_call_chunk
-                                .function
-                                .as_ref()
-                                .and_then(|f| f.arguments.as_ref())
-                            {
-                                state.function.arguments.push_str(arguments);
-                            }
-                        }
+        let response = result?;
+
+        for choice in response.choices {
+            // Print any content deltas
+            if let Some(content) = &choice.delta.content {
+                write!(stdout_lock, "{}", content)?;
+            }
+
+            // Collect tool call chunks
+            if let Some(tool_call_chunks) = choice.delta.tool_calls {
+                for chunk in tool_call_chunks {
+                    let index = chunk.index as usize;
+
+                    // Ensure we have enough space in the vector
+                    while tool_calls.len() <= index {
+                        tool_calls.push(ChatCompletionMessageToolCall {
+                            id: String::new(),
+                            function: Default::default(),
+                        });
+                    }
+
+                    // Update the tool call with chunk data
+                    let tool_call = &mut tool_calls[index];
+                    if let Some(id) = chunk.id {
+                        tool_call.id = id;
                    }
-                if let Some(finish_reason) = &chat_choice.finish_reason {
-                    if matches!(finish_reason, FinishReason::ToolCalls) {
-                        let tool_call_states_clone = tool_call_states.clone();
-
-                        let tool_calls_to_process = {
-                            let states_lock = tool_call_states_clone.lock().await;
-                            states_lock
-                                .iter()
-                                .map(|(_key, tool_call)| {
-                                    let name = tool_call.function.name.clone();
-                                    let args = tool_call.function.arguments.clone();
-                                    let tool_call_clone = tool_call.clone();
-                                    (name, args, tool_call_clone)
-                                })
-                                .collect::<Vec<_>>()
-                        };
-
-                        let mut handles = Vec::new();
-                        for (name, args, tool_call_clone) in tool_calls_to_process {
-                            let response_content_clone = function_responses.clone();
-                            let handle = tokio::spawn(async move {
-                                let response_content = call_fn(&name, &args).await.unwrap();
-                                let mut function_responses_lock =
-                                    response_content_clone.lock().await;
-                                function_responses_lock
-                                    .push((tool_call_clone, response_content));
-                            });
-                            handles.push(handle);
-                        }
-
-                        for handle in handles {
-                            handle.await.unwrap();
-                        }
-
-                        let function_responses_clone = function_responses.clone();
-                        let function_responses_lock = function_responses_clone.lock().await;
-                        let mut messages: Vec<ChatCompletionRequestMessage> =
-                            vec![ChatCompletionRequestUserMessageArgs::default()
-                                .content(user_prompt)
-                                .build()?
-                                .into()];
-
-                        let tool_calls: Vec<ChatCompletionMessageToolCalls> =
-                            function_responses_lock
-                                .iter()
-                                .map(|tc| {
-                                    ChatCompletionMessageToolCalls::Function(tc.0.clone())
-                                })
-                                .collect();
-
-                        let assistant_messages: ChatCompletionRequestMessage =
-                            ChatCompletionRequestAssistantMessageArgs::default()
-                                .tool_calls(tool_calls)
-                                .build()
-                                .map_err(|e| Box::new(e) as Box<dyn Error>)
-                                .unwrap()
-                                .into();
-
-                        let tool_messages: Vec<ChatCompletionRequestMessage> =
-                            function_responses_lock
-                                .iter()
-                                .map(|tc| {
-                                    ChatCompletionRequestToolMessageArgs::default()
-                                        .content(tc.1.to_string())
-                                        .tool_call_id(tc.0.id.clone())
-                                        .build()
-                                        .map_err(|e| Box::new(e) as Box<dyn Error>)
-                                        .unwrap()
-                                        .into()
-                                })
-                                .collect();
-
-                        messages.push(assistant_messages);
-                        messages.extend(tool_messages);
-
-                        let request = CreateChatCompletionRequestArgs::default()
-                            .max_tokens(512u32)
-                            .model("gpt-4-1106-preview")
-                            .messages(messages)
-                            .build()
-                            .map_err(|e| Box::new(e) as Box<dyn Error>)?;
-
-                        let mut stream = client.chat().create_stream(request).await?;
-
-                        let mut response_content = String::new();
-                        let mut lock = stdout().lock();
-                        while let Some(result) = stream.next().await {
-                            match result {
-                                Ok(response) => {
-                                    for chat_choice in response.choices.iter() {
-                                        if let Some(ref content) = chat_choice.delta.content {
-                                            write!(lock, "{}", content).unwrap();
-                                            response_content.push_str(content);
-                                        }
-                                    }
-                                }
-                                Err(err) => {
-                                    return Err(Box::new(err) as Box<dyn Error>);
-                                }
-                            }
-                        }
+                    if let Some(function_chunk) = chunk.function {
+                        if let Some(name) = function_chunk.name {
+                            tool_call.function.name = name;
+                        }
+                        if let Some(arguments) = function_chunk.arguments {
+                            tool_call.function.arguments.push_str(&arguments);
                        }
                    }
-                }
+                }
            }
-            if let Some(content) = &chat_choice.delta.content {
-                let mut lock = stdout().lock();
-                write!(lock, "{}", content).unwrap();
-            }
+            // When tool calls are complete, start executing them immediately
+            if matches!(choice.finish_reason, Some(FinishReason::ToolCalls)) {
+                // Spawn execution tasks for all collected tool calls
+                for tool_call in tool_calls.iter() {
+                    let name = tool_call.function.name.clone();
+                    let args = tool_call.function.arguments.clone();
+                    let tool_call_id = tool_call.id.clone();
+
+                    let handle = tokio::spawn(async move {
+                        let result = call_function(&name, &args).await;
+                        (tool_call_id, result)
+                    });
+                    execution_handles.push(handle);
                }
            }
-            Err(err) => {
-                let mut lock = stdout().lock();
-                writeln!(lock, "error: {err:?}").unwrap();
+        }
+        stdout_lock.flush()?;
+    }
+
+    // Wait for all tool call executions to complete (outside the stream loop)
+    if !execution_handles.is_empty() {
+        let mut tool_responses = Vec::new();
+        for handle in execution_handles {
+            let (tool_call_id, response) = handle.await?;
+            tool_responses.push((tool_call_id, response));
+        }
+
+        // Build the follow-up request using ergonomic From traits
+        let mut messages: Vec<ChatCompletionRequestMessage> =
+            vec![ChatCompletionRequestUserMessage::from(user_prompt).into()];
+
+        // Add assistant message with tool calls
+        let assistant_tool_calls: Vec<ChatCompletionMessageToolCalls> = tool_calls
+            .iter()
+            .map(|tc| tc.clone().into()) // From<ChatCompletionMessageToolCall>
+            .collect();
+        messages.push(
+            ChatCompletionRequestAssistantMessage {
+                content: None,
+                tool_calls: Some(assistant_tool_calls),
+                ..Default::default()
            }
+            .into(),
+        );
+
+        // Add tool response messages
+        for (tool_call_id, response) in tool_responses {
+            messages.push(
+                ChatCompletionRequestToolMessage {
+                    content: response.to_string().into(),
+                    tool_call_id,
+                }
+                .into(),
+            );
+        }
+
+        // Second stream: get the final response
+        let follow_up_request = CreateChatCompletionRequestArgs::default()
+            .max_completion_tokens(512u32)
+            .model("gpt-5-mini")
+            .messages(messages)
+            .build()?;
+
+        let mut follow_up_stream = client.chat().create_stream(follow_up_request).await?;
+
+        while let Some(result) = follow_up_stream.next().await {
+            let response = result?;
+            for choice in response.choices {
+                if let Some(content) = &choice.delta.content {
+                    write!(stdout_lock, "{}", content)?;
+                }
+            }
+            stdout_lock.flush()?;
        }
-        stdout()
-            .flush()
-            .map_err(|e| Box::new(e) as Box<dyn Error>)?;
    }
 
     Ok(())
 }
 
-async fn call_fn(name: &str, args: &str) -> Result<serde_json::Value, Box<dyn Error>> {
-    let mut available_functions: HashMap<&str, fn(&str, &str) -> serde_json::Value> =
-        HashMap::new();
-    available_functions.insert("get_current_weather", get_current_weather);
-
-    let function_args: serde_json::Value = args.parse().unwrap();
-
-    let location = function_args["location"].as_str().unwrap();
-    let unit = function_args["unit"].as_str().unwrap_or("fahrenheit");
-    let function = available_functions.get(name).unwrap();
-    let function_response = function(location, unit);
-    Ok(function_response)
+async fn call_function(name: &str, args: &str) -> serde_json::Value {
+    match name {
+        "get_current_weather" => get_current_weather(args),
+        _ => json!({"error": format!("Unknown function: {}", name)}),
+    }
 }
 
-fn get_current_weather(location: &str, unit: &str) -> serde_json::Value {
-    let mut rng = thread_rng();
+fn get_current_weather(args: &str) -> serde_json::Value {
+    let args: serde_json::Value = args.parse().unwrap_or(json!({}));
+    let location = args["location"]
+        .as_str()
+        .unwrap_or("unknown location")
+        .to_string();
+    let unit = args["unit"].as_str().unwrap_or("fahrenheit");
 
+    let mut rng = thread_rng();
     let temperature: i32 = rng.gen_range(20..=55);
-
     let forecasts = [
         "sunny", "cloudy", "overcast", "rainy", "windy", "foggy", "snowy",
     ];
-
     let forecast = forecasts.choose(&mut rng).unwrap_or(&"sunny");
 
-    let weather_info = json!({
+    json!({
         "location": location,
         "temperature": temperature.to_string(),
         "unit": unit,
         "forecast": forecast
-    });
-
-    weather_info
+    })
 }
diff --git a/examples/tool-call/src/main.rs b/examples/tool-call/src/main.rs
index 01a2fe5f..0018a053 100644
--- a/examples/tool-call/src/main.rs
+++ b/examples/tool-call/src/main.rs
@@ -3,9 +3,8 @@ use std::io::{stdout, Write};
 
 use async_openai::types::chat::{
     ChatCompletionMessageToolCalls, ChatCompletionRequestAssistantMessageArgs,
-    ChatCompletionRequestMessage, ChatCompletionRequestToolMessageArgs,
-    ChatCompletionRequestUserMessageArgs, ChatCompletionTool, ChatCompletionTools,
-    FunctionObjectArgs,
+    ChatCompletionRequestMessage, ChatCompletionRequestToolMessage,
+    ChatCompletionRequestUserMessage, ChatCompletionTool, FunctionObjectArgs,
 };
 use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client};
 use futures::StreamExt;
@@ -19,13 +18,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let user_prompt = "What's the weather like in Boston and Atlanta?";
 
     let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u32)
-        .model("gpt-4-1106-preview")
-        .messages([ChatCompletionRequestUserMessageArgs::default()
-            .content(user_prompt)
-            .build()?
-            .into()])
-        .tools(vec![ChatCompletionTools::Function(ChatCompletionTool {
+        .max_completion_tokens(512u32)
+        .model("gpt-5-mini")
+        .messages(ChatCompletionRequestUserMessage::from(user_prompt))
+        .tools(ChatCompletionTool {
             function: FunctionObjectArgs::default()
                 .name("get_current_weather")
                 .description("Get the current weather in a given location")
@@ -38,10 +34,12 @@ async fn main() -> Result<(), Box<dyn Error>> {
                         },
                         "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] },
                     },
-                    "required": ["location"],
+                    "required": ["location", "unit"],
+                    "additionalProperties": false
                 }))
+                .strict(true)
                 .build()?,
-        })])
+        })
         .build()?;
 
     let response_message = client
@@ -50,7 +48,7 @@
         .chat()
         .create(request)
         .await?
         .choices
         .first()
-        .unwrap()
+        .ok_or("No choices")?
         .message
         .clone();
@@ -78,10 +76,7 @@ async fn main() -> Result<(), Box<dyn Error>> {
     }
 
     let mut messages: Vec<ChatCompletionRequestMessage> =
-        vec![ChatCompletionRequestUserMessageArgs::default()
-            .content(user_prompt)
-            .build()?
-            .into()];
+        ChatCompletionRequestUserMessage::from(user_prompt).into();
 
     // Convert ChatCompletionMessageToolCall to ChatCompletionMessageToolCalls enum
     let tool_calls: Vec<ChatCompletionMessageToolCalls> = function_responses
@@ -100,12 +95,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
     let tool_messages: Vec<ChatCompletionRequestMessage> = function_responses
         .iter()
         .map(|(tool_call, response_content)| {
-            ChatCompletionRequestToolMessageArgs::default()
-                .content(response_content.to_string())
-                .tool_call_id(tool_call.id.clone())
-                .build()
-                .unwrap()
-                .into()
+            ChatCompletionRequestMessage::Tool(ChatCompletionRequestToolMessage {
+                content: response_content.to_string().into(),
+                tool_call_id: tool_call.id.clone(),
+            })
         })
         .collect();
 
@@ -113,11 +106,10 @@ async fn main() -> Result<(), Box<dyn Error>> {
     messages.extend(tool_messages);
 
     let subsequent_request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u32)
-        .model("gpt-4-1106-preview")
+        .max_completion_tokens(512u32)
+        .model("gpt-5-mini")
         .messages(messages)
-        .build()
-        .map_err(|e| Box::new(e) as Box<dyn Error>)?;
+        .build()?;
 
     let mut stream = client.chat().create_stream(subsequent_request).await?;
 
diff --git a/examples/vision-chat/README.md b/examples/vision-chat/README.md
deleted file mode 100644
index fd211978..00000000
--- a/examples/vision-chat/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-### Output
-
-> Response:
->
-> 0: Role: assistant Content: "This is an image of a wooden boardwalk trail extending through a lush green meadow or wetland area. The sky is partly cloudy with a rich blue color, and it seems to be a bright sunny day. This type of boardwalk is often constructed in natural areas to allow people to enjoy the scenery without disturbing the local flora and fauna. It provides a clear path through potentially marshy or sensitive ecosystems and can be found in nature reserves, parks, or conservation areas."
diff --git a/examples/vision-chat/src/main.rs b/examples/vision-chat/src/main.rs
index 94dfc996..6f4a3f31 100644
--- a/examples/vision-chat/src/main.rs
+++ b/examples/vision-chat/src/main.rs
@@ -2,9 +2,9 @@ use std::error::Error;
 
 use async_openai::{
     types::chat::{
-        ChatCompletionRequestMessageContentPartImageArgs,
-        ChatCompletionRequestMessageContentPartTextArgs, ChatCompletionRequestUserMessageArgs,
-        CreateChatCompletionRequestArgs, ImageDetail, ImageUrlArgs,
+        ChatCompletionRequestMessageContentPartImage, ChatCompletionRequestMessageContentPartText,
+        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, ImageDetail,
+        ImageUrl,
     },
     Client,
 };
@@ -14,32 +14,27 @@ use async_openai::{
 async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
 
-    let image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg";
+    // Image Credit: https://unsplash.com/photos/pride-of-lion-on-field-L4-BDd01wmM
+    let image_url =
+        "https://images.unsplash.com/photo-1554990772-0bea55d510d5?q=80&w=512&auto=format";
 
     let request = CreateChatCompletionRequestArgs::default()
         .model("gpt-4o-mini")
         .max_tokens(300_u32)
         .messages([ChatCompletionRequestUserMessageArgs::default()
             .content(vec![
-                ChatCompletionRequestMessageContentPartTextArgs::default()
-                    .text("What is this image?")
-                    .build()?
-                    .into(),
-                ChatCompletionRequestMessageContentPartImageArgs::default()
-                    .image_url(
-                        ImageUrlArgs::default()
-                            .url(image_url)
-                            .detail(ImageDetail::High)
-                            .build()?,
-                    )
-                    .build()?
-                    .into(),
+                ChatCompletionRequestMessageContentPartText::from("What is this image?").into(),
+                ChatCompletionRequestMessageContentPartImage::from(ImageUrl {
+                    url: image_url.to_string(),
+                    detail: Some(ImageDetail::High),
+                })
+                .into(),
             ])
             .build()?
             .into()])
         .build()?;
 
-    println!("{}", serde_json::to_string(&request).unwrap());
+    println!("{}", serde_json::to_string(&request)?);
 
     let response = client.chat().create(request).await?;