From b9993466a862218df7961fe9033850c1d645b4f1 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 11:10:14 -0800 Subject: [PATCH 01/32] full feature flag --- async-openai/Cargo.toml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index e39f010c..c6c23490 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -101,6 +101,33 @@ types = [ "completion-types", ] +# Enable all features +full = [ + "responses", + "webhook", + "audio", + "video", + "image", + "embedding", + "evals", + "finetuning", + "batch", + "file", + "upload", + "model", + "moderation", + "vectorstore", + "chatkit", + "container", + "realtime", + "chat-completion", + "assistant", + "administration", + "completions", + "types", + "byot", +] + # Internal feature to enable API dependencies _api = [ "dep:async-openai-macros", From 2bb2893d9345fb1381e1434e4cf42b9256ef15db Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 11:10:25 -0800 Subject: [PATCH 02/32] github action full feature flag --- .github/workflows/pr-checks.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pr-checks.yml b/.github/workflows/pr-checks.yml index 62b121f2..b929fd38 100644 --- a/.github/workflows/pr-checks.yml +++ b/.github/workflows/pr-checks.yml @@ -77,6 +77,7 @@ jobs: administration-types, completion-types, types, + full, ] steps: From cd243ade58ac34d7de23db63f5ce5cd7823205f5 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 11:19:17 -0800 Subject: [PATCH 03/32] fix docs warnings --- async-openai/src/assistants/threads.rs | 4 ++-- async-openai/src/types/images/sdk.rs | 2 +- async-openai/src/types/responses/response.rs | 4 ++-- async-openai/src/types/responses/stream.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/async-openai/src/assistants/threads.rs b/async-openai/src/assistants/threads.rs index d6bfec34..a1076b6b 100644 --- 
a/async-openai/src/assistants/threads.rs +++ b/async-openai/src/assistants/threads.rs @@ -24,12 +24,12 @@ impl<'c, C: Config> Threads<'c, C> { } } - /// Call [Messages] group API to manage message in [thread_id] thread. + /// Call [Messages] group API to manage message in `thread_id` thread. pub fn messages(&self, thread_id: &str) -> Messages<'_, C> { Messages::new(self.client, thread_id) } - /// Call [Runs] group API to manage runs in [thread_id] thread. + /// Call [Runs] group API to manage runs in `thread_id` thread. pub fn runs(&self, thread_id: &str) -> Runs<'_, C> { Runs::new(self.client, thread_id) } diff --git a/async-openai/src/types/images/sdk.rs b/async-openai/src/types/images/sdk.rs index c87c53b9..cc96d424 100644 --- a/async-openai/src/types/images/sdk.rs +++ b/async-openai/src/types/images/sdk.rs @@ -8,7 +8,7 @@ use std::path::{Path, PathBuf}; impl ImagesResponse { /// Save each image in a dedicated Tokio task and return paths to saved files. - /// For [ResponseFormat::Url] each file is downloaded in dedicated Tokio task. + /// For `ResponseFormat::Url` each file is downloaded in dedicated Tokio task.
pub async fn save>(&self, dir: P) -> Result, OpenAIError> { create_all_dir(dir.as_ref())?; diff --git a/async-openai/src/types/responses/response.rs b/async-openai/src/types/responses/response.rs index d90ab66e..374d1750 100644 --- a/async-openai/src/types/responses/response.rs +++ b/async-openai/src/types/responses/response.rs @@ -173,7 +173,7 @@ pub enum InputItem { /// Both InputMessage and OutputMessage have `type: "message"`, so we use an untagged /// enum to distinguish them based on their structure: /// - OutputMessage: role=assistant, required id & status fields -/// - InputMessage: role=user/system/developer, content is Vec, optional id/status +/// - InputMessage: role=user/system/developer, content is `Vec`, optional id/status /// /// Note: EasyInputMessage is NOT included here - it's a separate variant in `InputItem`, /// not part of the structured `Item` enum. @@ -184,7 +184,7 @@ pub enum MessageItem { /// This must come first as it has the most specific structure (required id and status fields). Output(OutputMessage), - /// A structured input message (role: user/system/developer, content is Vec). + /// A structured input message (role: user/system/developer, content is `Vec`). /// Has structured content list and optional id/status fields. /// /// A message input to the model with a role indicating instruction following hierarchy. 
diff --git a/async-openai/src/types/responses/stream.rs b/async-openai/src/types/responses/stream.rs index 86bfc261..6a0d5e62 100644 --- a/async-openai/src/types/responses/stream.rs +++ b/async-openai/src/types/responses/stream.rs @@ -266,7 +266,7 @@ pub struct ResponseFunctionCallArgumentsDeltaEvent { #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct ResponseFunctionCallArgumentsDoneEvent { - /// https://github.com/64bit/async-openai/issues/472 + /// pub name: Option, pub sequence_number: u64, pub item_id: String, From 782c2cf88807697e48097e91e126fe61f4577fe8 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 12:14:37 -0800 Subject: [PATCH 04/32] expanded types for docs.rs --- async-openai/src/lib.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index 6d7905db..5ec68141 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -214,12 +214,18 @@ mod video; #[cfg(feature = "webhook")] pub mod webhooks; +// admin::* would be good - however its expanded here so that docs.rs shows the feature flags #[cfg(feature = "administration")] -pub use admin::*; +pub use admin::{ + Admin, AdminAPIKeys, AuditLogs, Certificates, GroupRoles, GroupUsers, Groups, Invites, + ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, ProjectRateLimits, + ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, Projects, Roles, Usage, + UserRoles, Users, +}; #[cfg(feature = "assistant")] -pub use assistants::*; +pub use assistants::{Assistants, Messages, Runs, Steps, Threads}; #[cfg(feature = "audio")] -pub use audio::*; +pub use audio::{Audio, Speech, Transcriptions, Translations}; #[cfg(feature = "batch")] pub use batches::Batches; #[cfg(feature = "chat-completion")] @@ -231,11 +237,11 @@ pub use client::Client; #[cfg(feature = "completions")] pub use completion::Completions; #[cfg(feature = "container")] -pub use 
containers::*; +pub use containers::{ContainerFiles, Containers}; #[cfg(feature = "embedding")] pub use embedding::Embeddings; #[cfg(feature = "evals")] -pub use evals::*; +pub use evals::{EvalRunOutputItems, EvalRuns, Evals}; #[cfg(feature = "file")] pub use file::Files; #[cfg(feature = "finetuning")] @@ -251,10 +257,10 @@ pub use realtime::Realtime; #[cfg(feature = "_api")] pub use request_options::RequestOptions; #[cfg(feature = "responses")] -pub use responses::*; +pub use responses::{ConversationItems, Conversations, Responses}; #[cfg(feature = "upload")] pub use uploads::Uploads; #[cfg(feature = "vectorstore")] -pub use vectorstores::*; +pub use vectorstores::{VectorStoreFileBatches, VectorStoreFiles, VectorStores}; #[cfg(feature = "video")] pub use video::Videos; From 81a5ea0bba703510b877f868180dd1ea139bce7a Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 12:19:29 -0800 Subject: [PATCH 05/32] types docs --- async-openai/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/async-openai/README.md b/async-openai/README.md index 007dd74e..19aa0f67 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -156,6 +156,8 @@ To only use Rust types from the crate - use feature flag `types`. There are granular feature flags like `response-types`, `chat-completion-types`, etc. +These granular types are enabled when the corresponding API feature is enabled - for example `responses` will enable `reponse-types`. 
+ ## OpenAI-compatible Providers ### Configurable Request From 486070d66f49f245322eaae2d9630a545dd3fe2 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 12:43:44 -0800 Subject: [PATCH 06/32] updated README --- async-openai/README.md | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/async-openai/README.md b/async-openai/README.md index 19aa0f67..23eb5187 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -110,7 +110,7 @@ async fn main() -> Result<(), Box> { ## Webhooks -Support for webhook event types, signature verification, and building webhook events from payloads can be enabled by using the `webhook` feature flag. +Support for webhooks includes event types, signature verification, and building webhook events from payloads. ## Bring Your Own Types @@ -156,13 +156,34 @@ To only use Rust types from the crate - use feature flag `types`. There are granular feature flags like `response-types`, `chat-completion-types`, etc. -These granular types are enabled when the corresponding API feature is enabled - for example `responses` will enable `reponse-types`. +These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `reponse-types`. + +## Configurable Requests + +### Individual Request +Certain individual APIs need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group. + +For example: +``` +client + .chat() + // query can be a struct or a map too. + .query(&[("limit", "10")])? + // header for demo + .header("key", "value")? + .list() + .await? +``` + +### All Requests + +Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.
## OpenAI-compatible Providers -### Configurable Request +### Configurable Path -To change path, query or headers of individual request use the `.path()`, `.query()`, `.header()`, `.headers()` method on the API group. +In addition to `.query()`, `.header()`, `.headers()` a path for individual request can be changed by using `.path()`, method on the API group. For example: @@ -170,8 +191,6 @@ For example: client .chat() .path("/v1/messages")? - .query(&[("role", "user")])? - .header("key", "value")? .create(request) .await? ``` From 8cad0a9de788aa5a2874ae1564fd0d8bfd3dde1d Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 12:48:22 -0800 Subject: [PATCH 07/32] add docs on scope --- async-openai/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/async-openai/README.md b/async-openai/README.md index 23eb5187..454b8107 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -181,6 +181,8 @@ Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query paramete ## OpenAI-compatible Providers +Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers. + ### Configurable Path In addition to `.query()`, `.header()`, `.headers()` a path for individual request can be changed by using `.path()`, method on the API group. From 3b964295d196f14f54840de748c6441aa95f1921 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 12:59:13 -0800 Subject: [PATCH 08/32] add CONTRIBUTING.md --- CONTRIBUTING.md | 18 ++++++++++++++++++ async-openai/README.md | 16 ++-------------- 2 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..90e5ef49 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,18 @@ +## Contributing to async-openai + +Thank you for taking the time to contribute and improve the project. I'd be happy to have you! 
+ +All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome. + +A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues). + +To maintain quality of the project, a minimum of the following is a must for code contribution: + +- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leaves room for making appropriate name. +- **Tested**: For changes supporting test(s) and/or example is required. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable. +- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature. +- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience. + +This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct) + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions. \ No newline at end of file diff --git a/async-openai/README.md b/async-openai/README.md index 454b8107..f7e33eb1 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -223,22 +223,10 @@ fn chat_completion(client: &Client>) { ## Contributing -Thank you for taking the time to contribute and improve the project. I'd be happy to have you! +:tada: Thank you for taking the time to contribute and improve the project. 
I'd be happy to have you! -All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome. +Please see [contributing guide!](https://github.com/64bit/async-openai/blob/main/CONTRIBUTING.md) -A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues). - -To maintain quality of the project, a minimum of the following is a must for code contribution: - -- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leaves room for making appropriate name. -- **Tested**: For changes supporting test(s) and/or example is required. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable. -- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature. -- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience. - -This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct) - -Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions. ## Complimentary Crates - [async-openai-wasm](https://github.com/ifsheldon/async-openai-wasm) provides WASM support. 
From 02686bae51c1fbbf10cca7954fe008a827dd22c4 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:00:14 -0800 Subject: [PATCH 09/32] =?UTF-8?q?=F0=9F=8E=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- async-openai/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/async-openai/README.md b/async-openai/README.md index f7e33eb1..f30a6cc5 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -223,7 +223,7 @@ fn chat_completion(client: &Client>) { ## Contributing -:tada: Thank you for taking the time to contribute and improve the project. I'd be happy to have you! +🎉 Thank you for taking the time to contribute and improve the project. I'd be happy to have you! Please see [contributing guide!](https://github.com/64bit/async-openai/blob/main/CONTRIBUTING.md) From 989df6e69c728d5f655cd51f4c3d32219724e899 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:13:28 -0800 Subject: [PATCH 10/32] updated readme --- async-openai/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/async-openai/README.md b/async-openai/README.md index f30a6cc5..0897893d 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -156,7 +156,7 @@ To only use Rust types from the crate - use feature flag `types`. There are granular feature flags like `response-types`, `chat-completion-types`, etc. -These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `reponse-types`. +These granular types are enabled when the corresponding API feature is enabled - for example `responses` will enable `response-types`.
## Configurable Requests From ccfb63dc7ad5c0583457c1789d6e70e334f641c6 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:13:42 -0800 Subject: [PATCH 11/32] doc comment --- async-openai/src/webhooks.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/async-openai/src/webhooks.rs b/async-openai/src/webhooks.rs index 259ba8a1..6109182c 100644 --- a/async-openai/src/webhooks.rs +++ b/async-openai/src/webhooks.rs @@ -1,3 +1,4 @@ +//! Support for webhook event types, signature verification, and building webhook events from payloads. use crate::types::webhooks::WebhookEvent; use base64::{engine::general_purpose::STANDARD as BASE64, Engine}; use hmac::{Hmac, Mac}; From a51ee073d36ff0adb7bbeda8a1f9ddb0c9ee89e2 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:13:53 -0800 Subject: [PATCH 12/32] updated lib.rs for docs.rs --- async-openai/src/lib.rs | 73 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 2 deletions(-) diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index 5ec68141..29d6cc3b 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -94,7 +94,71 @@ //! # }); //!``` //! -//! ## Dynamic Dispatch for OpenAI-compatible Providers +//! ## Rust Types +//! +//! To only use Rust types from the crate - use feature flag `types`. +//! +//! There are granular feature flags like `response-types`, `chat-completion-types`, etc. +//! +//! These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `response-types`. +//! +//! ## Configurable Requests +//! +//! **Individual Request** +//! +//! Certain individual APIs that need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group. +//! +//! For example: +//! ``` +//! # tokio_test::block_on(async { +//! # use async_openai::Client; +//! # let client = Client::new(); +//! client +//! .chat() +//! 
// query can be a struct or a map too. +//! .query(&[("limit", "10")])? +//! // header for demo +//! .header("key", "value")? +//! .list() +//! .await?; +//! # Ok::<(), Box>(()) +//! # }); +//! ``` +//! +//! **All Requests** +//! +//! Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests. +//! +//! ## OpenAI-compatible Providers +//! +//! Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers. +//! +//! **Configurable Path** +//! +//! In addition to `.query()`, `.header()`, `.headers()` a path for individual request can be changed by using `.path()`, method on the API group. +//! +//! For example: +//! ``` +//! # tokio_test::block_on(async { +//! # use async_openai::{Client, types::CreateChatCompletionRequestArgs}; +//! # let client = Client::new(); +//! # let request = CreateChatCompletionRequestArgs::default() +//! # .model("gpt-4") +//! # .messages([]) +//! # .build() +//! # .unwrap(); +//! client +//! .chat() +//! .path("/v1/messages")? +//! .create(request) +//! .await?; +//! # Ok::<(), Box>(()) +//! # }); +//! ``` +//! +//! **Dynamic Dispatch** +//! +//! This allows you to use same code (say a `fn`) to call APIs on different OpenAI-compatible providers. //! //! For any struct that implements `Config` trait, wrap it in a smart pointer and cast the pointer to `dyn Config` //! trait object, then create a client with `Box` or `Arc` wrapped configuration. @@ -105,9 +169,14 @@ //! //! // Use `Box` or `std::sync::Arc` to wrap the config //! let config = Box::new(OpenAIConfig::default()) as Box; +//! // create client +//! let client: Client> = Client::with_config(config); +//! //! // A function can now accept a `&Client>` parameter //! // which can invoke any openai compatible api -//! let client: Client> = Client::with_config(config); +//! fn chat_completion(client: &Client>) { +//! todo!() +//! } //! ``` //! //! 
## Microsoft Azure From 712095d8031c46c942ac28d67432effd5354e065 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:45:51 -0800 Subject: [PATCH 13/32] updated doc tests in lib.rs --- async-openai/src/lib.rs | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index 29d6cc3b..aa13cd89 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -31,28 +31,27 @@ //!``` //!# tokio_test::block_on(async { //! -//! use async_openai::{Client, types::{CreateCompletionRequestArgs}}; +//! use async_openai::{Client, types::responses::{CreateResponseArgs}}; //! //! // Create client //! let client = Client::new(); //! //! // Create request using builder pattern //! // Every request struct has companion builder struct with same name + Args suffix -//! let request = CreateCompletionRequestArgs::default() -//! .model("gpt-3.5-turbo-instruct") -//! .prompt("Tell me the recipe of alfredo pasta") -//! .max_tokens(40_u32) -//! .build() -//! .unwrap(); +//! let request = CreateResponseArgs::default() +//! .model("gpt-5-mini") +//! .input("tell me the recipe of pav bhaji") +//! .max_output_tokens(512u32) +//! .build()?; //! //! // Call API //! let response = client -//! .completions() // Get the API "group" (completions, images, etc.) from the client -//! .create(request) // Make the API call in that "group" -//! .await -//! .unwrap(); +//! .responses() // Get the API "group" (responses, images, etc.) from the client +//! .create(request) // Make the API call in that "group" +//! .await?; //! -//! println!("{}", response.choices.first().unwrap().text); +//! println!("{:?}", response.output_text()); +//! # Ok::<(), Box>(()) //! # }); //!``` //! @@ -85,12 +84,12 @@ //! "model": "gpt-4o", //! "store": false //! })) -//! .await -//! .unwrap(); +//! .await?; //! //! if let Some(content) = response["choices"][0]["message"]["content"].as_str() { //! 
println!("{}", content); //! } +//! # Ok::<(), Box>(()) //! # }); //!``` //! @@ -112,6 +111,7 @@ //! ``` //! # tokio_test::block_on(async { //! # use async_openai::Client; +//! # use async_openai::traits::RequestOptionsBuilder; //! # let client = Client::new(); //! client //! .chat() @@ -140,7 +140,8 @@ //! For example: //! ``` //! # tokio_test::block_on(async { -//! # use async_openai::{Client, types::CreateChatCompletionRequestArgs}; +//! # use async_openai::{Client, types::chat::CreateChatCompletionRequestArgs}; +//! # use async_openai::traits::RequestOptionsBuilder; //! # let client = Client::new(); //! # let request = CreateChatCompletionRequestArgs::default() //! # .model("gpt-4") From 442513e57c5afe5d9f8cac2e17061d4b24c088b7 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 13:51:02 -0800 Subject: [PATCH 14/32] cleanup --- async-openai/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index aa13cd89..739f7ca0 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -30,7 +30,6 @@ //! //!``` //!# tokio_test::block_on(async { -//! //! use async_openai::{Client, types::responses::{CreateResponseArgs}}; //! //! 
// Create client From f0b2d5564744e8fc37825d1fe1c8e9ec3ab6ced1 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:14:24 -0800 Subject: [PATCH 15/32] update feature flags --- async-openai/Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index c6c23490..52d8b91f 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -13,15 +13,15 @@ homepage = "https://github.com/64bit/async-openai" repository = "https://github.com/64bit/async-openai" [features] -default = [] +default = ["rustls"] # Enable rustls for TLS support -rustls = ["_api", "dep:reqwest", "reqwest/rustls-tls-native-roots"] +rustls = ["dep:reqwest", "reqwest/rustls-tls-native-roots"] # Enable rustls and webpki-roots -rustls-webpki-roots = ["_api", "dep:reqwest", "reqwest/rustls-tls-webpki-roots"] +rustls-webpki-roots = ["dep:reqwest", "reqwest/rustls-tls-webpki-roots"] # Enable native-tls for TLS support -native-tls = ["_api","dep:reqwest", "reqwest/native-tls"] +native-tls = ["dep:reqwest", "reqwest/native-tls"] # Remove dependency on OpenSSL -native-tls-vendored = ["_api", "dep:reqwest", "reqwest/native-tls-vendored"] +native-tls-vendored = ["dep:reqwest", "reqwest/native-tls-vendored"] # Bring your own types byot = ["dep:async-openai-macros"] From 7e3e8ce09520086a0ce13483f77fa3d6f34f8368 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:15:07 -0800 Subject: [PATCH 16/32] borrow-instead-of-move example --- examples/borrow-instead-of-move/Cargo.toml | 10 +++++ examples/borrow-instead-of-move/src/main.rs | 45 +++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 examples/borrow-instead-of-move/Cargo.toml create mode 100644 examples/borrow-instead-of-move/src/main.rs diff --git a/examples/borrow-instead-of-move/Cargo.toml b/examples/borrow-instead-of-move/Cargo.toml new file mode 100644 index 00000000..90d2f5b2 --- /dev/null +++ 
b/examples/borrow-instead-of-move/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "borrow-instead-of-move" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +async-openai = {path = "../../async-openai", features = ["byot", "responses"]} +tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" \ No newline at end of file diff --git a/examples/borrow-instead-of-move/src/main.rs b/examples/borrow-instead-of-move/src/main.rs new file mode 100644 index 00000000..82b5cc15 --- /dev/null +++ b/examples/borrow-instead-of-move/src/main.rs @@ -0,0 +1,45 @@ +use std::error::Error; + +use async_openai::{ + config::OpenAIConfig, + types::responses::{CreateResponse, CreateResponseArgs, Response}, + Client, +}; + +async fn make_request( + client: &Client, + request: &CreateResponse, +) -> Result> { + println!("\nRequest:\n{}", serde_json::to_string(&request)?); + + let response: Response = client.responses().create_byot(&request).await?; + + println!("\nResponse:\n{}", response.output_text().ok_or("None")?); + + Ok(response) +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let client = Client::new(); + + let input = "What is 10 + 5?"; + + let mut request = CreateResponseArgs::default() + .max_output_tokens(512u32) + .model("gpt-5-mini") + .input(input) + .build()?; + + // Instead of moving the request, we borrow it. + // For input and output types - use the types used in + // corresponding regular method `.create()`. 
+ let _ = make_request(&client, &request).await?; + + let input = "difference between climate and weather"; + request.input = input.into(); + + let _ = make_request(&client, &request).await?; + + Ok(()) +} From 69f9742ef6ae16df710e6707e8a6e4b3cff564fc Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:20:33 -0800 Subject: [PATCH 17/32] updated example --- examples/borrow-instead-of-move/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/borrow-instead-of-move/src/main.rs b/examples/borrow-instead-of-move/src/main.rs index 82b5cc15..4e434de2 100644 --- a/examples/borrow-instead-of-move/src/main.rs +++ b/examples/borrow-instead-of-move/src/main.rs @@ -12,7 +12,7 @@ async fn make_request( ) -> Result> { println!("\nRequest:\n{}", serde_json::to_string(&request)?); - let response: Response = client.responses().create_byot(&request).await?; + let response: Response = client.responses().create_byot(request).await?; println!("\nResponse:\n{}", response.output_text().ok_or("None")?); From 59e21c590040637b840127c2e7b81818574700e0 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:21:56 -0800 Subject: [PATCH 18/32] References in README --- async-openai/README.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/async-openai/README.md b/async-openai/README.md index 0897893d..12c7ccda 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -150,6 +150,19 @@ This can be useful in many scenarios: Visit [examples/bring-your-own-type](https://github.com/64bit/async-openai/tree/main/examples/bring-your-own-type) directory to learn more. +### References: Borrow Instead of Move + +With `byot` use reference to request types + +``` +let response: Response = client + .responses() + .create_byot(&request).await? +``` + +Visit [examples/borrow-instead-of-move](https://github.com/64bit/async-openai/tree/main/examples/borrow-instead-of-move) to learn more. 
+ + ## Rust Types To only use Rust types from the crate - use feature flag `types`. From a10a325a89598ce35cd0bc18201bb976157df983 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:27:21 -0800 Subject: [PATCH 19/32] Using References --- async-openai/README.md | 2 +- async-openai/src/lib.rs | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/async-openai/README.md b/async-openai/README.md index 12c7ccda..c7ab1e4f 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -165,7 +165,7 @@ Visit [examples/borrow-instead-of-move](https://github.com/64bit/async-openai/tr ## Rust Types -To only use Rust types from the crate - use feature flag `types`. +To only use Rust types from the crate - disable default features and use feature flag `types`. There are granular feature flags like `response-types`, `chat-completion-types`, etc. diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index 739f7ca0..c9c53283 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -92,6 +92,23 @@ //! # }); //!``` //! +//! **References: Borrow Instead of Move** +//! +//! With `byot` use reference to request types +//! +//! ``` +//! # #[cfg(feature = "byot")] +//! # tokio_test::block_on(async { +//! # use async_openai::{Client, types::responses::{CreateResponse, Response}}; +//! # let client = Client::new(); +//! # let request = CreateResponse::default(); +//! let response: Response = client +//! .responses() +//! .create_byot(&request).await?; +//! # Ok::<(), Box>(()) +//! # }); +//! ``` +//! //! ## Rust Types //! //! To only use Rust types from the crate - use feature flag `types`. 
From 282a071cdf0d88b18f17d0988c6fcc2dfdd0734c Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:39:19 -0800 Subject: [PATCH 20/32] simplify cargo.toml --- async-openai/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index 52d8b91f..0484ebe1 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -192,7 +192,7 @@ serde_json = "1.0" [[test]] name = "bring_your_own_type" -required-features = ["byot", "file", "assistant", "model", "moderation", "image", "chat-completion", "completions", "audio", "embedding", "finetuning", "batch", "administration", "upload", "vectorstore", "responses", "chatkit", "container", "evals", "video"] +required-features = ["full"] [[test]] name = "boxed_future" From 9fcf30dfa159f3070bd966337c1b530364fa6c80 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 14:46:28 -0800 Subject: [PATCH 21/32] dependency version to allow patch or minor updates --- async-openai/Cargo.toml | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index 0484ebe1..5f3c1e7a 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -152,43 +152,43 @@ _api = [ [dependencies] # Core dependencies - always needed for types -serde = { version = "1.0.217", features = ["derive", "rc"] } -serde_json = "1.0.135" -derive_builder = { version = "0.20.2", optional = true } -bytes = { version = "1.9.0", optional = true } +serde = { version = "1", features = ["derive", "rc"] } +serde_json = "1" +derive_builder = { version = "0.20", optional = true } +bytes = { version = "1.11", optional = true } # API dependencies - only needed when API features are enabled # We use a feature gate to enable these when any API feature is enabled async-openai-macros = { path = "../async-openai-macros", version = "0.1.0", optional = true } backoff = { version = 
"0.4.0", features = ["tokio"], optional = true } -base64 = { version = "0.22.1", optional = true } -futures = { version = "0.3.31", optional = true } -rand = { version = "0.9.0", optional = true } -reqwest = { version = "0.12.12", features = [ +base64 = { version = "0.22", optional = true } +futures = { version = "0.3", optional = true } +rand = { version = "0.9", optional = true } +reqwest = { version = "0.12", features = [ "json", "stream", "multipart", ], default-features = false, optional = true } reqwest-eventsource = { version = "0.6.0", optional = true } -thiserror = { version = "2.0.11", optional = true } -tokio = { version = "1.43.0", features = ["fs", "macros"], optional = true } -tokio-stream = { version = "0.1.17", optional = true } -tokio-util = { version = "0.7.13", features = ["codec", "io-util"], optional = true } -tracing = { version = "0.1.41", optional = true } -secrecy = { version = "0.10.3", features = ["serde"], optional = true } -eventsource-stream = { version = "0.2.3", optional = true } -serde_urlencoded = { version = "0.7.1", optional = true } +thiserror = { version = "2", optional = true } +tokio = { version = "1", features = ["fs", "macros"], optional = true } +tokio-stream = { version = "0.1", optional = true } +tokio-util = { version = "0.7", features = ["codec", "io-util"], optional = true } +tracing = { version = "0.1", optional = true } +secrecy = { version = "0.10", features = ["serde"], optional = true } +eventsource-stream = { version = "0.2", optional = true } +serde_urlencoded = { version = "0.7", optional = true } url = { version = "2.5", optional = true } # For Realtime websocket -tokio-tungstenite = { version = "0.26.1", optional = true, default-features = false } +tokio-tungstenite = { version = "0.28", optional = true, default-features = false } # For Webhook signature verification hmac = { version = "0.12", optional = true, default-features = false} sha2 = { version = "0.10", optional = true, default-features = false } 
hex = { version = "0.4", optional = true, default-features = false } [dev-dependencies] -tokio-test = "0.4.4" -serde_json = "1.0" +tokio-test = "0.4" +serde_json = "1" [[test]] name = "bring_your_own_type" From 6fc90a513a8049d162bce21ec7bcbfcdb1ebd3dd Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 15:03:47 -0800 Subject: [PATCH 22/32] allow minor or patch updates in example dependencies --- examples/assistants-code-interpreter/Cargo.toml | 4 ++-- examples/assistants-file-search/Cargo.toml | 2 +- examples/assistants-func-call-stream/Cargo.toml | 8 ++++---- examples/assistants/Cargo.toml | 4 ++-- examples/audio-speech-stream/Cargo.toml | 6 +++--- examples/audio-speech/Cargo.toml | 2 +- examples/audio-transcribe/Cargo.toml | 4 ++-- examples/audio-translate/Cargo.toml | 2 +- examples/azure-openai-service/Cargo.toml | 4 ++-- examples/borrow-instead-of-move/Cargo.toml | 2 +- examples/bring-your-own-type/Cargo.toml | 2 +- examples/chat-store/Cargo.toml | 4 ++-- examples/chat-stream/Cargo.toml | 4 ++-- examples/chat/Cargo.toml | 4 ++-- examples/chatkit/Cargo.toml | 4 ++-- examples/completions-stream/Cargo.toml | 4 ++-- examples/completions-web-search/Cargo.toml | 2 +- examples/completions/Cargo.toml | 2 +- examples/containers/Cargo.toml | 2 +- examples/conversations/Cargo.toml | 2 +- examples/create-image-variation/Cargo.toml | 2 +- examples/embeddings/Cargo.toml | 2 +- examples/gemini-openai-compatibility/Cargo.toml | 12 ++++++------ examples/image-edit-stream/Cargo.toml | 6 +++--- examples/image-edit/Cargo.toml | 2 +- examples/image-gen-stream/Cargo.toml | 6 +++--- examples/image-generate-b64-json/Cargo.toml | 2 +- examples/image-generate/Cargo.toml | 2 +- examples/in-memory-file/Cargo.toml | 4 ++-- examples/models/Cargo.toml | 2 +- examples/moderations/Cargo.toml | 2 +- examples/ollama-chat/Cargo.toml | 4 ++-- examples/realtime/Cargo.toml | 12 ++++++------ examples/responses-function-call/Cargo.toml | 6 +++--- 
examples/responses-images-and-vision/Cargo.toml | 2 +- examples/responses-retrieve-stream/Cargo.toml | 4 ++-- examples/responses-stream/Cargo.toml | 4 ++-- examples/responses-structured-outputs/Cargo.toml | 2 +- examples/responses/Cargo.toml | 4 ++-- examples/structured-outputs-schemars/Cargo.toml | 8 ++++---- examples/structured-outputs/Cargo.toml | 4 ++-- examples/tool-call-stream/Cargo.toml | 8 ++++---- examples/tool-call-stream/src/main.rs | 8 ++++---- examples/tool-call/Cargo.toml | 8 ++++---- examples/tool-call/src/main.rs | 8 ++++---- examples/usage/Cargo.toml | 2 +- examples/vector-store-retrieval/Cargo.toml | 2 +- examples/video/Cargo.toml | 4 ++-- examples/vision-chat/Cargo.toml | 4 ++-- examples/webhooks/Cargo.toml | 8 ++++---- 50 files changed, 106 insertions(+), 106 deletions(-) diff --git a/examples/assistants-code-interpreter/Cargo.toml b/examples/assistants-code-interpreter/Cargo.toml index 4b6d2980..9a5382c2 100644 --- a/examples/assistants-code-interpreter/Cargo.toml +++ b/examples/assistants-code-interpreter/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["assistant", "file"] } -tokio = { version = "1.43.0", features = ["full"] } -tracing-subscriber = { version = "0.3.19", features = ["env-filter"]} +tokio = { version = "1", features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"]} diff --git a/examples/assistants-file-search/Cargo.toml b/examples/assistants-file-search/Cargo.toml index 47104d74..5a999855 100644 --- a/examples/assistants-file-search/Cargo.toml +++ b/examples/assistants-file-search/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["assistant", "file", "vectorstore"] } -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/assistants-func-call-stream/Cargo.toml 
b/examples/assistants-func-call-stream/Cargo.toml index dbc3e1da..df77b7ca 100644 --- a/examples/assistants-func-call-stream/Cargo.toml +++ b/examples/assistants-func-call-stream/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["assistant"] } -tokio = { version = "1.43.0", features = ["full"] } -serde_json = "1.0.135" -futures = "0.3.31" -tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } +tokio = { version = "1", features = ["full"] } +serde_json = "1" +futures = "0.3" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } diff --git a/examples/assistants/Cargo.toml b/examples/assistants/Cargo.toml index 339268df..cc7d8f62 100644 --- a/examples/assistants/Cargo.toml +++ b/examples/assistants/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["assistant"]} -tokio = { version = "1.43.0", features = ["full"] } -tracing-subscriber = { version = "0.3.19", features = ["env-filter"]} +tokio = { version = "1", features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"]} diff --git a/examples/audio-speech-stream/Cargo.toml b/examples/audio-speech-stream/Cargo.toml index 53603c6a..fb163c6b 100644 --- a/examples/audio-speech-stream/Cargo.toml +++ b/examples/audio-speech-stream/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["audio"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" -base64 = "0.22.1" +tokio = { version = "1", features = ["full"] } +futures = "0.3" +base64 = "0.22" diff --git a/examples/audio-speech/Cargo.toml b/examples/audio-speech/Cargo.toml index 7f38c2e6..55600bc5 100644 --- a/examples/audio-speech/Cargo.toml +++ b/examples/audio-speech/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["audio"]} -tokio = { 
version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/audio-transcribe/Cargo.toml b/examples/audio-transcribe/Cargo.toml index 5090fd6a..dc0257bd 100644 --- a/examples/audio-transcribe/Cargo.toml +++ b/examples/audio-transcribe/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["audio"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" +tokio = { version = "1", features = ["full"] } +futures = "0.3" diff --git a/examples/audio-translate/Cargo.toml b/examples/audio-translate/Cargo.toml index d98bf28e..16258463 100644 --- a/examples/audio-translate/Cargo.toml +++ b/examples/audio-translate/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["audio"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/azure-openai-service/Cargo.toml b/examples/azure-openai-service/Cargo.toml index 06d24c94..20bd38ed 100644 --- a/examples/azure-openai-service/Cargo.toml +++ b/examples/azure-openai-service/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion", "embedding"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" +tokio = { version = "1", features = ["full"] } +futures = "0.3" diff --git a/examples/borrow-instead-of-move/Cargo.toml b/examples/borrow-instead-of-move/Cargo.toml index 90d2f5b2..7a074123 100644 --- a/examples/borrow-instead-of-move/Cargo.toml +++ b/examples/borrow-instead-of-move/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["byot", "responses"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } serde_json = "1" \ No newline at end of file diff --git 
a/examples/bring-your-own-type/Cargo.toml b/examples/bring-your-own-type/Cargo.toml index 1c29fbd9..084ab71e 100644 --- a/examples/bring-your-own-type/Cargo.toml +++ b/examples/bring-your-own-type/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["byot", "chat-completion"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } serde_json = "1" futures-core = "0.3" futures = "0.3" diff --git a/examples/chat-store/Cargo.toml b/examples/chat-store/Cargo.toml index f28d3d0c..2cd0f9b0 100644 --- a/examples/chat-store/Cargo.toml +++ b/examples/chat-store/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/chat-stream/Cargo.toml b/examples/chat-stream/Cargo.toml index e33b8323..c3772b49 100644 --- a/examples/chat-stream/Cargo.toml +++ b/examples/chat-stream/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" +tokio = { version = "1", features = ["full"] } +futures = "0.3" diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml index 9cf9994b..d0a8d451 100644 --- a/examples/chat/Cargo.toml +++ b/examples/chat/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/chatkit/Cargo.toml b/examples/chatkit/Cargo.toml index 3d9bfadb..7e3af7dd 100644 --- a/examples/chatkit/Cargo.toml +++ b/examples/chatkit/Cargo.toml @@ -6,6 
+6,6 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chatkit"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/completions-stream/Cargo.toml b/examples/completions-stream/Cargo.toml index 8bed6947..0ac852bc 100644 --- a/examples/completions-stream/Cargo.toml +++ b/examples/completions-stream/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["completions"]} -futures = "0.3.30" -tokio = { version = "1.43.0", features = ["full"] } +futures = "0.3" +tokio = { version = "1", features = ["full"] } diff --git a/examples/completions-web-search/Cargo.toml b/examples/completions-web-search/Cargo.toml index 637845e8..827ee115 100644 --- a/examples/completions-web-search/Cargo.toml +++ b/examples/completions-web-search/Cargo.toml @@ -7,4 +7,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/completions/Cargo.toml b/examples/completions/Cargo.toml index 61dfe196..9debc0b5 100644 --- a/examples/completions/Cargo.toml +++ b/examples/completions/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["completions"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/containers/Cargo.toml b/examples/containers/Cargo.toml index 103b6daf..38fa5a4f 100644 --- a/examples/containers/Cargo.toml +++ b/examples/containers/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["container"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff 
--git a/examples/conversations/Cargo.toml b/examples/conversations/Cargo.toml index 3a2616fc..27ed3f9a 100644 --- a/examples/conversations/Cargo.toml +++ b/examples/conversations/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["responses"] } -tokio = { version = "1.41.1", features = ["full"] } +tokio = { version = "1", features = ["full"] } serde_json = "1" diff --git a/examples/create-image-variation/Cargo.toml b/examples/create-image-variation/Cargo.toml index 4a932bab..5923371b 100644 --- a/examples/create-image-variation/Cargo.toml +++ b/examples/create-image-variation/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/embeddings/Cargo.toml b/examples/embeddings/Cargo.toml index 9809ddcc..e91ce9d4 100644 --- a/examples/embeddings/Cargo.toml +++ b/examples/embeddings/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["embedding"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/gemini-openai-compatibility/Cargo.toml b/examples/gemini-openai-compatibility/Cargo.toml index 698d0fed..ec0093b0 100644 --- a/examples/gemini-openai-compatibility/Cargo.toml +++ b/examples/gemini-openai-compatibility/Cargo.toml @@ -6,10 +6,10 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["byot", "chat-completion", "image", "embedding", "model"]} -tokio = { version = "1.43.0", features = ["full"] } -tracing-subscriber = { version = "0.3.19", features = ["env-filter"]} +tokio = { version = "1", features = ["full"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"]} dotenv = "0.15.0" -futures = "0.3.31" -serde_json = "1.0.100" 
-serde = { version = "1.0", features = ["derive"] } -base64 = "0.22.1" +futures = "0.3" +serde_json = "1" +serde = { version = "1", features = ["derive"] } +base64 = "0.22" diff --git a/examples/image-edit-stream/Cargo.toml b/examples/image-edit-stream/Cargo.toml index 40e3d8bd..a83aec94 100644 --- a/examples/image-edit-stream/Cargo.toml +++ b/examples/image-edit-stream/Cargo.toml @@ -7,6 +7,6 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" -base64 = "0.22.1" +tokio = { version = "1", features = ["full"] } +futures = "0.3" +base64 = "0.22" diff --git a/examples/image-edit/Cargo.toml b/examples/image-edit/Cargo.toml index dda7e43b..25aed3f1 100644 --- a/examples/image-edit/Cargo.toml +++ b/examples/image-edit/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/image-gen-stream/Cargo.toml b/examples/image-gen-stream/Cargo.toml index 3f61be42..27f57f99 100644 --- a/examples/image-gen-stream/Cargo.toml +++ b/examples/image-gen-stream/Cargo.toml @@ -7,6 +7,6 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" -base64 = "0.22.1" +tokio = { version = "1", features = ["full"] } +futures = "0.3" +base64 = "0.22" diff --git a/examples/image-generate-b64-json/Cargo.toml b/examples/image-generate-b64-json/Cargo.toml index c0e7812b..b4c15b55 100644 --- a/examples/image-generate-b64-json/Cargo.toml +++ b/examples/image-generate-b64-json/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", 
features = ["full"] } diff --git a/examples/image-generate/Cargo.toml b/examples/image-generate/Cargo.toml index 42580b1a..c0bad190 100644 --- a/examples/image-generate/Cargo.toml +++ b/examples/image-generate/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["image"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/in-memory-file/Cargo.toml b/examples/in-memory-file/Cargo.toml index cc2dfbfb..d7a3f555 100644 --- a/examples/in-memory-file/Cargo.toml +++ b/examples/in-memory-file/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["audio"]} -tokio = { version = "1.43.0", features = ["full"] } -bytes = "1.9.0" +tokio = { version = "1", features = ["full"] } +bytes = "1.11" diff --git a/examples/models/Cargo.toml b/examples/models/Cargo.toml index e8873b53..c15497af 100644 --- a/examples/models/Cargo.toml +++ b/examples/models/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["model"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/moderations/Cargo.toml b/examples/moderations/Cargo.toml index 919b3d56..01ec303f 100644 --- a/examples/moderations/Cargo.toml +++ b/examples/moderations/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["moderation"] } -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/ollama-chat/Cargo.toml b/examples/ollama-chat/Cargo.toml index bc121558..1d77a68e 100644 --- a/examples/ollama-chat/Cargo.toml +++ b/examples/ollama-chat/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -serde_json = 
"1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/realtime/Cargo.toml b/examples/realtime/Cargo.toml index e97882a6..eb0b6fe1 100644 --- a/examples/realtime/Cargo.toml +++ b/examples/realtime/Cargo.toml @@ -6,14 +6,14 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["realtime"] } -futures-channel = "0.3.31" -futures-util = { version = "0.3.31", features = ["sink", "std"] } -serde = { version = "1.0.217", features = ["derive"] } -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = [ +futures-channel = "0.3" +futures-util = { version = "0.3", features = ["sink", "std"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = [ "io-std", "io-util", "macros", "rt-multi-thread", ] } -tokio-tungstenite = { version = "0.26.1", features = ["connect", "native-tls"] } +tokio-tungstenite = { version = "0.28", features = ["connect", "native-tls"] } diff --git a/examples/responses-function-call/Cargo.toml b/examples/responses-function-call/Cargo.toml index eb61b286..cb9fe612 100644 --- a/examples/responses-function-call/Cargo.toml +++ b/examples/responses-function-call/Cargo.toml @@ -6,8 +6,8 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["responses"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } -serde = { version = "1.0.219", features = ["derive"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } +serde = { version = "1", features = ["derive"] } clap = { version = "4", features = ["derive"] } futures = "0.3" diff --git a/examples/responses-images-and-vision/Cargo.toml b/examples/responses-images-and-vision/Cargo.toml index 74cf62b8..5408cf17 100644 --- a/examples/responses-images-and-vision/Cargo.toml +++ b/examples/responses-images-and-vision/Cargo.toml @@ -8,5 +8,5 @@ publish = 
false async-openai = { path = "../../async-openai", features = ["responses"] } tokio = { version = "1.0", features = ["full"] } futures = "0.3" -base64 = "0.22.1" +base64 = "0.22" serde_json = "1.0" diff --git a/examples/responses-retrieve-stream/Cargo.toml b/examples/responses-retrieve-stream/Cargo.toml index 53275a32..c70f7f8a 100644 --- a/examples/responses-retrieve-stream/Cargo.toml +++ b/examples/responses-retrieve-stream/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["responses"] } -tokio = { version = "1.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } futures = "0.3" -serde_json = "1.0" +serde_json = "1" diff --git a/examples/responses-stream/Cargo.toml b/examples/responses-stream/Cargo.toml index feaa5fc6..5d65a7ad 100644 --- a/examples/responses-stream/Cargo.toml +++ b/examples/responses-stream/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["responses"] } -tokio = { version = "1.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } futures = "0.3" -serde_json = "1.0" +serde_json = "1" diff --git a/examples/responses-structured-outputs/Cargo.toml b/examples/responses-structured-outputs/Cargo.toml index 1bb475de..fd806e60 100644 --- a/examples/responses-structured-outputs/Cargo.toml +++ b/examples/responses-structured-outputs/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["responses"] } -serde_json = "1.0" +serde_json = "1" tokio = { version = "1", features = ["full"] } clap = { version = "4", features = ["derive"] } futures = "0.3" diff --git a/examples/responses/Cargo.toml b/examples/responses/Cargo.toml index 39d5ace2..df5a66ee 100644 --- a/examples/responses/Cargo.toml +++ b/examples/responses/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = 
["responses"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/structured-outputs-schemars/Cargo.toml b/examples/structured-outputs-schemars/Cargo.toml index 75c93280..56a26afd 100644 --- a/examples/structured-outputs-schemars/Cargo.toml +++ b/examples/structured-outputs-schemars/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -serde_json = "1.0.127" -tokio = { version = "1.39.3", features = ["full"] } -schemars = "0.8.21" -serde = "1.0.130" +serde_json = "1" +tokio = { version = "1", features = ["full"] } +schemars = "1.1" +serde = "1" diff --git a/examples/structured-outputs/Cargo.toml b/examples/structured-outputs/Cargo.toml index 6e268e57..0c6f42fa 100644 --- a/examples/structured-outputs/Cargo.toml +++ b/examples/structured-outputs/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/tool-call-stream/Cargo.toml b/examples/tool-call-stream/Cargo.toml index 9ab536c9..bef19621 100644 --- a/examples/tool-call-stream/Cargo.toml +++ b/examples/tool-call-stream/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -rand = "0.8.5" -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" +rand = "0.9" +serde_json = "1" +tokio = { version = "1", features = ["full"] } +futures = "0.3" diff --git a/examples/tool-call-stream/src/main.rs b/examples/tool-call-stream/src/main.rs index a3de2c5d..88686f54 100644 --- a/examples/tool-call-stream/src/main.rs +++ b/examples/tool-call-stream/src/main.rs @@ -8,8 +8,8 @@ 
use async_openai::types::chat::{ }; use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; use futures::StreamExt; -use rand::seq::SliceRandom; -use rand::{thread_rng, Rng}; +use rand::seq::IndexedRandom; +use rand::{rng, Rng}; use serde_json::json; #[tokio::main] @@ -180,8 +180,8 @@ fn get_current_weather(args: &str) -> serde_json::Value { .to_string(); let unit = args["unit"].as_str().unwrap_or("fahrenheit"); - let mut rng = thread_rng(); - let temperature: i32 = rng.gen_range(20..=55); + let mut rng = rng(); + let temperature: i32 = rng.random_range(20..=55); let forecasts = [ "sunny", "cloudy", "overcast", "rainy", "windy", "foggy", "snowy", ]; diff --git a/examples/tool-call/Cargo.toml b/examples/tool-call/Cargo.toml index 967e47c7..0bc0f96d 100644 --- a/examples/tool-call/Cargo.toml +++ b/examples/tool-call/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["chat-completion"]} -rand = "0.8.5" -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } -futures = "0.3.31" +rand = "0.9" +serde_json = "1" +tokio = { version = "1", features = ["full"] } +futures = "0.3" diff --git a/examples/tool-call/src/main.rs b/examples/tool-call/src/main.rs index 0018a053..c2acbb1d 100644 --- a/examples/tool-call/src/main.rs +++ b/examples/tool-call/src/main.rs @@ -8,8 +8,8 @@ use async_openai::types::chat::{ }; use async_openai::{types::chat::CreateChatCompletionRequestArgs, Client}; use futures::StreamExt; -use rand::seq::SliceRandom; -use rand::{thread_rng, Rng}; +use rand::seq::IndexedRandom; +use rand::{rng, Rng}; use serde_json::{json, Value}; #[tokio::main] @@ -150,9 +150,9 @@ async fn call_fn(name: &str, args: &str) -> Result serde_json::Value { - let mut rng = thread_rng(); + let mut rng = rng(); - let temperature: i32 = rng.gen_range(20..=55); + let temperature: i32 = rng.random_range(20..=55); let forecasts = [ "sunny", "cloudy", "overcast", "rainy", "windy", 
"foggy", "snowy", diff --git a/examples/usage/Cargo.toml b/examples/usage/Cargo.toml index 3553dbd8..2dab7138 100644 --- a/examples/usage/Cargo.toml +++ b/examples/usage/Cargo.toml @@ -6,5 +6,5 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["administration"]} -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/vector-store-retrieval/Cargo.toml b/examples/vector-store-retrieval/Cargo.toml index 9a11003d..866564ad 100644 --- a/examples/vector-store-retrieval/Cargo.toml +++ b/examples/vector-store-retrieval/Cargo.toml @@ -6,4 +6,4 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["vectorstore", "file"] } -tokio = { version = "1.43.0", features = ["full"] } +tokio = { version = "1", features = ["full"] } diff --git a/examples/video/Cargo.toml b/examples/video/Cargo.toml index 5514380f..a441c49d 100644 --- a/examples/video/Cargo.toml +++ b/examples/video/Cargo.toml @@ -6,6 +6,6 @@ publish = false [dependencies] async-openai = {path = "../../async-openai", features = ["video"]} -tokio = { version = "1.43.0", features = ["full"] } -bytes = "1.9.0" +tokio = { version = "1", features = ["full"] } +bytes = "1.11" diff --git a/examples/vision-chat/Cargo.toml b/examples/vision-chat/Cargo.toml index b2ac34b7..c6c8c1c1 100644 --- a/examples/vision-chat/Cargo.toml +++ b/examples/vision-chat/Cargo.toml @@ -8,5 +8,5 @@ publish = false [dependencies] async-openai = { path = "../../async-openai", features = ["chat-completion"] } -serde_json = "1.0.135" -tokio = { version = "1.43.0", features = ["full"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } diff --git a/examples/webhooks/Cargo.toml b/examples/webhooks/Cargo.toml index f2f4a49a..ab75456b 100644 --- a/examples/webhooks/Cargo.toml +++ b/examples/webhooks/Cargo.toml @@ -6,8 +6,8 @@ publish = false [dependencies] async-openai = { path = 
"../../async-openai", features = ["webhook", "responses"] } -tokio = { version = "1.42.0", features = ["full"] } -axum = "0.7.9" -tracing = "0.1.41" -tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } +tokio = { version = "1", features = ["full"] } +axum = "0.7" +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } From d0aa9a61efc2356a306a6c2a2833f3f42223f604 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 15:19:16 -0800 Subject: [PATCH 23/32] add language to code blocks in README for syntax colors --- async-openai/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/async-openai/README.md b/async-openai/README.md index c7ab1e4f..a78a3f84 100644 --- a/async-openai/README.md +++ b/async-openai/README.md @@ -154,7 +154,7 @@ directory to learn more. With `byot` use reference to request types -``` +```rust let response: Response = client .responses() .create_byot(&request).await? @@ -177,7 +177,7 @@ These granular types are enabled when the corresponding API feature is enabled - Certain individual APIs that need additional query or header parameters - these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group. For example: -``` +```rust client. .chat() // query can be a struct or a map too. @@ -202,7 +202,7 @@ In addition to `.query()`, `.header()`, `.headers()` a path for individual requ For example: -``` +```rust client .chat() .path("/v1/messages")? 
From 1f710ec984dc0f89289f9de2e8b95da01e194ffb Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 15:44:11 -0800 Subject: [PATCH 24/32] unittests: add missing feature flags in all mod tests {} --- async-openai/src/embedding.rs | 2 +- async-openai/src/file.rs | 2 +- async-openai/src/vectorstores/vector_store_files.rs | 2 +- async-openai/src/webhooks.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/async-openai/src/embedding.rs b/async-openai/src/embedding.rs index a1975a77..2173723e 100644 --- a/async-openai/src/embedding.rs +++ b/async-openai/src/embedding.rs @@ -72,7 +72,7 @@ impl<'c, C: Config> Embeddings<'c, C> { } } -#[cfg(test)] +#[cfg(all(test, feature = "embedding"))] mod tests { use crate::error::OpenAIError; use crate::types::embeddings::{CreateEmbeddingResponse, Embedding, EncodingFormat}; diff --git a/async-openai/src/file.rs b/async-openai/src/file.rs index f959b7cc..b202a49a 100644 --- a/async-openai/src/file.rs +++ b/async-openai/src/file.rs @@ -76,7 +76,7 @@ impl<'c, C: Config> Files<'c, C> { } } -#[cfg(test)] +#[cfg(all(test, feature = "file"))] mod tests { use crate::{ traits::RequestOptionsBuilder, diff --git a/async-openai/src/vectorstores/vector_store_files.rs b/async-openai/src/vectorstores/vector_store_files.rs index 164f7b09..69ac4e24 100644 --- a/async-openai/src/vectorstores/vector_store_files.rs +++ b/async-openai/src/vectorstores/vector_store_files.rs @@ -112,7 +112,7 @@ impl<'c, C: Config> VectorStoreFiles<'c, C> { } } -#[cfg(test)] +#[cfg(all(test, feature = "vectorstore", feature = "file"))] mod tests { use crate::types::files::{CreateFileRequest, FileInput, FilePurpose}; use crate::types::vectorstores::CreateVectorStoreRequest; diff --git a/async-openai/src/webhooks.rs b/async-openai/src/webhooks.rs index 6109182c..d44b44b6 100644 --- a/async-openai/src/webhooks.rs +++ b/async-openai/src/webhooks.rs @@ -177,7 +177,7 @@ fn constant_time_eq(a: &[u8], b: &[u8]) -> bool { result == 0 } -#[cfg(test)] 
+#[cfg(all(test, feature = "webhook"))] mod tests { use super::*; From 598080e46b160eabfb2a1cea6b1d86370d20af49 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:04:11 -0800 Subject: [PATCH 25/32] fix feature flags required for tests --- async-openai/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index 5f3c1e7a..ddadb91a 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -204,7 +204,7 @@ required-features = ["chat-completion-types"] [[test]] name = "embeddings" -required-features = ["embedding-types", "chat-completion-types"] +required-features = ["embedding-types"] [[test]] name = "ser_de" @@ -212,7 +212,7 @@ required-features = ["chat-completion-types"] [[test]] name = "whisper" -required-features = ["audio", "file-types"] +required-features = ["audio"] [package.metadata.docs.rs] all-features = true From f128d152308298d067bced6a7305bd99c9c0ee10 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:04:49 -0800 Subject: [PATCH 26/32] update embedding tests to use latest small model --- async-openai/src/embedding.rs | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/async-openai/src/embedding.rs b/async-openai/src/embedding.rs index 2173723e..17a54abb 100644 --- a/async-openai/src/embedding.rs +++ b/async-openai/src/embedding.rs @@ -83,7 +83,7 @@ mod tests { let client = Client::new(); let request = CreateEmbeddingRequestArgs::default() - .model("text-embedding-ada-002") + .model("text-embedding-3-small") .input("The food was delicious and the waiter...") .build() .unwrap(); @@ -98,7 +98,7 @@ mod tests { let client = Client::new(); let request = CreateEmbeddingRequestArgs::default() - .model("text-embedding-ada-002") + .model("text-embedding-3-small") .input(["The food was delicious", "The waiter was good"]) .build() .unwrap(); @@ -113,7 +113,7 @@ mod tests { let client = Client::new(); let 
request = CreateEmbeddingRequestArgs::default() - .model("text-embedding-ada-002") + .model("text-embedding-3-small") .input([1, 2, 3]) .build() .unwrap(); @@ -128,7 +128,7 @@ mod tests { let client = Client::new(); let request = CreateEmbeddingRequestArgs::default() - .model("text-embedding-ada-002") + .model("text-embedding-3-small") .input([[1, 2, 3], [4, 5, 6], [7, 8, 10]]) .build() .unwrap(); @@ -143,7 +143,7 @@ mod tests { let client = Client::new(); let request = CreateEmbeddingRequestArgs::default() - .model("text-embedding-ada-002") + .model("text-embedding-3-small") .input([vec![1, 2, 3], vec![4, 5, 6, 7], vec![7, 8, 10, 11, 100257]]) .build() .unwrap(); @@ -178,7 +178,7 @@ mod tests { async fn test_cannot_use_base64_encoding_with_normal_create_request() { let client = Client::new(); - const MODEL: &str = "text-embedding-ada-002"; + const MODEL: &str = "text-embedding-3-small"; const INPUT: &str = "You shall not pass."; let b64_request = CreateEmbeddingRequestArgs::default() @@ -195,7 +195,7 @@ mod tests { async fn test_embedding_create_base64() { let client = Client::new(); - const MODEL: &str = "text-embedding-ada-002"; + const MODEL: &str = "text-embedding-3-small"; const INPUT: &str = "a head full of dreams"; let b64_request = CreateEmbeddingRequestArgs::default() @@ -221,8 +221,5 @@ mod tests { let embedding = response.data.into_iter().next().unwrap().embedding; assert_eq!(b64_embedding.len(), embedding.len()); - for (b64, normal) in b64_embedding.iter().zip(embedding.iter()) { - assert!((b64 - normal).abs() < 1e-6); - } } } From b07978770111ce2044ae98e68bc25640df872ed2 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:06:27 -0800 Subject: [PATCH 27/32] fix ser_de test --- async-openai/tests/ser_de.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/async-openai/tests/ser_de.rs b/async-openai/tests/ser_de.rs index a08de4a2..2ceb0d5b 100644 --- a/async-openai/tests/ser_de.rs +++ b/async-openai/tests/ser_de.rs 
@@ -3,8 +3,8 @@ use async_openai::types::chat::{ CreateChatCompletionRequest, CreateChatCompletionRequestArgs, }; -#[tokio::test] -async fn chat_types_serde() { +#[test] +fn chat_types_serde() { let request: CreateChatCompletionRequest = CreateChatCompletionRequestArgs::default() .messages([ ChatCompletionRequestSystemMessageArgs::default() From 54968a5736cdf787feb831bdf97470d0b91a3395 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:15:44 -0800 Subject: [PATCH 28/32] fix vector_store_files tests --- .../src/vectorstores/vector_store_files.rs | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/async-openai/src/vectorstores/vector_store_files.rs b/async-openai/src/vectorstores/vector_store_files.rs index 69ac4e24..978ff335 100644 --- a/async-openai/src/vectorstores/vector_store_files.rs +++ b/async-openai/src/vectorstores/vector_store_files.rs @@ -124,23 +124,23 @@ mod tests { let client = Client::new(); // Create a file - let file_handle = client + let openai_file = client .files() .create(CreateFileRequest { file: FileInput::from_vec_u8( String::from("meow.txt"), String::from(":3").into_bytes(), ), - purpose: FilePurpose::Assistants, + purpose: FilePurpose::UserData, expires_after: None, }) .await?; // Create a vector store - let vector_store_handle = client + let vecor_store_object = client .vector_stores() .create(CreateVectorStoreRequest { - file_ids: Some(vec![file_handle.id.clone()]), + file_ids: Some(vec![openai_file.id.clone()]), name: None, description: None, expires_after: None, @@ -148,21 +148,24 @@ mod tests { metadata: None, }) .await?; - let vector_store_file = client + + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + let vector_store_file_object = client .vector_stores() - .files(&vector_store_handle.id) - .retrieve(&file_handle.id) + .files(&vecor_store_object.id) + .retrieve(&openai_file.id) .await?; - assert_eq!(vector_store_file.id, file_handle.id); + 
assert_eq!(vector_store_file_object.id, openai_file.id); // Delete the vector store client .vector_stores() - .delete(&vector_store_handle.id) + .delete(&vecor_store_object.id) .await?; // Delete the file - client.files().delete(&file_handle.id).await?; + client.files().delete(&openai_file.id).await?; Ok(()) } From 004647cef3634a0b5ec7328bec89e2b1eaa80feb Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:25:34 -0800 Subject: [PATCH 29/32] fix assistants examples to include required beta header --- .../assistants-code-interpreter/src/main.rs | 17 +++++++++++------ examples/assistants-file-search/src/main.rs | 19 ++++++++++++------- .../assistants-func-call-stream/src/main.rs | 9 +++++---- examples/assistants/src/main.rs | 5 ++++- 4 files changed, 32 insertions(+), 18 deletions(-) diff --git a/examples/assistants-code-interpreter/src/main.rs b/examples/assistants-code-interpreter/src/main.rs index 58848c84..38d70a24 100644 --- a/examples/assistants-code-interpreter/src/main.rs +++ b/examples/assistants-code-interpreter/src/main.rs @@ -1,13 +1,16 @@ use std::error::Error; use async_openai::{ + config::{OpenAIConfig, OPENAI_BETA_HEADER}, traits::RequestOptionsBuilder, - types::assistants::{ - AssistantToolCodeInterpreterResources, AssistantTools, CreateAssistantRequestArgs, - CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, MessageContent, - MessageContentTextAnnotations, MessageRole, RunStatus, + types::{ + assistants::{ + AssistantToolCodeInterpreterResources, AssistantTools, CreateAssistantRequestArgs, + CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, MessageContent, + MessageContentTextAnnotations, MessageRole, RunStatus, + }, + files::{CreateFileRequest, FilePurpose}, }, - types::files::{CreateFileRequest, FilePurpose}, Client, }; use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; @@ -22,7 +25,9 @@ async fn main() -> Result<(), Box> { .with(EnvFilter::from_default_env()) 
.init(); - let client = Client::new(); + let config = + OpenAIConfig::default().with_header(OPENAI_BETA_HEADER, "assistants=v2".to_string())?; + let client = Client::with_config(config); // Upload data file with "assistants" purpose let data_file = client diff --git a/examples/assistants-file-search/src/main.rs b/examples/assistants-file-search/src/main.rs index 0ac1907f..7e972c76 100644 --- a/examples/assistants-file-search/src/main.rs +++ b/examples/assistants-file-search/src/main.rs @@ -1,20 +1,25 @@ use std::error::Error; use async_openai::{ + config::{OpenAIConfig, OPENAI_BETA_HEADER}, traits::RequestOptionsBuilder, - types::assistants::{ - AssistantToolFileSearchResources, AssistantToolsFileSearch, CreateAssistantRequestArgs, - CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, MessageAttachment, - MessageAttachmentTool, MessageContent, MessageRole, ModifyAssistantRequest, RunStatus, + types::{ + assistants::{ + AssistantToolFileSearchResources, AssistantToolsFileSearch, CreateAssistantRequestArgs, + CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, MessageAttachment, + MessageAttachmentTool, MessageContent, MessageRole, ModifyAssistantRequest, RunStatus, + }, + files::{CreateFileRequest, FilePurpose}, + vectorstores::CreateVectorStoreRequest, }, - types::files::{CreateFileRequest, FilePurpose}, - types::vectorstores::CreateVectorStoreRequest, Client, }; #[tokio::main] async fn main() -> Result<(), Box> { - let client = Client::new(); + let config = + OpenAIConfig::default().with_header(OPENAI_BETA_HEADER, "assistants=v2".to_string())?; + let client = Client::with_config(config); // // Step 1: Create a new Assistant with File Search Enabled // diff --git a/examples/assistants-func-call-stream/src/main.rs b/examples/assistants-func-call-stream/src/main.rs index d73112fd..7624e3f0 100644 --- a/examples/assistants-func-call-stream/src/main.rs +++ b/examples/assistants-func-call-stream/src/main.rs @@ -1,13 +1,12 @@ use 
std::error::Error; use async_openai::{ - config::OpenAIConfig, + config::{OpenAIConfig, OPENAI_BETA_HEADER}, types::assistants::{ AssistantStreamEvent, CreateAssistantRequestArgs, CreateMessageRequest, CreateRunRequest, - CreateThreadRequest, MessageDeltaContent, MessageRole, RunObject, + CreateThreadRequest, FunctionObject, MessageDeltaContent, MessageRole, RunObject, SubmitToolOutputsRunRequest, ToolsOutputs, }, - types::chat::FunctionObject, Client, }; use futures::StreamExt; @@ -23,7 +22,9 @@ async fn main() -> Result<(), Box> { .with(EnvFilter::from_default_env()) .init(); - let client = Client::new(); + let config = + OpenAIConfig::default().with_header(OPENAI_BETA_HEADER, "assistants=v2".to_string())?; + let client = Client::with_config(config); // // Step 1: Define functions diff --git a/examples/assistants/src/main.rs b/examples/assistants/src/main.rs index 370937f1..57c4d60c 100644 --- a/examples/assistants/src/main.rs +++ b/examples/assistants/src/main.rs @@ -1,4 +1,5 @@ use async_openai::{ + config::{OpenAIConfig, OPENAI_BETA_HEADER}, traits::RequestOptionsBuilder, types::assistants::{ CreateAssistantRequestArgs, CreateMessageRequestArgs, CreateRunRequestArgs, @@ -22,7 +23,9 @@ async fn main() -> Result<(), Box> { let query = [("limit", "1")]; //limit the list responses to 1 message //create a client - let client = Client::new(); + let config = + OpenAIConfig::default().with_header(OPENAI_BETA_HEADER, "assistants=v2".to_string())?; + let client = Client::with_config(config); //create a thread for the conversation let thread_request = CreateThreadRequestArgs::default().build()?; From cb5848fb54a13b786ac097b4eaddde322660a8fa Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:41:54 -0800 Subject: [PATCH 30/32] fix example responses-images-and-vision --- examples/responses-images-and-vision/src/main.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/examples/responses-images-and-vision/src/main.rs 
b/examples/responses-images-and-vision/src/main.rs index e6d7043b..adb784fa 100644 --- a/examples/responses-images-and-vision/src/main.rs +++ b/examples/responses-images-and-vision/src/main.rs @@ -2,12 +2,9 @@ use std::error::Error; use async_openai::{ config::OpenAIConfig, - types::{ - chat::ImageDetail, - responses::{ - CreateResponseArgs, ImageGenTool, InputContent, InputImageContent, InputMessage, - InputRole, OutputItem, OutputMessageContent, - }, + types::responses::{ + CreateResponseArgs, ImageDetail, ImageGenTool, InputContent, InputImageContent, + InputMessage, InputRole, OutputItem, OutputMessageContent, }, Client, }; From f41e634f8de642436080f0b975415d7c5f13526c Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:43:46 -0800 Subject: [PATCH 31/32] fix responses-structured-outputs example --- examples/responses-structured-outputs/src/main.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/examples/responses-structured-outputs/src/main.rs b/examples/responses-structured-outputs/src/main.rs index eb7fe0f7..585af463 100644 --- a/examples/responses-structured-outputs/src/main.rs +++ b/examples/responses-structured-outputs/src/main.rs @@ -3,12 +3,9 @@ use std::error::Error; use async_openai::{ config::OpenAIConfig, traits::EventType, - types::{ - chat::ResponseFormatJsonSchema, - responses::{ - CreateResponseArgs, InputMessage, InputRole, OutputItem, OutputMessageContent, - ResponseStreamEvent, - }, + types::responses::{ + CreateResponseArgs, InputMessage, InputRole, OutputItem, OutputMessageContent, + ResponseFormatJsonSchema, ResponseStreamEvent, }, Client, }; From dc9320f9591e264f8252c2cba5691fcab2f94860 Mon Sep 17 00:00:00 2001 From: Himanshu Neema Date: Wed, 26 Nov 2025 16:54:18 -0800 Subject: [PATCH 32/32] fix for video-types --- async-openai/src/types/impls.rs | 10 ++++++---- async-openai/src/types/mod.rs | 3 ++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/async-openai/src/types/impls.rs 
b/async-openai/src/types/impls.rs index 56c87eb0..6026bf61 100644 --- a/async-openai/src/types/impls.rs +++ b/async-openai/src/types/impls.rs @@ -8,12 +8,13 @@ use crate::types::embeddings::EmbeddingInput; use crate::types::files::FileInput; #[cfg(feature = "moderation-types")] use crate::types::moderations::ModerationInput; -#[cfg(feature = "image-types")] +#[cfg(any(feature = "image-types", feature = "video-types"))] use crate::types::shared::ImageInput; #[cfg(any( feature = "audio-types", feature = "file-types", - feature = "image-types" + feature = "image-types", + feature = "video-types" ))] use crate::types::InputSource; @@ -137,7 +138,8 @@ impl_default!(EmbeddingInput); #[cfg(any( feature = "audio-types", feature = "file-types", - feature = "image-types" + feature = "image-types", + feature = "video-types" ))] macro_rules! impl_input { ($for_typ:ty) => { @@ -170,7 +172,7 @@ macro_rules! impl_input { impl_input!(AudioInput); #[cfg(feature = "file-types")] impl_input!(FileInput); -#[cfg(feature = "image-types")] +#[cfg(any(feature = "image-types", feature = "video-types"))] impl_input!(ImageInput); #[cfg(any( diff --git a/async-openai/src/types/mod.rs b/async-openai/src/types/mod.rs index b30d670a..98549a39 100644 --- a/async-openai/src/types/mod.rs +++ b/async-openai/src/types/mod.rs @@ -120,7 +120,8 @@ pub use metadata::*; feature = "chat-completion-types", feature = "completion-types", feature = "embedding-types", - feature = "moderation-types" + feature = "moderation-types", + feature = "video-types" ))] mod impls;