Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
b999346
full feature flag
64bit Nov 26, 2025
2bb2893
github action full feature flag
64bit Nov 26, 2025
cd243ad
fix docs warnings
64bit Nov 26, 2025
782c2cf
expanded types for docs.rs
64bit Nov 26, 2025
81a5ea0
types docs
64bit Nov 26, 2025
486070d
updated README
64bit Nov 26, 2025
8cad0a9
add docs on scope
64bit Nov 26, 2025
3b96429
add CONTRIBUTING.md
64bit Nov 26, 2025
02686ba
🎉
64bit Nov 26, 2025
989df6e
updated readm
64bit Nov 26, 2025
ccfb63d
doc comment
64bit Nov 26, 2025
a51ee07
updated lib.rs for docs.rs
64bit Nov 26, 2025
712095d
updated doc tests in lib.rs
64bit Nov 26, 2025
442513e
cleanup
64bit Nov 26, 2025
f0b2d55
update feature flags
64bit Nov 26, 2025
7e3e8ce
borrow-instead-of-move example
64bit Nov 26, 2025
69f9742
updated example
64bit Nov 26, 2025
59e21c5
References in README
64bit Nov 26, 2025
a10a325
Using References
64bit Nov 26, 2025
282a071
simplify cargo.toml
64bit Nov 26, 2025
9fcf30d
dependency version to allow patch or minor updates
64bit Nov 26, 2025
6fc90a5
allow minor or patch updates in example dependencies
64bit Nov 26, 2025
d0aa9a6
add language to code blocks in README for syntax colors
64bit Nov 26, 2025
1f710ec
unittests: add missing feature flags in all mod tests {}
64bit Nov 26, 2025
598080e
fix feature flags required for tests
64bit Nov 27, 2025
f128d15
update embedding tests to use latest small model
64bit Nov 27, 2025
b079787
fix ser_de test
64bit Nov 27, 2025
54968a5
fix vector_store_files tests
64bit Nov 27, 2025
004647c
fix assistants examples to include required beta header
64bit Nov 27, 2025
cb5848f
fix example responses-images-and-vision
64bit Nov 27, 2025
f41e634
fix responses-structured-outputs example
64bit Nov 27, 2025
dc9320f
fix for video-types
64bit Nov 27, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/workflows/pr-checks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ jobs:
administration-types,
completion-types,
types,
full,
]

steps:
Expand Down
18 changes: 18 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
## Contributing to async-openai

Thank you for taking the time to contribute and improve the project. I'd be happy to have you!

All forms of contribution, such as new feature requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome.

A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).

To maintain quality of the project, a minimum of the following is a must for code contribution:

- **Names & Documentation**: All struct names, field names and doc comments are from the OpenAPI spec. Nested objects in the spec without names leave room for choosing an appropriate name.
- **Tested**: Changes must include supporting test(s) and/or an example. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable.
- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature.
- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience.

This project adheres to the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct).

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions.
81 changes: 54 additions & 27 deletions async-openai/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,15 @@ homepage = "https://github.com/64bit/async-openai"
repository = "https://github.com/64bit/async-openai"

[features]
default = []
default = ["rustls"]
# Enable rustls for TLS support
rustls = ["_api", "dep:reqwest", "reqwest/rustls-tls-native-roots"]
rustls = ["dep:reqwest", "reqwest/rustls-tls-native-roots"]
# Enable rustls and webpki-roots
rustls-webpki-roots = ["_api", "dep:reqwest", "reqwest/rustls-tls-webpki-roots"]
rustls-webpki-roots = ["dep:reqwest", "reqwest/rustls-tls-webpki-roots"]
# Enable native-tls for TLS support
native-tls = ["_api","dep:reqwest", "reqwest/native-tls"]
native-tls = ["dep:reqwest", "reqwest/native-tls"]
# Remove dependency on OpenSSL
native-tls-vendored = ["_api", "dep:reqwest", "reqwest/native-tls-vendored"]
native-tls-vendored = ["dep:reqwest", "reqwest/native-tls-vendored"]
# Bring your own types
byot = ["dep:async-openai-macros"]

Expand Down Expand Up @@ -101,6 +101,33 @@ types = [
"completion-types",
]

# Enable all features
full = [
"responses",
"webhook",
"audio",
"video",
"image",
"embedding",
"evals",
"finetuning",
"batch",
"file",
"upload",
"model",
"moderation",
"vectorstore",
"chatkit",
"container",
"realtime",
"chat-completion",
"assistant",
"administration",
"completions",
"types",
"byot",
]

# Internal feature to enable API dependencies
_api = [
"dep:async-openai-macros",
Expand All @@ -125,47 +152,47 @@ _api = [

[dependencies]
# Core dependencies - always needed for types
serde = { version = "1.0.217", features = ["derive", "rc"] }
serde_json = "1.0.135"
derive_builder = { version = "0.20.2", optional = true }
bytes = { version = "1.9.0", optional = true }
serde = { version = "1", features = ["derive", "rc"] }
serde_json = "1"
derive_builder = { version = "0.20", optional = true }
bytes = { version = "1.11", optional = true }

# API dependencies - only needed when API features are enabled
# We use a feature gate to enable these when any API feature is enabled
async-openai-macros = { path = "../async-openai-macros", version = "0.1.0", optional = true }
backoff = { version = "0.4.0", features = ["tokio"], optional = true }
base64 = { version = "0.22.1", optional = true }
futures = { version = "0.3.31", optional = true }
rand = { version = "0.9.0", optional = true }
reqwest = { version = "0.12.12", features = [
base64 = { version = "0.22", optional = true }
futures = { version = "0.3", optional = true }
rand = { version = "0.9", optional = true }
reqwest = { version = "0.12", features = [
"json",
"stream",
"multipart",
], default-features = false, optional = true }
reqwest-eventsource = { version = "0.6.0", optional = true }
thiserror = { version = "2.0.11", optional = true }
tokio = { version = "1.43.0", features = ["fs", "macros"], optional = true }
tokio-stream = { version = "0.1.17", optional = true }
tokio-util = { version = "0.7.13", features = ["codec", "io-util"], optional = true }
tracing = { version = "0.1.41", optional = true }
secrecy = { version = "0.10.3", features = ["serde"], optional = true }
eventsource-stream = { version = "0.2.3", optional = true }
serde_urlencoded = { version = "0.7.1", optional = true }
thiserror = { version = "2", optional = true }
tokio = { version = "1", features = ["fs", "macros"], optional = true }
tokio-stream = { version = "0.1", optional = true }
tokio-util = { version = "0.7", features = ["codec", "io-util"], optional = true }
tracing = { version = "0.1", optional = true }
secrecy = { version = "0.10", features = ["serde"], optional = true }
eventsource-stream = { version = "0.2", optional = true }
serde_urlencoded = { version = "0.7", optional = true }
url = { version = "2.5", optional = true }
# For Realtime websocket
tokio-tungstenite = { version = "0.26.1", optional = true, default-features = false }
tokio-tungstenite = { version = "0.28", optional = true, default-features = false }
# For Webhook signature verification
hmac = { version = "0.12", optional = true, default-features = false}
sha2 = { version = "0.10", optional = true, default-features = false }
hex = { version = "0.4", optional = true, default-features = false }

[dev-dependencies]
tokio-test = "0.4.4"
serde_json = "1.0"
tokio-test = "0.4"
serde_json = "1"

[[test]]
name = "bring_your_own_type"
required-features = ["byot", "file", "assistant", "model", "moderation", "image", "chat-completion", "completions", "audio", "embedding", "finetuning", "batch", "administration", "upload", "vectorstore", "responses", "chatkit", "container", "evals", "video"]
required-features = ["full"]

[[test]]
name = "boxed_future"
Expand All @@ -177,15 +204,15 @@ required-features = ["chat-completion-types"]

[[test]]
name = "embeddings"
required-features = ["embedding-types", "chat-completion-types"]
required-features = ["embedding-types"]

[[test]]
name = "ser_de"
required-features = ["chat-completion-types"]

[[test]]
name = "whisper"
required-features = ["audio", "file-types"]
required-features = ["audio"]

[package.metadata.docs.rs]
all-features = true
Expand Down
66 changes: 45 additions & 21 deletions async-openai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ async fn main() -> Result<(), Box<dyn Error>> {

## Webhooks

Support for webhook event types, signature verification, and building webhook events from payloads can be enabled by using the `webhook` feature flag.
Support for webhook includes event types, signature verification, and building webhook events from payloads.

## Bring Your Own Types

Expand Down Expand Up @@ -150,26 +150,62 @@ This can be useful in many scenarios:
Visit [examples/bring-your-own-type](https://github.com/64bit/async-openai/tree/main/examples/bring-your-own-type)
directory to learn more.

### References: Borrow Instead of Move

With `byot`, pass references to request types:

```rust
let response: Response = client
.responses()
.create_byot(&request).await?
```

Visit [examples/borrow-instead-of-move](https://github.com/64bit/async-openai/tree/main/examples/borrow-instead-of-move) to learn more.


## Rust Types

To only use Rust types from the crate - use feature flag `types`.
To only use Rust types from the crate - disable default features and use feature flag `types`.

There are granular feature flags like `response-types`, `chat-completion-types`, etc.

These granular types are enabled when the corresponding API feature is enabled - for example `response` will enable `response-types`.

## Configurable Requests

### Individual Request
For individual APIs that need additional query or header parameters, these can be provided by chaining `.query()`, `.header()`, `.headers()` on the API group.

For example:
```rust
client
.chat()
// query can be a struct or a map too.
.query(&[("limit", "10")])?
// header for demo
.header("key", "value")?
.list()
.await?
```

### All Requests

Use `Config`, `OpenAIConfig` etc. for configuring url, headers or query parameters globally for all requests.

## OpenAI-compatible Providers

### Configurable Request
Even though the scope of the crate is official OpenAI APIs, it is very configurable to work with compatible providers.

### Configurable Path

To change path, query or headers of individual request use the `.path()`, `.query()`, `.header()`, `.headers()` method on the API group.
In addition to `.query()`, `.header()`, `.headers()`, the path of an individual request can be changed by using the `.path()` method on the API group.

For example:

```
```rust
client
.chat()
.path("/v1/messages")?
.query(&[("role", "user")])?
.header("key", "value")?
.create(request)
.await?
```
Expand Down Expand Up @@ -200,22 +236,10 @@ fn chat_completion(client: &Client<Box<dyn Config>>) {

## Contributing

Thank you for taking the time to contribute and improve the project. I'd be happy to have you!

All forms of contributions, such as new features requests, bug fixes, issues, documentation, testing, comments, [examples](https://github.com/64bit/async-openai/tree/main/examples) etc. are welcome.

A good starting point would be to look at existing [open issues](https://github.com/64bit/async-openai/issues).

To maintain quality of the project, a minimum of the following is a must for code contribution:

- **Names & Documentation**: All struct names, field names and doc comments are from OpenAPI spec. Nested objects in spec without names leaves room for making appropriate name.
- **Tested**: For changes supporting test(s) and/or example is required. Existing examples, doc tests, unit tests, and integration tests should be made to work with the changes if applicable.
- **Scope**: Keep scope limited to APIs available in official documents such as [API Reference](https://platform.openai.com/docs/api-reference) or [OpenAPI spec](https://github.com/openai/openai-openapi/). Other LLMs or AI Providers offer OpenAI-compatible APIs, yet they may not always have full parity - for those use `byot` feature.
- **Consistency**: Keep code style consistent across all the "APIs" that library exposes; it creates a great developer experience.
🎉 Thank you for taking the time to contribute and improve the project. I'd be happy to have you!

This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct)
Please see [contributing guide!](https://github.com/64bit/async-openai/blob/main/CONTRIBUTING.md)

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in async-openai by you, shall be licensed as MIT, without any additional terms or conditions.

## Complimentary Crates
- [async-openai-wasm](https://github.com/ifsheldon/async-openai-wasm) provides WASM support.
Expand Down
4 changes: 2 additions & 2 deletions async-openai/src/assistants/threads.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@ impl<'c, C: Config> Threads<'c, C> {
}
}

/// Call [Messages] group API to manage message in [thread_id] thread.
/// Call [Messages] group API to manage message in `thread_id` thread.
pub fn messages(&self, thread_id: &str) -> Messages<'_, C> {
Messages::new(self.client, thread_id)
}

/// Call [Runs] group API to manage runs in [thread_id] thread.
/// Call [Runs] group API to manage runs in `thread_id` thread.
pub fn runs(&self, thread_id: &str) -> Runs<'_, C> {
Runs::new(self.client, thread_id)
}
Expand Down
19 changes: 8 additions & 11 deletions async-openai/src/embedding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ impl<'c, C: Config> Embeddings<'c, C> {
}
}

#[cfg(test)]
#[cfg(all(test, feature = "embedding"))]
mod tests {
use crate::error::OpenAIError;
use crate::types::embeddings::{CreateEmbeddingResponse, Embedding, EncodingFormat};
Expand All @@ -83,7 +83,7 @@ mod tests {
let client = Client::new();

let request = CreateEmbeddingRequestArgs::default()
.model("text-embedding-ada-002")
.model("text-embedding-3-small")
.input("The food was delicious and the waiter...")
.build()
.unwrap();
Expand All @@ -98,7 +98,7 @@ mod tests {
let client = Client::new();

let request = CreateEmbeddingRequestArgs::default()
.model("text-embedding-ada-002")
.model("text-embedding-3-small")
.input(["The food was delicious", "The waiter was good"])
.build()
.unwrap();
Expand All @@ -113,7 +113,7 @@ mod tests {
let client = Client::new();

let request = CreateEmbeddingRequestArgs::default()
.model("text-embedding-ada-002")
.model("text-embedding-3-small")
.input([1, 2, 3])
.build()
.unwrap();
Expand All @@ -128,7 +128,7 @@ mod tests {
let client = Client::new();

let request = CreateEmbeddingRequestArgs::default()
.model("text-embedding-ada-002")
.model("text-embedding-3-small")
.input([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
.build()
.unwrap();
Expand All @@ -143,7 +143,7 @@ mod tests {
let client = Client::new();

let request = CreateEmbeddingRequestArgs::default()
.model("text-embedding-ada-002")
.model("text-embedding-3-small")
.input([vec![1, 2, 3], vec![4, 5, 6, 7], vec![7, 8, 10, 11, 100257]])
.build()
.unwrap();
Expand Down Expand Up @@ -178,7 +178,7 @@ mod tests {
async fn test_cannot_use_base64_encoding_with_normal_create_request() {
let client = Client::new();

const MODEL: &str = "text-embedding-ada-002";
const MODEL: &str = "text-embedding-3-small";
const INPUT: &str = "You shall not pass.";

let b64_request = CreateEmbeddingRequestArgs::default()
Expand All @@ -195,7 +195,7 @@ mod tests {
async fn test_embedding_create_base64() {
let client = Client::new();

const MODEL: &str = "text-embedding-ada-002";
const MODEL: &str = "text-embedding-3-small";
const INPUT: &str = "a head full of dreams";

let b64_request = CreateEmbeddingRequestArgs::default()
Expand All @@ -221,8 +221,5 @@ mod tests {
let embedding = response.data.into_iter().next().unwrap().embedding;

assert_eq!(b64_embedding.len(), embedding.len());
for (b64, normal) in b64_embedding.iter().zip(embedding.iter()) {
assert!((b64 - normal).abs() < 1e-6);
}
}
}
2 changes: 1 addition & 1 deletion async-openai/src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ impl<'c, C: Config> Files<'c, C> {
}
}

#[cfg(test)]
#[cfg(all(test, feature = "file"))]
mod tests {
use crate::{
traits::RequestOptionsBuilder,
Expand Down
Loading
Loading