Merged
42 commits
4c6445b
updated client events
64bit Oct 22, 2025
70214d7
updated server event
64bit Nov 1, 2025
26542d0
updated rate limit
64bit Nov 1, 2025
6cfc72c
updated session configuration
64bit Nov 1, 2025
2dc4467
transcription session configuration
64bit Nov 1, 2025
7623dae
updates to realtime types
64bit Nov 2, 2025
7935950
updated Item
64bit Nov 2, 2025
c3c62a4
updated realtime types
64bit Nov 2, 2025
26f802a
update examples/realtime with GA api
64bit Nov 2, 2025
68d1344
checkpoint: responses types updates
64bit Nov 2, 2025
dd5e23f
checkpoint for updated types
64bit Nov 3, 2025
15c9435
checkpoint for updates to responses types
64bit Nov 3, 2025
84bbfe4
updates for CreateResponse
64bit Nov 3, 2025
b5bc8ed
add responses apis
64bit Nov 3, 2025
d20e865
list input items
64bit Nov 4, 2025
30964bf
add get_input_token_counts for responses
64bit Nov 4, 2025
2413171
implement ItemResource
64bit Nov 4, 2025
2f78f5d
types/responses dir
64bit Nov 4, 2025
d9dcf24
response streaming events
64bit Nov 4, 2025
7da4798
fix compilation
64bit Nov 4, 2025
569e595
compiling example/responses
64bit Nov 4, 2025
6db509f
fix types
64bit Nov 4, 2025
0f0bfa1
fix examples/responses-function-call
64bit Nov 4, 2025
86d5cf6
fix examples/responses-stream
64bit Nov 4, 2025
3421427
update it to RealtimeResponse to distinguish from Response
64bit Nov 4, 2025
7371c88
avoid name conflicts
64bit Nov 4, 2025
95a2217
update realtime types
64bit Nov 4, 2025
58557c0
update realtime example
64bit Nov 4, 2025
ce11c05
update names
64bit Nov 4, 2025
5f3dbed
updated realtime spec
64bit Nov 4, 2025
4e0fa1d
RealtimeConversationItem
64bit Nov 4, 2025
c39abf0
RealtimeConversationItem
64bit Nov 4, 2025
c8e6148
updates for the spec
64bit Nov 4, 2025
46c7159
update types to match spec
64bit Nov 5, 2025
6486c1c
types updated
64bit Nov 5, 2025
5a1cd63
update realtime types
64bit Nov 5, 2025
9571490
match realtime client event to spec
64bit Nov 5, 2025
fb26251
update examples/realtime
64bit Nov 5, 2025
d926b06
match realtime server event type names with spec
64bit Nov 5, 2025
1fbd6a2
match responses stream event names with spec
64bit Nov 5, 2025
66bdbcd
reusable type
64bit Nov 5, 2025
cc205a9
updated readme
64bit Nov 5, 2025
5 changes: 1 addition & 4 deletions async-openai/README.md
@@ -35,7 +35,7 @@
- [x] Models
- [x] Moderations
- [x] Organizations | Administration (partially implemented)
- [x] Realtime (Beta) (partially implemented)
- [x] Realtime GA (partially implemented)
- [x] Responses (partially implemented)
- [x] Uploads
- [x] Videos
@@ -65,7 +65,6 @@ $Env:OPENAI_API_KEY='sk-...'
## Realtime API

Only types for Realtime API are implemented, and can be enabled with feature flag `realtime`.
These types were written before OpenAI released official specs.

## Image Generation Example

@@ -179,8 +178,6 @@ To maintain quality of the project, a minimum of the following is a must for cod
This project adheres to [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct)

## Complimentary Crates

- [openai-func-enums](https://github.com/frankfralick/openai-func-enums) provides procedural macros that make it easier to use this library with OpenAI API's tool calling feature. It also provides derive macros you can add to existing [clap](https://github.com/clap-rs/clap) application subcommands for natural language use of command line tools. It also supports openai's [parallel tool calls](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling) and allows you to choose between running multiple tool calls concurrently or on their own OS threads.
- [async-openai-wasm](https://github.com/ifsheldon/async-openai-wasm) provides WASM support.

## License
76 changes: 71 additions & 5 deletions async-openai/src/responses.rs
@@ -1,13 +1,15 @@
use serde::Serialize;

use crate::{
config::Config,
error::OpenAIError,
types::responses::{CreateResponse, Response, ResponseStream},
types::responses::{
CreateResponse, DeleteResponse, Response, ResponseItemList, ResponseStream,
TokenCountsBody, TokenCountsResource,
},
Client,
};

/// Given text input or a list of context items, the model will generate a response.
///
/// Related guide: [Responses](https://platform.openai.com/docs/api-reference/responses)
pub struct Responses<'c, C: Config> {
client: &'c Client<C>,
}
@@ -18,7 +20,15 @@ impl<'c, C: Config> Responses<'c, C> {
Self { client }
}

/// Creates a model response for the given input.
/// Creates a model response. Provide [text](https://platform.openai.com/docs/guides/text) or
/// [image](https://platform.openai.com/docs/guides/images) inputs to generate
/// [text](https://platform.openai.com/docs/guides/text) or
/// [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have the model call
/// your own [custom code](https://platform.openai.com/docs/guides/function-calling) or use
/// built-in [tools](https://platform.openai.com/docs/guides/tools) like
/// [web search](https://platform.openai.com/docs/guides/tools-web-search)
/// or [file search](https://platform.openai.com/docs/guides/tools-file-search) to use your own data
/// as input for the model's response.
#[crate::byot(
T0 = serde::Serialize,
R = serde::de::DeserializeOwned
@@ -52,4 +62,60 @@ impl<'c, C: Config> Responses<'c, C> {
}
Ok(self.client.post_stream("/responses", request).await)
}

/// Retrieves a model response with the given ID.
#[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)]
pub async fn retrieve<Q>(&self, response_id: &str, query: &Q) -> Result<Response, OpenAIError>
where
Q: Serialize + ?Sized,
{
self.client
.get_with_query(&format!("/responses/{}", response_id), &query)
.await
}

/// Deletes a model response with the given ID.
#[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
pub async fn delete(&self, response_id: &str) -> Result<DeleteResponse, OpenAIError> {
self.client
.delete(&format!("/responses/{}", response_id))
.await
}

/// Cancels a model response with the given ID. Only responses created with the
/// `background` parameter set to `true` can be cancelled.
/// [Learn more](https://platform.openai.com/docs/guides/background).
#[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
pub async fn cancel(&self, response_id: &str) -> Result<Response, OpenAIError> {
self.client
.post(
&format!("/responses/{}/cancel", response_id),
serde_json::json!({}),
)
.await
}

/// Returns a list of input items for a given response.
#[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)]
pub async fn list_input_items<Q>(
&self,
response_id: &str,
query: &Q,
) -> Result<ResponseItemList, OpenAIError>
where
Q: Serialize + ?Sized,
{
self.client
.get_with_query(&format!("/responses/{}/input_items", response_id), &query)
.await
}

/// Gets input token counts for the given request.
#[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
pub async fn get_input_token_counts(
&self,
request: TokenCountsBody,
) -> Result<TokenCountsResource, OpenAIError> {
self.client.post("/responses/input_tokens", request).await
}
}
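
Taken together with `create` and `create_stream`, the additions above cover retrieve, delete, cancel, list input items, and input token counts. A minimal usage sketch follows; it assumes the crate exposes a `client.responses()` accessor and a `CreateResponseArgs` builder whose `input` setter accepts plain text, and the model name and query parameters are illustrative placeholders rather than part of this diff.

```rust
use async_openai::{types::responses::CreateResponseArgs, Client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Reads OPENAI_API_KEY from the environment.
    let client = Client::new();

    // Create a response from plain text input.
    let request = CreateResponseArgs::default()
        .model("gpt-4.1")
        .input("Write a one-line haiku about Rust.")
        .build()?;
    let response = client.responses().create(request).await?;
    println!("created response {}", response.id);

    // Inspect the input items behind it, then clean up.
    let items = client
        .responses()
        .list_input_items(&response.id, &[("limit", "10")])
        .await?;
    println!("{items:?}");

    client.responses().delete(&response.id).await?;
    Ok(())
}
```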
7 changes: 6 additions & 1 deletion async-openai/src/types/chat.rs
@@ -504,9 +504,14 @@ pub struct ResponseFormatJsonSchema {
/// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
pub name: String,
/// The schema for the response format, described as a JSON Schema object.
/// Learn how to build JSON schemas [here](https://json-schema.org/).
#[serde(skip_serializing_if = "Option::is_none")]
pub schema: Option<serde_json::Value>,
/// Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
/// Whether to enable strict schema adherence when generating the output.
/// If set to true, the model will always follow the exact schema defined
/// in the `schema` field. Only a subset of JSON Schema is supported when
/// `strict` is `true`. To learn more, read the [Structured Outputs
/// guide](https://platform.openai.com/docs/guides/structured-outputs).
#[serde(skip_serializing_if = "Option::is_none")]
pub strict: Option<bool>,
}
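
A short sketch of how `schema` and `strict` are typically used together, assuming the enclosing `ResponseFormat::JsonSchema` variant and a `description` field on this struct; the schema contents are placeholders.

```rust
use async_openai::types::{ResponseFormat, ResponseFormatJsonSchema};
use serde_json::json;

fn main() {
    // With `strict: Some(true)` the model must follow the schema exactly;
    // only the Structured Outputs subset of JSON Schema is allowed.
    let format = ResponseFormat::JsonSchema {
        json_schema: ResponseFormatJsonSchema {
            description: Some("A labeled point on a 2D grid".to_string()),
            name: "point".to_string(),
            schema: Some(json!({
                "type": "object",
                "properties": {
                    "label": { "type": "string" },
                    "x": { "type": "number" },
                    "y": { "type": "number" }
                },
                "required": ["label", "x", "y"],
                "additionalProperties": false
            })),
            strict: Some(true),
        },
    };
    println!("{}", serde_json::to_string_pretty(&format).unwrap());
}
```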
36 changes: 6 additions & 30 deletions async-openai/src/types/impls.rs
@@ -14,7 +14,7 @@ use crate::{
use bytes::Bytes;

use super::{
responses::{CodeInterpreterContainer, Input, InputContent, Role as ResponsesRole},
responses::{EasyInputContent, Role as ResponsesRole},
AddUploadPartRequest, AudioInput, AudioResponseFormat, ChatCompletionFunctionCall,
ChatCompletionFunctions, ChatCompletionNamedToolChoice, ChatCompletionRequestAssistantMessage,
ChatCompletionRequestAssistantMessageContent, ChatCompletionRequestDeveloperMessage,
@@ -1047,50 +1047,26 @@ impl AsyncTryFrom<CreateVideoRequest> for reqwest::multipart::Form {

// end: types to multipart form

impl Default for Input {
impl Default for EasyInputContent {
fn default() -> Self {
Self::Text("".to_string())
}
}

impl Default for InputContent {
fn default() -> Self {
Self::TextInput("".to_string())
}
}

impl From<String> for Input {
fn from(value: String) -> Self {
Input::Text(value)
}
}

impl From<&str> for Input {
fn from(value: &str) -> Self {
Input::Text(value.to_owned())
}
}

impl Default for ResponsesRole {
fn default() -> Self {
Self::User
}
}

impl From<String> for InputContent {
impl From<String> for EasyInputContent {
fn from(value: String) -> Self {
Self::TextInput(value)
Self::Text(value)
}
}

impl From<&str> for InputContent {
impl From<&str> for EasyInputContent {
fn from(value: &str) -> Self {
Self::TextInput(value.to_owned())
}
}

impl Default for CodeInterpreterContainer {
fn default() -> Self {
CodeInterpreterContainer::Id("".to_string())
Self::Text(value.to_owned())
}
}
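
The net effect of the renamed impls above: plain strings convert straight into `EasyInputContent`, and the default is empty text. A small sketch under that assumption (the module path is taken from the import at the top of this file):

```rust
use async_openai::types::responses::EasyInputContent;

fn main() {
    // `From<&str>` and `From<String>` both map to the `Text` variant,
    // and `Default` is empty text.
    let a: EasyInputContent = "hello".into();
    let b: EasyInputContent = String::from("world").into();
    let c = EasyInputContent::default();
    assert!(matches!(a, EasyInputContent::Text(s) if s == "hello"));
    assert!(matches!(b, EasyInputContent::Text(s) if s == "world"));
    assert!(matches!(c, EasyInputContent::Text(s) if s.is_empty()));
}
```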
137 changes: 137 additions & 0 deletions async-openai/src/types/mcp.rs
@@ -0,0 +1,137 @@
use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum McpToolConnectorId {
ConnectorDropbox,
ConnectorGmail,
ConnectorGooglecalendar,
ConnectorGoogledrive,
ConnectorMicrosoftteams,
ConnectorOutlookcalendar,
ConnectorOutlookemail,
ConnectorSharepoint,
}

#[derive(Debug, Serialize, Deserialize, Clone, Builder, PartialEq, Default)]
#[builder(
name = "MCPToolArgs",
pattern = "mutable",
setter(into, strip_option),
default
)]
#[builder(build_fn(error = "OpenAIError"))]
pub struct MCPTool {
/// A label for this MCP server, used to identify it in tool calls.
pub server_label: String,

/// List of allowed tool names or a filter object.
#[serde(skip_serializing_if = "Option::is_none")]
pub allowed_tools: Option<MCPToolAllowedTools>,

/// An OAuth access token that can be used with a remote MCP server, either with a custom MCP
/// server URL or a service connector. Your application must handle the OAuth authorization
/// flow and provide the token here.
#[serde(skip_serializing_if = "Option::is_none")]
pub authorization: Option<String>,

/// Identifier for service connectors, like those available in ChatGPT. One of `server_url` or
/// `connector_id` must be provided. Learn more about service connectors [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
///
/// Currently supported `connector_id` values are:
/// - Dropbox: `connector_dropbox`
/// - Gmail: `connector_gmail`
/// - Google Calendar: `connector_googlecalendar`
/// - Google Drive: `connector_googledrive`
/// - Microsoft Teams: `connector_microsoftteams`
/// - Outlook Calendar: `connector_outlookcalendar`
/// - Outlook Email: `connector_outlookemail`
/// - SharePoint: `connector_sharepoint`
#[serde(skip_serializing_if = "Option::is_none")]
pub connector_id: Option<McpToolConnectorId>,

/// Optional HTTP headers to send to the MCP server. Use for authentication or other purposes.
#[serde(skip_serializing_if = "Option::is_none")]
pub headers: Option<serde_json::Value>,

/// Specify which of the MCP server's tools require approval.
#[serde(skip_serializing_if = "Option::is_none")]
pub require_approval: Option<MCPToolRequireApproval>,

/// Optional description of the MCP server, used to provide more context.
#[serde(skip_serializing_if = "Option::is_none")]
pub server_description: Option<String>,

/// The URL for the MCP server. One of `server_url` or `connector_id` must be provided.
#[serde(skip_serializing_if = "Option::is_none")]
pub server_url: Option<String>,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MCPToolAllowedTools {
/// A string array of allowed tool names
List(Vec<String>),
/// A filter object to specify which tools are allowed.
Filter(MCPToolFilter),
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPToolFilter {
/// Indicates whether or not a tool modifies data or is read-only.
/// If an MCP server is annotated with [readOnlyHint](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
/// it will match this filter.
#[serde(skip_serializing_if = "Option::is_none")]
pub read_only: Option<bool>,
/// List of allowed tool names.
#[serde(skip_serializing_if = "Option::is_none")]
pub tool_names: Option<Vec<String>>,
}

/// Approval policy or filter for MCP tools.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(untagged)]
pub enum MCPToolRequireApproval {
/// Specify which of the MCP server's tools require approval. Can be
/// `always`, `never`, or a filter object associated with tools
/// that require approval.
Filter(MCPToolApprovalFilter),
/// Specify a single approval policy for all tools. One of `always` or
/// `never`. When set to `always`, all tools will require approval. When
/// set to `never`, all tools will not require approval.
ApprovalSetting(MCPToolApprovalSetting),
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum MCPToolApprovalSetting {
Always,
Never,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPToolApprovalFilter {
/// A list of tools that always require approval.
#[serde(skip_serializing_if = "Option::is_none")]
pub always: Option<MCPToolFilter>,
/// A list of tools that never require approval.
#[serde(skip_serializing_if = "Option::is_none")]
pub never: Option<MCPToolFilter>,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct MCPListToolsTool {
/// The JSON schema describing the tool's input.
pub input_schema: serde_json::Value,
/// The name of the tool.
pub name: String,
/// Additional annotations about the tool.
#[serde(skip_serializing_if = "Option::is_none")]
pub annotations: Option<serde_json::Value>,
/// The description of the tool.
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
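
A minimal construction sketch using the generated `MCPToolArgs` builder; the server label, URL, and tool names below are placeholders, not values from this crate.

```rust
use async_openai::{
    error::OpenAIError,
    types::{MCPToolAllowedTools, MCPToolApprovalSetting, MCPToolArgs, MCPToolRequireApproval},
};

fn main() -> Result<(), OpenAIError> {
    // Point the tool at a remote MCP server, allow only two of its tools,
    // and require approval for every call.
    let mcp_tool = MCPToolArgs::default()
        .server_label("deepwiki")
        .server_url("https://mcp.example.com/mcp")
        .allowed_tools(MCPToolAllowedTools::List(vec![
            "ask_question".to_string(),
            "read_wiki_structure".to_string(),
        ]))
        .require_approval(MCPToolRequireApproval::ApprovalSetting(
            MCPToolApprovalSetting::Always,
        ))
        .build()?;

    println!("{mcp_tool:?}");
    Ok(())
}
```

Alternatively, `connector_id` can be set instead of `server_url` to use one of the service connectors enumerated in `McpToolConnectorId`.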
2 changes: 2 additions & 0 deletions async-openai/src/types/mod.rs
@@ -14,6 +14,7 @@ mod file;
mod fine_tuning;
mod image;
mod invites;
mod mcp;
mod message;
mod model;
mod moderation;
@@ -46,6 +47,7 @@ pub use file::*;
pub use fine_tuning::*;
pub use image::*;
pub use invites::*;
pub use mcp::*;
pub use message::*;
pub use model::*;
pub use moderation::*;