diff --git a/async-openai/Cargo.toml b/async-openai/Cargo.toml index e28f201d..2eee2d2b 100644 --- a/async-openai/Cargo.toml +++ b/async-openai/Cargo.toml @@ -62,7 +62,7 @@ tokio-test = "0.4.4" serde_json = "1.0" [[test]] -name = "bring-your-own-type" +name = "bring_your_own_type" required-features = ["byot"] [package.metadata.docs.rs] diff --git a/async-openai/src/admin.rs b/async-openai/src/admin/admin_.rs similarity index 85% rename from async-openai/src/admin.rs rename to async-openai/src/admin/admin_.rs index 06dfb469..d061f189 100644 --- a/async-openai/src/admin.rs +++ b/async-openai/src/admin/admin_.rs @@ -1,7 +1,6 @@ use crate::{ - admin_api_keys::AdminAPIKeys, audit_logs::AuditLogs, certificates::Certificates, - config::Config, groups::Groups, invites::Invites, projects::Projects, roles::Roles, - users::Users, Client, + config::Config, AdminAPIKeys, AuditLogs, Certificates, Client, Groups, Invites, Projects, + Roles, Usage, Users, }; /// Admin group for all administration APIs. @@ -54,4 +53,9 @@ impl<'c, C: Config> Admin<'c, C> { pub fn groups(&self) -> Groups<'_, C> { Groups::new(self.client) } + + /// To call [Usage] group related APIs using this client. 
+ pub fn usage(&self) -> Usage<'_, C> { + Usage::new(self.client) + } } diff --git a/async-openai/src/admin_api_keys.rs b/async-openai/src/admin/admin_api_keys.rs similarity index 100% rename from async-openai/src/admin_api_keys.rs rename to async-openai/src/admin/admin_api_keys.rs diff --git a/async-openai/src/audit_logs.rs b/async-openai/src/admin/audit_logs.rs similarity index 100% rename from async-openai/src/audit_logs.rs rename to async-openai/src/admin/audit_logs.rs diff --git a/async-openai/src/certificates.rs b/async-openai/src/admin/certificates.rs similarity index 100% rename from async-openai/src/certificates.rs rename to async-openai/src/admin/certificates.rs diff --git a/async-openai/src/group_roles.rs b/async-openai/src/admin/group_roles.rs similarity index 100% rename from async-openai/src/group_roles.rs rename to async-openai/src/admin/group_roles.rs diff --git a/async-openai/src/group_users.rs b/async-openai/src/admin/group_users.rs similarity index 100% rename from async-openai/src/group_users.rs rename to async-openai/src/admin/group_users.rs diff --git a/async-openai/src/groups.rs b/async-openai/src/admin/groups.rs similarity index 96% rename from async-openai/src/groups.rs rename to async-openai/src/admin/groups.rs index c15e599b..ab55a33b 100644 --- a/async-openai/src/groups.rs +++ b/async-openai/src/admin/groups.rs @@ -1,13 +1,11 @@ use crate::{ config::Config, error::OpenAIError, - group_roles::GroupRoles, - group_users::GroupUsers, types::admin::groups::{ CreateGroupBody, GroupDeletedResource, GroupListResource, GroupResourceWithSuccess, GroupResponse, }, - Client, RequestOptions, + Client, GroupRoles, GroupUsers, RequestOptions, }; /// Manage reusable collections of users for organization-wide access control and maintain their membership. 
diff --git a/async-openai/src/invites.rs b/async-openai/src/admin/invites.rs similarity index 100% rename from async-openai/src/invites.rs rename to async-openai/src/admin/invites.rs diff --git a/async-openai/src/admin/mod.rs b/async-openai/src/admin/mod.rs new file mode 100644 index 00000000..55691896 --- /dev/null +++ b/async-openai/src/admin/mod.rs @@ -0,0 +1,45 @@ +mod admin_; +mod admin_api_keys; +mod audit_logs; +mod certificates; +mod group_roles; +mod group_users; +mod groups; +mod invites; +mod project_api_keys; +mod project_certificates; +mod project_group_roles; +mod project_groups; +mod project_rate_limits; +mod project_roles; +mod project_service_accounts; +mod project_user_roles; +mod project_users; +mod projects; +mod roles; +mod usage; +mod user_roles; +mod users; + +pub use admin_::*; +pub use admin_api_keys::*; +pub use audit_logs::*; +pub use certificates::*; +pub use group_roles::*; +pub use group_users::*; +pub use groups::*; +pub use invites::*; +pub use project_api_keys::*; +pub use project_certificates::*; +pub use project_group_roles::*; +pub use project_groups::*; +pub use project_rate_limits::*; +pub use project_roles::*; +pub use project_service_accounts::*; +pub use project_user_roles::*; +pub use project_users::*; +pub use projects::*; +pub use roles::*; +pub use usage::*; +pub use user_roles::*; +pub use users::*; diff --git a/async-openai/src/project_api_keys.rs b/async-openai/src/admin/project_api_keys.rs similarity index 100% rename from async-openai/src/project_api_keys.rs rename to async-openai/src/admin/project_api_keys.rs diff --git a/async-openai/src/project_certificates.rs b/async-openai/src/admin/project_certificates.rs similarity index 100% rename from async-openai/src/project_certificates.rs rename to async-openai/src/admin/project_certificates.rs diff --git a/async-openai/src/project_group_roles.rs b/async-openai/src/admin/project_group_roles.rs similarity index 100% rename from async-openai/src/project_group_roles.rs 
rename to async-openai/src/admin/project_group_roles.rs diff --git a/async-openai/src/project_groups.rs b/async-openai/src/admin/project_groups.rs similarity index 100% rename from async-openai/src/project_groups.rs rename to async-openai/src/admin/project_groups.rs diff --git a/async-openai/src/project_rate_limits.rs b/async-openai/src/admin/project_rate_limits.rs similarity index 100% rename from async-openai/src/project_rate_limits.rs rename to async-openai/src/admin/project_rate_limits.rs diff --git a/async-openai/src/project_roles.rs b/async-openai/src/admin/project_roles.rs similarity index 100% rename from async-openai/src/project_roles.rs rename to async-openai/src/admin/project_roles.rs diff --git a/async-openai/src/project_service_accounts.rs b/async-openai/src/admin/project_service_accounts.rs similarity index 100% rename from async-openai/src/project_service_accounts.rs rename to async-openai/src/admin/project_service_accounts.rs diff --git a/async-openai/src/project_user_roles.rs b/async-openai/src/admin/project_user_roles.rs similarity index 100% rename from async-openai/src/project_user_roles.rs rename to async-openai/src/admin/project_user_roles.rs diff --git a/async-openai/src/project_users.rs b/async-openai/src/admin/project_users.rs similarity index 100% rename from async-openai/src/project_users.rs rename to async-openai/src/admin/project_users.rs diff --git a/async-openai/src/projects.rs b/async-openai/src/admin/projects.rs similarity index 92% rename from async-openai/src/projects.rs rename to async-openai/src/admin/projects.rs index 47277e49..2a72fdaf 100644 --- a/async-openai/src/projects.rs +++ b/async-openai/src/admin/projects.rs @@ -1,17 +1,12 @@ use crate::{ config::Config, error::OpenAIError, - project_api_keys::ProjectAPIKeys, - project_certificates::ProjectCertificates, - project_group_roles::ProjectGroupRoles, - project_groups::ProjectGroups, - project_rate_limits::ProjectRateLimits, - project_roles::ProjectRoles, - 
project_user_roles::ProjectUserRoles, types::admin::projects::{ Project, ProjectCreateRequest, ProjectListResponse, ProjectUpdateRequest, }, - Client, ProjectServiceAccounts, ProjectUsers, RequestOptions, + Client, ProjectAPIKeys, ProjectCertificates, ProjectGroupRoles, ProjectGroups, + ProjectRateLimits, ProjectRoles, ProjectServiceAccounts, ProjectUserRoles, ProjectUsers, + RequestOptions, }; /// Manage the projects within an organization includes creation, updating, and archiving or projects. diff --git a/async-openai/src/roles.rs b/async-openai/src/admin/roles.rs similarity index 100% rename from async-openai/src/roles.rs rename to async-openai/src/admin/roles.rs diff --git a/async-openai/src/usage.rs b/async-openai/src/admin/usage.rs similarity index 100% rename from async-openai/src/usage.rs rename to async-openai/src/admin/usage.rs diff --git a/async-openai/src/user_roles.rs b/async-openai/src/admin/user_roles.rs similarity index 100% rename from async-openai/src/user_roles.rs rename to async-openai/src/admin/user_roles.rs diff --git a/async-openai/src/users.rs b/async-openai/src/admin/users.rs similarity index 97% rename from async-openai/src/users.rs rename to async-openai/src/admin/users.rs index a38f4efd..391b6312 100644 --- a/async-openai/src/users.rs +++ b/async-openai/src/admin/users.rs @@ -2,8 +2,7 @@ use crate::{ config::Config, error::OpenAIError, types::admin::users::{User, UserDeleteResponse, UserListResponse, UserRoleUpdateRequest}, - user_roles::UserRoles, - Client, RequestOptions, + Client, RequestOptions, UserRoles, }; /// Manage users and their role in an organization. Users will be automatically added to the Default project. 
diff --git a/async-openai/src/assistants.rs b/async-openai/src/assistants/assistants_.rs similarity index 100% rename from async-openai/src/assistants.rs rename to async-openai/src/assistants/assistants_.rs diff --git a/async-openai/src/messages.rs b/async-openai/src/assistants/messages.rs similarity index 100% rename from async-openai/src/messages.rs rename to async-openai/src/assistants/messages.rs diff --git a/async-openai/src/assistants/mod.rs b/async-openai/src/assistants/mod.rs new file mode 100644 index 00000000..5016d90a --- /dev/null +++ b/async-openai/src/assistants/mod.rs @@ -0,0 +1,11 @@ +mod assistants_; +mod messages; +mod runs; +mod steps; +mod threads; + +pub use assistants_::*; +pub use messages::*; +pub use runs::*; +pub use steps::*; +pub use threads::*; diff --git a/async-openai/src/runs.rs b/async-openai/src/assistants/runs.rs similarity index 99% rename from async-openai/src/runs.rs rename to async-openai/src/assistants/runs.rs index 39ca5b23..93dd56a6 100644 --- a/async-openai/src/runs.rs +++ b/async-openai/src/assistants/runs.rs @@ -1,7 +1,7 @@ use crate::{ + assistants::Steps, config::Config, error::OpenAIError, - steps::Steps, types::assistants::{ AssistantEventStream, CreateRunRequest, ListRunsResponse, ModifyRunRequest, RunObject, SubmitToolOutputsRunRequest, diff --git a/async-openai/src/steps.rs b/async-openai/src/assistants/steps.rs similarity index 100% rename from async-openai/src/steps.rs rename to async-openai/src/assistants/steps.rs diff --git a/async-openai/src/threads.rs b/async-openai/src/assistants/threads.rs similarity index 100% rename from async-openai/src/threads.rs rename to async-openai/src/assistants/threads.rs diff --git a/async-openai/src/audio.rs b/async-openai/src/audio/audio_.rs similarity index 100% rename from async-openai/src/audio.rs rename to async-openai/src/audio/audio_.rs diff --git a/async-openai/src/audio/mod.rs b/async-openai/src/audio/mod.rs new file mode 100644 index 00000000..f96362df --- /dev/null 
+++ b/async-openai/src/audio/mod.rs @@ -0,0 +1,9 @@ +mod audio_; +mod speech; +mod transcriptions; +mod translations; + +pub use audio_::*; +pub use speech::*; +pub use transcriptions::*; +pub use translations::*; diff --git a/async-openai/src/speech.rs b/async-openai/src/audio/speech.rs similarity index 100% rename from async-openai/src/speech.rs rename to async-openai/src/audio/speech.rs diff --git a/async-openai/src/transcriptions.rs b/async-openai/src/audio/transcriptions.rs similarity index 100% rename from async-openai/src/transcriptions.rs rename to async-openai/src/audio/transcriptions.rs diff --git a/async-openai/src/translations.rs b/async-openai/src/audio/translations.rs similarity index 100% rename from async-openai/src/translations.rs rename to async-openai/src/audio/translations.rs diff --git a/async-openai/src/client.rs b/async-openai/src/client.rs index be35142f..1e0e3682 100644 --- a/async-openai/src/client.rs +++ b/async-openai/src/client.rs @@ -16,7 +16,7 @@ use crate::{ moderation::Moderations, traits::AsyncTryFrom, Assistants, Audio, Batches, Chat, Completions, Containers, Conversations, Embeddings, Evals, - FineTuning, Models, RequestOptions, Responses, Threads, Uploads, Usage, VectorStores, Videos, + FineTuning, Models, RequestOptions, Responses, Threads, Uploads, VectorStores, Videos, }; #[cfg(feature = "realtime")] @@ -158,11 +158,6 @@ impl Client { Admin::new(self) } - /// To call [Usage] group related APIs using this client. - pub fn usage(&self) -> Usage<'_, C> { - Usage::new(self) - } - /// To call [Responses] group related APIs using this client. 
pub fn responses(&self) -> Responses<'_, C> { Responses::new(self) diff --git a/async-openai/src/completion.rs b/async-openai/src/completion.rs index 2d21b9cc..5d538091 100644 --- a/async-openai/src/completion.rs +++ b/async-openai/src/completion.rs @@ -2,7 +2,9 @@ use crate::{ client::Client, config::Config, error::OpenAIError, - types::{CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse}, + types::completions::{ + CompletionResponseStream, CreateCompletionRequest, CreateCompletionResponse, + }, RequestOptions, }; diff --git a/async-openai/src/container_files.rs b/async-openai/src/containers/container_files.rs similarity index 100% rename from async-openai/src/container_files.rs rename to async-openai/src/containers/container_files.rs diff --git a/async-openai/src/containers.rs b/async-openai/src/containers/containers_.rs similarity index 96% rename from async-openai/src/containers.rs rename to async-openai/src/containers/containers_.rs index c5b58bff..0e5b7489 100644 --- a/async-openai/src/containers.rs +++ b/async-openai/src/containers/containers_.rs @@ -1,11 +1,10 @@ use crate::{ config::Config, - container_files::ContainerFiles, error::OpenAIError, types::containers::{ ContainerListResource, ContainerResource, CreateContainerRequest, DeleteContainerResponse, }, - Client, RequestOptions, + Client, ContainerFiles, RequestOptions, }; pub struct Containers<'c, C: Config> { diff --git a/async-openai/src/containers/mod.rs b/async-openai/src/containers/mod.rs new file mode 100644 index 00000000..72051ed7 --- /dev/null +++ b/async-openai/src/containers/mod.rs @@ -0,0 +1,5 @@ +mod container_files; +mod containers_; + +pub use container_files::*; +pub use containers_::*; diff --git a/async-openai/src/eval_run_output_items.rs b/async-openai/src/evals/eval_run_output_items.rs similarity index 100% rename from async-openai/src/eval_run_output_items.rs rename to async-openai/src/evals/eval_run_output_items.rs diff --git 
a/async-openai/src/eval_runs.rs b/async-openai/src/evals/eval_runs.rs similarity index 97% rename from async-openai/src/eval_runs.rs rename to async-openai/src/evals/eval_runs.rs index 590e4ec9..be8619f4 100644 --- a/async-openai/src/eval_runs.rs +++ b/async-openai/src/evals/eval_runs.rs @@ -1,9 +1,8 @@ use crate::{ config::Config, error::OpenAIError, - eval_run_output_items::EvalRunOutputItems, types::evals::{CreateEvalRunRequest, DeleteEvalRunResponse, EvalRun, EvalRunList}, - Client, RequestOptions, + Client, EvalRunOutputItems, RequestOptions, }; pub struct EvalRuns<'c, C: Config> { diff --git a/async-openai/src/evals.rs b/async-openai/src/evals/evals_.rs similarity index 98% rename from async-openai/src/evals.rs rename to async-openai/src/evals/evals_.rs index 00e8ac5c..ec92cee6 100644 --- a/async-openai/src/evals.rs +++ b/async-openai/src/evals/evals_.rs @@ -1,9 +1,8 @@ use crate::{ config::Config, error::OpenAIError, - eval_runs::EvalRuns, types::evals::{CreateEvalRequest, DeleteEvalResponse, Eval, EvalList, UpdateEvalRequest}, - Client, RequestOptions, + Client, EvalRuns, RequestOptions, }; /// Create, manage, and run evals in the OpenAI platform. 
Related guide: diff --git a/async-openai/src/evals/mod.rs b/async-openai/src/evals/mod.rs new file mode 100644 index 00000000..45fc0be4 --- /dev/null +++ b/async-openai/src/evals/mod.rs @@ -0,0 +1,7 @@ +mod eval_run_output_items; +mod eval_runs; +mod evals_; + +pub use eval_run_output_items::*; +pub use eval_runs::*; +pub use evals_::*; diff --git a/async-openai/src/impls.rs b/async-openai/src/impls.rs index 2875411a..09d42dbb 100644 --- a/async-openai/src/impls.rs +++ b/async-openai/src/impls.rs @@ -1,56 +1,56 @@ use crate::{ - admin_api_keys::AdminAPIKeys, + admin::AdminAPIKeys, + admin::AuditLogs, + admin::Certificates, + admin::GroupRoles, + admin::GroupUsers, + admin::Groups, + admin::Invites, + admin::ProjectAPIKeys, + admin::ProjectCertificates, + admin::ProjectGroupRoles, + admin::ProjectGroups, + admin::ProjectRateLimits, + admin::ProjectRoles, + admin::ProjectServiceAccounts, + admin::ProjectUserRoles, + admin::ProjectUsers, + admin::Projects, + admin::Roles, + admin::Usage, + admin::UserRoles, + admin::Users, assistants::Assistants, + assistants::Messages, + assistants::Runs, + assistants::Steps, + assistants::Threads, audio::Audio, - audit_logs::AuditLogs, + audio::Speech, + audio::Transcriptions, + audio::Translations, batches::Batches, - certificates::Certificates, chat::Chat, chatkit::{Chatkit, ChatkitSessions, ChatkitThreads}, completion::Completions, - container_files::ContainerFiles, + containers::ContainerFiles, containers::Containers, - conversation_items::ConversationItems, - conversations::Conversations, embedding::Embeddings, - eval_run_output_items::EvalRunOutputItems, - eval_runs::EvalRuns, + evals::EvalRunOutputItems, + evals::EvalRuns, evals::Evals, file::Files, fine_tuning::FineTuning, - group_roles::GroupRoles, - group_users::GroupUsers, - groups::Groups, image::Images, - invites::Invites, - messages::Messages, model::Models, moderation::Moderations, - project_api_keys::ProjectAPIKeys, - project_certificates::ProjectCertificates, - 
project_group_roles::ProjectGroupRoles, - project_groups::ProjectGroups, - project_rate_limits::ProjectRateLimits, - project_roles::ProjectRoles, - project_service_accounts::ProjectServiceAccounts, - project_user_roles::ProjectUserRoles, - project_users::ProjectUsers, - projects::Projects, + responses::ConversationItems, + responses::Conversations, responses::Responses, - roles::Roles, - runs::Runs, - speech::Speech, - steps::Steps, - threads::Threads, - transcriptions::Transcriptions, - translations::Translations, uploads::Uploads, - usage::Usage, - user_roles::UserRoles, - users::Users, - vector_store_file_batches::VectorStoreFileBatches, - vector_store_files::VectorStoreFiles, - vector_stores::VectorStores, + vectorstores::VectorStoreFileBatches, + vectorstores::VectorStoreFiles, + vectorstores::VectorStores, video::Videos, }; diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index fd17d1cb..57a3179c 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -141,128 +141,59 @@ pub(crate) use async_openai_macros::byot; pub(crate) use async_openai_macros::byot_passthrough as byot; mod admin; -mod admin_api_keys; mod assistants; mod audio; -mod audit_logs; mod batches; -mod certificates; mod chat; mod chatkit; mod client; mod completion; pub mod config; -mod container_files; mod containers; -mod conversation_items; -mod conversations; mod download; mod embedding; pub mod error; -mod eval_run_output_items; -mod eval_runs; mod evals; mod file; mod fine_tuning; -mod group_roles; -mod group_users; -mod groups; mod image; mod impls; -mod invites; -mod messages; mod model; mod moderation; -mod project_api_keys; -mod project_certificates; -mod project_group_roles; -mod project_groups; -mod project_rate_limits; -mod project_roles; -mod project_service_accounts; -mod project_user_roles; -mod project_users; -mod projects; #[cfg(feature = "realtime")] mod realtime; mod request_options; mod responses; -mod roles; -mod runs; -mod speech; -mod steps; 
-mod threads; pub mod traits; -mod transcriptions; -mod translations; pub mod types; mod uploads; -mod usage; -mod user_roles; -mod users; mod util; -mod vector_store_file_batches; -mod vector_store_files; -mod vector_stores; +mod vectorstores; mod video; #[cfg(feature = "webhook")] pub mod webhooks; -pub use admin::Admin; -pub use admin_api_keys::AdminAPIKeys; -pub use assistants::Assistants; +pub use admin::*; +pub use assistants::*; pub use audio::Audio; -pub use audit_logs::AuditLogs; +pub use audio::*; pub use batches::Batches; -pub use certificates::Certificates; pub use chat::Chat; pub use chatkit::Chatkit; pub use client::Client; pub use completion::Completions; -pub use container_files::ContainerFiles; -pub use containers::Containers; -pub use conversation_items::ConversationItems; -pub use conversations::Conversations; +pub use containers::*; pub use embedding::Embeddings; -pub use eval_run_output_items::EvalRunOutputItems; -pub use eval_runs::EvalRuns; -pub use evals::Evals; +pub use evals::*; pub use file::Files; pub use fine_tuning::FineTuning; -pub use group_roles::GroupRoles; -pub use group_users::GroupUsers; -pub use groups::Groups; pub use image::Images; -pub use invites::Invites; -pub use messages::Messages; pub use model::Models; pub use moderation::Moderations; -pub use project_api_keys::ProjectAPIKeys; -pub use project_certificates::ProjectCertificates; -pub use project_group_roles::ProjectGroupRoles; -pub use project_groups::ProjectGroups; -pub use project_rate_limits::ProjectRateLimits; -pub use project_roles::ProjectRoles; -pub use project_service_accounts::ProjectServiceAccounts; -pub use project_user_roles::ProjectUserRoles; -pub use project_users::ProjectUsers; -pub use projects::Projects; #[cfg(feature = "realtime")] pub use realtime::Realtime; pub use request_options::RequestOptions; -pub use responses::Responses; -pub use roles::Roles; -pub use runs::Runs; -pub use speech::Speech; -pub use steps::Steps; -pub use threads::Threads; -pub 
use transcriptions::Transcriptions; -pub use translations::Translations; +pub use responses::*; pub use uploads::Uploads; -pub use usage::Usage; -pub use user_roles::UserRoles; -pub use users::Users; -pub use vector_store_file_batches::VectorStoreFileBatches; -pub use vector_store_files::VectorStoreFiles; -pub use vector_stores::VectorStores; +pub use vectorstores::*; pub use video::Videos; diff --git a/async-openai/src/conversation_items.rs b/async-openai/src/responses/conversation_items.rs similarity index 100% rename from async-openai/src/conversation_items.rs rename to async-openai/src/responses/conversation_items.rs diff --git a/async-openai/src/conversations.rs b/async-openai/src/responses/conversations.rs similarity index 97% rename from async-openai/src/conversations.rs rename to async-openai/src/responses/conversations.rs index 87598ef6..1afa4944 100644 --- a/async-openai/src/conversations.rs +++ b/async-openai/src/responses/conversations.rs @@ -1,12 +1,11 @@ use crate::{ config::Config, - conversation_items::ConversationItems, error::OpenAIError, types::responses::{ ConversationResource, CreateConversationRequest, DeleteConversationResponse, UpdateConversationRequest, }, - Client, RequestOptions, + Client, ConversationItems, RequestOptions, }; pub struct Conversations<'c, C: Config> { diff --git a/async-openai/src/responses/mod.rs b/async-openai/src/responses/mod.rs new file mode 100644 index 00000000..b61465e6 --- /dev/null +++ b/async-openai/src/responses/mod.rs @@ -0,0 +1,7 @@ +mod conversation_items; +mod conversations; +mod responses_; + +pub use conversation_items::*; +pub use conversations::*; +pub use responses_::*; diff --git a/async-openai/src/responses.rs b/async-openai/src/responses/responses_.rs similarity index 100% rename from async-openai/src/responses.rs rename to async-openai/src/responses/responses_.rs diff --git a/async-openai/src/types/admin/api_keys/api.rs b/async-openai/src/types/admin/api_keys/api.rs new file mode 100644 index 
00000000..bc6f9c80 --- /dev/null +++ b/async-openai/src/types/admin/api_keys/api.rs @@ -0,0 +1,32 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +/// Sort order for listing admin API keys. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ListAdminApiKeysOrder { + /// Ascending order + Asc, + /// Descending order + Desc, +} + +/// Query parameters for listing admin API keys. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListAdminApiKeysQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListAdminApiKeysQuery { + /// Return keys with IDs that come after this ID in the pagination order. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Order results by creation time, ascending or descending. + #[serde(skip_serializing_if = "Option::is_none")] + pub order: Option, + /// Maximum number of keys to return. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, +} diff --git a/async-openai/src/types/admin/api_keys.rs b/async-openai/src/types/admin/api_keys/api_keys_.rs similarity index 72% rename from async-openai/src/types/admin/api_keys.rs rename to async-openai/src/types/admin/api_keys/api_keys_.rs index 3578591c..0be8723f 100644 --- a/async-openai/src/types/admin/api_keys.rs +++ b/async-openai/src/types/admin/api_keys/api_keys_.rs @@ -2,35 +2,6 @@ use crate::types::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Sort order for listing admin API keys. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ListAdminApiKeysOrder { - /// Ascending order - Asc, - /// Descending order - Desc, -} - -/// Query parameters for listing admin API keys. 
-#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListAdminApiKeysQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListAdminApiKeysQuery { - /// Return keys with IDs that come after this ID in the pagination order. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Order results by creation time, ascending or descending. - #[serde(skip_serializing_if = "Option::is_none")] - pub order: Option, - /// Maximum number of keys to return. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, -} - /// Represents an individual Admin API key in an org. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct AdminApiKey { diff --git a/async-openai/src/types/admin/api_keys/mod.rs b/async-openai/src/types/admin/api_keys/mod.rs new file mode 100644 index 00000000..b686ebc3 --- /dev/null +++ b/async-openai/src/types/admin/api_keys/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod api_keys_; + +pub use api::*; +pub use api_keys_::*; diff --git a/async-openai/src/types/admin/audit_logs/api.rs b/async-openai/src/types/admin/audit_logs/api.rs new file mode 100644 index 00000000..c17bec69 --- /dev/null +++ b/async-openai/src/types/admin/audit_logs/api.rs @@ -0,0 +1,40 @@ +use crate::error::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing audit logs. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListAuditLogsQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListAuditLogsQuery { + /// Return only events whose `effective_at` (Unix seconds) is in this range. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub effective_at: Option, + /// Return only events for these projects. + #[serde(skip_serializing_if = "Option::is_none")] + pub project_ids: Option>, + /// Return only events with a `type` in one of these values. + #[serde(skip_serializing_if = "Option::is_none")] + pub event_types: Option>, + /// Return only events performed by these actors. + #[serde(skip_serializing_if = "Option::is_none")] + pub actor_ids: Option>, + /// Return only events performed by users with these emails. + #[serde(skip_serializing_if = "Option::is_none")] + pub actor_emails: Option>, + /// Return only events performed on these targets. + #[serde(skip_serializing_if = "Option::is_none")] + pub resource_ids: Option>, + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub before: Option, +} diff --git a/async-openai/src/types/admin/audit_logs.rs b/async-openai/src/types/admin/audit_logs/audit_logs_.rs similarity index 88% rename from async-openai/src/types/admin/audit_logs.rs rename to async-openai/src/types/admin/audit_logs/audit_logs_.rs index 05bfa4d8..1d3b137e 100644 --- a/async-openai/src/types/admin/audit_logs.rs +++ b/async-openai/src/types/admin/audit_logs/audit_logs_.rs @@ -1,44 +1,5 @@ -use crate::error::OpenAIError; -use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing audit logs. 
-#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListAuditLogsQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListAuditLogsQuery { - /// Return only events whose `effective_at` (Unix seconds) is in this range. - #[serde(skip_serializing_if = "Option::is_none")] - pub effective_at: Option, - /// Return only events for these projects. - #[serde(skip_serializing_if = "Option::is_none")] - pub project_ids: Option>, - /// Return only events with a `type` in one of these values. - #[serde(skip_serializing_if = "Option::is_none")] - pub event_types: Option>, - /// Return only events performed by these actors. - #[serde(skip_serializing_if = "Option::is_none")] - pub actor_ids: Option>, - /// Return only events performed by users with these emails. - #[serde(skip_serializing_if = "Option::is_none")] - pub actor_emails: Option>, - /// Return only events performed on these targets. - #[serde(skip_serializing_if = "Option::is_none")] - pub resource_ids: Option>, - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub before: Option, -} - /// The event type. 
#[derive(Debug, Serialize, Deserialize)] pub enum AuditLogEventType { diff --git a/async-openai/src/types/admin/audit_logs/mod.rs b/async-openai/src/types/admin/audit_logs/mod.rs new file mode 100644 index 00000000..4b39dabc --- /dev/null +++ b/async-openai/src/types/admin/audit_logs/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod audit_logs_; + +pub use api::*; +pub use audit_logs_::*; diff --git a/async-openai/src/types/admin/certificates/api.rs b/async-openai/src/types/admin/certificates/api.rs new file mode 100644 index 00000000..e849c9ff --- /dev/null +++ b/async-openai/src/types/admin/certificates/api.rs @@ -0,0 +1,74 @@ +use crate::error::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +/// Sort order for listing organization certificates. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ListOrganizationCertificatesOrder { + /// Ascending order + Asc, + /// Descending order + Desc, +} + +/// Query parameters for listing organization certificates. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListOrganizationCertificatesQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListOrganizationCertificatesQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. + #[serde(skip_serializing_if = "Option::is_none")] + pub order: Option, +} + +/// Sort order for listing project certificates. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ListProjectCertificatesOrder { + /// Ascending order + Asc, + /// Descending order + Desc, +} + +/// Query parameters for listing project certificates. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectCertificatesQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectCertificatesQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. + #[serde(skip_serializing_if = "Option::is_none")] + pub order: Option, +} + +/// Query parameters for getting a certificate. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "GetCertificateQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct GetCertificateQuery { + /// A list of additional fields to include in the response. Currently the only supported value is `content` to fetch the PEM content of the certificate. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub include: Option>, +} diff --git a/async-openai/src/types/admin/certificates.rs b/async-openai/src/types/admin/certificates/certificates_.rs similarity index 55% rename from async-openai/src/types/admin/certificates.rs rename to async-openai/src/types/admin/certificates/certificates_.rs index 2370f00b..e10774f0 100644 --- a/async-openai/src/types/admin/certificates.rs +++ b/async-openai/src/types/admin/certificates/certificates_.rs @@ -2,77 +2,6 @@ use crate::error::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Sort order for listing organization certificates. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ListOrganizationCertificatesOrder { - /// Ascending order - Asc, - /// Descending order - Desc, -} - -/// Query parameters for listing organization certificates. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListOrganizationCertificatesQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListOrganizationCertificatesQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. - #[serde(skip_serializing_if = "Option::is_none")] - pub order: Option, -} - -/// Sort order for listing project certificates. 
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ListProjectCertificatesOrder { - /// Ascending order - Asc, - /// Descending order - Desc, -} - -/// Query parameters for listing project certificates. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectCertificatesQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectCertificatesQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. - #[serde(skip_serializing_if = "Option::is_none")] - pub order: Option, -} - -/// Query parameters for getting a certificate. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "GetCertificateQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct GetCertificateQuery { - /// A list of additional fields to include in the response. Currently the only supported value is `content` to fetch the PEM content of the certificate. - #[serde(skip_serializing_if = "Option::is_none")] - pub include: Option>, -} - /// Represents an individual certificate uploaded to the organization. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Certificate { diff --git a/async-openai/src/types/admin/certificates/mod.rs b/async-openai/src/types/admin/certificates/mod.rs new file mode 100644 index 00000000..6e132bce --- /dev/null +++ b/async-openai/src/types/admin/certificates/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod certificates_; + +pub use api::*; +pub use certificates_::*; diff --git a/async-openai/src/types/admin/groups/api.rs b/async-openai/src/types/admin/groups/api.rs new file mode 100644 index 00000000..cdef7850 --- /dev/null +++ b/async-openai/src/types/admin/groups/api.rs @@ -0,0 +1,32 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +/// Sort order for listing groups. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ListGroupsOrder { + /// Ascending order + Asc, + /// Descending order + Desc, +} + +/// Query parameters for listing groups. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListGroupsQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListGroupsQuery { + /// A limit on the number of groups to be returned. Limit can range between 0 and 1000, and the default is 100. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is a group ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Specifies the sort order of the returned groups. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub order: Option, +} diff --git a/async-openai/src/types/admin/groups.rs b/async-openai/src/types/admin/groups/groups_.rs similarity index 85% rename from async-openai/src/types/admin/groups.rs rename to async-openai/src/types/admin/groups/groups_.rs index 4581df54..33dbc66e 100644 --- a/async-openai/src/types/admin/groups.rs +++ b/async-openai/src/types/admin/groups/groups_.rs @@ -4,35 +4,6 @@ use crate::types::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Sort order for listing groups. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ListGroupsOrder { - /// Ascending order - Asc, - /// Descending order - Desc, -} - -/// Query parameters for listing groups. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListGroupsQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListGroupsQuery { - /// A limit on the number of groups to be returned. Limit can range between 0 and 1000, and the default is 100. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is a group ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Specifies the sort order of the returned groups. - #[serde(skip_serializing_if = "Option::is_none")] - pub order: Option, -} - /// Summary information about a group returned in role assignment responses. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Group { diff --git a/async-openai/src/types/admin/groups/mod.rs b/async-openai/src/types/admin/groups/mod.rs new file mode 100644 index 00000000..338577fb --- /dev/null +++ b/async-openai/src/types/admin/groups/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod groups_; + +pub use api::*; +pub use groups_::*; diff --git a/async-openai/src/types/admin/invites/api.rs b/async-openai/src/types/admin/invites/api.rs new file mode 100644 index 00000000..fb8f0845 --- /dev/null +++ b/async-openai/src/types/admin/invites/api.rs @@ -0,0 +1,19 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing invites. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListInvitesQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListInvitesQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, +} diff --git a/async-openai/src/types/admin/invites.rs b/async-openai/src/types/admin/invites/invites_.rs similarity index 71% rename from async-openai/src/types/admin/invites.rs rename to async-openai/src/types/admin/invites/invites_.rs index 1db16692..934ca14a 100644 --- a/async-openai/src/types/admin/invites.rs +++ b/async-openai/src/types/admin/invites/invites_.rs @@ -1,24 +1,8 @@ +use crate::types::admin::roles::OrganizationRole; use crate::types::OpenAIError; -use crate::types::OrganizationRole; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing invites. 
-#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListInvitesQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListInvitesQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, -} - #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] #[serde(rename_all = "lowercase")] pub enum InviteStatus { diff --git a/async-openai/src/types/admin/invites/mod.rs b/async-openai/src/types/admin/invites/mod.rs new file mode 100644 index 00000000..4878fca4 --- /dev/null +++ b/async-openai/src/types/admin/invites/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod invites_; + +pub use api::*; +pub use invites_::*; diff --git a/async-openai/src/types/admin/project_api_keys/api.rs b/async-openai/src/types/admin/project_api_keys/api.rs new file mode 100644 index 00000000..e46457ef --- /dev/null +++ b/async-openai/src/types/admin/project_api_keys/api.rs @@ -0,0 +1,19 @@ +use crate::error::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing project API keys. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectApiKeysQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectApiKeysQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, +} diff --git a/async-openai/src/types/admin/project_api_keys/mod.rs b/async-openai/src/types/admin/project_api_keys/mod.rs new file mode 100644 index 00000000..d9f2e30a --- /dev/null +++ b/async-openai/src/types/admin/project_api_keys/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod project_api_keys_; + +pub use api::*; +pub use project_api_keys_::*; diff --git a/async-openai/src/types/admin/project_api_keys.rs b/async-openai/src/types/admin/project_api_keys/project_api_keys_.rs similarity index 74% rename from async-openai/src/types/admin/project_api_keys.rs rename to async-openai/src/types/admin/project_api_keys/project_api_keys_.rs index 3bf0875a..2d339d7b 100644 --- a/async-openai/src/types/admin/project_api_keys.rs +++ b/async-openai/src/types/admin/project_api_keys/project_api_keys_.rs @@ -1,26 +1,8 @@ -use crate::error::OpenAIError; -use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::types::admin::project_service_accounts::ProjectServiceAccount; use crate::types::admin::project_users::ProjectUser; -/// Query parameters for listing project API keys. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectApiKeysQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectApiKeysQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, -} - /// Represents an individual API key in a project. #[derive(Debug, Serialize, Deserialize)] pub struct ProjectApiKey { diff --git a/async-openai/src/types/admin/project_rate_limits/api.rs b/async-openai/src/types/admin/project_rate_limits/api.rs new file mode 100644 index 00000000..fabbbd14 --- /dev/null +++ b/async-openai/src/types/admin/project_rate_limits/api.rs @@ -0,0 +1,22 @@ +use crate::error::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing project rate limits. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectRateLimitsQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectRateLimitsQuery { + /// A limit on the number of objects to be returned. The default is 100. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub before: Option, +} diff --git a/async-openai/src/types/admin/project_rate_limits/mod.rs b/async-openai/src/types/admin/project_rate_limits/mod.rs new file mode 100644 index 00000000..526a7b94 --- /dev/null +++ b/async-openai/src/types/admin/project_rate_limits/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod project_rate_limits_; + +pub use api::*; +pub use project_rate_limits_::*; diff --git a/async-openai/src/types/admin/project_rate_limits.rs b/async-openai/src/types/admin/project_rate_limits/project_rate_limits_.rs similarity index 76% rename from async-openai/src/types/admin/project_rate_limits.rs rename to async-openai/src/types/admin/project_rate_limits/project_rate_limits_.rs index 22ca1e86..5f4ac7ed 100644 --- a/async-openai/src/types/admin/project_rate_limits.rs +++ b/async-openai/src/types/admin/project_rate_limits/project_rate_limits_.rs @@ -2,25 +2,6 @@ use crate::error::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing project rate limits. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectRateLimitsQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectRateLimitsQuery { - /// A limit on the number of objects to be returned. The default is 100. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// A cursor for use in pagination. `before` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub before: Option, -} - /// Represents a project rate limit config. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct ProjectRateLimit { diff --git a/async-openai/src/types/admin/project_service_accounts/api.rs b/async-openai/src/types/admin/project_service_accounts/api.rs new file mode 100644 index 00000000..8d43bb2a --- /dev/null +++ b/async-openai/src/types/admin/project_service_accounts/api.rs @@ -0,0 +1,19 @@ +use crate::error::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing project service accounts. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectServiceAccountsQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectServiceAccountsQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, +} diff --git a/async-openai/src/types/admin/project_service_accounts/mod.rs b/async-openai/src/types/admin/project_service_accounts/mod.rs new file mode 100644 index 00000000..0850f470 --- /dev/null +++ b/async-openai/src/types/admin/project_service_accounts/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod project_service_accounts_; + +pub use api::*; +pub use project_service_accounts_::*; diff --git a/async-openai/src/types/admin/project_service_accounts.rs b/async-openai/src/types/admin/project_service_accounts/project_service_accounts_.rs similarity index 79% rename from async-openai/src/types/admin/project_service_accounts.rs rename to async-openai/src/types/admin/project_service_accounts/project_service_accounts_.rs index a1d09f9b..1ec1ae6a 100644 --- a/async-openai/src/types/admin/project_service_accounts.rs +++ b/async-openai/src/types/admin/project_service_accounts/project_service_accounts_.rs @@ -1,25 +1,7 @@ -use crate::error::OpenAIError; -use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::types::admin::project_users::ProjectUserRole; -/// Query parameters for listing project service accounts. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectServiceAccountsQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectServiceAccountsQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, -} - /// Represents an individual service account in a project. 
#[derive(Debug, Serialize, Deserialize)] pub struct ProjectServiceAccount { diff --git a/async-openai/src/types/admin/project_users/api.rs b/async-openai/src/types/admin/project_users/api.rs new file mode 100644 index 00000000..c306b2c8 --- /dev/null +++ b/async-openai/src/types/admin/project_users/api.rs @@ -0,0 +1,19 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing project users. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectUsersQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectUsersQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, +} diff --git a/async-openai/src/types/admin/project_users/mod.rs b/async-openai/src/types/admin/project_users/mod.rs new file mode 100644 index 00000000..625d94a5 --- /dev/null +++ b/async-openai/src/types/admin/project_users/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod project_users_; + +pub use api::*; +pub use project_users_::*; diff --git a/async-openai/src/types/admin/project_users.rs b/async-openai/src/types/admin/project_users/project_users_.rs similarity index 73% rename from async-openai/src/types/admin/project_users.rs rename to async-openai/src/types/admin/project_users/project_users_.rs index 585b0e1b..51a88199 100644 --- a/async-openai/src/types/admin/project_users.rs +++ b/async-openai/src/types/admin/project_users/project_users_.rs @@ -2,22 +2,6 @@ use crate::types::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing project users. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectUsersQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectUsersQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, -} - /// Represents an individual user in a project. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct ProjectUser { diff --git a/async-openai/src/types/admin/projects/api.rs b/async-openai/src/types/admin/projects/api.rs new file mode 100644 index 00000000..fbf7c16a --- /dev/null +++ b/async-openai/src/types/admin/projects/api.rs @@ -0,0 +1,22 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing projects. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListProjectsQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListProjectsQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub include_archived: Option, +} diff --git a/async-openai/src/types/admin/projects/mod.rs b/async-openai/src/types/admin/projects/mod.rs new file mode 100644 index 00000000..52be6572 --- /dev/null +++ b/async-openai/src/types/admin/projects/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod projects_; + +pub use api::*; +pub use projects_::*; diff --git a/async-openai/src/types/admin/projects.rs b/async-openai/src/types/admin/projects/projects_.rs similarity index 80% rename from async-openai/src/types/admin/projects.rs rename to async-openai/src/types/admin/projects/projects_.rs index 4e06c43a..ae2b5831 100644 --- a/async-openai/src/types/admin/projects.rs +++ b/async-openai/src/types/admin/projects/projects_.rs @@ -2,25 +2,6 @@ use crate::types::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing projects. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListProjectsQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListProjectsQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// If `true` returns all projects including those that have been `archived`. Archived projects are not included by default. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub include_archived: Option, -} - /// `active` or `archived` #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] #[serde(rename_all = "lowercase")] diff --git a/async-openai/src/types/admin/roles/api.rs b/async-openai/src/types/admin/roles/api.rs new file mode 100644 index 00000000..d0d9f616 --- /dev/null +++ b/async-openai/src/types/admin/roles/api.rs @@ -0,0 +1,32 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +/// Sort order for listing roles. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ListRolesOrder { + /// Ascending order + Asc, + /// Descending order + Desc, +} + +/// Query parameters for listing roles. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListRolesQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListRolesQuery { + /// A limit on the number of roles to return. Defaults to 1000. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// Cursor for pagination. Provide the value from the previous response's `next` field to continue listing roles. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Sort order for the returned roles. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub order: Option, +} diff --git a/async-openai/src/types/admin/roles/mod.rs b/async-openai/src/types/admin/roles/mod.rs new file mode 100644 index 00000000..01a33015 --- /dev/null +++ b/async-openai/src/types/admin/roles/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod roles_; + +pub use api::*; +pub use roles_::*; diff --git a/async-openai/src/types/admin/roles.rs b/async-openai/src/types/admin/roles/roles_.rs similarity index 83% rename from async-openai/src/types/admin/roles.rs rename to async-openai/src/types/admin/roles/roles_.rs index 5b92379c..c7176b3b 100644 --- a/async-openai/src/types/admin/roles.rs +++ b/async-openai/src/types/admin/roles/roles_.rs @@ -2,33 +2,11 @@ use crate::types::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Sort order for listing roles. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] #[serde(rename_all = "lowercase")] -pub enum ListRolesOrder { - /// Ascending order - Asc, - /// Descending order - Desc, -} - -/// Query parameters for listing roles. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListRolesQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListRolesQuery { - /// A limit on the number of roles to return. Defaults to 1000. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// Cursor for pagination. Provide the value from the previous response's `next` field to continue listing roles. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Sort order for the returned roles. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub order: Option, +pub enum OrganizationRole { + Owner, + Reader, } /// Details about a role that can be assigned through the public Roles API. diff --git a/async-openai/src/types/admin/usage/api.rs b/async-openai/src/types/admin/usage/api.rs new file mode 100644 index 00000000..efef043b --- /dev/null +++ b/async-openai/src/types/admin/usage/api.rs @@ -0,0 +1,69 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +/// Query parameters for organization usage endpoints. +#[derive(Debug, Clone, Serialize, Default, Builder)] +#[builder(name = "UsageQueryParamsArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct UsageQueryParams { + /// Start time (Unix seconds) of the query time range, inclusive. + pub start_time: u64, + /// End time (Unix seconds) of the query time range, exclusive. + #[serde(skip_serializing_if = "Option::is_none")] + pub end_time: Option, + /// Width of each time bucket in response. Currently `1m`, `1h` and `1d` are supported, default to `1d`. + #[serde(skip_serializing_if = "Option::is_none")] + pub bucket_width: Option, + /// Return only usage for these projects. + #[serde(skip_serializing_if = "Option::is_none")] + pub project_ids: Option>, + /// Return only usage for these users. + #[serde(skip_serializing_if = "Option::is_none")] + pub user_ids: Option>, + /// Return only usage for these API keys. + #[serde(skip_serializing_if = "Option::is_none")] + pub api_key_ids: Option>, + /// Return only usage for these models. + #[serde(skip_serializing_if = "Option::is_none")] + pub models: Option>, + /// If `true`, return batch jobs only. If `false`, return non-batch jobs only. By default, return both. + #[serde(skip_serializing_if = "Option::is_none")] + pub batch: Option, + /// Group the usage data by the specified fields. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub group_by: Option>, + /// Specifies the number of buckets to return. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. + #[serde(skip_serializing_if = "Option::is_none")] + pub page: Option, +} + +/// Width of each time bucket in response. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum UsageBucketWidth { + #[serde(rename = "1m")] + OneMinute, + #[serde(rename = "1h")] + OneHour, + #[serde(rename = "1d")] + OneDay, +} + +/// Fields to group usage data by. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum UsageGroupBy { + ProjectId, + UserId, + ApiKeyId, + Model, + Batch, + ServiceTier, +} diff --git a/async-openai/src/types/admin/usage/mod.rs b/async-openai/src/types/admin/usage/mod.rs new file mode 100644 index 00000000..4f532bf1 --- /dev/null +++ b/async-openai/src/types/admin/usage/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod usage_; + +pub use api::*; +pub use usage_::*; diff --git a/async-openai/src/types/admin/usage.rs b/async-openai/src/types/admin/usage/usage_.rs similarity index 83% rename from async-openai/src/types/admin/usage.rs rename to async-openai/src/types/admin/usage/usage_.rs index 2b2f16df..f7085ca9 100644 --- a/async-openai/src/types/admin/usage.rs +++ b/async-openai/src/types/admin/usage/usage_.rs @@ -1,65 +1,4 @@ -use serde::{Deserialize, Serialize}; - -/// Query parameters for organization usage endpoints. -#[derive(Debug, Clone, Serialize, Default)] -pub struct UsageQueryParams { - /// Start time (Unix seconds) of the query time range, inclusive. - pub start_time: u64, - /// End time (Unix seconds) of the query time range, exclusive. - #[serde(skip_serializing_if = "Option::is_none")] - pub end_time: Option, - /// Width of each time bucket in response. 
Currently `1m`, `1h` and `1d` are supported, default to `1d`. - #[serde(skip_serializing_if = "Option::is_none")] - pub bucket_width: Option, - /// Return only usage for these projects. - #[serde(skip_serializing_if = "Option::is_none")] - pub project_ids: Option>, - /// Return only usage for these users. - #[serde(skip_serializing_if = "Option::is_none")] - pub user_ids: Option>, - /// Return only usage for these API keys. - #[serde(skip_serializing_if = "Option::is_none")] - pub api_key_ids: Option>, - /// Return only usage for these models. - #[serde(skip_serializing_if = "Option::is_none")] - pub models: Option>, - /// If `true`, return batch jobs only. If `false`, return non-batch jobs only. By default, return both. - #[serde(skip_serializing_if = "Option::is_none")] - pub batch: Option, - /// Group the usage data by the specified fields. - #[serde(skip_serializing_if = "Option::is_none")] - pub group_by: Option>, - /// Specifies the number of buckets to return. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. Corresponding to the `next_page` field from the previous response. - #[serde(skip_serializing_if = "Option::is_none")] - pub page: Option, -} - -/// Width of each time bucket in response. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum UsageBucketWidth { - #[serde(rename = "1m")] - OneMinute, - #[serde(rename = "1h")] - OneHour, - #[serde(rename = "1d")] - OneDay, -} - -/// Fields to group usage data by. -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "snake_case")] -pub enum UsageGroupBy { - ProjectId, - UserId, - ApiKeyId, - Model, - Batch, - ServiceTier, -} +use serde::Deserialize; /// Response structure for organization usage endpoints. 
#[derive(Debug, Clone, Deserialize)] diff --git a/async-openai/src/types/admin/users/api.rs b/async-openai/src/types/admin/users/api.rs new file mode 100644 index 00000000..4e4c7728 --- /dev/null +++ b/async-openai/src/types/admin/users/api.rs @@ -0,0 +1,22 @@ +use crate::types::OpenAIError; +use derive_builder::Builder; +use serde::Serialize; + +/// Query parameters for listing users. +#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ListUsersQueryArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListUsersQuery { + /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Filter by the email address of users. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub emails: Option>, +} diff --git a/async-openai/src/types/admin/users/mod.rs b/async-openai/src/types/admin/users/mod.rs new file mode 100644 index 00000000..137772e9 --- /dev/null +++ b/async-openai/src/types/admin/users/mod.rs @@ -0,0 +1,5 @@ +mod api; +mod users_; + +pub use api::*; +pub use users_::*; diff --git a/async-openai/src/types/admin/users.rs b/async-openai/src/types/admin/users/users_.rs similarity index 75% rename from async-openai/src/types/admin/users.rs rename to async-openai/src/types/admin/users/users_.rs index d8d44056..d735103e 100644 --- a/async-openai/src/types/admin/users.rs +++ b/async-openai/src/types/admin/users/users_.rs @@ -1,28 +1,9 @@ +use crate::types::admin::roles::OrganizationRole; use crate::types::admin::roles::Role; use crate::types::OpenAIError; -use crate::types::OrganizationRole; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -/// Query parameters for listing users. -#[derive(Debug, Serialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ListUsersQueryArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ListUsersQuery { - /// A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. - #[serde(skip_serializing_if = "Option::is_none")] - pub limit: Option, - /// A cursor for use in pagination. `after` is an object ID that defines your place in the list. - #[serde(skip_serializing_if = "Option::is_none")] - pub after: Option, - /// Filter by the email address of users. - #[serde(skip_serializing_if = "Option::is_none")] - pub emails: Option>, -} - /// Represents an individual `user` within an organization. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct User { diff --git a/async-openai/src/types/assistants/assistant.rs b/async-openai/src/types/assistants/assistant.rs index a4717d50..437a5961 100644 --- a/async-openai/src/types/assistants/assistant.rs +++ b/async-openai/src/types/assistants/assistant.rs @@ -5,7 +5,9 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use crate::types::chat::{FunctionName, FunctionObject, ResponseFormat}; +use crate::types::assistants::{ + FunctionName, FunctionObject, ResponseFormat, StaticChunkingStrategy, +}; #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] pub struct AssistantToolCodeInterpreterResources { @@ -66,17 +68,6 @@ pub enum AssistantVectorStoreChunkingStrategy { Static { r#static: StaticChunkingStrategy }, } -/// Static Chunking Strategy -#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] -pub struct StaticChunkingStrategy { - /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. - pub max_chunk_size_tokens: u16, - /// The number of tokens that overlap between chunks. The default value is `400`. - /// - /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. - pub chunk_overlap_tokens: u16, -} - /// Represents an `assistant` that can call the model and use tools. 
#[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] pub struct AssistantObject { diff --git a/async-openai/src/types/assistants/assistant_impls.rs b/async-openai/src/types/assistants/assistant_impls.rs deleted file mode 100644 index f693fcac..00000000 --- a/async-openai/src/types/assistants/assistant_impls.rs +++ /dev/null @@ -1,66 +0,0 @@ -use crate::types::assistants::{ - AssistantToolCodeInterpreterResources, AssistantToolFileSearchResources, - AssistantToolResources, AssistantTools, AssistantToolsFileSearch, AssistantToolsFunction, - CreateAssistantToolFileSearchResources, CreateAssistantToolResources, -}; -use crate::types::chat::FunctionObject; - -impl From for AssistantTools { - fn from(value: AssistantToolsFileSearch) -> Self { - Self::FileSearch(value) - } -} - -impl From for AssistantTools { - fn from(value: AssistantToolsFunction) -> Self { - Self::Function(value) - } -} - -impl From for AssistantToolsFunction { - fn from(value: FunctionObject) -> Self { - Self { function: value } - } -} - -impl From for AssistantTools { - fn from(value: FunctionObject) -> Self { - Self::Function(value.into()) - } -} - -impl From for CreateAssistantToolResources { - fn from(value: CreateAssistantToolFileSearchResources) -> Self { - Self { - code_interpreter: None, - file_search: Some(value), - } - } -} - -impl From for CreateAssistantToolResources { - fn from(value: AssistantToolCodeInterpreterResources) -> Self { - Self { - code_interpreter: Some(value), - file_search: None, - } - } -} - -impl From for AssistantToolResources { - fn from(value: AssistantToolCodeInterpreterResources) -> Self { - Self { - code_interpreter: Some(value), - file_search: None, - } - } -} - -impl From for AssistantToolResources { - fn from(value: AssistantToolFileSearchResources) -> Self { - Self { - code_interpreter: None, - file_search: Some(value), - } - } -} diff --git a/async-openai/src/types/assistants/impls.rs b/async-openai/src/types/assistants/impls.rs index 6daa4e1a..e017ac4c 
100644 --- a/async-openai/src/types/assistants/impls.rs +++ b/async-openai/src/types/assistants/impls.rs @@ -1,4 +1,69 @@ -use crate::types::assistants::CreateMessageRequestContent; +use crate::types::assistants::{ + AssistantToolCodeInterpreterResources, AssistantToolFileSearchResources, + AssistantToolResources, AssistantTools, AssistantToolsFileSearch, AssistantToolsFunction, + CreateAssistantToolFileSearchResources, CreateAssistantToolResources, + CreateMessageRequestContent, FunctionObject, +}; + +impl From for AssistantTools { + fn from(value: AssistantToolsFileSearch) -> Self { + Self::FileSearch(value) + } +} + +impl From for AssistantTools { + fn from(value: AssistantToolsFunction) -> Self { + Self::Function(value) + } +} + +impl From for AssistantToolsFunction { + fn from(value: FunctionObject) -> Self { + Self { function: value } + } +} + +impl From for AssistantTools { + fn from(value: FunctionObject) -> Self { + Self::Function(value.into()) + } +} + +impl From for CreateAssistantToolResources { + fn from(value: CreateAssistantToolFileSearchResources) -> Self { + Self { + code_interpreter: None, + file_search: Some(value), + } + } +} + +impl From for CreateAssistantToolResources { + fn from(value: AssistantToolCodeInterpreterResources) -> Self { + Self { + code_interpreter: Some(value), + file_search: None, + } + } +} + +impl From for AssistantToolResources { + fn from(value: AssistantToolCodeInterpreterResources) -> Self { + Self { + code_interpreter: Some(value), + file_search: None, + } + } +} + +impl From for AssistantToolResources { + fn from(value: AssistantToolFileSearchResources) -> Self { + Self { + code_interpreter: None, + file_search: Some(value), + } + } +} impl From for CreateMessageRequestContent { fn from(value: String) -> Self { diff --git a/async-openai/src/types/assistants/message.rs b/async-openai/src/types/assistants/message.rs index ab551f0f..1cd60d1e 100644 --- a/async-openai/src/types/assistants/message.rs +++ 
b/async-openai/src/types/assistants/message.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use crate::types::chat::{ImageDetail, ImageUrl}; +use crate::types::assistants::{ImageDetail, ImageUrl}; #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] #[serde(rename_all = "lowercase")] diff --git a/async-openai/src/types/assistants/mod.rs b/async-openai/src/types/assistants/mod.rs index 303b7c05..c2d2debc 100644 --- a/async-openai/src/types/assistants/mod.rs +++ b/async-openai/src/types/assistants/mod.rs @@ -1,17 +1,27 @@ mod api; mod assistant; -mod assistant_impls; -mod assistant_stream; mod impls; mod message; mod run; mod step; +mod stream; mod thread; pub use api::*; pub use assistant::*; -pub use assistant_stream::*; pub use message::*; pub use run::*; pub use step::*; +pub use stream::*; pub use thread::*; + +// Re-export shared types that are used in assistants +pub use crate::types::shared::FunctionCall; +pub use crate::types::shared::FunctionName; +pub use crate::types::shared::FunctionObject; +pub use crate::types::shared::ImageDetail; +pub use crate::types::shared::ImageUrl; +pub use crate::types::shared::ImageUrlArgs; +pub use crate::types::shared::ResponseFormat; +pub use crate::types::shared::ResponseFormatJsonSchema; +pub use crate::types::shared::StaticChunkingStrategy; diff --git a/async-openai/src/types/assistants/run.rs b/async-openai/src/types/assistants/run.rs index 591db02b..63acecbe 100644 --- a/async-openai/src/types/assistants/run.rs +++ b/async-openai/src/types/assistants/run.rs @@ -3,11 +3,11 @@ use std::collections::HashMap; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use crate::{error::OpenAIError, types::chat::FunctionCall}; +use crate::error::OpenAIError; -use super::{ +use crate::types::assistants::{ AssistantTools, AssistantsApiResponseFormatOption, AssistantsApiToolChoiceOption, - CreateMessageRequest, + CreateMessageRequest, FunctionCall, }; /// 
Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads). diff --git a/async-openai/src/types/assistants/step.rs b/async-openai/src/types/assistants/step.rs index 390497e1..c9b07b50 100644 --- a/async-openai/src/types/assistants/step.rs +++ b/async-openai/src/types/assistants/step.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use super::{FileSearchRankingOptions, ImageFile, LastError, RunStatus}; +use crate::types::assistants::{FileSearchRankingOptions, ImageFile, LastError, RunStatus}; #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] #[serde(rename_all = "snake_case")] diff --git a/async-openai/src/types/assistants/assistant_stream.rs b/async-openai/src/types/assistants/stream.rs similarity index 100% rename from async-openai/src/types/assistants/assistant_stream.rs rename to async-openai/src/types/assistants/stream.rs diff --git a/async-openai/src/types/assistants/thread.rs b/async-openai/src/types/assistants/thread.rs index 7f45af6a..837f2647 100644 --- a/async-openai/src/types/assistants/thread.rs +++ b/async-openai/src/types/assistants/thread.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use super::{ +use crate::types::assistants::{ AssistantToolResources, AssistantTools, AssistantsApiResponseFormatOption, AssistantsApiToolChoiceOption, CreateAssistantToolResources, CreateMessageRequest, TruncationObject, diff --git a/async-openai/src/types/audio/audio_types.rs b/async-openai/src/types/audio/audio_.rs similarity index 93% rename from async-openai/src/types/audio/audio_types.rs rename to async-openai/src/types/audio/audio_.rs index 56d9b898..156008a5 100644 --- a/async-openai/src/types/audio/audio_types.rs +++ b/async-openai/src/types/audio/audio_.rs @@ -3,8 +3,8 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; +use crate::types::audio::{LogProbProperties, 
TranscriptTextUsageDuration, TranscriptionUsage}; use crate::types::common::InputSource; -use crate::types::LogProbProperties; // openapi spec type: VoiceIdsShared #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] @@ -222,41 +222,6 @@ pub struct CreateTranscriptionResponseJson { pub usage: TranscriptionUsage, } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct TokenUsageInputTokenDetails { - /// Number of audio tokens billed for this request. - pub audio_tokens: u32, - /// Number of text tokens billed for this request. - pub text_tokens: u32, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct TranscriptTextUsageTokens { - /// Number of input tokens billed for this request. - pub input_tokens: u32, - /// Number of output tokens generated. - pub output_tokens: u32, - /// Total number of tokens used (input + output). - pub total_tokens: u32, - /// Details about the input tokens billed for this request. - pub input_token_details: Option, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct TranscriptTextUsageDuration { - ///Duration of the input audio in seconds. 
- pub seconds: f32, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(tag = "type")] -pub enum TranscriptionUsage { - #[serde(rename = "tokens")] - Tokens(TranscriptTextUsageTokens), - #[serde(rename = "duration")] - Duration(TranscriptTextUsageDuration), -} - #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "lowercase")] pub enum CreateTranscriptionResponseDiarizedJsonTask { diff --git a/async-openai/src/types/audio/mod.rs b/async-openai/src/types/audio/mod.rs index 368627c7..2d1472a7 100644 --- a/async-openai/src/types/audio/mod.rs +++ b/async-openai/src/types/audio/mod.rs @@ -1,8 +1,15 @@ -mod audio_types; +mod audio_; mod form; mod impls; mod sdk; mod stream; -pub use audio_types::*; +pub use audio_::*; pub use stream::*; + +// Re-export shared types that are used in audio +pub use crate::types::shared::LogProbProperties; +pub use crate::types::shared::TokenUsageInputTokenDetails; +pub use crate::types::shared::TranscriptTextUsageDuration; +pub use crate::types::shared::TranscriptTextUsageTokens; +pub use crate::types::shared::TranscriptionUsage; diff --git a/async-openai/src/types/audio/stream.rs b/async-openai/src/types/audio/stream.rs index ed7aa0d9..1197dff2 100644 --- a/async-openai/src/types/audio/stream.rs +++ b/async-openai/src/types/audio/stream.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::{ error::OpenAIError, traits::EventType, - types::{audio::TranscriptTextUsageTokens, LogProbProperties}, + types::audio::{LogProbProperties, TranscriptTextUsageTokens}, }; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] diff --git a/async-openai/src/types/batches/batch.rs b/async-openai/src/types/batches/batch.rs index 7520bf73..e27f4159 100644 --- a/async-openai/src/types/batches/batch.rs +++ b/async-openai/src/types/batches/batch.rs @@ -4,7 +4,7 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use crate::types::responses::ResponseUsage; +use 
crate::types::batches::ResponseUsage; use crate::types::Metadata; #[derive(Debug, Serialize, Default, Clone, Builder, PartialEq, Deserialize)] diff --git a/async-openai/src/types/batches/mod.rs b/async-openai/src/types/batches/mod.rs index 98701977..c5c6ff0b 100644 --- a/async-openai/src/types/batches/mod.rs +++ b/async-openai/src/types/batches/mod.rs @@ -3,3 +3,8 @@ mod batch; pub use api::*; pub use batch::*; + +// Re-export shared types +pub use crate::types::shared::InputTokenDetails; +pub use crate::types::shared::OutputTokenDetails; +pub use crate::types::shared::ResponseUsage; diff --git a/async-openai/src/types/chat/chat_types.rs b/async-openai/src/types/chat/chat_.rs similarity index 90% rename from async-openai/src/types/chat/chat_types.rs rename to async-openai/src/types/chat/chat_.rs index a77dfbba..fe18a83a 100644 --- a/async-openai/src/types/chat/chat_types.rs +++ b/async-openai/src/types/chat/chat_.rs @@ -6,7 +6,13 @@ use serde::{Deserialize, Serialize}; use crate::{ error::OpenAIError, - types::{responses::CustomGrammarFormatParam, Metadata}, + types::{ + chat::{ + CompletionTokensDetails, CustomGrammarFormatParam, FunctionCall, FunctionName, + FunctionObject, ImageUrl, PromptTokensDetails, ReasoningEffort, ResponseFormat, + }, + Metadata, + }, }; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] @@ -80,15 +86,6 @@ pub enum Role { Function, } -/// The name and arguments of a function that should be called, as generated by the model. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] -pub struct FunctionCall { - /// The name of the function to call. - pub name: String, - /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. - pub arguments: String, -} - /// Usage statistics for the completion request. 
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] pub struct CompletionUsage { @@ -106,31 +103,6 @@ pub struct CompletionUsage { pub completion_tokens_details: Option, } -/// Breakdown of tokens used in a completion. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] -pub struct PromptTokensDetails { - /// Audio input tokens present in the prompt. - pub audio_tokens: Option, - /// Cached tokens present in the prompt. - pub cached_tokens: Option, -} - -/// Breakdown of tokens used in a completion. -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] -pub struct CompletionTokensDetails { - pub accepted_prediction_tokens: Option, - /// Audio input tokens generated by the model. - pub audio_tokens: Option, - /// Tokens generated by the model for reasoning. - pub reasoning_tokens: Option, - /// When using Predicted Outputs, the number of tokens in the - /// prediction that did not appear in the completion. However, like - /// reasoning tokens, these tokens are still counted in the total - /// completion tokens for purposes of billing, output, and context - /// window limits. - pub rejected_prediction_tokens: Option, -} - #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] #[builder(name = "ChatCompletionRequestDeveloperMessageArgs")] #[builder(pattern = "mutable")] @@ -190,28 +162,6 @@ pub struct ChatCompletionRequestMessageContentPartRefusal { pub refusal: String, } -#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum ImageDetail { - #[default] - Auto, - Low, - High, -} - -#[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] -#[builder(name = "ImageUrlArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct ImageUrl { - /// Either a URL of the image or the base64 encoded image data. 
- pub url: String, - /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). - pub detail: Option, -} - #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] #[builder(name = "ChatCompletionRequestMessageContentPartImageArgs")] #[builder(pattern = "mutable")] @@ -533,62 +483,6 @@ pub struct ChatCompletionFunctions { pub parameters: serde_json::Value, } -#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)] -#[builder(name = "FunctionObjectArgs")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct FunctionObject { - /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - pub name: String, - /// A description of what the function does, used by the model to choose when and how to call the function. - #[serde(skip_serializing_if = "Option::is_none")] - pub description: Option, - /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. - /// - /// Omitting `parameters` defines a function with an empty parameter list. - #[serde(skip_serializing_if = "Option::is_none")] - pub parameters: Option, - - /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling). 
- #[serde(skip_serializing_if = "Option::is_none")] - pub strict: Option, -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] -#[serde(tag = "type", rename_all = "snake_case")] -pub enum ResponseFormat { - /// The type of response format being defined: `text` - Text, - /// The type of response format being defined: `json_object` - JsonObject, - /// The type of response format being defined: `json_schema` - JsonSchema { - json_schema: ResponseFormatJsonSchema, - }, -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] -pub struct ResponseFormatJsonSchema { - /// A description of what the response format is for, used by the model to determine how to respond in the format. - #[serde(skip_serializing_if = "Option::is_none")] - pub description: Option, - /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. - pub name: String, - /// The schema for the response format, described as a JSON Schema object. - /// Learn how to build JSON schemas [here](https://json-schema.org/). - #[serde(skip_serializing_if = "Option::is_none")] - pub schema: Option, - /// Whether to enable strict schema adherence when generating the output. - /// If set to true, the model will always follow the exact schema defined - /// in the `schema` field. Only a subset of JSON Schema is supported when - /// `strict` is `true`. To learn more, read the [Structured Outputs - /// guide](https://platform.openai.com/docs/guides/structured-outputs). - #[serde(skip_serializing_if = "Option::is_none")] - pub strict: Option, -} - #[derive(Clone, Serialize, Debug, Deserialize, PartialEq)] #[serde(tag = "type", rename_all = "snake_case")] pub enum ChatCompletionTools { @@ -630,12 +524,6 @@ pub enum CustomToolPropertiesFormat { Grammar { grammar: CustomGrammarFormatParam }, } -#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] -pub struct FunctionName { - /// The name of the function to call. 
- pub name: String, -} - /// Specifies a tool the model should use. Use to force the model to call a specific function. #[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] pub struct ChatCompletionNamedToolChoice { @@ -770,17 +658,6 @@ pub enum ServiceTier { Priority, } -#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum ReasoningEffort { - None, - Minimal, - Low, - #[default] - Medium, - High, -} - /// Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. Currently supported values are `low`, `medium`, and `high`. #[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] #[serde(rename_all = "lowercase")] diff --git a/async-openai/src/types/chat/mod.rs b/async-openai/src/types/chat/mod.rs index 74a5e910..f0826a66 100644 --- a/async-openai/src/types/chat/mod.rs +++ b/async-openai/src/types/chat/mod.rs @@ -1,6 +1,22 @@ mod api; -mod chat_types; +mod chat_; mod impls; pub use api::*; -pub use chat_types::*; +pub use chat_::*; + +// Re-export shared types that are used in chat +pub use crate::types::shared::CompletionTokensDetails; +pub use crate::types::shared::CustomGrammarFormatParam; +pub use crate::types::shared::FunctionCall; +pub use crate::types::shared::FunctionName; +pub use crate::types::shared::FunctionObject; +pub use crate::types::shared::FunctionObjectArgs; +pub use crate::types::shared::GrammarSyntax; +pub use crate::types::shared::ImageDetail; +pub use crate::types::shared::ImageUrl; +pub use crate::types::shared::ImageUrlArgs; +pub use crate::types::shared::PromptTokensDetails; +pub use crate::types::shared::ReasoningEffort; +pub use crate::types::shared::ResponseFormat; +pub use crate::types::shared::ResponseFormatJsonSchema; diff --git a/async-openai/src/types/common.rs b/async-openai/src/types/common.rs index 6155e46b..b8170673 100644 --- 
a/async-openai/src/types/common.rs +++ b/async-openai/src/types/common.rs @@ -10,13 +10,6 @@ pub enum InputSource { VecU8 { filename: String, vec: Vec }, } -#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum OrganizationRole { - Owner, - Reader, -} - /// Set of 16 key-value pairs that can be attached to an object. /// This can be useful for storing additional information about the /// object in a structured format, and querying for objects via API diff --git a/async-openai/src/types/completion.rs b/async-openai/src/types/completions/completion.rs similarity index 100% rename from async-openai/src/types/completion.rs rename to async-openai/src/types/completions/completion.rs diff --git a/async-openai/src/types/completions/mod.rs b/async-openai/src/types/completions/mod.rs new file mode 100644 index 00000000..a66295ef --- /dev/null +++ b/async-openai/src/types/completions/mod.rs @@ -0,0 +1,3 @@ +mod completion; + +pub use completion::*; diff --git a/async-openai/src/types/embeddings/embedding.rs b/async-openai/src/types/embeddings/embedding.rs index a1db1501..974cb047 100644 --- a/async-openai/src/types/embeddings/embedding.rs +++ b/async-openai/src/types/embeddings/embedding.rs @@ -1,4 +1,3 @@ -use base64::engine::{general_purpose, Engine}; use derive_builder::Builder; use serde::{Deserialize, Serialize}; @@ -72,18 +71,6 @@ pub struct Embedding { #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct Base64EmbeddingVector(pub String); -impl From for Vec { - fn from(value: Base64EmbeddingVector) -> Self { - let bytes = general_purpose::STANDARD - .decode(value.0) - .expect("openai base64 encoding to be valid"); - let chunks = bytes.chunks_exact(4); - chunks - .map(|chunk| f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])) - .collect() - } -} - /// Represents an base64-encoded embedding vector returned by embedding endpoint. 
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct Base64Embedding { diff --git a/async-openai/src/types/embeddings/impls.rs b/async-openai/src/types/embeddings/impls.rs new file mode 100644 index 00000000..ca4f9c20 --- /dev/null +++ b/async-openai/src/types/embeddings/impls.rs @@ -0,0 +1,15 @@ +use base64::engine::{general_purpose, Engine}; + +use crate::types::embeddings::Base64EmbeddingVector; + +impl From for Vec { + fn from(value: Base64EmbeddingVector) -> Self { + let bytes = general_purpose::STANDARD + .decode(value.0) + .expect("openai base64 encoding to be valid"); + let chunks = bytes.chunks_exact(4); + chunks + .map(|chunk| f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])) + .collect() + } +} diff --git a/async-openai/src/types/embeddings/mod.rs b/async-openai/src/types/embeddings/mod.rs index a20a174c..e3173eb2 100644 --- a/async-openai/src/types/embeddings/mod.rs +++ b/async-openai/src/types/embeddings/mod.rs @@ -1,3 +1,4 @@ mod embedding; +mod impls; pub use embedding::*; diff --git a/async-openai/src/types/images/image.rs b/async-openai/src/types/images/image.rs index 3376823a..e6543043 100644 --- a/async-openai/src/types/images/image.rs +++ b/async-openai/src/types/images/image.rs @@ -2,7 +2,7 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; -use crate::types::InputSource; +use crate::types::images::ImageInput; #[derive(Default, Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] pub enum ImageSize { @@ -281,11 +281,6 @@ pub struct ImagesResponse { pub usage: Option, } -#[derive(Debug, Default, Clone, PartialEq)] -pub struct ImageInput { - pub source: InputSource, -} - #[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "lowercase")] pub enum InputFidelity { diff --git a/async-openai/src/types/images/mod.rs b/async-openai/src/types/images/mod.rs index 1610ce8f..3afe4dd1 100644 --- a/async-openai/src/types/images/mod.rs +++ 
b/async-openai/src/types/images/mod.rs @@ -6,3 +6,6 @@ mod stream; pub use image::*; pub use stream::*; + +// Re-export shared types that are used in images +pub use crate::types::shared::ImageInput; diff --git a/async-openai/src/types/impls.rs b/async-openai/src/types/impls.rs index 7de6e902..9289d972 100644 --- a/async-openai/src/types/impls.rs +++ b/async-openai/src/types/impls.rs @@ -5,8 +5,8 @@ use crate::types::{ chat::{Prompt, StopConfiguration}, embeddings::EmbeddingInput, files::FileInput, - images::ImageInput, moderations::ModerationInput, + shared::ImageInput, InputSource, }; diff --git a/async-openai/src/types/mcp/impls.rs b/async-openai/src/types/mcp/impls.rs new file mode 100644 index 00000000..01bbb0b7 --- /dev/null +++ b/async-openai/src/types/mcp/impls.rs @@ -0,0 +1,74 @@ +use crate::types::mcp::{ + MCPToolAllowedTools, MCPToolApprovalFilter, MCPToolApprovalSetting, MCPToolFilter, + MCPToolRequireApproval, +}; + +// MCPToolRequireApproval ergonomics + +impl From for MCPToolRequireApproval { + fn from(setting: MCPToolApprovalSetting) -> Self { + MCPToolRequireApproval::ApprovalSetting(setting) + } +} + +impl From for MCPToolRequireApproval { + fn from(filter: MCPToolApprovalFilter) -> Self { + MCPToolRequireApproval::Filter(filter) + } +} + +// MCPToolAllowedTools ergonomics + +impl From for MCPToolAllowedTools { + fn from(filter: MCPToolFilter) -> Self { + MCPToolAllowedTools::Filter(filter) + } +} + +impl From> for MCPToolAllowedTools { + fn from(tools: Vec) -> Self { + MCPToolAllowedTools::List(tools) + } +} + +impl From> for MCPToolAllowedTools { + fn from(tools: Vec<&str>) -> Self { + MCPToolAllowedTools::List(tools.into_iter().map(|s| s.to_string()).collect()) + } +} + +impl From<&[&str]> for MCPToolAllowedTools { + fn from(tools: &[&str]) -> Self { + MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) + } +} + +impl From<[&str; N]> for MCPToolAllowedTools { + fn from(tools: [&str; N]) -> Self { + 
MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) + } +} + +impl From<&Vec> for MCPToolAllowedTools { + fn from(tools: &Vec) -> Self { + MCPToolAllowedTools::List(tools.clone()) + } +} + +impl From<&Vec<&str>> for MCPToolAllowedTools { + fn from(tools: &Vec<&str>) -> Self { + MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) + } +} + +impl From<&str> for MCPToolAllowedTools { + fn from(tool: &str) -> Self { + MCPToolAllowedTools::List(vec![tool.to_string()]) + } +} + +impl From for MCPToolAllowedTools { + fn from(tool: String) -> Self { + MCPToolAllowedTools::List(vec![tool]) + } +} diff --git a/async-openai/src/types/mcp.rs b/async-openai/src/types/mcp/mcp_.rs similarity index 74% rename from async-openai/src/types/mcp.rs rename to async-openai/src/types/mcp/mcp_.rs index deaad094..fae078a1 100644 --- a/async-openai/src/types/mcp.rs +++ b/async-openai/src/types/mcp/mcp_.rs @@ -135,73 +135,3 @@ pub struct MCPListToolsTool { #[serde(skip_serializing_if = "Option::is_none")] pub description: Option, } - -// MCPToolRequireApproval ergonomics - -impl From for MCPToolRequireApproval { - fn from(setting: MCPToolApprovalSetting) -> Self { - MCPToolRequireApproval::ApprovalSetting(setting) - } -} - -impl From for MCPToolRequireApproval { - fn from(filter: MCPToolApprovalFilter) -> Self { - MCPToolRequireApproval::Filter(filter) - } -} - -// MCPToolAllowedTools ergonomics - -impl From for MCPToolAllowedTools { - fn from(filter: MCPToolFilter) -> Self { - MCPToolAllowedTools::Filter(filter) - } -} - -impl From> for MCPToolAllowedTools { - fn from(tools: Vec) -> Self { - MCPToolAllowedTools::List(tools) - } -} - -impl From> for MCPToolAllowedTools { - fn from(tools: Vec<&str>) -> Self { - MCPToolAllowedTools::List(tools.into_iter().map(|s| s.to_string()).collect()) - } -} - -impl From<&[&str]> for MCPToolAllowedTools { - fn from(tools: &[&str]) -> Self { - MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) - 
} -} - -impl From<[&str; N]> for MCPToolAllowedTools { - fn from(tools: [&str; N]) -> Self { - MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) - } -} - -impl From<&Vec> for MCPToolAllowedTools { - fn from(tools: &Vec) -> Self { - MCPToolAllowedTools::List(tools.clone()) - } -} - -impl From<&Vec<&str>> for MCPToolAllowedTools { - fn from(tools: &Vec<&str>) -> Self { - MCPToolAllowedTools::List(tools.iter().map(|s| s.to_string()).collect()) - } -} - -impl From<&str> for MCPToolAllowedTools { - fn from(tool: &str) -> Self { - MCPToolAllowedTools::List(vec![tool.to_string()]) - } -} - -impl From for MCPToolAllowedTools { - fn from(tool: String) -> Self { - MCPToolAllowedTools::List(vec![tool]) - } -} diff --git a/async-openai/src/types/mcp/mod.rs b/async-openai/src/types/mcp/mod.rs new file mode 100644 index 00000000..c23db9f3 --- /dev/null +++ b/async-openai/src/types/mcp/mod.rs @@ -0,0 +1,4 @@ +mod impls; +mod mcp_; + +pub use mcp_::*; diff --git a/async-openai/src/types/mod.rs b/async-openai/src/types/mod.rs index cb932241..3309d519 100644 --- a/async-openai/src/types/mod.rs +++ b/async-openai/src/types/mod.rs @@ -7,7 +7,7 @@ pub mod batches; pub mod chat; pub mod chatkit; mod common; -mod completion; +pub mod completions; pub mod containers; pub mod embeddings; pub mod evals; @@ -15,14 +15,14 @@ pub mod files; pub mod finetuning; pub mod graders; pub mod images; -mod logprob; -mod mcp; +pub mod mcp; pub mod models; pub mod moderations; #[cfg_attr(docsrs, doc(cfg(feature = "realtime")))] #[cfg(feature = "realtime")] pub mod realtime; pub mod responses; +mod shared; pub mod uploads; pub mod vectorstores; pub mod videos; @@ -31,9 +31,6 @@ pub mod videos; pub mod webhooks; pub use common::*; -pub use completion::*; -pub use logprob::*; -pub use mcp::*; mod impls; use derive_builder::UninitializedFieldError; diff --git a/async-openai/src/types/realtime/mod.rs b/async-openai/src/types/realtime/mod.rs index 3aacf4f7..0f9c3e18 100644 --- 
a/async-openai/src/types/realtime/mod.rs +++ b/async-openai/src/types/realtime/mod.rs @@ -14,3 +14,10 @@ pub use error::*; pub use response::*; pub use server_event::*; pub use session::*; + +// Re-export shared types that are used in realtime +pub use crate::types::shared::LogProbProperties; +pub use crate::types::shared::TokenUsageInputTokenDetails; +pub use crate::types::shared::TranscriptTextUsageDuration; +pub use crate::types::shared::TranscriptTextUsageTokens; +pub use crate::types::shared::TranscriptionUsage; diff --git a/async-openai/src/types/realtime/server_event.rs b/async-openai/src/types/realtime/server_event.rs index bc328304..1c6c3fab 100644 --- a/async-openai/src/types/realtime/server_event.rs +++ b/async-openai/src/types/realtime/server_event.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use crate::traits::EventType; -use crate::types::{audio::TranscriptionUsage, LogProbProperties}; +use crate::types::realtime::{LogProbProperties, TranscriptionUsage}; use super::{ conversation_item::RealtimeConversationItem, error::RealtimeAPIError, diff --git a/async-openai/src/types/realtime/session.rs b/async-openai/src/types/realtime/session.rs index 98a9c64b..21c3e33e 100644 --- a/async-openai/src/types/realtime/session.rs +++ b/async-openai/src/types/realtime/session.rs @@ -1,8 +1,8 @@ use serde::{Deserialize, Serialize}; use crate::types::{ + mcp::MCPTool, responses::{Prompt, ToolChoiceFunction, ToolChoiceMCP, ToolChoiceOptions}, - MCPTool, }; #[derive(Debug, Default, Serialize, Deserialize, Clone)] diff --git a/async-openai/src/types/responses/api.rs b/async-openai/src/types/responses/api.rs index 4bb3aa93..3fdfa65c 100644 --- a/async-openai/src/types/responses/api.rs +++ b/async-openai/src/types/responses/api.rs @@ -2,7 +2,7 @@ use crate::error::OpenAIError; use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use super::conversation::{IncludeParam, ListOrder}; +use crate::types::responses::{IncludeParam, ListOrder}; /// Query 
parameters for listing conversation items. #[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)] diff --git a/async-openai/src/types/responses/impls.rs b/async-openai/src/types/responses/impls.rs index 9e7c755e..04cc8d85 100644 --- a/async-openai/src/types/responses/impls.rs +++ b/async-openai/src/types/responses/impls.rs @@ -1,3 +1,4 @@ +use crate::types::mcp::MCPTool; use crate::types::responses::{ ApplyPatchToolCallItemParam, ApplyPatchToolCallOutputItemParam, CodeInterpreterContainerAuto, CodeInterpreterTool, CodeInterpreterToolCall, CodeInterpreterToolContainer, @@ -10,11 +11,11 @@ use crate::types::responses::{ LocalShellToolCall, LocalShellToolCallOutput, MCPApprovalRequest, MCPApprovalResponse, MCPListTools, MCPToolCall, MessageItem, MessageType, OutputMessage, OutputMessageContent, OutputTextContent, Prompt, Reasoning, ReasoningEffort, ReasoningItem, ReasoningSummary, - RefusalContent, ResponsePromptVariables, ResponseStreamOptions, ResponseTextParam, Role, - TextResponseFormatConfiguration, Tool, ToolChoiceCustom, ToolChoiceFunction, ToolChoiceMCP, - ToolChoiceOptions, ToolChoiceParam, ToolChoiceTypes, WebSearchTool, WebSearchToolCall, + RefusalContent, ResponseFormatJsonSchema, ResponsePromptVariables, ResponseStreamOptions, + ResponseTextParam, Role, TextResponseFormatConfiguration, Tool, ToolChoiceCustom, + ToolChoiceFunction, ToolChoiceMCP, ToolChoiceOptions, ToolChoiceParam, ToolChoiceTypes, + WebSearchTool, WebSearchToolCall, }; -use crate::types::{chat::ResponseFormatJsonSchema, MCPTool}; impl> From for EasyInputMessage { fn from(value: S) -> Self { diff --git a/async-openai/src/types/responses/mod.rs b/async-openai/src/types/responses/mod.rs index 938a82f0..fe08e8a9 100644 --- a/async-openai/src/types/responses/mod.rs +++ b/async-openai/src/types/responses/mod.rs @@ -9,3 +9,21 @@ pub use api::*; pub use conversation::*; pub use response::*; pub use stream::*; + +// Re-export shared types +pub use 
crate::types::shared::ComparisonFilter; +pub use crate::types::shared::ComparisonType; +pub use crate::types::shared::CompletionTokensDetails; +pub use crate::types::shared::CompoundFilter; +pub use crate::types::shared::CompoundType; +pub use crate::types::shared::CustomGrammarFormatParam; +pub use crate::types::shared::Filter; +pub use crate::types::shared::GrammarSyntax; +pub use crate::types::shared::ImageDetail; +pub use crate::types::shared::InputTokenDetails; +pub use crate::types::shared::OutputTokenDetails; +pub use crate::types::shared::PromptTokensDetails; +pub use crate::types::shared::ReasoningEffort; +pub use crate::types::shared::ResponseFormat; +pub use crate::types::shared::ResponseFormatJsonSchema; +pub use crate::types::shared::ResponseUsage; diff --git a/async-openai/src/types/responses/response.rs b/async-openai/src/types/responses/response.rs index 62b548e2..d90ab66e 100644 --- a/async-openai/src/types/responses/response.rs +++ b/async-openai/src/types/responses/response.rs @@ -1,9 +1,9 @@ use crate::error::OpenAIError; -pub use crate::types::chat::{ - CompletionTokensDetails, ImageDetail, PromptTokensDetails, ReasoningEffort, - ResponseFormatJsonSchema, +use crate::types::mcp::{MCPListToolsTool, MCPTool}; +use crate::types::responses::{ + CustomGrammarFormatParam, Filter, ImageDetail, ReasoningEffort, ResponseFormatJsonSchema, + ResponseUsage, }; -use crate::types::{MCPListToolsTool, MCPTool}; use derive_builder::Builder; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -940,22 +940,6 @@ pub struct CustomToolParam { pub format: CustomToolParamFormat, } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum GrammarSyntax { - Lark, - #[default] - Regex, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)] -pub struct CustomGrammarFormatParam { - /// The grammar definition. 
- pub definition: String, - /// The syntax of the grammar definition. One of `lark` or `regex`. - pub syntax: GrammarSyntax, -} - #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] #[serde(tag = "type", rename_all = "lowercase")] pub enum CustomToolParamFormat { @@ -1107,72 +1091,6 @@ pub struct RankingOptions { pub score_threshold: Option, } -/// Filters for file search. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -#[serde(untagged)] -pub enum Filter { - /// A filter used to compare a specified attribute key to a given value using a defined - /// comparison operation. - Comparison(ComparisonFilter), - /// Combine multiple filters using `and` or `or`. - Compound(CompoundFilter), -} - -/// Single comparison filter. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct ComparisonFilter { - /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`. - /// - `eq`: equals - /// - `ne`: not equal - /// - `gt`: greater than - /// - `gte`: greater than or equal - /// - `lt`: less than - /// - `lte`: less than or equal - /// - `in`: in - /// - `nin`: not in - pub r#type: ComparisonType, - /// The key to compare against the value. - pub key: String, - /// The value to compare against the attribute key; supports string, number, or boolean types. - pub value: serde_json::Value, -} - -#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] -pub enum ComparisonType { - #[serde(rename = "eq")] - Equals, - #[serde(rename = "ne")] - NotEquals, - #[serde(rename = "gt")] - GreaterThan, - #[serde(rename = "gte")] - GreaterThanOrEqual, - #[serde(rename = "lt")] - LessThan, - #[serde(rename = "lte")] - LessThanOrEqual, - #[serde(rename = "in")] - In, - #[serde(rename = "nin")] - NotIn, -} - -/// Combine multiple filters using `and` or `or`. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct CompoundFilter { - /// 'Type of operation: `and` or `or`.' 
- pub r#type: CompoundType, - /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter. - pub filters: Vec, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -#[serde(rename_all = "lowercase")] -pub enum CompoundType { - And, - Or, -} - #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)] #[serde(rename_all = "lowercase")] pub enum WebSearchApproximateLocationType { @@ -2431,34 +2349,6 @@ pub struct MCPApprovalRequest { pub server_label: String, } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct InputTokenDetails { - /// The number of tokens that were retrieved from the cache. - /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). - pub cached_tokens: u32, -} - -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct OutputTokenDetails { - /// The number of reasoning tokens. - pub reasoning_tokens: u32, -} - -/// Usage statistics for a response. -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] -pub struct ResponseUsage { - /// The number of input tokens. - pub input_tokens: u32, - /// A detailed breakdown of the input tokens. - pub input_tokens_details: InputTokenDetails, - /// The number of output tokens. - pub output_tokens: u32, - /// A detailed breakdown of the output tokens. - pub output_tokens_details: OutputTokenDetails, - /// The total number of tokens used. - pub total_tokens: u32, -} - #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] pub enum Instructions { diff --git a/async-openai/src/types/shared/completion_tokens_details.rs b/async-openai/src/types/shared/completion_tokens_details.rs new file mode 100644 index 00000000..25f5ecca --- /dev/null +++ b/async-openai/src/types/shared/completion_tokens_details.rs @@ -0,0 +1,17 @@ +use serde::{Deserialize, Serialize}; + +/// Breakdown of tokens used in a completion. 
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct CompletionTokensDetails { + pub accepted_prediction_tokens: Option<u32>, + /// Audio input tokens generated by the model. + pub audio_tokens: Option<u32>, + /// Tokens generated by the model for reasoning. + pub reasoning_tokens: Option<u32>, + /// When using Predicted Outputs, the number of tokens in the + /// prediction that did not appear in the completion. However, like + /// reasoning tokens, these tokens are still counted in the total + /// completion tokens for purposes of billing, output, and context + /// window limits. + pub rejected_prediction_tokens: Option<u32>, +} diff --git a/async-openai/src/types/shared/custom_grammar_format_param.rs b/async-openai/src/types/shared/custom_grammar_format_param.rs new file mode 100644 index 00000000..8a3a01b8 --- /dev/null +++ b/async-openai/src/types/shared/custom_grammar_format_param.rs @@ -0,0 +1,21 @@ +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +use crate::error::OpenAIError; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum GrammarSyntax { + Lark, + #[default] + Regex, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, Builder)] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CustomGrammarFormatParam { + /// The grammar definition. + pub definition: String, + /// The syntax of the grammar definition. One of `lark` or `regex`. + pub syntax: GrammarSyntax, +} diff --git a/async-openai/src/types/shared/filter.rs b/async-openai/src/types/shared/filter.rs new file mode 100644 index 00000000..62e540e8 --- /dev/null +++ b/async-openai/src/types/shared/filter.rs @@ -0,0 +1,67 @@ +use serde::{Deserialize, Serialize}; + +/// Filters for file search. 
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(untagged)] +pub enum Filter { + /// A filter used to compare a specified attribute key to a given value using a defined + /// comparison operation. + Comparison(ComparisonFilter), + /// Combine multiple filters using `and` or `or`. + Compound(CompoundFilter), +} + +/// Single comparison filter. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct ComparisonFilter { + /// Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`. + /// - `eq`: equals + /// - `ne`: not equal + /// - `gt`: greater than + /// - `gte`: greater than or equal + /// - `lt`: less than + /// - `lte`: less than or equal + /// - `in`: in + /// - `nin`: not in + pub r#type: ComparisonType, + /// The key to compare against the value. + pub key: String, + /// The value to compare against the attribute key; supports string, number, or boolean types. + pub value: serde_json::Value, +} + +#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] +pub enum ComparisonType { + #[serde(rename = "eq")] + Equals, + #[serde(rename = "ne")] + NotEquals, + #[serde(rename = "gt")] + GreaterThan, + #[serde(rename = "gte")] + GreaterThanOrEqual, + #[serde(rename = "lt")] + LessThan, + #[serde(rename = "lte")] + LessThanOrEqual, + #[serde(rename = "in")] + In, + #[serde(rename = "nin")] + NotIn, +} + +/// Combine multiple filters using `and` or `or`. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct CompoundFilter { + /// 'Type of operation: `and` or `or`.' + pub r#type: CompoundType, + /// Array of filters to combine. Items can be ComparisonFilter or CompoundFilter. 
+ pub filters: Vec<Filter>, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum CompoundType { + And, + Or, +} diff --git a/async-openai/src/types/shared/function_call.rs b/async-openai/src/types/shared/function_call.rs new file mode 100644 index 00000000..505e7c47 --- /dev/null +++ b/async-openai/src/types/shared/function_call.rs @@ -0,0 +1,10 @@ +use serde::{Deserialize, Serialize}; + +/// The name and arguments of a function that should be called, as generated by the model. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct FunctionCall { + /// The name of the function to call. + pub name: String, + /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + pub arguments: String, +} diff --git a/async-openai/src/types/shared/function_name.rs b/async-openai/src/types/shared/function_name.rs new file mode 100644 index 00000000..0f798a07 --- /dev/null +++ b/async-openai/src/types/shared/function_name.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Serialize, Default, Debug, Deserialize, PartialEq)] +pub struct FunctionName { + /// The name of the function to call. 
+ pub name: String, +} diff --git a/async-openai/src/types/shared/function_object.rs b/async-openai/src/types/shared/function_object.rs new file mode 100644 index 00000000..b8e2b914 --- /dev/null +++ b/async-openai/src/types/shared/function_object.rs @@ -0,0 +1,27 @@ +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +use crate::error::OpenAIError; + +#[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)] +#[builder(name = "FunctionObjectArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct FunctionObject { + /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + pub name: String, + /// A description of what the function does, used by the model to choose when and how to call the function. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + /// + /// Omitting `parameters` defines a function with an empty parameter list. + #[serde(skip_serializing_if = "Option::is_none")] + pub parameters: Option, + + /// Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](https://platform.openai.com/docs/guides/function-calling). 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub strict: Option, +} diff --git a/async-openai/src/types/shared/image_detail.rs b/async-openai/src/types/shared/image_detail.rs new file mode 100644 index 00000000..a2d6fc14 --- /dev/null +++ b/async-openai/src/types/shared/image_detail.rs @@ -0,0 +1,10 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum ImageDetail { + #[default] + Auto, + Low, + High, +} diff --git a/async-openai/src/types/shared/image_input.rs b/async-openai/src/types/shared/image_input.rs new file mode 100644 index 00000000..7a3e358b --- /dev/null +++ b/async-openai/src/types/shared/image_input.rs @@ -0,0 +1,6 @@ +use crate::types::InputSource; + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct ImageInput { + pub source: InputSource, +} diff --git a/async-openai/src/types/shared/image_url.rs b/async-openai/src/types/shared/image_url.rs new file mode 100644 index 00000000..c8c9fb1b --- /dev/null +++ b/async-openai/src/types/shared/image_url.rs @@ -0,0 +1,18 @@ +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +use crate::error::OpenAIError; +use crate::types::shared::ImageDetail; + +#[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] +#[builder(name = "ImageUrlArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ImageUrl { + /// Either a URL of the image or the base64 encoded image data. + pub url: String, + /// Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). 
+ pub detail: Option, +} diff --git a/async-openai/src/types/logprob.rs b/async-openai/src/types/shared/log_prob_properties.rs similarity index 100% rename from async-openai/src/types/logprob.rs rename to async-openai/src/types/shared/log_prob_properties.rs diff --git a/async-openai/src/types/shared/mod.rs b/async-openai/src/types/shared/mod.rs new file mode 100644 index 00000000..7847a550 --- /dev/null +++ b/async-openai/src/types/shared/mod.rs @@ -0,0 +1,36 @@ +//! Shared types - these types are use by multiple type modules +//! and not exported directly, instead they are re-exported +//! by the modules that use them. +mod completion_tokens_details; +mod custom_grammar_format_param; +mod filter; +mod function_call; +mod function_name; +mod function_object; +mod image_detail; +mod image_input; +mod image_url; +mod log_prob_properties; +mod prompt_tokens_details; +mod reasoning_effort; +mod response_format; +mod response_usage; +mod static_chunking_strategy; +mod transcription_usage; + +pub use completion_tokens_details::*; +pub use custom_grammar_format_param::*; +pub use filter::*; +pub use function_call::*; +pub use function_name::*; +pub use function_object::*; +pub use image_detail::*; +pub use image_input::*; +pub use image_url::*; +pub use log_prob_properties::*; +pub use prompt_tokens_details::*; +pub use reasoning_effort::*; +pub use response_format::*; +pub use response_usage::*; +pub use static_chunking_strategy::*; +pub use transcription_usage::*; diff --git a/async-openai/src/types/shared/prompt_tokens_details.rs b/async-openai/src/types/shared/prompt_tokens_details.rs new file mode 100644 index 00000000..5ddb0db4 --- /dev/null +++ b/async-openai/src/types/shared/prompt_tokens_details.rs @@ -0,0 +1,10 @@ +use serde::{Deserialize, Serialize}; + +/// Breakdown of tokens used in a completion. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct PromptTokensDetails { + /// Audio input tokens present in the prompt. 
+ pub audio_tokens: Option<u32>, + /// Cached tokens present in the prompt. + pub cached_tokens: Option<u32>, +} diff --git a/async-openai/src/types/shared/reasoning_effort.rs b/async-openai/src/types/shared/reasoning_effort.rs new file mode 100644 index 00000000..80df41d9 --- /dev/null +++ b/async-openai/src/types/shared/reasoning_effort.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum ReasoningEffort { + None, + Minimal, + Low, + #[default] + Medium, + High, +} diff --git a/async-openai/src/types/shared/response_format.rs b/async-openai/src/types/shared/response_format.rs new file mode 100644 index 00000000..d70b6cb2 --- /dev/null +++ b/async-openai/src/types/shared/response_format.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ResponseFormat { + /// The type of response format being defined: `text` + Text, + /// The type of response format being defined: `json_object` + JsonObject, + /// The type of response format being defined: `json_schema` + JsonSchema { + json_schema: ResponseFormatJsonSchema, + }, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ResponseFormatJsonSchema { + /// A description of what the response format is for, used by the model to determine how to respond in the format. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option<String>, + /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + pub name: String, + /// The schema for the response format, described as a JSON Schema object. + /// Learn how to build JSON schemas [here](https://json-schema.org/). 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub schema: Option, + /// Whether to enable strict schema adherence when generating the output. + /// If set to true, the model will always follow the exact schema defined + /// in the `schema` field. Only a subset of JSON Schema is supported when + /// `strict` is `true`. To learn more, read the [Structured Outputs + /// guide](https://platform.openai.com/docs/guides/structured-outputs). + #[serde(skip_serializing_if = "Option::is_none")] + pub strict: Option, +} diff --git a/async-openai/src/types/shared/response_usage.rs b/async-openai/src/types/shared/response_usage.rs new file mode 100644 index 00000000..92fc99da --- /dev/null +++ b/async-openai/src/types/shared/response_usage.rs @@ -0,0 +1,29 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct InputTokenDetails { + /// The number of tokens that were retrieved from the cache. + /// [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching). + pub cached_tokens: u32, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct OutputTokenDetails { + /// The number of reasoning tokens. + pub reasoning_tokens: u32, +} + +/// Usage statistics for a response. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct ResponseUsage { + /// The number of input tokens. + pub input_tokens: u32, + /// A detailed breakdown of the input tokens. + pub input_tokens_details: InputTokenDetails, + /// The number of output tokens. + pub output_tokens: u32, + /// A detailed breakdown of the output tokens. + pub output_tokens_details: OutputTokenDetails, + /// The total number of tokens used. 
+ pub total_tokens: u32, +} diff --git a/async-openai/src/types/shared/static_chunking_strategy.rs b/async-openai/src/types/shared/static_chunking_strategy.rs new file mode 100644 index 00000000..00f783f2 --- /dev/null +++ b/async-openai/src/types/shared/static_chunking_strategy.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +/// Static Chunking Strategy +#[derive(Clone, Serialize, Debug, Deserialize, PartialEq, Default)] +pub struct StaticChunkingStrategy { + /// The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`. + pub max_chunk_size_tokens: u16, + /// The number of tokens that overlap between chunks. The default value is `400`. + /// + /// Note that the overlap must not exceed half of `max_chunk_size_tokens`. + pub chunk_overlap_tokens: u16, +} diff --git a/async-openai/src/types/shared/transcription_usage.rs b/async-openai/src/types/shared/transcription_usage.rs new file mode 100644 index 00000000..c4e7eae1 --- /dev/null +++ b/async-openai/src/types/shared/transcription_usage.rs @@ -0,0 +1,36 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TokenUsageInputTokenDetails { + /// Number of audio tokens billed for this request. + pub audio_tokens: u32, + /// Number of text tokens billed for this request. + pub text_tokens: u32, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TranscriptTextUsageTokens { + /// Number of input tokens billed for this request. + pub input_tokens: u32, + /// Number of output tokens generated. + pub output_tokens: u32, + /// Total number of tokens used (input + output). + pub total_tokens: u32, + /// Details about the input tokens billed for this request. + pub input_token_details: Option, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct TranscriptTextUsageDuration { + ///Duration of the input audio in seconds. 
+ pub seconds: f32, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +#[serde(tag = "type")] +pub enum TranscriptionUsage { + #[serde(rename = "tokens")] + Tokens(TranscriptTextUsageTokens), + #[serde(rename = "duration")] + Duration(TranscriptTextUsageDuration), +} diff --git a/async-openai/src/types/vectorstores/mod.rs b/async-openai/src/types/vectorstores/mod.rs index 8dda824f..91c48b62 100644 --- a/async-openai/src/types/vectorstores/mod.rs +++ b/async-openai/src/types/vectorstores/mod.rs @@ -3,3 +3,12 @@ mod vector_store; pub use api::*; pub use vector_store::*; + +// Re-export shared types + +pub use crate::types::shared::ComparisonFilter; +pub use crate::types::shared::ComparisonType; +pub use crate::types::shared::CompoundFilter; +pub use crate::types::shared::CompoundType; +pub use crate::types::shared::Filter; +pub use crate::types::shared::StaticChunkingStrategy; diff --git a/async-openai/src/types/vectorstores/vector_store.rs b/async-openai/src/types/vectorstores/vector_store.rs index 2cf6447b..4de6897f 100644 --- a/async-openai/src/types/vectorstores/vector_store.rs +++ b/async-openai/src/types/vectorstores/vector_store.rs @@ -5,10 +5,10 @@ use serde::{Deserialize, Serialize}; use crate::{ error::OpenAIError, - types::{responses::Filter, Metadata}, + types::{vectorstores::Filter, Metadata}, }; -use crate::types::assistants::StaticChunkingStrategy; +use crate::types::vectorstores::StaticChunkingStrategy; #[derive(Debug, Serialize, Deserialize, Default, Clone, Builder, PartialEq)] #[builder(name = "CreateVectorStoreRequestArgs")] diff --git a/async-openai/src/types/videos/mod.rs b/async-openai/src/types/videos/mod.rs index 847df591..9d3c3821 100644 --- a/async-openai/src/types/videos/mod.rs +++ b/async-openai/src/types/videos/mod.rs @@ -5,3 +5,6 @@ mod video; pub use api::*; pub use video::*; + +// Re-export shared types that are used in videos +pub use crate::types::shared::ImageInput; diff --git a/async-openai/src/types/videos/video.rs 
b/async-openai/src/types/videos/video.rs index 6eb93e22..6525d33b 100644 --- a/async-openai/src/types/videos/video.rs +++ b/async-openai/src/types/videos/video.rs @@ -1,7 +1,8 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use crate::{error::OpenAIError, types::images::ImageInput}; +use crate::error::OpenAIError; +use crate::types::videos::ImageInput; #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] pub enum VideoSize { diff --git a/async-openai/src/types/webhooks/mod.rs b/async-openai/src/types/webhooks/mod.rs new file mode 100644 index 00000000..73f94904 --- /dev/null +++ b/async-openai/src/types/webhooks/mod.rs @@ -0,0 +1,3 @@ +mod webhooks; + +pub use webhooks::*; diff --git a/async-openai/src/types/webhooks.rs b/async-openai/src/types/webhooks/webhooks.rs similarity index 100% rename from async-openai/src/types/webhooks.rs rename to async-openai/src/types/webhooks/webhooks.rs diff --git a/async-openai/src/vectorstores/mod.rs b/async-openai/src/vectorstores/mod.rs new file mode 100644 index 00000000..2a8ac065 --- /dev/null +++ b/async-openai/src/vectorstores/mod.rs @@ -0,0 +1,7 @@ +mod vector_store_file_batches; +mod vector_store_files; +mod vector_stores; + +pub use vector_store_file_batches::*; +pub use vector_store_files::*; +pub use vector_stores::*; diff --git a/async-openai/src/vector_store_file_batches.rs b/async-openai/src/vectorstores/vector_store_file_batches.rs similarity index 100% rename from async-openai/src/vector_store_file_batches.rs rename to async-openai/src/vectorstores/vector_store_file_batches.rs diff --git a/async-openai/src/vector_store_files.rs b/async-openai/src/vectorstores/vector_store_files.rs similarity index 100% rename from async-openai/src/vector_store_files.rs rename to async-openai/src/vectorstores/vector_store_files.rs diff --git a/async-openai/src/vector_stores.rs b/async-openai/src/vectorstores/vector_stores.rs similarity index 97% rename from async-openai/src/vector_stores.rs 
rename to async-openai/src/vectorstores/vector_stores.rs index 57d75586..3c6e4fbb 100644 --- a/async-openai/src/vector_stores.rs +++ b/async-openai/src/vectorstores/vector_stores.rs @@ -6,8 +6,7 @@ use crate::{ UpdateVectorStoreRequest, VectorStoreObject, VectorStoreSearchRequest, VectorStoreSearchResultsPage, }, - vector_store_file_batches::VectorStoreFileBatches, - Client, RequestOptions, VectorStoreFiles, + Client, RequestOptions, VectorStoreFileBatches, VectorStoreFiles, }; pub struct VectorStores<'c, C: Config> { diff --git a/async-openai/tests/boxed_future.rs b/async-openai/tests/boxed_future.rs index fa03914a..b37d08bb 100644 --- a/async-openai/tests/boxed_future.rs +++ b/async-openai/tests/boxed_future.rs @@ -1,7 +1,7 @@ use futures::future::{BoxFuture, FutureExt}; use futures::StreamExt; -use async_openai::types::{CompletionResponseStream, CreateCompletionRequestArgs}; +use async_openai::types::completions::{CompletionResponseStream, CreateCompletionRequestArgs}; use async_openai::Client; #[tokio::test] diff --git a/async-openai/tests/bring-your-own-type.rs b/async-openai/tests/bring_your_own_type.rs similarity index 99% rename from async-openai/tests/bring-your-own-type.rs rename to async-openai/tests/bring_your_own_type.rs index f9628c8e..99737bed 100644 --- a/async-openai/tests/bring-your-own-type.rs +++ b/async-openai/tests/bring_your_own_type.rs @@ -645,54 +645,63 @@ async fn test_byot_usage() { let client = Client::new(); let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .audio_speeches_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .audio_transcriptions_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .code_interpreter_sessions_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .completions_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() 
.embeddings_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .images_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .moderations_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() .vector_stores_byot() .await; let _r: Result = client + .admin() .usage() .query(&[("limit", "2")]) .unwrap() diff --git a/async-openai/tests/completion.rs b/async-openai/tests/chat_completion.rs similarity index 100% rename from async-openai/tests/completion.rs rename to async-openai/tests/chat_completion.rs diff --git a/examples/completions-stream/src/main.rs b/examples/completions-stream/src/main.rs index bd301938..b05ebd84 100644 --- a/examples/completions-stream/src/main.rs +++ b/examples/completions-stream/src/main.rs @@ -1,4 +1,4 @@ -use async_openai::{types::CreateCompletionRequestArgs, Client}; +use async_openai::{types::completions::CreateCompletionRequestArgs, Client}; use futures::StreamExt; #[tokio::main] diff --git a/examples/completions/src/main.rs b/examples/completions/src/main.rs index d54b4837..caa39804 100644 --- a/examples/completions/src/main.rs +++ b/examples/completions/src/main.rs @@ -1,6 +1,6 @@ use std::error::Error; -use async_openai::{types::CreateCompletionRequestArgs, Client}; +use async_openai::{types::completions::CreateCompletionRequestArgs, Client}; #[tokio::main] async fn main() -> Result<(), Box> { diff --git a/examples/responses/src/main.rs b/examples/responses/src/main.rs index e96082f4..456a7f20 100644 --- a/examples/responses/src/main.rs +++ b/examples/responses/src/main.rs @@ -2,11 +2,11 @@ use std::error::Error; use async_openai::{ types::{ + mcp::{MCPToolApprovalSetting, MCPToolArgs}, responses::{ CreateResponseArgs, ResponseTextParam, TextResponseFormatConfiguration, Tool, Verbosity, WebSearchTool, }, - MCPToolApprovalSetting, MCPToolArgs, }, Client, }; diff --git a/examples/usage/src/main.rs 
b/examples/usage/src/main.rs index a589f16e..f345e72b 100644 --- a/examples/usage/src/main.rs +++ b/examples/usage/src/main.rs @@ -37,7 +37,7 @@ async fn main() -> Result<(), Box> { // Audio Speeches println!("=== Audio Speeches Usage ==="); - match client.usage().query(&query)?.audio_speeches().await { + match client.admin().usage().query(&query)?.audio_speeches().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -78,7 +78,13 @@ async fn main() -> Result<(), Box> { // Audio Transcriptions println!("=== Audio Transcriptions Usage ==="); - match client.usage().query(&query)?.audio_transcriptions().await { + match client + .admin() + .usage() + .query(&query)? + .audio_transcriptions() + .await + { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -120,6 +126,7 @@ async fn main() -> Result<(), Box> { // Code Interpreter Sessions println!("=== Code Interpreter Sessions Usage ==="); match client + .admin() .usage() .query(&query)? 
.code_interpreter_sessions() @@ -162,7 +169,7 @@ async fn main() -> Result<(), Box> { // Completions println!("=== Completions Usage ==="); - match client.usage().query(&query)?.completions().await { + match client.admin().usage().query(&query)?.completions().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -227,7 +234,7 @@ async fn main() -> Result<(), Box> { // Embeddings println!("=== Embeddings Usage ==="); - match client.usage().query(&query)?.embeddings().await { + match client.admin().usage().query(&query)?.embeddings().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -268,7 +275,7 @@ async fn main() -> Result<(), Box> { // Images println!("=== Images Usage ==="); - match client.usage().query(&query)?.images().await { + match client.admin().usage().query(&query)?.images().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -315,7 +322,7 @@ async fn main() -> Result<(), Box> { // Moderations println!("=== Moderations Usage ==="); - match client.usage().query(&query)?.moderations().await { + match client.admin().usage().query(&query)?.moderations().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -356,7 +363,7 @@ async fn main() -> Result<(), Box> { // Vector Stores println!("=== Vector Stores Usage ==="); - match client.usage().query(&query)?.vector_stores().await { + match client.admin().usage().query(&query)?.vector_stores().await { Ok(response) => { println!("Found {} time buckets", response.data.len()); for bucket in &response.data { @@ -395,7 +402,7 @@ async fn main() -> Result<(), Box> { // Costs println!("=== Costs ==="); - match client.usage().query(&query)?.costs().await { + match client.admin().usage().query(&query)?.costs().await { Ok(response) => { println!("Found {} time buckets", 
response.data.len()); let mut total_cost = 0.0;