diff --git a/async-openai/src/batches.rs b/async-openai/src/batches.rs index 57910490..efa5b0da 100644 --- a/async-openai/src/batches.rs +++ b/async-openai/src/batches.rs @@ -3,7 +3,7 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{Batch, BatchRequest, ListBatchesResponse}, + types::batches::{Batch, BatchRequest, ListBatchesResponse}, Client, }; diff --git a/async-openai/src/client.rs b/async-openai/src/client.rs index abdf9c6f..dadef6a5 100644 --- a/async-openai/src/client.rs +++ b/async-openai/src/client.rs @@ -14,7 +14,7 @@ use crate::{ moderation::Moderations, traits::AsyncTryFrom, Assistants, Audio, AuditLogs, Batches, Chat, Completions, Containers, Conversations, - Embeddings, FineTuning, Invites, Models, Projects, Responses, Threads, Uploads, Users, + Embeddings, Evals, FineTuning, Invites, Models, Projects, Responses, Threads, Uploads, Users, VectorStores, Videos, }; @@ -183,6 +183,11 @@ impl Client { Containers::new(self) } + /// To call [Evals] group related APIs using this client. + pub fn evals(&self) -> Evals<'_, C> { + Evals::new(self) + } + pub fn config(&self) -> &C { &self.config } diff --git a/async-openai/src/embedding.rs b/async-openai/src/embedding.rs index f5759296..7b7f4395 100644 --- a/async-openai/src/embedding.rs +++ b/async-openai/src/embedding.rs @@ -1,17 +1,19 @@ use crate::{ config::Config, error::OpenAIError, - types::{CreateBase64EmbeddingResponse, CreateEmbeddingRequest, CreateEmbeddingResponse}, + types::embeddings::{ + CreateBase64EmbeddingResponse, CreateEmbeddingRequest, CreateEmbeddingResponse, + }, Client, }; #[cfg(not(feature = "byot"))] -use crate::types::EncodingFormat; +use crate::types::embeddings::EncodingFormat; /// Get a vector representation of a given input that can be easily /// consumed by machine learning models and algorithms. 
/// -/// Related guide: [Embeddings](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) +/// Related guide: [Embeddings](https://platform.openai.com/docs/guides/embeddings) pub struct Embeddings<'c, C: Config> { client: &'c Client, } @@ -65,8 +67,8 @@ impl<'c, C: Config> Embeddings<'c, C> { #[cfg(test)] mod tests { use crate::error::OpenAIError; - use crate::types::{CreateEmbeddingResponse, Embedding, EncodingFormat}; - use crate::{types::CreateEmbeddingRequestArgs, Client}; + use crate::types::embeddings::{CreateEmbeddingResponse, Embedding, EncodingFormat}; + use crate::{types::embeddings::CreateEmbeddingRequestArgs, Client}; #[tokio::test] async fn test_embedding_string() { @@ -165,7 +167,6 @@ mod tests { } #[tokio::test] - #[cfg(not(feature = "byot"))] async fn test_cannot_use_base64_encoding_with_normal_create_request() { let client = Client::new(); @@ -187,7 +188,7 @@ mod tests { let client = Client::new(); const MODEL: &str = "text-embedding-ada-002"; - const INPUT: &str = "CoLoop will eat the other qual research tools..."; + const INPUT: &str = "a head full of dreams"; let b64_request = CreateEmbeddingRequestArgs::default() .model(MODEL) diff --git a/async-openai/src/eval_run_output_items.rs b/async-openai/src/eval_run_output_items.rs new file mode 100644 index 00000000..7e89b8e2 --- /dev/null +++ b/async-openai/src/eval_run_output_items.rs @@ -0,0 +1,49 @@ +use serde::Serialize; + +use crate::{ + config::Config, + error::OpenAIError, + types::evals::{EvalRunOutputItem, EvalRunOutputItemList}, + Client, +}; + +pub struct EvalRunOutputItems<'c, C: Config> { + client: &'c Client, + pub eval_id: String, + pub run_id: String, +} + +impl<'c, C: Config> EvalRunOutputItems<'c, C> { + pub fn new(client: &'c Client, eval_id: &str, run_id: &str) -> Self { + Self { + client, + eval_id: eval_id.into(), + run_id: run_id.into(), + } + } + + /// Get a list of output items for an evaluation run. + #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn list(&self, query: &Q) -> Result + where + Q: Serialize + ?Sized, + { + self.client + .get_with_query( + &format!("/evals/{}/runs/{}/output_items", self.eval_id, self.run_id), + &query, + ) + .await + } + + /// Get an evaluation run output item by ID. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn retrieve(&self, output_item_id: &str) -> Result { + self.client + .get(&format!( + "/evals/{}/runs/{}/output_items/{}", + self.eval_id, self.run_id, output_item_id + )) + .await + } +} diff --git a/async-openai/src/eval_runs.rs b/async-openai/src/eval_runs.rs new file mode 100644 index 00000000..f19e6dc7 --- /dev/null +++ b/async-openai/src/eval_runs.rs @@ -0,0 +1,74 @@ +use serde::Serialize; + +use crate::{ + config::Config, + error::OpenAIError, + eval_run_output_items::EvalRunOutputItems, + types::evals::{CreateEvalRunRequest, DeleteEvalRunResponse, EvalRun, EvalRunList}, + Client, +}; + +pub struct EvalRuns<'c, C: Config> { + client: &'c Client, + pub eval_id: String, +} + +impl<'c, C: Config> EvalRuns<'c, C> { + pub fn new(client: &'c Client, eval_id: &str) -> Self { + Self { + client, + eval_id: eval_id.into(), + } + } + + /// [EvalRunOutputItems] API group + pub fn output_items(&self, run_id: &str) -> EvalRunOutputItems<'_, C> { + EvalRunOutputItems::new(self.client, &self.eval_id, run_id) + } + + /// Get a list of runs for an evaluation. 
+ #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn list(&self, query: &Q) -> Result + where + Q: Serialize + ?Sized, + { + self.client + .get_with_query(&format!("/evals/{}/runs", self.eval_id), &query) + .await + } + + /// Kicks off a new run for a given evaluation. + #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn create(&self, request: CreateEvalRunRequest) -> Result { + self.client + .post(&format!("/evals/{}/runs", self.eval_id), request) + .await + } + + /// Get an evaluation run by ID. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn retrieve(&self, run_id: &str) -> Result { + self.client + .get(&format!("/evals/{}/runs/{}", self.eval_id, run_id)) + .await + } + + /// Cancel an ongoing evaluation run. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn cancel(&self, run_id: &str) -> Result { + self.client + .post( + &format!("/evals/{}/runs/{}", self.eval_id, run_id), + serde_json::json!({}), + ) + .await + } + + /// Delete an eval run. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn delete(&self, run_id: &str) -> Result { + self.client + .delete(&format!("/evals/{}/runs/{}", self.eval_id, run_id)) + .await + } +} diff --git a/async-openai/src/evals.rs b/async-openai/src/evals.rs new file mode 100644 index 00000000..1cc5aeac --- /dev/null +++ b/async-openai/src/evals.rs @@ -0,0 +1,69 @@ +use serde::Serialize; + +use crate::{ + config::Config, + error::OpenAIError, + eval_runs::EvalRuns, + types::evals::{CreateEvalRequest, DeleteEvalResponse, Eval, EvalList, UpdateEvalRequest}, + Client, +}; + +/// Create, manage, and run evals in the OpenAI platform. Related guide: +/// [Evals](https://platform.openai.com/docs/guides/evals) +pub struct Evals<'c, C: Config> { + client: &'c Client, +} + +impl<'c, C: Config> Evals<'c, C> { + pub fn new(client: &'c Client) -> Self { + Self { client } + } + + /// [EvalRuns] API group + pub fn runs(&self, eval_id: &str) -> EvalRuns<'_, C> { + EvalRuns::new(self.client, eval_id) + } + + /// List evaluations for a project. + #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn list(&self, query: &Q) -> Result + where + Q: Serialize + ?Sized, + { + self.client.get_with_query("/evals", &query).await + } + + /// Create the structure of an evaluation that can be used to test a model's performance. + /// An evaluation is a set of testing criteria and the config for a data source, which dictates + /// the schema of the data used in the evaluation. After creating an evaluation, you can run it + /// on different models and model parameters. We support several types of graders and + /// datasources. For more information, see the [Evals guide](https://platform.openai.com/docs/guides/evals). + #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn create(&self, request: CreateEvalRequest) -> Result { + self.client.post("/evals", request).await + } + + /// Get an evaluation by ID. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn retrieve(&self, eval_id: &str) -> Result { + self.client.get(&format!("/evals/{eval_id}")).await + } + + /// Update certain properties of an evaluation. 
+ #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn update( + &self, + eval_id: &str, + request: UpdateEvalRequest, + ) -> Result { + self.client + .post(&format!("/evals/{eval_id}"), request) + .await + } + + /// Delete an evaluation. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn delete(&self, eval_id: &str) -> Result { + self.client.delete(&format!("/evals/{eval_id}")).await + } +} diff --git a/async-openai/src/file.rs b/async-openai/src/file.rs index 4ec33880..057967b6 100644 --- a/async-openai/src/file.rs +++ b/async-openai/src/file.rs @@ -4,7 +4,7 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{CreateFileRequest, DeleteFileResponse, ListFilesResponse, OpenAIFile}, + types::files::{CreateFileRequest, DeleteFileResponse, ListFilesResponse, OpenAIFile}, Client, }; @@ -18,13 +18,13 @@ impl<'c, C: Config> Files<'c, C> { Self { client } } - /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB. + /// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 1 TB. /// /// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for details. /// /// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models. /// - ///The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). + /// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input). /// /// Please [contact us](https://help.openai.com/) if you need to increase these storage limits. 
#[crate::byot( @@ -70,7 +70,9 @@ impl<'c, C: Config> Files<'c, C> { #[cfg(test)] mod tests { use crate::{ - types::{CreateFileRequestArgs, FileExpiresAfter, FileExpiresAfterAnchor, FilePurpose}, + types::files::{ + CreateFileRequestArgs, FileExpirationAfter, FileExpirationAfterAnchor, FilePurpose, + }, Client, }; @@ -89,8 +91,8 @@ mod tests { let request = CreateFileRequestArgs::default() .file(test_file_path) .purpose(FilePurpose::FineTune) - .expires_after(FileExpiresAfter { - anchor: FileExpiresAfterAnchor::CreatedAt, + .expires_after(FileExpirationAfter { + anchor: FileExpirationAfterAnchor::CreatedAt, seconds: 3600, }) .build() diff --git a/async-openai/src/fine_tuning.rs b/async-openai/src/fine_tuning.rs index c599ae63..4a4d271c 100644 --- a/async-openai/src/fine_tuning.rs +++ b/async-openai/src/fine_tuning.rs @@ -3,8 +3,10 @@ use serde::Serialize; use crate::{ config::Config, error::OpenAIError, - types::{ - CreateFineTuningJobRequest, FineTuningJob, ListFineTuningJobCheckpointsResponse, + types::finetuning::{ + CreateFineTuningCheckpointPermissionRequest, CreateFineTuningJobRequest, + DeleteFineTuningCheckpointPermissionResponse, FineTuningJob, + ListFineTuningCheckpointPermissionResponse, ListFineTuningJobCheckpointsResponse, ListFineTuningJobEventsResponse, ListPaginatedFineTuningJobsResponse, }, Client, @@ -22,11 +24,12 @@ impl<'c, C: Config> FineTuning<'c, C> { Self { client } } - /// Creates a job that fine-tunes a specified model from a given dataset. + /// Creates a fine-tuning job which begins the process of creating a new model from a given dataset. /// - /// Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + /// Response includes details of the enqueued job including job status and the name of the fine-tuned + /// models once complete. /// - /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + /// [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)] pub async fn create( &self, @@ -49,9 +52,9 @@ impl<'c, C: Config> FineTuning<'c, C> { .await } - /// Gets info about the fine-tune job. + /// Get info about a fine-tuning job. /// - /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + /// [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization) #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] pub async fn retrieve(&self, fine_tuning_job_id: &str) -> Result { self.client @@ -70,7 +73,29 @@ impl<'c, C: Config> FineTuning<'c, C> { .await } - /// Get fine-grained status updates for a fine-tune job. + /// Pause a fine-tune job. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn pause(&self, fine_tuning_job_id: &str) -> Result { + self.client + .post( + format!("/fine_tuning/jobs/{fine_tuning_job_id}/pause").as_str(), + (), + ) + .await + } + + /// Resume a fine-tune job. + #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn resume(&self, fine_tuning_job_id: &str) -> Result { + self.client + .post( + format!("/fine_tuning/jobs/{fine_tuning_job_id}/resume").as_str(), + (), + ) + .await + } + + /// Get status updates for a fine-tuning job. 
#[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] pub async fn list_events( &self, @@ -88,6 +113,7 @@ impl<'c, C: Config> FineTuning<'c, C> { .await } + /// List checkpoints for a fine-tuning job. #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] pub async fn list_checkpoints( &self, @@ -104,4 +130,51 @@ impl<'c, C: Config> FineTuning<'c, C> { ) .await } + + #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn create_checkpoint_permissions( + &self, + fine_tuned_model_checkpoint: &str, + request: CreateFineTuningCheckpointPermissionRequest, + ) -> Result { + self.client + .post( + format!("/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions") + .as_str(), + request, + ) + .await + } + + #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)] + pub async fn list_checkpoint_permissions( + &self, + fine_tuned_model_checkpoint: &str, + query: &Q, + ) -> Result + where + Q: Serialize + ?Sized, + { + self.client + .get_with_query( + format!("/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions") + .as_str(), + &query, + ) + .await + } + + #[crate::byot(T0 = std::fmt::Display, T1 = std::fmt::Display, R = serde::de::DeserializeOwned)] + pub async fn delete_checkpoint_permissions( + &self, + fine_tuned_model_checkpoint: &str, + permission_id: &str, + ) -> Result { + self.client + .delete( + format!("/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}") + .as_str(), + ) + .await + } } diff --git a/async-openai/src/lib.rs b/async-openai/src/lib.rs index 51530c94..b58904ae 100644 --- a/async-openai/src/lib.rs +++ b/async-openai/src/lib.rs @@ -155,6 +155,9 @@ mod conversations; mod download; mod embedding; pub mod error; +mod eval_run_output_items; +mod eval_runs; +mod evals; mod file; mod fine_tuning; mod image; @@ -197,6 +200,9 @@ pub use containers::Containers; pub use conversation_items::ConversationItems; pub use conversations::Conversations; pub use embedding::Embeddings; +pub use eval_run_output_items::EvalRunOutputItems; +pub use eval_runs::EvalRuns; +pub use evals::Evals; pub use file::Files; pub use fine_tuning::FineTuning; pub use image::Images; diff --git a/async-openai/src/model.rs b/async-openai/src/model.rs index 47cc8781..8d56bcb4 100644 --- a/async-openai/src/model.rs +++ b/async-openai/src/model.rs @@ -1,7 +1,7 @@ use crate::{ config::Config, error::OpenAIError, - types::{DeleteModelResponse, ListModelResponse, Model}, + types::models::{DeleteModelResponse, ListModelResponse, Model}, Client, }; @@ -27,8 +27,8 @@ impl<'c, C: Config> Models<'c, C> { /// Retrieves a model instance, providing basic information about the model /// such as the owner and permissioning. #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)] - pub async fn retrieve(&self, id: &str) -> Result { - self.client.get(format!("/models/{id}").as_str()).await + pub async fn retrieve(&self, model: &str) -> Result { + self.client.get(format!("/models/{model}").as_str()).await } /// Delete a fine-tuned model. You must have the Owner role in your organization. 
diff --git a/async-openai/src/moderation.rs b/async-openai/src/moderation.rs index 6f831374..2e900256 100644 --- a/async-openai/src/moderation.rs +++ b/async-openai/src/moderation.rs @@ -1,7 +1,7 @@ use crate::{ config::Config, error::OpenAIError, - types::{CreateModerationRequest, CreateModerationResponse}, + types::moderations::{CreateModerationRequest, CreateModerationResponse}, Client, }; diff --git a/async-openai/src/types/batch.rs b/async-openai/src/types/batches/batch.rs similarity index 76% rename from async-openai/src/types/batch.rs rename to async-openai/src/types/batches/batch.rs index 5546285e..1148a31a 100644 --- a/async-openai/src/types/batch.rs +++ b/async-openai/src/types/batches/batch.rs @@ -4,6 +4,8 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; use crate::error::OpenAIError; +use crate::types::responses::ResponseUsage; +use crate::types::Metadata; #[derive(Debug, Serialize, Default, Clone, Builder, PartialEq, Deserialize)] #[builder(name = "BatchRequestArgs")] @@ -16,10 +18,10 @@ pub struct BatchRequest { /// /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. /// - /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 100 MB in size. + /// Your input file must be formatted as a [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input), and must be uploaded with the purpose `batch`. The file can contain up to 50,000 requests, and can be up to 200 MB in size. pub input_file_id: String, - /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. + /// The endpoint to be used for all requests in the batch. Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. pub endpoint: BatchEndpoint, /// The time frame within which the batch should be processed. Currently only `24h` is supported. @@ -27,11 +29,16 @@ pub struct BatchRequest { /// Optional custom metadata for the batch. pub metadata: Option>, + + /// The expiration policy for the output and/or error file that are generated for a batch. + pub output_expires_after: Option, } #[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Default)] pub enum BatchEndpoint { #[default] + #[serde(rename = "/v1/responses")] + V1Responses, #[serde(rename = "/v1/chat/completions")] V1ChatCompletions, #[serde(rename = "/v1/embeddings")] @@ -47,6 +54,23 @@ pub enum BatchCompletionWindow { W24H, } +/// File expiration policy +/// +/// The expiration policy for the output and/or error file that are generated for a batch. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct BatchFileExpirationAfter { + /// Anchor timestamp after which the expiration policy applies. Supported anchors: `created_at`. Note that the anchor is the file creation time, not the time the batch is created. + pub anchor: BatchFileExpirationAnchor, + /// The number of seconds after the anchor time that the file will expire. Must be between 3600 (1 hour) and 2592000 (30 days). 
+ pub seconds: u32, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum BatchFileExpirationAnchor { + CreatedAt, +} + #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] pub struct Batch { pub id: String, @@ -54,6 +78,8 @@ pub struct Batch { pub object: String, /// The OpenAI API endpoint used by the batch. pub endpoint: String, + /// Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a wide range of models with different capabilities, performance characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare available models. + pub model: Option, pub errors: Option, /// The ID of the input file for the batch. pub input_file_id: String, @@ -77,7 +103,7 @@ pub struct Batch { pub completed_at: Option, /// The Unix timestamp (in seconds) for when the batch failed. pub failed_at: Option, - /// he Unix timestamp (in seconds) for when the batch expired. + /// The Unix timestamp (in seconds) for when the batch expired. pub expired_at: Option, /// The Unix timestamp (in seconds) for when the batch started cancelling. pub cancelling_at: Option, @@ -85,8 +111,10 @@ pub struct Batch { pub cancelled_at: Option, /// The request counts for different statuses within the batch. pub request_counts: Option, + /// Represents token usage details including input tokens, output tokens, a breakdown of output tokens, and the total tokens used. Only populated on batches created after September 7, 2025. + pub usage: Option, /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. - pub metadata: Option>, + pub metadata: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -153,7 +181,7 @@ pub struct BatchRequestInput { pub custom_id: String, /// The HTTP method to be used for the request. Currently only `POST` is supported. pub method: BatchRequestInputMethod, - /// The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. + /// The OpenAI API relative URL to be used for the request. Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. pub url: BatchEndpoint, pub body: Option, } diff --git a/async-openai/src/types/batches/mod.rs b/async-openai/src/types/batches/mod.rs new file mode 100644 index 00000000..ab4c11ca --- /dev/null +++ b/async-openai/src/types/batches/mod.rs @@ -0,0 +1,3 @@ +mod batch; + +pub use batch::*; diff --git a/async-openai/src/types/embedding.rs b/async-openai/src/types/embeddings/embedding.rs similarity index 77% rename from async-openai/src/types/embedding.rs rename to async-openai/src/types/embeddings/embedding.rs index 641e19bc..a1db1501 100644 --- a/async-openai/src/types/embedding.rs +++ b/async-openai/src/types/embeddings/embedding.rs @@ -29,22 +29,26 @@ pub enum EncodingFormat { #[builder(derive(Debug))] #[builder(build_fn(error = "OpenAIError"))] pub struct CreateEmbeddingRequest { - /// ID of the model to use. You can use the - /// [List models](https://platform.openai.com/docs/api-reference/models/list) - /// API to see all of your available models, or see our - /// [Model overview](https://platform.openai.com/docs/models/overview) + /// ID of the model to use. 
You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) + /// API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) /// for descriptions of them. pub model: String, - /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single + /// request, pass an array of strings or array of token arrays. The input must not exceed the max + /// input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and + /// any array must be 2048 dimensions or less. [Example Python + /// code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + /// In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 + /// tokens summed across all inputs in a single request. pub input: EmbeddingInput, - /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). Defaults to float + /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). #[serde(skip_serializing_if = "Option::is_none")] pub encoding_format: Option, - /// A unique identifier representing your end-user, which will help OpenAI - /// to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids). + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). #[serde(skip_serializing_if = "Option::is_none")] pub user: Option, diff --git a/async-openai/src/types/embeddings/mod.rs b/async-openai/src/types/embeddings/mod.rs new file mode 100644 index 00000000..a20a174c --- /dev/null +++ b/async-openai/src/types/embeddings/mod.rs @@ -0,0 +1,3 @@ +mod embedding; + +pub use embedding::*; diff --git a/async-openai/src/types/evals/eval.rs b/async-openai/src/types/evals/eval.rs new file mode 100644 index 00000000..425424ed --- /dev/null +++ b/async-openai/src/types/evals/eval.rs @@ -0,0 +1,920 @@ +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +use crate::error::OpenAIError; +use crate::types::graders::{ + GraderLabelModel, GraderPython, GraderScoreModel, GraderStringCheck, GraderTextSimilarity, +}; +use crate::types::responses::{ResponseTextParam, Tool}; +use crate::types::{ChatCompletionTool, ImageDetail, InputAudio, Metadata, ResponseFormat}; + +// Re-export commonly used types +pub use crate::types::responses::{EasyInputMessage, InputTextContent, ReasoningEffort}; + +/// An Eval object with a data source config and testing criteria. +/// An Eval represents a task to be done for your LLM integration. 
+/// Like: +/// - Improve the quality of my chatbot +/// - See how well my chatbot handles customer support +/// - Check if o4-mini is better at my usecase than gpt-4o +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct Eval { + /// The object type, which is always "eval". + pub object: String, + /// Unique identifier for the evaluation. + pub id: String, + /// The name of the evaluation. + pub name: String, + /// Configuration of data sources used in runs of the evaluation. + pub data_source_config: EvalDataSourceConfig, + /// A list of testing criteria. + pub testing_criteria: Vec, + /// The Unix timestamp (in seconds) for when the eval was created. + pub created_at: u64, + pub metadata: Metadata, +} + +/// Configuration of data sources used in runs of the evaluation. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalDataSourceConfig { + /// Custom data source config. + Custom(EvalCustomDataSourceConfig), + /// Logs data source config. + Logs(EvalLogsDataSourceConfig), + /// Stored completions data source config (deprecated). + #[serde(rename = "stored_completions")] + StoredCompletions(EvalStoredCompletionsDataSourceConfig), +} + +/// Custom data source config. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalCustomDataSourceConfig { + /// The type of data source. Always "custom". + #[serde(rename = "type")] + pub r#type: String, + /// The json schema for the run data source items. + pub schema: serde_json::Value, +} + +/// Logs data source config. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalLogsDataSourceConfig { + /// The type of data source. Always "logs". + #[serde(rename = "type")] + pub r#type: String, + /// Metadata filters for the logs data source. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// The json schema for the run data source items. + pub schema: serde_json::Value, +} + +/// Stored completions data source config (deprecated). +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalStoredCompletionsDataSourceConfig { + /// The type of data source. Always "stored_completions". + #[serde(rename = "type")] + pub r#type: String, + /// Metadata filters for the stored completions data source. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// The json schema for the run data source items. + pub schema: serde_json::Value, +} + +/// A list of testing criteria. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalTestingCriterion { + /// Label model grader. + LabelModel(EvalGraderLabelModel), + /// String check grader. + StringCheck(EvalGraderStringCheck), + /// Text similarity grader. + TextSimilarity(EvalGraderTextSimilarity), + /// Python grader. + Python(EvalGraderPython), + /// Score model grader. + ScoreModel(EvalGraderScoreModel), +} + +/// Label model grader. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(transparent)] +pub struct EvalGraderLabelModel(pub GraderLabelModel); + +/// String check grader. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(transparent)] +pub struct EvalGraderStringCheck(pub GraderStringCheck); + +/// Text similarity grader. 
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalGraderTextSimilarity { + #[serde(flatten)] + pub grader: GraderTextSimilarity, + pub pass_threshold: f64, +} + +/// Text similarity metric. +#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum TextSimilarityMetric { + /// Cosine similarity. + Cosine, + /// Fuzzy match. + FuzzyMatch, + /// BLEU score. + Bleu, + /// GLEU score. + Gleu, + /// METEOR score. + Meteor, + /// ROUGE-1. + Rouge1, + /// ROUGE-2. + Rouge2, + /// ROUGE-3. + Rouge3, + /// ROUGE-4. + Rouge4, + /// ROUGE-5. + Rouge5, + /// ROUGE-L. + RougeL, +} + +/// Python grader. +/// also in openapi spec: GraderPython +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalGraderPython { + #[serde(flatten)] + pub grader: GraderPython, + pub pass_threshold: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct SamplingParams { + /// A seed value to initialize the randomness, during sampling. + #[serde(skip_serializing_if = "Option::is_none")] + pub seed: Option, + /// An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// A higher temperature increases randomness in the outputs. + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// The maximum number of tokens the grader model may generate in its response. + #[serde(skip_serializing_if = "Option::is_none")] + pub max_completion_tokens: Option, + /// Optional reasoning effort parameter. + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning_effort: Option, +} + +/// Score model grader. +/// also in openapi spec: GraderScoreModel +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalGraderScoreModel { + #[serde(flatten)] + pub grader: GraderScoreModel, + /// The threshold for the score. + pub pass_threshold: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalItem { + /// The role of the message input. One of `user`, `assistant`, `system`, or + /// `developer`. + pub role: EvalItemRole, + /// Inputs to the model - can contain template strings. + pub content: EvalItemContent, +} + +/// The role of the message input. +#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum EvalItemRole { + /// User role. + User, + /// Assistant role. + Assistant, + /// System role. + System, + /// Developer role. + Developer, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct OutputText { + /// The text output from the model. + pub text: String, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct InputImage { + /// The URL of the image input. + pub image_url: String, + /// The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. + /// Defaults to `auto`. + #[serde(skip_serializing_if = "Option::is_none")] + pub detail: Option, +} + +/// Inputs to the model - can contain template strings. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalItemContent { + /// An input text content object. + InputText(InputTextContent), + /// An output text from the model. + OutputText(OutputText), + /// An image input to the model. + InputImage(InputImage), + /// An audio input to the model. 
+ InputAudio(InputAudio), + /// An array of Input text, Input image, and Input audio + Array(Vec), + #[serde(untagged)] + /// A text input to the model. + Text(String), +} + +/// List of evals. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalList { + /// The object type, which is always "list". + pub object: String, + /// An array of eval objects. + pub data: Vec, + /// The identifier of the first eval in the data array. + pub first_id: String, + /// The identifier of the last eval in the data array. + pub last_id: String, + /// Indicates whether there are more evals available. + pub has_more: bool, +} + +#[derive(Debug, Serialize, Clone, Builder, PartialEq, Default)] +#[builder(name = "CreateEvalRequestArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateEvalRequest { + /// The name of the evaluation. + pub name: Option, + ///The configuration for the data source used for the evaluation runs. + /// Dictates the schema of the data used in the evaluation. + pub data_source_config: CreateEvalDataSourceConfig, + /// A list of graders for all eval runs in this group. Graders can reference variables in the data + /// source using double curly braces notation, like `{{item.variable_name}}`. To reference the model's + /// output, use the `sample` namespace (ie, `{{sample.output_text}}`). + pub testing_criteria: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +#[derive(Debug, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CreateEvalDataSourceConfig { + /// A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation + /// runs. This schema is used to define the shape of the data that will be: + /// - Used to define your testing criteria and + /// - What data is required when creating a run + Custom(CreateEvalCustomDataSourceConfig), + /// A data source config which specifies the metadata property of your logs query. + /// This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. + Logs(CreateEvalLogsDataSourceConfig), +} + +impl Default for CreateEvalDataSourceConfig { + fn default() -> Self { + Self::Custom(CreateEvalCustomDataSourceConfig::default()) + } +} + +#[derive(Debug, Serialize, Clone, PartialEq, Builder, Default)] +#[builder(name = "CreateEvalCustomDataSourceConfigArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateEvalCustomDataSourceConfig { + /// The json schema for each row in the data source. + pub item_schema: serde_json::Value, + /// Whether the eval should expect you to populate the sample namespace (ie, by generating responses + /// off of your data source). + #[serde(skip_serializing_if = "Option::is_none")] + pub include_sample_schema: Option, +} + +/// Logs data source config for creating an eval. +#[derive(Debug, Serialize, Clone, PartialEq, Builder, Default)] +#[builder(name = "CreateEvalLogsDataSourceConfigArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateEvalLogsDataSourceConfig { + /// Metadata filters for the logs data source. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +#[derive(Debug, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CreateEvalTestingCriterion { + /// A LabelModelGrader object which uses a model to assign labels to each item + /// in the evaluation. + LabelModel(CreateEvalLabelModelGrader), + /// A StringCheckGrader object that performs a string comparison between input and reference using a + /// specified operation. + StringCheck(EvalGraderStringCheck), + /// Text similarity grader. + TextSimilarity(EvalGraderTextSimilarity), + /// Python grader. + Python(EvalGraderPython), + /// Score model grader. + ScoreModel(EvalGraderScoreModel), +} + +/// Label model grader for creating an eval. +#[derive(Debug, Serialize, Clone, PartialEq, Builder, Default)] +#[builder(name = "CreateEvalLabelModelGraderArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateEvalLabelModelGrader { + /// The name of the grader. + pub name: String, + /// The model to use for the evaluation. Must support structured outputs. + pub model: String, + /// A list of chat messages forming the prompt or context. May include variable references to the + /// `item` namespace, ie `{{item.name}}`. + pub input: Vec, + /// The labels to classify to each item in the evaluation. + pub labels: Vec, + /// The labels that indicate a passing result. Must be a subset of labels. + pub passing_labels: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct SimpleInputMessage { + /// The role of the message. + pub role: String, + /// The content of the message. + pub content: String, +} + +/// A chat message that makes up the prompt or context. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CreateEvalItem { + /// A message input to the model with a role indicating instruction following + /// hierarchy. Instructions given with the `developer` or `system` role take + /// precedence over instructions given with the `user` role. Messages with the + /// `assistant` role are presumed to have been generated by the model in previous + /// interactions. + Message(EvalItem), + + /// SimpleInputMessage + #[serde(untagged)] + Simple(SimpleInputMessage), +} + +/// Request to update an eval. +#[derive(Debug, Serialize, Clone, Builder, PartialEq, Default)] +#[builder(name = "UpdateEvalRequestArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct UpdateEvalRequest { + /// Rename the evaluation. + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Metadata attached to the eval. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Response from deleting an eval. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct DeleteEvalResponse { + /// The object type, which is always "eval.deleted". + pub object: String, + /// Whether the eval was deleted. + pub deleted: bool, + /// The ID of the deleted eval. + pub eval_id: String, +} + +// EvalRun types + +/// A schema representing an evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRun { + /// The object type, which is always "eval.run". 
+ pub object: String, + /// Unique identifier for the evaluation run. + pub id: String, + /// The identifier of the associated evaluation. + pub eval_id: String, + /// The status of the evaluation run. + pub status: EvalRunStatus, + /// The model that is evaluated, if applicable. + pub model: String, + /// The name of the evaluation run. + pub name: String, + /// Unix timestamp (in seconds) when the evaluation run was created. + pub created_at: u64, + /// The URL to the rendered evaluation run report on the UI dashboard. + pub report_url: String, + /// Counters summarizing the outcomes of the evaluation run. + pub result_counts: EvalRunResultCounts, + /// Usage statistics for each model during the evaluation run. + pub per_model_usage: Option>, + /// Results per testing criteria applied during the evaluation run. + pub per_testing_criteria_results: Option>, + /// Information about the run's data source. + pub data_source: EvalRunDataSource, + /// Metadata attached to the run. + pub metadata: Metadata, + /// Error information, if any. + pub error: Option, +} + +/// Status of an evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum EvalRunStatus { + /// Queued. + Queued, + /// In progress. + InProgress, + /// Completed. + Completed, + /// Failed. + Failed, + /// Canceled. + Canceled, +} + +/// Counters summarizing the outcomes of the evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunResultCounts { + /// Total number of executed output items. + pub total: u32, + /// Number of output items that resulted in an error. + pub errored: u32, + /// Number of output items that failed to pass the evaluation. + pub failed: u32, + /// Number of output items that passed the evaluation. + pub passed: u32, +} + +/// Usage statistics for each model during the evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunModelUsage { + /// The name of the model. + pub model_name: String, + /// The number of invocations. + pub invocation_count: u32, + /// The number of prompt tokens used. + pub prompt_tokens: u32, + /// The number of completion tokens generated. + pub completion_tokens: u32, + /// The total number of tokens used. + pub total_tokens: u32, + /// The number of tokens retrieved from cache. + pub cached_tokens: u32, +} + +/// Results per testing criteria applied during the evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunTestingCriteriaResult { + /// A description of the testing criteria. + pub testing_criteria: String, + /// Number of tests passed for this criteria. + pub passed: u32, + /// Number of tests failed for this criteria. + pub failed: u32, +} + +/// Information about the run's data source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalRunDataSource { + /// A JsonlRunDataSource object with that specifies a JSONL file that matches the eval + Jsonl(CreateEvalJsonlRunDataSource), + /// A CompletionsRunDataSource object describing a model sampling configuration. + Completions(CreateEvalCompletionsRunDataSource), + /// A ResponsesRunDataSource object describing a model sampling configuration. + Responses(CreateEvalResponsesRunDataSource), +} + +/// JSONL run data source. 
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct CreateEvalJsonlRunDataSource { + /// Determines what populates the `item` namespace in the data source. + pub source: EvalJsonlSource, +} + +/// JSONL source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalJsonlSource { + /// File content source. + FileContent(EvalJsonlFileContentSource), + /// File ID source. + FileId(EvalJsonlFileIdSource), +} + +/// JSONL file content source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalJsonlFileContentSource { + /// The content of the jsonl file. + pub content: Vec, +} + +/// JSONL file ID source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalJsonlFileIdSource { + /// The identifier of the file. + pub id: String, +} + +/// JSONL content item. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalJsonlContentItem { + /// The item data. + pub item: serde_json::Value, + /// The sample data, if any. + #[serde(skip_serializing_if = "Option::is_none")] + pub sample: Option, +} + +/// Completions run data source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct CreateEvalCompletionsRunDataSource { + /// Used when sampling from a model. Dictates the structure of the messages passed into the model. Can + /// either be a reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template with + /// variable references to the `item` namespace. + pub input_messages: EvalInputMessages, + /// The sampling parameters for the model. + #[serde(skip_serializing_if = "Option::is_none")] + pub sampling_params: Option, + /// The name of the model to use for generating completions (e.g. "o3-mini"). + pub model: String, + /// Determines what populates the `item` namespace in this run's data source. + pub source: EvalCompletionsSource, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct TemplateInputMessages { + /// A list of chat messages forming the prompt or context. May include variable references to + /// the `item` namespace, ie {{item.name}}. + pub template: Vec, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct ItemReference { + /// A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" + pub item_reference: String, +} + +/// Input messages for completions. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalInputMessages { + /// Template input messages. + Template(TemplateInputMessages), + /// Item reference input messages. + ItemReference(ItemReference), +} + +/// Sampling parameters for the model. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct EvalSamplingParams { + /// A seed value to initialize the randomness, during sampling. + #[serde(skip_serializing_if = "Option::is_none")] + pub seed: Option, + /// An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// A higher temperature increases randomness in the outputs. + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// The maximum number of tokens in the generated output. + #[serde(skip_serializing_if = "Option::is_none")] + pub max_completion_tokens: Option, + /// Optional reasoning effort parameter. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning_effort: Option, + /// An object specifying the format that the model must output. + #[serde(skip_serializing_if = "Option::is_none")] + pub response_format: Option, + /// A list of tools the model may call. + #[serde(skip_serializing_if = "Option::is_none")] + pub tools: Option>, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct EvalResponsesSamplingParams { + /// A seed value to initialize the randomness, during sampling. + #[serde(skip_serializing_if = "Option::is_none")] + pub seed: Option, + /// An alternative to temperature for nucleus sampling; 1.0 includes all tokens. + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// A higher temperature increases randomness in the outputs. + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// The maximum number of tokens in the generated output. + #[serde(skip_serializing_if = "Option::is_none")] + pub max_completion_tokens: Option, + /// Optional reasoning effort parameter. + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning_effort: Option, + /// An object specifying the format that the model must output. + #[serde(skip_serializing_if = "Option::is_none")] + pub response_format: Option, + /// A list of tools the model may call. + #[serde(skip_serializing_if = "Option::is_none")] + pub tools: Option>, + /// Configuration options for a text response from the model. Can be plain + /// text or structured JSON data. Learn more: + /// - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + /// - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + #[serde(skip_serializing_if = "Option::is_none")] + pub text: Option, +} + +/// Completions source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalCompletionsSource { + /// File content source. + FileContent(EvalJsonlFileContentSource), + /// File ID source. + FileId(EvalJsonlFileIdSource), + /// Stored completions source. + StoredCompletions(EvalStoredCompletionsSource), +} + +/// Stored completions source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalStoredCompletionsSource { + /// Metadata filters for the stored completions. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// An optional model to filter by. + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option, + /// An optional Unix timestamp to filter items created after this time. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_after: Option, + /// An optional Unix timestamp to filter items created before this time. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_before: Option, + /// An optional maximum number of items to return. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, +} + +/// Responses run data source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct CreateEvalResponsesRunDataSource { + /// Used when sampling from a model. Dictates the structure of the messages passed into the model. + #[serde(skip_serializing_if = "Option::is_none")] + pub input_messages: Option, + /// The sampling parameters for the model. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub sampling_params: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option, + /// Determines what populates the `item` namespace in this run's data source. + pub source: EvalResponsesRunSource, +} + +/// Responses source. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum EvalResponsesRunSource { + /// File content source. + FileContent(EvalJsonlFileContentSource), + /// File ID source. + FileId(EvalJsonlFileIdSource), + /// A EvalResponsesSource object describing a run data source configuration. + Responses(EvalResponsesSource), +} + +/// A EvalResponsesSource object describing a run data source configuration. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalResponsesSource { + /// Metadata filter for the responses. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, + /// The name of the model to find responses for. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub model: Option, + /// Optional string to search the 'instructions' field. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub instructions_search: Option, + /// Only include items created after this timestamp (inclusive). This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_after: Option, + /// Only include items created before this timestamp (inclusive). This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub created_before: Option, + /// Optional reasoning effort parameter. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub reasoning_effort: Option, + /// Sampling temperature. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub temperature: Option, + /// Nucleus sampling parameter. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub top_p: Option, + /// List of user identifiers. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub users: Option>, + /// List of tool names. This is a query parameter used to select responses. + #[serde(skip_serializing_if = "Option::is_none")] + pub tools: Option>, +} + +/// List of eval runs. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunList { + /// The object type, which is always "list". + pub object: String, + /// An array of eval run objects. + pub data: Vec, + /// The identifier of the first eval run in the data array. + pub first_id: String, + /// The identifier of the last eval run in the data array. + pub last_id: String, + /// Indicates whether there are more evals available. + pub has_more: bool, +} + +/// Request to create an eval run. +#[derive(Debug, Serialize, Clone, Builder, PartialEq, Default)] +#[builder(name = "CreateEvalRunRequestArgs")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateEvalRunRequest { + /// The name of the run. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + /// Details about the run's data source. + pub data_source: CreateEvalRunDataSource, + /// Metadata attached to the run. + #[serde(skip_serializing_if = "Option::is_none")] + pub metadata: Option, +} + +/// Details about the run's data source. +#[derive(Debug, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CreateEvalRunDataSource { + /// JSONL data source. + Jsonl(CreateEvalJsonlRunDataSource), + /// Completions data source. + Completions(CreateEvalCompletionsRunDataSource), + /// Responses data source. + Responses(CreateEvalResponsesRunDataSource), +} + +// Manual Default implementation for Builder compatibility +impl Default for CreateEvalRunDataSource { + fn default() -> Self { + todo!() + } +} + +/// Response from deleting an eval run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct DeleteEvalRunResponse { + /// The object type, which is always "eval.run.deleted". + pub object: String, + /// Whether the eval run was deleted. + pub deleted: bool, + /// The ID of the deleted eval run. + pub run_id: String, +} + +// EvalRunOutputItem types + +/// A schema representing an evaluation run output item. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunOutputItem { + /// The object type, which is always "eval.run.output_item". + pub object: String, + /// Unique identifier for the evaluation run output item. + pub id: String, + /// The identifier of the evaluation run associated with this output item. + pub run_id: String, + /// The identifier of the evaluation group. + pub eval_id: String, + /// Unix timestamp (in seconds) when the evaluation run was created. + pub created_at: i64, + /// The status of the evaluation run. + pub status: String, + /// The identifier for the data source item. + pub datasource_item_id: u64, + /// Details of the input data source item. + pub datasource_item: serde_json::Value, + /// A list of grader results for this output item. + pub results: Vec, + /// A sample containing the input and output of the evaluation run. + pub sample: EvalRunOutputItemSample, +} + +/// A single grader result for an evaluation run output item. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunOutputItemResult { + /// The name of the grader. + pub name: String, + /// The numeric score produced by the grader. + pub score: f64, + /// Whether the grader considered the output a pass. + pub passed: bool, + /// Optional sample or intermediate data produced by the grader. + #[serde(skip_serializing_if = "Option::is_none")] + pub sample: Option, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct SimpleOutputMessage { + pub role: String, + pub content: String, +} + +/// A sample containing the input and output of the evaluation run. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct EvalRunOutputItemSample { + /// An array of input messages. + pub input: Vec, + /// An array of output messages. + pub output: Vec, + /// The reason why the sample generation was finished. + pub finish_reason: String, + /// The model used for generating the sample. + pub model: String, + /// Token usage details for the sample. + pub usage: EvalRunOutputItemUsage, + /// Error information, if any. + pub error: Option, + /// The sampling temperature used. + pub temperature: f64, + /// The maximum number of tokens allowed for completion. 
+    pub max_completion_tokens: i32,
+    /// The top_p value used for sampling.
+    pub top_p: f64,
+    /// The seed used for generating the sample.
+    pub seed: i32,
+}
+
+/// Token usage details for the sample.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct EvalRunOutputItemUsage {
+    /// The total number of tokens used.
+    pub total_tokens: i32,
+    /// The number of completion tokens generated.
+    pub completion_tokens: i32,
+    /// The number of prompt tokens used.
+    pub prompt_tokens: i32,
+    /// The number of tokens retrieved from cache.
+    pub cached_tokens: i32,
+}
+
+/// List of eval run output items.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct EvalRunOutputItemList {
+    /// The object type, which is always "list".
+    pub object: String,
+    /// An array of eval run output item objects.
+    pub data: Vec<EvalRunOutputItem>,
+    /// The identifier of the first eval run output item in the data array.
+    pub first_id: String,
+    /// The identifier of the last eval run output item in the data array.
+    pub last_id: String,
+    /// Indicates whether there are more eval run output items available.
+    pub has_more: bool,
+}
+
+/// An object representing an error response from the Eval API.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct EvalApiError {
+    /// The error code.
+    pub code: String,
+    /// The error message.
+    pub message: String,
+}
diff --git a/async-openai/src/types/evals/mod.rs b/async-openai/src/types/evals/mod.rs
new file mode 100644
index 00000000..635e168e
--- /dev/null
+++ b/async-openai/src/types/evals/mod.rs
@@ -0,0 +1,3 @@
+mod eval;
+
+pub use eval::*;
diff --git a/async-openai/src/types/file.rs b/async-openai/src/types/files/file.rs
similarity index 83%
rename from async-openai/src/types/file.rs
rename to async-openai/src/types/files/file.rs
index 89e3d25f..32ea0306 100644
--- a/async-openai/src/types/file.rs
+++ b/async-openai/src/types/files/file.rs
@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
 
 use crate::error::OpenAIError;
 
-use super::InputSource;
+use crate::types::InputSource;
 
 #[derive(Debug, Default, Clone, PartialEq)]
 pub struct FileInput {
@@ -17,19 +17,21 @@ pub enum FilePurpose {
     #[default]
     FineTune,
     Vision,
+    UserData,
+    Evals,
 }
 
 #[derive(Debug, Default, Clone, PartialEq, Deserialize, Serialize)]
-pub enum FileExpiresAfterAnchor {
+pub enum FileExpirationAfterAnchor {
     #[default]
     #[serde(rename = "created_at")]
     CreatedAt,
 }
 
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct FileExpiresAfter {
+#[derive(Debug, Default, Deserialize, Serialize, Clone, PartialEq)]
+pub struct FileExpirationAfter {
     /// Anchor timestamp after which the expiration policy applies. Supported anchors: `created_at`.
-    pub anchor: FileExpiresAfterAnchor,
+    pub anchor: FileExpirationAfterAnchor,
 
     /// The number of seconds after the anchor time that the file will expire. Must be between 3600 (1 hour) and 2592000 (30 days).
     pub seconds: u32,
@@ -47,17 +49,20 @@ pub struct CreateFileRequest {
 
     /// The intended purpose of the uploaded file.
     ///
-    /// Use "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Message](https://platform.openai.com/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning).
+ /// Use "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Message](https://platform.openai.com/docs/api-reference/messages) files, "vision" for Assistants image file inputs, "batch" for [Batch API](https://platform.openai.com/docs/guides/batch), "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning), "user_data" for flexible file type for any purpose, and "evals" for eval data sets. pub purpose: FilePurpose, /// The expiration policy for a file. By default, files with `purpose=batch` expire after 30 days and all other files are persisted until they are manually deleted. - pub expires_after: Option, + pub expires_after: Option, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] pub struct ListFilesResponse { pub object: String, pub data: Vec, + pub first_id: String, + pub last_id: String, + pub has_more: bool, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize)] @@ -83,6 +88,8 @@ pub enum OpenAIFilePurpose { FineTuneResults, #[serde(rename = "vision")] Vision, + #[serde(rename = "user_data")] + UserData, } /// The `File` object represents a document that has been uploaded to OpenAI. @@ -100,7 +107,7 @@ pub struct OpenAIFile { pub expires_at: Option, /// The name of the file. pub filename: String, - /// The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `vision`. + /// The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`, `vision`, and `user_data`. pub purpose: OpenAIFilePurpose, /// Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. #[deprecated] diff --git a/async-openai/src/types/files/mod.rs b/async-openai/src/types/files/mod.rs new file mode 100644 index 00000000..529c201f --- /dev/null +++ b/async-openai/src/types/files/mod.rs @@ -0,0 +1,3 @@ +mod file; + +pub use file::*; diff --git a/async-openai/src/types/fine_tuning.rs b/async-openai/src/types/finetuning/fine_tuning.rs similarity index 72% rename from async-openai/src/types/fine_tuning.rs rename to async-openai/src/types/finetuning/fine_tuning.rs index bc0932e5..32f1dd5d 100644 --- a/async-openai/src/types/fine_tuning.rs +++ b/async-openai/src/types/finetuning/fine_tuning.rs @@ -1,37 +1,45 @@ use derive_builder::Builder; use serde::{Deserialize, Serialize}; -use crate::error::OpenAIError; +use crate::{ + error::OpenAIError, + types::{ + graders::{ + GraderMulti, GraderPython, GraderScoreModel, GraderStringCheck, GraderTextSimilarity, + }, + Metadata, + }, +}; #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] -#[serde(untagged)] pub enum NEpochs { - NEpochs(u8), #[default] #[serde(rename = "auto")] Auto, + #[serde(untagged)] + NEpochs(u8), } #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] -#[serde(untagged)] pub enum BatchSize { - BatchSize(u16), #[default] #[serde(rename = "auto")] Auto, + #[serde(untagged)] + BatchSize(u16), } #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] -#[serde(untagged)] pub enum LearningRateMultiplier { - LearningRateMultiplier(f32), #[default] #[serde(rename = "auto")] Auto, + #[serde(untagged)] + LearningRateMultiplier(f32), } #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] -pub struct Hyperparameters { +pub struct FineTuneSupervisedHyperparameters { /// Number of examples in each batch. 
    /// are updated less frequently, but with lower variance.
     pub batch_size: BatchSize,
@@ -43,16 +51,16 @@ }
 
 #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)]
-#[serde(untagged)]
 pub enum Beta {
-    Beta(f32),
     #[default]
     #[serde(rename = "auto")]
     Auto,
+    #[serde(untagged)]
+    Beta(f32),
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)]
-pub struct DPOHyperparameters {
+pub struct FineTuneDPOHyperparameters {
     /// The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model.
     pub beta: Beta,
     /// Number of examples in each batch. A larger batch size means that model parameters
@@ -82,16 +90,15 @@ pub struct CreateFineTuningJobRequest {
     ///
     /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
     ///
-    /// The contents of the file should differ depending on if the model uses the [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format.
+    /// The contents of the file should differ depending on if the model uses the [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
+    /// [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+    /// format, or if the fine-tuning method uses the
+    /// [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format.
     ///
-    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more
+    /// details.
     pub training_file: String,
 
-    /// The hyperparameters used for the fine-tuning job.
-    /// This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter.
-    #[deprecated]
-    pub hyperparameters: Option<Hyperparameters>,
-
     /// A string of up to 64 characters that will be added to your fine-tuned model name.
     ///
     /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
@@ -107,7 +114,8 @@
     ///
     /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.
     ///
-    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more
+    /// details.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub validation_file: Option<String>,
 
@@ -122,6 +130,9 @@
     #[serde(skip_serializing_if = "Option::is_none")]
     pub method: Option<FineTuneMethod>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub metadata: Option<Metadata>,
 }
 
 /// The method used for fine-tuning.
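With the deprecated top-level `hyperparameters` field removed, hyperparameters are now supplied through the `method` payload. Below is a minimal sketch of what a caller might look like after this patch; it assumes `CreateFineTuningJobRequestArgs` and `client.fine_tuning().create(...)` keep their current shape, that the `Supervised` variant mirrors the `DPO { dpo: ... }` layout in the next hunk, and that the file ID is hypothetical:

```rust
use async_openai::{
    types::finetuning::{
        BatchSize, CreateFineTuningJobRequestArgs, FineTuneMethod,
        FineTuneSupervisedHyperparameters, FineTuneSupervisedMethod, LearningRateMultiplier,
        NEpochs,
    },
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();

    // Hyperparameters now travel inside `method` instead of the removed
    // top-level `hyperparameters` field.
    let request = CreateFineTuningJobRequestArgs::default()
        .model("gpt-4o-mini-2024-07-18")
        .training_file("file-abc123") // hypothetical file ID
        .method(FineTuneMethod::Supervised {
            // Assumed variant shape, by symmetry with `DPO { dpo: ... }`.
            supervised: FineTuneSupervisedMethod {
                hyperparameters: FineTuneSupervisedHyperparameters {
                    batch_size: BatchSize::Auto,
                    learning_rate_multiplier: LearningRateMultiplier::Auto,
                    n_epochs: NEpochs::NEpochs(3),
                },
            },
        })
        .build()?;

    let job = client.fine_tuning().create(request).await?;
    println!("created fine-tuning job: {}", job.id);
    Ok(())
}
```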
@@ -134,16 +145,92 @@ pub enum FineTuneMethod { DPO { dpo: FineTuneDPOMethod, }, + Reinforcement { + reinforcement: FineTuneReinforcementMethod, + }, } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct FineTuneSupervisedMethod { - pub hyperparameters: Hyperparameters, + pub hyperparameters: FineTuneSupervisedHyperparameters, } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct FineTuneDPOMethod { - pub hyperparameters: DPOHyperparameters, + pub hyperparameters: FineTuneDPOHyperparameters, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum FineTuneReinforcementMethodGrader { + StringCheck(GraderStringCheck), + TextSimilarity(GraderTextSimilarity), + Python(GraderPython), + ScoreModel(GraderScoreModel), + Multi(GraderMulti), +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum FineTuneReasoningEffort { + #[default] + Default, + Low, + Medium, + High, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub enum ComputeMultiplier { + #[default] + #[serde(rename = "auto")] + Auto, + #[serde(untagged)] + ComputeMultiplier(f32), +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub enum EvalInterval { + #[default] + #[serde(rename = "auto")] + Auto, + #[serde(untagged)] + EvalInterval(u32), +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub enum EvalSamples { + #[default] + #[serde(rename = "auto")] + Auto, + #[serde(untagged)] + EvalSamples(u32), +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct FineTuneReinforcementHyperparameters { + /// Number of examples in each batch. A larger batch size means that model parameters + /// are updated less frequently, but with lower variance. + pub batch_size: BatchSize, + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + /// overfitting. + pub learning_rate_multiplier: LearningRateMultiplier, + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + pub n_epochs: NEpochs, + /// Level of reasoning effort. + pub reasoning_effort: FineTuneReasoningEffort, + /// Multiplier on amount of compute used for exploring search space during training. + pub compute_multiplier: ComputeMultiplier, + /// The number of training steps between evaluation runs. + pub eval_interval: EvalInterval, + /// Number of evaluation samples to generate per training step. + pub eval_samples: EvalSamples, +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct FineTuneReinforcementMethod { + pub grader: FineTuneReinforcementMethodGrader, + pub hyperparameters: FineTuneReinforcementHyperparameters, } #[derive(Debug, Deserialize, Clone, PartialEq, Serialize, Default)] @@ -204,6 +291,13 @@ pub enum FineTuningJobStatus { Cancelled, } +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] +pub struct Hyperparameters { + pub batch_size: BatchSize, + pub learning_rate_multiplier: LearningRateMultiplier, + pub n_epochs: NEpochs, +} + /// The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct FineTuningJob { @@ -220,8 +314,8 @@ pub struct FineTuningJob { /// The value will be null if the fine-tuning job is still running. 
    pub finished_at: Option<u64>, // nullable true
 
-    /// The hyperparameters used for the fine-tuning job.
-    /// See the [fine-tuning guide](/docs/guides/fine-tuning) for more details.
+    /// The hyperparameters used for the fine-tuning job. This value will only be returned when running
+    /// `supervised` jobs.
     pub hyperparameters: Hyperparameters,
 
     /// The base model that is being fine-tuned.
@@ -259,6 +353,8 @@ pub struct FineTuningJob {
     pub estimated_finish: Option<u64>,
 
     pub method: Option<FineTuneMethod>,
+
+    pub metadata: Option<Metadata>,
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
@@ -346,3 +442,38 @@ pub struct FineTuningJobCheckpointMetrics {
     pub full_valid_loss: f32,
     pub full_valid_mean_token_accuracy: f32,
 }
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct CreateFineTuningCheckpointPermissionRequest {
+    /// The project identifiers to grant access to.
+    pub project_ids: Vec<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct ListFineTuningCheckpointPermissionResponse {
+    pub data: Vec<FineTuningCheckpointPermission>,
+    pub object: String,
+    pub first_id: Option<String>,
+    pub last_id: Option<String>,
+    pub has_more: bool,
+}
+
+/// The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint.
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct FineTuningCheckpointPermission {
+    /// The permission identifier, which can be referenced in the API endpoints.
+    pub id: String,
+    /// The Unix timestamp (in seconds) for when the permission was created.
+    pub created_at: u64,
+    /// The project identifier that the permission is for.
+    pub project_id: String,
+    /// The object type, which is always "checkpoint.permission".
+    pub object: String,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct DeleteFineTuningCheckpointPermissionResponse {
+    pub object: String,
+    pub id: String,
+    pub deleted: bool,
+}
diff --git a/async-openai/src/types/finetuning/mod.rs b/async-openai/src/types/finetuning/mod.rs
new file mode 100644
index 00000000..9e4375ee
--- /dev/null
+++ b/async-openai/src/types/finetuning/mod.rs
@@ -0,0 +1,3 @@
+mod fine_tuning;
+
+pub use fine_tuning::*;
diff --git a/async-openai/src/types/graders/grader.rs b/async-openai/src/types/graders/grader.rs
new file mode 100644
index 00000000..3671ad49
--- /dev/null
+++ b/async-openai/src/types/graders/grader.rs
@@ -0,0 +1,141 @@
+use serde::{Deserialize, Serialize};
+
+use crate::types::{evals::EvalItem, ReasoningEffort};
+
+/// String check operation.
+#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub enum GraderStringCheckOperation {
+    /// Equal.
+    Eq,
+    /// Not equal.
+    Ne,
+    /// Like.
+    Like,
+    /// Case-insensitive like.
+    Ilike,
+}
+
+/// String check grader.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderStringCheck {
+    /// The name of the grader.
+    pub name: String,
+    /// The input text. This may include template strings.
+    pub input: String,
+    /// The reference text. This may include template strings.
+    pub reference: String,
+    /// The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.
+    pub operation: GraderStringCheckOperation,
+}
+
+/// Text similarity grader.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderTextSimilarity {
+    /// The name of the grader.
+    pub name: String,
+    /// The text being graded.
+    pub input: String,
+    /// The text being graded against.
+    pub reference: String,
+    /// The evaluation metric to use.
+    pub evaluation_metric: GraderTextSimilarityEvaluationMetric,
+}
+
+/// Text similarity metric.
+#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub enum GraderTextSimilarityEvaluationMetric {
+    Cosine,
+    FuzzyMatch,
+    Bleu,
+    Gleu,
+    Meteor,
+    Rouge1,
+    Rouge2,
+    Rouge3,
+    Rouge4,
+    Rouge5,
+    RougeL,
+}
+
+/// Python grader.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderPython {
+    /// The name of the grader.
+    pub name: String,
+    /// The source code of the python script.
+    pub source: String,
+    /// The image tag to use for the python script.
+    pub image_tag: Option<String>,
+}
+
+/// Score model grader.
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderScoreModel {
+    /// The name of the grader.
+    pub name: String,
+    /// The model to use for the evaluation.
+    pub model: String,
+    /// A list of chat messages forming the prompt or context.
+    pub input: Vec<EvalItem>,
+
+    /// Optional sampling parameters.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sampling_params: Option<GraderScoreModelSamplingParams>,
+    /// The range of the score. Defaults to [0, 1].
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub range: Option<Vec<f64>>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderScoreModelSamplingParams {
+    /// A seed value to initialize the randomness during sampling.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub seed: Option<i64>,
+    /// An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub top_p: Option<f64>,
+    /// A higher temperature increases randomness in the outputs.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub temperature: Option<f64>,
+    /// The maximum number of tokens the grader model may generate in its response.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_completion_tokens: Option<u32>,
+    /// Optional reasoning effort parameter.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub reasoning_effort: Option<ReasoningEffort>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderLabelModel {
+    /// The name of the grader.
+    pub name: String,
+    /// The model to use for the evaluation. Must support structured outputs.
+    pub model: String,
+    /// A list of chat messages forming the prompt or context.
+    pub input: Vec<EvalItem>,
+    /// The labels to classify to each item in the evaluation.
+    pub labels: Vec<String>,
+    /// The labels that indicate a passing result. Must be a subset of labels.
+    pub passing_labels: Vec<String>,
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum Graders {
+    StringCheck(GraderStringCheck),
+    TextSimilarity(GraderTextSimilarity),
+    Python(GraderPython),
+    ScoreModel(GraderScoreModel),
+    LabelModel(GraderLabelModel),
+}
+
+#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
+pub struct GraderMulti {
+    /// The name of the grader.
+    pub name: String,
+    pub graders: Graders,
+    /// A formula to calculate the output based on grader results.
+ pub calculate_output: String, +} diff --git a/async-openai/src/types/graders/mod.rs b/async-openai/src/types/graders/mod.rs new file mode 100644 index 00000000..f8d5d1e4 --- /dev/null +++ b/async-openai/src/types/graders/mod.rs @@ -0,0 +1,3 @@ +mod grader; + +pub use grader::*; diff --git a/async-openai/src/types/impls.rs b/async-openai/src/types/impls.rs index cc353974..52990d48 100644 --- a/async-openai/src/types/impls.rs +++ b/async-openai/src/types/impls.rs @@ -22,24 +22,26 @@ use super::{ AudioInput, AudioResponseFormat, CreateSpeechResponse, CreateTranscriptionRequest, CreateTranslationRequest, TimestampGranularity, TranscriptionInclude, }, + embeddings::EmbeddingInput, + files::{CreateFileRequest, FileExpirationAfterAnchor, FileInput, FilePurpose}, images::{ CreateImageEditRequest, CreateImageVariationRequest, DallE2ImageSize, Image, ImageInput, ImageModel, ImageResponseFormat, ImageSize, ImagesResponse, }, + moderations::ModerationInput, responses::{EasyInputContent, Role as ResponsesRole}, - AddUploadPartRequest, ChatCompletionFunctionCall, ChatCompletionFunctions, - ChatCompletionNamedToolChoice, ChatCompletionRequestAssistantMessage, - ChatCompletionRequestAssistantMessageContent, ChatCompletionRequestDeveloperMessage, - ChatCompletionRequestDeveloperMessageContent, ChatCompletionRequestFunctionMessage, - ChatCompletionRequestMessage, ChatCompletionRequestMessageContentPartAudio, - ChatCompletionRequestMessageContentPartImage, ChatCompletionRequestMessageContentPartText, - ChatCompletionRequestSystemMessage, ChatCompletionRequestSystemMessageContent, - ChatCompletionRequestToolMessage, ChatCompletionRequestToolMessageContent, - ChatCompletionRequestUserMessage, ChatCompletionRequestUserMessageContent, - ChatCompletionRequestUserMessageContentPart, ChatCompletionToolChoiceOption, - CreateContainerFileRequest, CreateFileRequest, CreateMessageRequestContent, CreateVideoRequest, - EmbeddingInput, FileExpiresAfterAnchor, FileInput, FilePurpose, FunctionName, ImageUrl, - ModerationInput, Prompt, Role, Stop, + uploads::AddUploadPartRequest, + ChatCompletionFunctionCall, ChatCompletionFunctions, ChatCompletionNamedToolChoice, + ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent, + ChatCompletionRequestDeveloperMessage, ChatCompletionRequestDeveloperMessageContent, + ChatCompletionRequestFunctionMessage, ChatCompletionRequestMessage, + ChatCompletionRequestMessageContentPartAudio, ChatCompletionRequestMessageContentPartImage, + ChatCompletionRequestMessageContentPartText, ChatCompletionRequestSystemMessage, + ChatCompletionRequestSystemMessageContent, ChatCompletionRequestToolMessage, + ChatCompletionRequestToolMessageContent, ChatCompletionRequestUserMessage, + ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart, + ChatCompletionToolChoiceOption, CreateContainerFileRequest, CreateMessageRequestContent, + CreateVideoRequest, FunctionName, ImageUrl, Prompt, Role, Stop, }; /// for `impl_from!(T, Enum)`, implements @@ -483,12 +485,14 @@ impl Display for FilePurpose { Self::Batch => "batch", Self::FineTune => "fine-tune", Self::Vision => "vision", + Self::UserData => "user_data", + Self::Evals => "evals", } ) } } -impl Display for FileExpiresAfterAnchor { +impl Display for FileExpirationAfterAnchor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, diff --git a/async-openai/src/types/mod.rs b/async-openai/src/types/mod.rs index cd86898d..25fd7f5c 100644 --- a/async-openai/src/types/mod.rs +++ 
b/async-openai/src/types/mod.rs
@@ -5,21 +5,23 @@ mod assistant_impls;
 mod assistant_stream;
 pub mod audio;
 mod audit_log;
-mod batch;
+pub mod batches;
 mod chat;
 mod common;
 mod completion;
 mod containers;
-mod embedding;
-mod file;
-mod fine_tuning;
+pub mod embeddings;
+pub mod evals;
+pub mod files;
+pub mod finetuning;
+pub mod graders;
 pub mod images;
 mod invites;
 mod logprob;
 mod mcp;
 mod message;
-mod model;
-mod moderation;
+pub mod models;
+pub mod moderations;
 mod project_api_key;
 mod project_service_account;
 mod project_users;
@@ -31,7 +33,7 @@ pub mod responses;
 mod run;
 mod step;
 mod thread;
-mod upload;
+pub mod uploads;
 mod users;
 mod vector_store;
 mod video;
@@ -42,20 +44,14 @@ pub mod webhooks;
 pub use assistant::*;
 pub use assistant_stream::*;
 pub use audit_log::*;
-pub use batch::*;
 pub use chat::*;
 pub use common::*;
 pub use completion::*;
 pub use containers::*;
-pub use embedding::*;
-pub use file::*;
-pub use fine_tuning::*;
 pub use invites::*;
 pub use logprob::*;
 pub use mcp::*;
 pub use message::*;
-pub use model::*;
-pub use moderation::*;
 pub use project_api_key::*;
 pub use project_service_account::*;
 pub use project_users::*;
@@ -63,7 +59,6 @@ pub use projects::*;
 pub use run::*;
 pub use step::*;
 pub use thread::*;
-pub use upload::*;
 pub use users::*;
 pub use vector_store::*;
 pub use video::*;
diff --git a/async-openai/src/types/models/mod.rs b/async-openai/src/types/models/mod.rs
new file mode 100644
index 00000000..4a7ebf60
--- /dev/null
+++ b/async-openai/src/types/models/mod.rs
@@ -0,0 +1,3 @@
+mod model;
+
+pub use model::*;
diff --git a/async-openai/src/types/model.rs b/async-openai/src/types/models/model.rs
similarity index 100%
rename from async-openai/src/types/model.rs
rename to async-openai/src/types/models/model.rs
diff --git a/async-openai/src/types/moderations/mod.rs b/async-openai/src/types/moderations/mod.rs
new file mode 100644
index 00000000..c5c6d71d
--- /dev/null
+++ b/async-openai/src/types/moderations/mod.rs
@@ -0,0 +1,3 @@
+mod moderation;
+
+pub use moderation::*;
diff --git a/async-openai/src/types/moderation.rs b/async-openai/src/types/moderations/moderation.rs
similarity index 94%
rename from async-openai/src/types/moderation.rs
rename to async-openai/src/types/moderations/moderation.rs
index 979bd904..105e8b5f 100644
--- a/async-openai/src/types/moderation.rs
+++ b/async-openai/src/types/moderations/moderation.rs
@@ -16,30 +16,29 @@ pub enum ModerationInput {
     MultiModal(Vec<ModerationContentPart>),
 }
 
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct ModerationTextInput {
+    /// A string of text to classify
+    pub text: String,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub struct ModerationImageURLInput {
+    /// Either a URL of the image or the base64 encoded image data.
+    pub image_url: String,
+}
+
 /// Content part for multi-modal moderation input
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 #[serde(tag = "type")]
 pub enum ModerationContentPart {
     /// An object describing text to classify
     #[serde(rename = "text")]
-    Text {
-        /// A string of text to classify
-        text: String,
-    },
+    Text(ModerationTextInput),
     /// An object describing an image to classify
     #[serde(rename = "image_url")]
-    ImageUrl {
-        /// Contains either an image URL or a data URL for a base64 encoded image
-        image_url: ModerationImageUrl,
-    },
-}
-
-/// Image URL configuration for image moderation
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
-pub struct ModerationImageUrl {
-    /// Either a URL of the image or the base64 encoded image data
-    pub url: String,
+    ImageUrl(ModerationImageURLInput),
 }
 
 #[derive(Debug, Default, Clone, Serialize, Builder, PartialEq, Deserialize)]
@@ -53,15 +52,15 @@ pub struct CreateModerationRequest {
     /// an array of multi-modal input objects similar to other models.
     pub input: ModerationInput,
 
-    /// The content moderation model you would like to use. Learn more in the
-    /// [moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
-    /// available models [here](https://platform.openai.com/docs/models/moderation).
+    /// The content moderation model you would like to use. Learn more in
+    /// [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
+    /// available models [here](https://platform.openai.com/docs/models#moderation).
     #[serde(skip_serializing_if = "Option::is_none")]
     pub model: Option<String>,
 }
 
 #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
-pub struct Category {
+pub struct Categories {
     /// Content that expresses, incites, or promotes hate based on race, gender,
     /// ethnicity, religion, nationality, sexual orientation, disability status, or
     /// caste. Hateful content aimed at non-protected groups (e.g., chess players)
@@ -147,7 +146,7 @@ pub struct ContentModerationResult {
     /// Whether any of the below categories are flagged.
     pub flagged: bool,
     /// A list of the categories, and whether they are flagged or not.
-    pub categories: Category,
+    pub categories: Categories,
     /// A list of the categories along with their scores as predicted by model.
     pub category_scores: CategoryScore,
     /// A list of the categories along with the input type(s) that the score applies to.
diff --git a/async-openai/src/types/responses/response.rs b/async-openai/src/types/responses/response.rs
index 1716357b..d4684b5c 100644
--- a/async-openai/src/types/responses/response.rs
+++ b/async-openai/src/types/responses/response.rs
@@ -1667,7 +1667,7 @@ pub struct ReasoningItem {
     /// Unique identifier of the reasoning content.
     pub id: String,
     /// Reasoning summary content.
-    pub summary: Vec<Summary>,
+    pub summary: Vec<SummaryPart>,
     /// Reasoning text content.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub content: Option>,
@@ -1688,6 +1688,12 @@ pub struct Summary {
     pub text: String,
 }
 
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum SummaryPart {
+    SummaryText(Summary),
+}
+
 /// File search tool call output.
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct FileSearchToolCall {
diff --git a/async-openai/src/types/responses/stream.rs b/async-openai/src/types/responses/stream.rs
index 58bcc82f..fed7c221 100644
--- a/async-openai/src/types/responses/stream.rs
+++ b/async-openai/src/types/responses/stream.rs
@@ -4,7 +4,7 @@ use std::pin::Pin;
 
 use crate::{
     error::OpenAIError,
-    types::responses::{OutputContent, OutputItem, Response, ResponseLogProb, Summary},
+    types::responses::{OutputContent, OutputItem, Response, ResponseLogProb, SummaryPart},
 };
 
 /// Stream of response events
@@ -324,12 +324,6 @@ pub struct ResponseWebSearchCallCompletedEvent {
     pub item_id: String,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
-#[serde(tag = "type", rename_all = "snake_case")]
-pub enum SummaryPart {
-    SummaryText(Summary),
-}
-
 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
 pub struct ResponseReasoningSummaryPartAddedEvent {
     pub sequence_number: u64,
diff --git a/async-openai/src/types/uploads/mod.rs b/async-openai/src/types/uploads/mod.rs
new file mode 100644
index 00000000..a90e3638
--- /dev/null
+++ b/async-openai/src/types/uploads/mod.rs
@@ -0,0 +1,3 @@
+mod upload;
+
+pub use upload::*;
diff --git a/async-openai/src/types/upload.rs b/async-openai/src/types/uploads/upload.rs
similarity index 83%
rename from async-openai/src/types/upload.rs
rename to async-openai/src/types/uploads/upload.rs
index eb91c0e1..d63b06d1 100644
--- a/async-openai/src/types/upload.rs
+++ b/async-openai/src/types/uploads/upload.rs
@@ -1,8 +1,8 @@
-use crate::error::OpenAIError;
+use crate::{error::OpenAIError, types::files::FileExpirationAfter};
 use derive_builder::Builder;
 use serde::{Deserialize, Serialize};
 
-use super::{InputSource, OpenAIFile};
+use crate::types::{files::OpenAIFile, InputSource};
 
 /// Request to create an upload object that can accept byte chunks in the form of Parts.
 #[derive(Clone, Serialize, Default, Debug, Deserialize, Builder, PartialEq)]
@@ -28,20 +28,27 @@ pub struct CreateUploadRequest {
     /// This must fall within the supported MIME types for your file purpose. See the supported MIME
     /// types for assistants and vision.
     pub mime_type: String,
+
+    /// The expiration policy for a file. By default, files with `purpose=batch` expire after 30 days and all
+    /// other files are persisted until they are manually deleted.
+    pub expires_after: Option<FileExpirationAfter>,
 }
 
 /// The intended purpose of the uploaded file.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)]
-#[serde(rename_all = "snake_case")]
 pub enum UploadPurpose {
     /// For use with Assistants and Message files
+    #[serde(rename = "assistants")]
     Assistants,
     /// For Assistants image file inputs
+    #[serde(rename = "vision")]
     Vision,
     /// For use with the Batch API
+    #[serde(rename = "batch")]
     Batch,
     /// For use with Fine-tuning
     #[default]
+    #[serde(rename = "fine-tune")]
     FineTune,
 }
 
@@ -60,13 +67,13 @@ pub struct Upload {
     /// The intended number of bytes to be uploaded
     pub bytes: u64,
 
-    /// The intended purpose of the file. [Pelase refer here]([Please refer here](/docs/api-reference/files/object#files/object-purpose) for acceptable values.)
+    /// The intended purpose of the file. [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose) for acceptable values.
    pub purpose: UploadPurpose,
 
     /// The status of the Upload.
    pub status: UploadStatus,
 
-    /// The Unix timestamp (in seconds) for when the Upload was created
+    /// The Unix timestamp (in seconds) for when the Upload will expire
     pub expires_at: u32,
 
     /// The object type, which is always "upload"
diff --git a/async-openai/src/uploads.rs b/async-openai/src/uploads.rs
index ba3cced1..8a3ef7f3 100644
--- a/async-openai/src/uploads.rs
+++ b/async-openai/src/uploads.rs
@@ -1,7 +1,9 @@
 use crate::{
     config::Config,
     error::OpenAIError,
-    types::{AddUploadPartRequest, CompleteUploadRequest, CreateUploadRequest, Upload, UploadPart},
+    types::uploads::{
+        AddUploadPartRequest, CompleteUploadRequest, CreateUploadRequest, Upload, UploadPart,
+    },
     Client,
 };
diff --git a/async-openai/src/vector_store_files.rs b/async-openai/src/vector_store_files.rs
index cb7d2748..ed5b40e0 100644
--- a/async-openai/src/vector_store_files.rs
+++ b/async-openai/src/vector_store_files.rs
@@ -96,7 +96,8 @@ impl<'c, C: Config> VectorStoreFiles<'c, C> {
 
 #[cfg(test)]
 mod tests {
-    use crate::types::{CreateFileRequest, CreateVectorStoreRequest, FileInput, FilePurpose};
+    use crate::types::files::{CreateFileRequest, FileInput, FilePurpose};
+    use crate::types::CreateVectorStoreRequest;
     use crate::Client;
 
     #[tokio::test]
diff --git a/async-openai/src/webhooks.rs b/async-openai/src/webhooks.rs
index 90b87df1..259ba8a1 100644
--- a/async-openai/src/webhooks.rs
+++ b/async-openai/src/webhooks.rs
@@ -288,7 +288,7 @@ mod tests {
             Webhooks::verify_signature(body, &signature, old_timestamp, webhook_id, &secret);
         assert!(result.is_err());
         match result.unwrap_err() {
-            WebhookError::InvalidSignature(msg) => {
+            WebhookError::Invalid(msg) => {
                 assert!(msg.contains("too old"));
             }
             _ => panic!("Expected InvalidSignature error"),
@@ -319,7 +319,7 @@ mod tests {
             Webhooks::verify_signature(body, &signature, &future_timestamp, webhook_id, &secret);
         assert!(result.is_err());
         match result.unwrap_err() {
-            WebhookError::InvalidSignature(msg) => {
+            WebhookError::Invalid(msg) => {
                 assert!(msg.contains("too new"));
             }
             _ => panic!("Expected InvalidSignature error"),
@@ -342,7 +342,7 @@ mod tests {
         );
         assert!(result.is_err());
         match result.unwrap_err() {
-            WebhookError::InvalidSignature(msg) => {
+            WebhookError::Invalid(msg) => {
                 assert!(msg.contains("timestamp"));
             }
             _ => panic!("Expected InvalidSignature error"),
diff --git a/async-openai/tests/bring-your-own-type.rs b/async-openai/tests/bring-your-own-type.rs
index 001a43c4..175a8cd2 100644
--- a/async-openai/tests/bring-your-own-type.rs
+++ b/async-openai/tests/bring-your-own-type.rs
@@ -58,8 +58,8 @@ async fn test_byot_moderations() {
 async fn test_byot_images() {
     let client = Client::new();
 
-    let _r: Result<serde_json::Value, OpenAIError> = client.images().create_byot(json!({})).await;
-    let _r: Result<serde_json::Value, OpenAIError> = client.images().create_edit_byot(MyJson(json!({}))).await;
+    let _r: Result<serde_json::Value, OpenAIError> = client.images().generate_byot(json!({})).await;
+    let _r: Result<serde_json::Value, OpenAIError> = client.images().edit_byot(MyJson(json!({}))).await;
     let _r: Result<serde_json::Value, OpenAIError> = client
         .images()
         .create_variation_byot(MyJson(json!({})))
diff --git a/async-openai/tests/embeddings.rs b/async-openai/tests/embeddings.rs
index 6bc1bacf..efcefbb7 100644
--- a/async-openai/tests/embeddings.rs
+++ b/async-openai/tests/embeddings.rs
@@ -1,5 +1,5 @@
 //! This test is primarily to make sure that macros_rules for From traits are correct.
-use async_openai::types::EmbeddingInput;
+use async_openai::types::embeddings::EmbeddingInput;
 
 fn embedding_input<T>(input: T) -> EmbeddingInput
 where
diff --git a/examples/assistants-code-interpreter/src/main.rs b/examples/assistants-code-interpreter/src/main.rs
index d82aa85e..4e3eea4f 100644
--- a/examples/assistants-code-interpreter/src/main.rs
+++ b/examples/assistants-code-interpreter/src/main.rs
@@ -1,10 +1,11 @@
 use std::error::Error;
 
 use async_openai::{
+    types::files::{CreateFileRequest, FilePurpose},
     types::{
         AssistantToolCodeInterpreterResources, AssistantTools, CreateAssistantRequestArgs,
-        CreateFileRequest, CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest,
-        FilePurpose, MessageContent, MessageContentTextAnnotations, MessageRole, RunStatus,
+        CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, MessageContent,
+        MessageContentTextAnnotations, MessageRole, RunStatus,
     },
     Client,
 };
diff --git a/examples/assistants-file-search/src/main.rs b/examples/assistants-file-search/src/main.rs
index 06e3434b..fdfa32b3 100644
--- a/examples/assistants-file-search/src/main.rs
+++ b/examples/assistants-file-search/src/main.rs
@@ -1,11 +1,12 @@
 use std::error::Error;
 
 use async_openai::{
+    types::files::{CreateFileRequest, FilePurpose},
     types::{
         AssistantToolFileSearchResources, AssistantToolsFileSearch, CreateAssistantRequestArgs,
-        CreateFileRequest, CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest,
-        CreateVectorStoreRequest, FilePurpose, MessageAttachment, MessageAttachmentTool,
-        MessageContent, MessageRole, ModifyAssistantRequest, RunStatus,
+        CreateMessageRequestArgs, CreateRunRequest, CreateThreadRequest, CreateVectorStoreRequest,
+        MessageAttachment, MessageAttachmentTool, MessageContent, MessageRole,
+        ModifyAssistantRequest, RunStatus,
     },
     Client,
 };
diff --git a/examples/azure-openai-service/src/main.rs b/examples/azure-openai-service/src/main.rs
index 8137cc18..fc4d026c 100644
--- a/examples/azure-openai-service/src/main.rs
+++ b/examples/azure-openai-service/src/main.rs
@@ -3,8 +3,8 @@ use std::error::Error;
 use async_openai::{
     config::AzureConfig,
     types::{
-        ChatCompletionRequestSystemMessageArgs, ChatCompletionRequestUserMessageArgs,
-        CreateChatCompletionRequestArgs, CreateEmbeddingRequestArgs,
+        embeddings::CreateEmbeddingRequestArgs, ChatCompletionRequestSystemMessageArgs,
+        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
     },
     Client,
 };
diff --git a/examples/embeddings/src/main.rs b/examples/embeddings/src/main.rs
index f10a8630..e93e912f 100644
--- a/examples/embeddings/src/main.rs
+++ b/examples/embeddings/src/main.rs
@@ -1,6 +1,6 @@
 use std::error::Error;
 
-use async_openai::{types::CreateEmbeddingRequestArgs, Client};
+use async_openai::{types::embeddings::CreateEmbeddingRequestArgs, Client};
 
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn Error>> {
diff --git a/examples/gemini-openai-compatibility/src/main.rs b/examples/gemini-openai-compatibility/src/main.rs
index abcf4072..7ef2de77 100644
--- a/examples/gemini-openai-compatibility/src/main.rs
+++ b/examples/gemini-openai-compatibility/src/main.rs
@@ -1,10 +1,11 @@
 use async_openai::{
     config::OpenAIConfig,
     types::{
+        embeddings::CreateEmbeddingRequestArgs,
         images::{CreateImageRequestArgs, Image, ImageModel, ImageResponseFormat},
         ChatCompletionRequestMessage, ChatCompletionRequestUserMessage,
-        ChatCompletionRequestUserMessageContentPart, CreateChatCompletionRequestArgs,
-        CreateEmbeddingRequestArgs, InputAudio, ResponseFormat, ResponseFormatJsonSchema,
+
ChatCompletionRequestUserMessageContentPart, CreateChatCompletionRequestArgs, InputAudio, + ResponseFormat, ResponseFormatJsonSchema, }, Client, }; diff --git a/examples/moderations/src/main.rs b/examples/moderations/src/main.rs index 3e37916c..8c908a4d 100644 --- a/examples/moderations/src/main.rs +++ b/examples/moderations/src/main.rs @@ -1,4 +1,4 @@ -use async_openai::{types::CreateModerationRequestArgs, Client}; +use async_openai::{types::moderations::CreateModerationRequestArgs, Client}; use std::error::Error; #[tokio::main] diff --git a/examples/vector-store-retrieval/src/main.rs b/examples/vector-store-retrieval/src/main.rs index 90b0dee7..32bfdfbb 100644 --- a/examples/vector-store-retrieval/src/main.rs +++ b/examples/vector-store-retrieval/src/main.rs @@ -2,8 +2,8 @@ use std::error::Error; use async_openai::{ types::{ - CreateFileRequest, CreateVectorStoreRequest, FilePurpose, VectorStoreSearchRequest, - VectorStoreStatus, + files::{CreateFileRequest, FilePurpose}, + CreateVectorStoreRequest, VectorStoreSearchRequest, VectorStoreStatus, }, Client, };
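Beyond the import shuffles in these examples, the new file surface in this patch is worth one end-to-end sketch: uploading an eval dataset under the new `evals` purpose with an expiration policy. This is a minimal sketch, assuming `CreateFileRequest` keeps a derived `CreateFileRequestArgs` builder (as elsewhere in the crate) and using a hypothetical local path:

```rust
use async_openai::{
    types::files::{
        CreateFileRequestArgs, FileExpirationAfter, FileExpirationAfterAnchor, FilePurpose,
    },
    Client,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new();

    let request = CreateFileRequestArgs::default()
        .file("./dataset.jsonl") // hypothetical eval dataset in JSONL form
        .purpose(FilePurpose::Evals)
        // Opt into automatic deletion 7 days after creation; `seconds`
        // must be between 3600 (1 hour) and 2592000 (30 days).
        .expires_after(FileExpirationAfter {
            anchor: FileExpirationAfterAnchor::CreatedAt,
            seconds: 604_800,
        })
        .build()?;

    let file = client.files().create(request).await?;
    println!("uploaded file: {}", file.id);
    Ok(())
}
```

Without `expires_after`, files persist until deleted manually, matching the default documented on `CreateFileRequest` above.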