Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion async-openai/src/batches.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use serde::Serialize;
use crate::{
config::Config,
error::OpenAIError,
types::{Batch, BatchRequest, ListBatchesResponse},
types::batches::{Batch, BatchRequest, ListBatchesResponse},
Client,
};

Expand Down
7 changes: 6 additions & 1 deletion async-openai/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use crate::{
moderation::Moderations,
traits::AsyncTryFrom,
Assistants, Audio, AuditLogs, Batches, Chat, Completions, Containers, Conversations,
Embeddings, FineTuning, Invites, Models, Projects, Responses, Threads, Uploads, Users,
Embeddings, Evals, FineTuning, Invites, Models, Projects, Responses, Threads, Uploads, Users,
VectorStores, Videos,
};

Expand Down Expand Up @@ -183,6 +183,11 @@ impl<C: Config> Client<C> {
Containers::new(self)
}

/// To call [Evals] group related APIs using this client.
pub fn evals(&self) -> Evals<'_, C> {
Evals::new(self)
}

pub fn config(&self) -> &C {
&self.config
}
Expand Down
15 changes: 8 additions & 7 deletions async-openai/src/embedding.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,19 @@
use crate::{
config::Config,
error::OpenAIError,
types::{CreateBase64EmbeddingResponse, CreateEmbeddingRequest, CreateEmbeddingResponse},
types::embeddings::{
CreateBase64EmbeddingResponse, CreateEmbeddingRequest, CreateEmbeddingResponse,
},
Client,
};

#[cfg(not(feature = "byot"))]
use crate::types::EncodingFormat;
use crate::types::embeddings::EncodingFormat;

/// Get a vector representation of a given input that can be easily
/// consumed by machine learning models and algorithms.
///
/// Related guide: [Embeddings](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings)
/// Related guide: [Embeddings](https://platform.openai.com/docs/guides/embeddings)
pub struct Embeddings<'c, C: Config> {
client: &'c Client<C>,
}
Expand Down Expand Up @@ -65,8 +67,8 @@ impl<'c, C: Config> Embeddings<'c, C> {
#[cfg(test)]
mod tests {
use crate::error::OpenAIError;
use crate::types::{CreateEmbeddingResponse, Embedding, EncodingFormat};
use crate::{types::CreateEmbeddingRequestArgs, Client};
use crate::types::embeddings::{CreateEmbeddingResponse, Embedding, EncodingFormat};
use crate::{types::embeddings::CreateEmbeddingRequestArgs, Client};

#[tokio::test]
async fn test_embedding_string() {
Expand Down Expand Up @@ -165,7 +167,6 @@ mod tests {
}

#[tokio::test]
#[cfg(not(feature = "byot"))]
async fn test_cannot_use_base64_encoding_with_normal_create_request() {
let client = Client::new();

Expand All @@ -187,7 +188,7 @@ mod tests {
let client = Client::new();

const MODEL: &str = "text-embedding-ada-002";
const INPUT: &str = "CoLoop will eat the other qual research tools...";
const INPUT: &str = "a head full of dreams";

let b64_request = CreateEmbeddingRequestArgs::default()
.model(MODEL)
Expand Down
49 changes: 49 additions & 0 deletions async-openai/src/eval_run_output_items.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
use serde::Serialize;

use crate::{
config::Config,
error::OpenAIError,
types::evals::{EvalRunOutputItem, EvalRunOutputItemList},
Client,
};

/// API group scoped to the output items of a single evaluation run.
///
/// Holds the identifiers that are interpolated into the
/// `/evals/{eval_id}/runs/{run_id}/output_items` request paths.
pub struct EvalRunOutputItems<'c, C: Config> {
    // Borrowed client used to issue the HTTP requests.
    client: &'c Client<C>,
    // Identifier of the parent evaluation (URL path segment).
    pub eval_id: String,
    // Identifier of the run whose output items are accessed (URL path segment).
    pub run_id: String,
}

impl<'c, C: Config> EvalRunOutputItems<'c, C> {
    /// Creates an output-items API group bound to the given eval and run IDs.
    pub fn new(client: &'c Client<C>, eval_id: &str, run_id: &str) -> Self {
        Self {
            client,
            eval_id: String::from(eval_id),
            run_id: String::from(run_id),
        }
    }

    /// Get a list of output items for an evaluation run.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn list<Q>(&self, query: &Q) -> Result<EvalRunOutputItemList, OpenAIError>
    where
        Q: Serialize + ?Sized,
    {
        let path = format!("/evals/{}/runs/{}/output_items", self.eval_id, self.run_id);
        self.client.get_with_query(&path, &query).await
    }

    /// Get an evaluation run output item by ID.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn retrieve(&self, output_item_id: &str) -> Result<EvalRunOutputItem, OpenAIError> {
        let path = format!(
            "/evals/{}/runs/{}/output_items/{}",
            self.eval_id, self.run_id, output_item_id
        );
        self.client.get(&path).await
    }
}
74 changes: 74 additions & 0 deletions async-openai/src/eval_runs.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
use serde::Serialize;

use crate::{
config::Config,
error::OpenAIError,
eval_run_output_items::EvalRunOutputItems,
types::evals::{CreateEvalRunRequest, DeleteEvalRunResponse, EvalRun, EvalRunList},
Client,
};

/// API group for the runs of a single evaluation.
///
/// Holds the evaluation identifier that is interpolated into the
/// `/evals/{eval_id}/runs` request paths.
pub struct EvalRuns<'c, C: Config> {
    // Borrowed client used to issue the HTTP requests.
    client: &'c Client<C>,
    // Identifier of the evaluation these runs belong to (URL path segment).
    pub eval_id: String,
}

impl<'c, C: Config> EvalRuns<'c, C> {
    /// Creates a runs API group bound to the given eval ID.
    pub fn new(client: &'c Client<C>, eval_id: &str) -> Self {
        Self {
            client,
            eval_id: String::from(eval_id),
        }
    }

    /// [EvalRunOutputItems] API group
    pub fn output_items(&self, run_id: &str) -> EvalRunOutputItems<'_, C> {
        EvalRunOutputItems::new(self.client, &self.eval_id, run_id)
    }

    /// Get a list of runs for an evaluation.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn list<Q>(&self, query: &Q) -> Result<EvalRunList, OpenAIError>
    where
        Q: Serialize + ?Sized,
    {
        let path = format!("/evals/{}/runs", self.eval_id);
        self.client.get_with_query(&path, &query).await
    }

    /// Kicks off a new run for a given evaluation.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn create(&self, request: CreateEvalRunRequest) -> Result<EvalRun, OpenAIError> {
        let path = format!("/evals/{}/runs", self.eval_id);
        self.client.post(&path, request).await
    }

    /// Get an evaluation run by ID.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn retrieve(&self, run_id: &str) -> Result<EvalRun, OpenAIError> {
        let path = format!("/evals/{}/runs/{}", self.eval_id, run_id);
        self.client.get(&path).await
    }

    /// Cancel an ongoing evaluation run.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn cancel(&self, run_id: &str) -> Result<EvalRun, OpenAIError> {
        // Cancellation is an empty-body POST to the run resource itself.
        let path = format!("/evals/{}/runs/{}", self.eval_id, run_id);
        self.client.post(&path, serde_json::json!({})).await
    }

    /// Delete an eval run.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn delete(&self, run_id: &str) -> Result<DeleteEvalRunResponse, OpenAIError> {
        let path = format!("/evals/{}/runs/{}", self.eval_id, run_id);
        self.client.delete(&path).await
    }
}
69 changes: 69 additions & 0 deletions async-openai/src/evals.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
use serde::Serialize;

use crate::{
config::Config,
error::OpenAIError,
eval_runs::EvalRuns,
types::evals::{CreateEvalRequest, DeleteEvalResponse, Eval, EvalList, UpdateEvalRequest},
Client,
};

/// Create, manage, and run evals in the OpenAI platform. Related guide:
/// [Evals](https://platform.openai.com/docs/guides/evals)
pub struct Evals<'c, C: Config> {
    // Borrowed client used to issue the HTTP requests.
    client: &'c Client<C>,
}

impl<'c, C: Config> Evals<'c, C> {
    /// Creates an evals API group that issues requests through `client`.
    pub fn new(client: &'c Client<C>) -> Self {
        Self { client }
    }

    /// [EvalRuns] API group
    pub fn runs(&self, eval_id: &str) -> EvalRuns<'_, C> {
        EvalRuns::new(self.client, eval_id)
    }

    /// List evaluations for a project.
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn list<Q>(&self, query: &Q) -> Result<EvalList, OpenAIError>
    where
        Q: Serialize + ?Sized,
    {
        self.client.get_with_query("/evals", &query).await
    }

    /// Create the structure of an evaluation that can be used to test a model's performance.
    /// An evaluation is a set of testing criteria and the config for a data source, which dictates
    /// the schema of the data used in the evaluation. After creating an evaluation, you can run it
    /// on different models and model parameters. We support several types of graders and
    /// datasources. For more information, see the [Evals guide](https://platform.openai.com/docs/guides/evals).
    #[crate::byot(T0 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn create(&self, request: CreateEvalRequest) -> Result<Eval, OpenAIError> {
        self.client.post("/evals", request).await
    }

    /// Get an evaluation by ID.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn retrieve(&self, eval_id: &str) -> Result<Eval, OpenAIError> {
        let path = format!("/evals/{eval_id}");
        self.client.get(&path).await
    }

    /// Update certain properties of an evaluation.
    #[crate::byot(T0 = std::fmt::Display, T1 = serde::Serialize, R = serde::de::DeserializeOwned)]
    pub async fn update(
        &self,
        eval_id: &str,
        request: UpdateEvalRequest,
    ) -> Result<Eval, OpenAIError> {
        let path = format!("/evals/{eval_id}");
        self.client.post(&path, request).await
    }

    /// Delete an evaluation.
    #[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
    pub async fn delete(&self, eval_id: &str) -> Result<DeleteEvalResponse, OpenAIError> {
        let path = format!("/evals/{eval_id}");
        self.client.delete(&path).await
    }
}
14 changes: 8 additions & 6 deletions async-openai/src/file.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use serde::Serialize;
use crate::{
config::Config,
error::OpenAIError,
types::{CreateFileRequest, DeleteFileResponse, ListFilesResponse, OpenAIFile},
types::files::{CreateFileRequest, DeleteFileResponse, ListFilesResponse, OpenAIFile},
Client,
};

Expand All @@ -18,13 +18,13 @@ impl<'c, C: Config> Files<'c, C> {
Self { client }
}

/// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 100 GB.
/// Upload a file that can be used across various endpoints. Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up to 1 TB.
///
/// The Assistants API supports files up to 2 million tokens and of specific file types. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for details.
///
/// The Fine-tuning API only supports `.jsonl` files. The input also has certain required formats for fine-tuning [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) models.
///
///The Batch API only supports `.jsonl` files up to 100 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input).
/// The Batch API only supports `.jsonl` files up to 200 MB in size. The input also has a specific required [format](https://platform.openai.com/docs/api-reference/batch/request-input).
///
/// Please [contact us](https://help.openai.com/) if you need to increase these storage limits.
#[crate::byot(
Expand Down Expand Up @@ -70,7 +70,9 @@ impl<'c, C: Config> Files<'c, C> {
#[cfg(test)]
mod tests {
use crate::{
types::{CreateFileRequestArgs, FileExpiresAfter, FileExpiresAfterAnchor, FilePurpose},
types::files::{
CreateFileRequestArgs, FileExpirationAfter, FileExpirationAfterAnchor, FilePurpose,
},
Client,
};

Expand All @@ -89,8 +91,8 @@ mod tests {
let request = CreateFileRequestArgs::default()
.file(test_file_path)
.purpose(FilePurpose::FineTune)
.expires_after(FileExpiresAfter {
anchor: FileExpiresAfterAnchor::CreatedAt,
.expires_after(FileExpirationAfter {
anchor: FileExpirationAfterAnchor::CreatedAt,
seconds: 3600,
})
.build()
Expand Down
Loading
Loading