From 16fe4c6814a828fb0d271a6793598f8369da259d Mon Sep 17 00:00:00 2001 From: David Miguel Lozano Date: Tue, 14 May 2024 23:08:57 +0200 Subject: [PATCH] feat: Add support for completions and embeddings in batch API in openai_dart (#425) --- .../openai_dart/lib/src/generated/schema/batch.dart | 2 +- .../lib/src/generated/schema/batch_endpoint.dart | 6 +++++- .../src/generated/schema/create_batch_request.dart | 2 +- .../lib/src/generated/schema/schema.freezed.dart | 12 ++++++------ .../lib/src/generated/schema/schema.g.dart | 2 ++ packages/openai_dart/oas/openapi_curated.yaml | 6 +++--- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/packages/openai_dart/lib/src/generated/schema/batch.dart b/packages/openai_dart/lib/src/generated/schema/batch.dart index fdd6c1f5..94cc6080 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch.dart @@ -21,7 +21,7 @@ class Batch with _$Batch { /// The object type, which is always `batch`. required BatchObject object, - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. required BatchEndpoint endpoint, /// No Description diff --git a/packages/openai_dart/lib/src/generated/schema/batch_endpoint.dart b/packages/openai_dart/lib/src/generated/schema/batch_endpoint.dart index 24510968..3a945090 100644 --- a/packages/openai_dart/lib/src/generated/schema/batch_endpoint.dart +++ b/packages/openai_dart/lib/src/generated/schema/batch_endpoint.dart @@ -8,8 +8,12 @@ part of open_a_i_schema; // ENUM: BatchEndpoint // ========================================== -/// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. +/// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. enum BatchEndpoint { @JsonValue('/v1/chat/completions') v1ChatCompletions, + @JsonValue('/v1/embeddings') + v1Embeddings, + @JsonValue('/v1/completions') + v1Completions, } diff --git a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart index 7f3910d8..5014b4f1 100644 --- a/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart +++ b/packages/openai_dart/lib/src/generated/schema/create_batch_request.dart @@ -22,7 +22,7 @@ class CreateBatchRequest with _$CreateBatchRequest { /// Your input file must be formatted as a JSONL file, and must be uploaded with the purpose `batch`. @JsonKey(name: 'input_file_id') required String inputFileId, - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
required BatchEndpoint endpoint, /// The time frame within which the batch should be processed. Currently only `24h` is supported. diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart index a8f532ba..472cae5b 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -48058,7 +48058,7 @@ mixin _$CreateBatchRequest { @JsonKey(name: 'input_file_id') String get inputFileId => throw _privateConstructorUsedError; - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchEndpoint get endpoint => throw _privateConstructorUsedError; /// The time frame within which the batch should be processed. Currently only `24h` is supported. @@ -48205,7 +48205,7 @@ class _$CreateBatchRequestImpl extends _CreateBatchRequest { @JsonKey(name: 'input_file_id') final String inputFileId; - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. @override final BatchEndpoint endpoint; @@ -48291,7 +48291,7 @@ abstract class _CreateBatchRequest extends CreateBatchRequest { String get inputFileId; @override - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchEndpoint get endpoint; @override @@ -48321,7 +48321,7 @@ mixin _$Batch { /// The object type, which is always `batch`. BatchObject get object => throw _privateConstructorUsedError; - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchEndpoint get endpoint => throw _privateConstructorUsedError; /// No Description @@ -48766,7 +48766,7 @@ class _$BatchImpl extends _Batch { @override final BatchObject object; - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. 
@override final BatchEndpoint endpoint; @@ -48986,7 +48986,7 @@ abstract class _Batch extends Batch { BatchObject get object; @override - /// The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + /// The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchEndpoint get endpoint; @override diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart index 64ee3a12..03a49b59 100644 --- a/packages/openai_dart/lib/src/generated/schema/schema.g.dart +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -4678,6 +4678,8 @@ Map _$$CreateBatchRequestImplToJson( const _$BatchEndpointEnumMap = { BatchEndpoint.v1ChatCompletions: '/v1/chat/completions', + BatchEndpoint.v1Embeddings: '/v1/embeddings', + BatchEndpoint.v1Completions: '/v1/completions', }; const _$BatchCompletionWindowEnumMap = { diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml index 6d6e726f..9490261d 100644 --- a/packages/openai_dart/oas/openapi_curated.yaml +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -6130,8 +6130,8 @@ components: nullable: true BatchEndpoint: type: string - enum: ["/v1/chat/completions"] - description: The endpoint to be used for all requests in the batch. Currently only `/v1/chat/completions` is supported. + enum: ["/v1/chat/completions", "/v1/embeddings", "/v1/completions"] + description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000 embedding inputs across all requests in the batch. BatchCompletionWindow: type: string enum: ["24h"] @@ -6268,7 +6268,7 @@ components: description: The HTTP method to be used for the request. Currently only `POST` is supported. url: type: string - description: The OpenAI API relative URL to be used for the request. Currently only `/v1/chat/completions` is supported. + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported. BatchRequestOutput: type: object description: The per-line object of the batch output and error files
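
Usage sketch (not part of the patch): the snippet below shows how the BatchEndpoint values added here might be used to create an embeddings batch with the openai_dart client. Only `inputFileId`, `endpoint`, and the new `BatchEndpoint` members are confirmed by this diff; the `OpenAIClient`/`createBatch` call shape, the `completionWindow` parameter, `BatchCompletionWindow.v24h`, and the file id are assumptions drawn from the rest of the package and are placeholders where noted.

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  // Placeholder API key; client construction follows the package's README pattern.
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY');

  // Create a batch that targets the newly supported /v1/embeddings endpoint.
  // Note: /v1/embeddings batches are limited to 50,000 embedding inputs across all requests.
  final batch = await client.createBatch(
    request: CreateBatchRequest(
      inputFileId: 'file-abc123', // placeholder: JSONL file uploaded with purpose `batch`
      endpoint: BatchEndpoint.v1Embeddings, // or BatchEndpoint.v1Completions
      completionWindow: BatchCompletionWindow.v24h, // only `24h` is currently supported
    ),
  );

  print('Created batch ${batch.id} for endpoint ${batch.endpoint}');

  client.endSession();
}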