diff --git a/packages/openai_dart/.gitignore b/packages/openai_dart/.gitignore new file mode 100644 index 00000000..3cceda55 --- /dev/null +++ b/packages/openai_dart/.gitignore @@ -0,0 +1,7 @@ +# https://dart.dev/guides/libraries/private-files +# Created by `dart pub` +.dart_tool/ + +# Avoid committing pubspec.lock for library packages; see +# https://dart.dev/guides/libraries/private-files#pubspeclock. +pubspec.lock diff --git a/packages/openai_dart/CHANGELOG.md b/packages/openai_dart/CHANGELOG.md new file mode 100644 index 00000000..1e1e9c95 --- /dev/null +++ b/packages/openai_dart/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1-dev.1 + +- Bootstrap project diff --git a/packages/openai_dart/LICENSE b/packages/openai_dart/LICENSE new file mode 100644 index 00000000..f407ffdd --- /dev/null +++ b/packages/openai_dart/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 David Miguel Lozano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/packages/openai_dart/README.md b/packages/openai_dart/README.md new file mode 100644 index 00000000..cbab5de6 --- /dev/null +++ b/packages/openai_dart/README.md @@ -0,0 +1,434 @@ +# OpenAI Dart Client + +[![tests](https://img.shields.io/github/actions/workflow/status/davidmigloz/langchain_dart/test.yaml?logo=github&label=tests)](https://github.com/davidmigloz/langchain_dart/actions/workflows/test.yaml) +[![openai_dart](https://img.shields.io/pub/v/openai_dart.svg)](https://pub.dev/packages/openai_dart) +[![](https://dcbadge.vercel.app/api/server/x4qbhqecVR?style=flat)](https://discord.gg/x4qbhqecVR) +[![MIT](https://img.shields.io/badge/license-MIT-purple.svg)](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE) + +Dart client for [OpenAI](https://platform.openai.com/docs/api-reference) API. + +## Features + +- Generated from the official OpenAI [OpenAPI specification](https://github.com/openai/openai-openapi) (low maintenance effort) +- Fully documented and tested +- Authentication with organization support +- Custom base URL and headers support (e.g. HTTP proxies) +- Custom HTTP client support (e.g. SOCKS5 proxies or advanced use cases) +- Endpoints: + * Chat (with functions and streaming support) + * Completions (with streaming support) + * Embeddings + * Fine-tuning + * Images + * Models + * Moderations + +## Usage + +### Authentication + +The OpenAI API uses API keys for authentication. Visit your [API Keys](https://platform.openai.com/account/api-keys) page to retrieve the API key you'll use in your requests. + +> **Remember that your API key is a secret!** +> Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service. 
+ +```dart +final client = OpenAIClient( + apiKey: 'OPENAI_API_KEY', +); +``` + +#### Organization (optional) + +For users who belong to multiple organizations, you can specify which organization is used for an API request. Usage from these API requests will count against the specified organization's subscription quota. + +```dart +final client = OpenAIClient( + apiKey: 'OPENAI_API_KEY', + organization: 'org-dtDDtkEGoFccn5xaP5W1p3Rr', +); +``` + +### Chat + +Given a list of messages comprising a conversation, the model will return a response. + +**Create chat completion:** + +```dart +final res = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.string('gpt-4'), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'Hello!', + ), + ], + temperature: 0, + ), +); +print(res.choices.first.message.content); +// Hello! How can I assist you today? +``` + +`ChatCompletionModel` is a sealed class that offers two ways to specify the model: +- `ChatCompletionModel.string('model-id')`: the model ID as string (e.g. `'gpt-4'` or your fine-tuned model ID). +- `ChatCompletionModel.enumeration(ChatCompletionModels.gpt4)`: a value from `ChatCompletionModels` enum which lists all of the available models. 
+ +**Stream chat completion:** + +```dart +final stream = client.createChatCompletionStream( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.string('gpt-3.5-turbo'), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'List the numbers from 1 to 9', + ), + ], + ), +); +await for (final res in stream) { + print(res.choices.first.delta.content); +} +``` + +**Function calling:** + +```dart +const function = ChatCompletionFunctions( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, +); + +final res1 = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.string('gpt-3.5-turbo'), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'What’s the weather like in Boston right now?', + ), + ], + functions: [function], + ), +); + +final arguments = json.decode( + res1.choices.first.message.functionCall!.arguments, +) as Map; +final functionResult = getCurrentWeather(arguments['location'], arguments['unit']); + +final res2 = await client.createChatCompletion( + request: CreateChatCompletionRequest( + model: ChatCompletionModel.string('gpt-3.5-turbo'), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + 
), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'What’s the weather like in Boston right now?', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.function, + name: function.name, + content: json.encode(functionResult), + ), + ], + functions: [function], + ), +); +final answer = res2.choices.first.message.content; +// The weather in Boston right now is sunny with a temperature of 22°C +``` + +### Completions + +Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. + +**Create completion:** + +```dart +final res = await client.createCompletion( + request: CreateCompletionRequest( + model: CompletionModel.string('gpt-3.5-turbo-instruct'), + prompt: [ + 'Say this is a test', + ], + maxTokens: 7, + temperature: 0, + ), +); +print(res.choices.first.text); +// This is a test. +``` + +`CompletionModel` is a sealed class that offers two ways to specify the model: +- `CompletionModel.string('model-id')`: the model ID as string (e.g. `'gpt-3.5-turbo-instruct'` or your fine-tuned model ID). +- `CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct)`: a value from `CompletionModels` enum which lists all of the available models. + +`CompletionPrompt` is a sealed class that offers four ways to specify the prompt: +- `CompletionPrompt.string('prompt')`: the prompt as string. +- `CompletionPrompt.arrayInteger([...])`: the tokenized prompt. +- `CompletionPrompt.arrayString(['prompt'])`: batch of string prompts. +- `CompletionPrompt.array([[...]])`: batch of tokenized prompts. 
+ +**Stream completion:** + +```dart +final stream = client.createCompletionStream( + request: CreateCompletionRequest( + model: CompletionModel.string('gpt-3.5-turbo-instruct'), + prompt: [ + 'Say this is a test', + ], + maxTokens: 7, + temperature: 0, + ), +); +await for (final res in stream) { + print(res.choices.first.text); +} +``` + +### Embeddings + +Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + +**Create embedding:** + +```dart +final res = await client.createEmbedding( + request: CreateEmbeddingRequest( + model: EmbeddingModel.string('text-embedding-ada-002'), + input: EmbeddingInput.string('The food was delicious and the waiter...'), + ), +); +print(res.data.first.embedding); +// [0.002253932, -0.009333183, 0.01574578, -0.007790351, -0.004711035, ...] +``` + +`EmbeddingModel` is a sealed class that offers two ways to specify the model: +- `EmbeddingModel.string('model-id')`: the model ID as string. +- `EmbeddingModel.enumeration(EmbeddingModels.textEmbeddingAda002)`: a value from `EmbeddingModels` enum which lists all of the available models. + +`EmbeddingInput` is a sealed class that offers four ways to specify the embedding input: +- `EmbeddingInput.string('input')`: the input as string. +- `EmbeddingInput.arrayInteger([...])`: the tokenized input. +- `EmbeddingInput.arrayString(['input'])`: batch of string inputs. +- `EmbeddingInput.array([[...]])`: batch of tokenized inputs. + +### Fine-tuning + +Manage fine-tuning jobs to tailor a model to your specific training data. 
+ +**Create fine-tuning job:** + +```dart +const request = CreateFineTuningJobRequest( + model: FineTuningModel.string('gpt-3.5-turbo'), + trainingFile: 'file-abc123', + validationFile: 'file-abc123', + hyperparameters: FineTuningJobHyperparameters( + nEpochs: FineTuningNEpochs.enumeration(FineTuningNEpochsOptions.auto), + ), +); +final res = await client.createFineTuningJob(request: request); +``` + +**List fine-tuning jobs:** + +```dart +final res = await client.listPaginatedFineTuningJobs(); +``` + +**Retrieve fine-tuning job:** + +```dart +final res = await client.retrieveFineTuningJob( + fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', +); +``` + +**Cancel fine-tuning job:** + +```dart +final res = await client.cancelFineTuningJob( + fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', +); +``` + +**List fine-tuning events:** + +```dart +final res = await client.listFineTuningEvents( + fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F', +); +``` + +### Images + +Given a prompt and/or an input image, the model will generate a new image. + +**Create image:** + +```dart +final res = await client.createImage( + request: CreateImageRequest( + prompt: 'A cute baby sea otter', + n: 2, + size: ImageSize.v256x256, + ), +); +print(res.data.first.url); +// https://oaidalleapiprodscus.blob.core.windows.net/private/... +``` + +### Models + +List and describe the various models available in the API. You can refer to the [Models](https://platform.openai.com/docs/models) documentation to understand what models are available and the differences between them. 
+ +**List models:** + +```dart +final res = await client.listModels(); +print(res.data.first.id); +// text-search-babbage-doc-001 +``` + +**Retrieve model:** + +```dart +final res = await client.retrieveModel(model: 'gpt-4'); +print(res.ownedBy); +// openai +``` + +**Delete fine-tune model:** + +```dart +final res = await client.deleteModel( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR', +); +print(res.deleted); +// true +``` + +### Moderations + +Given an input text, outputs if the model classifies it as violating OpenAI's content policy. + +```dart +final res = await client.createModeration( + request: CreateModerationRequest( + model: ModerationModel.string('text-moderation-latest'), + input: ModerationInput.string('I want to kill them.'), + ), +); +print(res.results.first.categories.violence); +// true +print(res.results.first.categoryScores.violence); +// 0.9925811290740967 +``` + +`ModerationModel` is a sealed class that offers two ways to specify the model: +- `ModerationModel.string('model-id')`: the model ID as string. +- `ModerationModel.enumeration(ModerationModels.textModerationLatest)`: a value from `ModerationModels` enum which lists all of the available models. + +`ModerationInput` is a sealed class that offers two ways to specify the moderation input: +- `ModerationInput.string('input')`: the input as string. +- `ModerationInput.arrayString(['input'])`: batch of string inputs. 
+ +### Advanced + +#### Default HTTP client + +By default, the client uses the following implementation of `http.Client`: + +- Non-web: [`IOClient`](https://pub.dev/documentation/http/latest/io_client/IOClient-class.html) +- Web: [`FetchClient`](https://pub.dev/documentation/fetch_client/latest/fetch_client/FetchClient-class.html) (to support streaming on web) + +#### Custom HTTP client + +You can always provide your own implementation of `http.Client` for further customization: + +```dart +final client = OpenAIClient( + apiKey: 'OPENAI_API_KEY', + client: MyHttpClient(), +); +``` + +#### Using a proxy + +##### HTTP proxy + +You can use your own HTTP proxy by overriding the `baseUrl` and providing your required `headers`: + +```dart +final client = OpenAIClient( + baseUrl: 'https://my-proxy.com', + headers: { + 'x-my-proxy-header': 'value', + }, +); +``` + +If you need further customization, you can always provide your own `http.Client`. + +##### SOCKS5 proxy + +To use a SOCKS5 proxy, you can use the [`socks5_proxy`](https://pub.dev/packages/socks5_proxy) package: + +```dart +final baseHttpClient = HttpClient(); +SocksTCPClient.assignToHttpClient(baseHttpClient, [ + ProxySettings(InternetAddress.loopbackIPv4, 1080), +]); +final httpClient = IOClient(baseHttpClient); + +final client = OpenAIClient( + client: httpClient, +); +``` + +## Acknowledgements + +The generation of this client was made possible by the [openapi_spec](https://github.com/tazatechnology/openapi_spec) package. + +## License + +OpenAI Dart Client is licensed under the [MIT License](https://github.com/davidmigloz/langchain_dart/blob/main/LICENSE). 
diff --git a/packages/openai_dart/analysis_options.yaml b/packages/openai_dart/analysis_options.yaml new file mode 100644 index 00000000..f04c6cf0 --- /dev/null +++ b/packages/openai_dart/analysis_options.yaml @@ -0,0 +1 @@ +include: ../../analysis_options.yaml diff --git a/packages/openai_dart/build.yaml b/packages/openai_dart/build.yaml new file mode 100644 index 00000000..e06811b0 --- /dev/null +++ b/packages/openai_dart/build.yaml @@ -0,0 +1,13 @@ +targets: + $default: + builders: + source_gen|combining_builder: + options: + ignore_for_file: + - prefer_final_parameters + - require_trailing_commas + - non_constant_identifier_names + json_serializable: + options: + explicit_to_json: true + include_if_null: false diff --git a/packages/openai_dart/example/openai_dart_example.dart b/packages/openai_dart/example/openai_dart_example.dart new file mode 100644 index 00000000..ac4b0ed2 --- /dev/null +++ b/packages/openai_dart/example/openai_dart_example.dart @@ -0,0 +1,92 @@ +// ignore_for_file: avoid_print +import 'dart:async'; +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; + +Future main() async { + final client = OpenAIClient(apiKey: Platform.environment['OPENAI_API_KEY']!); + + await _chatCompletions(client); + await _completions(client); + await _embeddings(client); + await _fineTuning(client); + await _images(client); + await _models(client); + await _moderations(client); + + client.endSession(); +} + +Future _chatCompletions(final OpenAIClient client) async { + final res = await client.createChatCompletion( + request: const CreateChatCompletionRequest( + model: ChatCompletionModel.string('gpt-4'), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'Hello!', + ), + ], + temperature: 0, + ), + ); + print(res.choices.first.message.content); +} + +Future _completions(final OpenAIClient client) 
async { + final res = await client.createCompletion( + request: const CreateCompletionRequest( + model: CompletionModel.string('gpt-3.5-turbo-instruct'), + prompt: CompletionPrompt.string('Say this is a test'), + maxTokens: 7, + temperature: 0, + ), + ); + print(res.choices.first.text); +} + +Future _embeddings(final OpenAIClient client) async { + final res = await client.createEmbedding( + request: const CreateEmbeddingRequest( + model: EmbeddingModel.string('text-embedding-ada-002'), + input: EmbeddingInput.string('The food was delicious and the waiter...'), + ), + ); + print(res.data.first.embedding); +} + +Future _fineTuning(final OpenAIClient client) async { + final res = await client.listPaginatedFineTuningJobs(); + print(res.data.first.id); +} + +Future _images(final OpenAIClient client) async { + final res = await client.createImage( + request: const CreateImageRequest( + prompt: 'A cute baby sea otter', + ), + ); + print(res.data.first.url); +} + +Future _models(final OpenAIClient client) async { + final res1 = await client.listModels(); + print(res1.data.first.id); + final res2 = await client.retrieveModel(model: 'gpt-4'); + print(res2.ownedBy); +} + +Future _moderations(final OpenAIClient client) async { + final res = await client.createModeration( + request: const CreateModerationRequest( + input: ModerationInput.string('I want to kill them.'), + ), + ); + print(res.results.first.categories.violence); + print(res.results.first.categoryScores.violence); +} diff --git a/packages/openai_dart/lib/openai_dart.dart b/packages/openai_dart/lib/openai_dart.dart new file mode 100644 index 00000000..1bc8c150 --- /dev/null +++ b/packages/openai_dart/lib/openai_dart.dart @@ -0,0 +1,6 @@ +/// Dart Client for the OpenAI API (completions, chat, embeddings, etc.). 
+library; + +export 'src/client.dart'; +export 'src/generated/client.dart' show OpenAIClientException; +export 'src/generated/schema/schema.dart'; diff --git a/packages/openai_dart/lib/src/client.dart b/packages/openai_dart/lib/src/client.dart new file mode 100644 index 00000000..5ceae6d4 --- /dev/null +++ b/packages/openai_dart/lib/src/client.dart @@ -0,0 +1,236 @@ +// ignore_for_file: use_super_parameters +import 'dart:async'; +import 'dart:convert'; + +import 'package:http/http.dart' as http; + +import 'generated/client.dart' as g; +import 'generated/schema/schema.dart'; +import 'http_client/http_client.dart'; + +/// Client for OpenAI API. +/// +/// Please see https://platform.openai.com/docs/api-reference for more details. +class OpenAIClient extends g.OpenAIClient { + /// Create a new OpenAI API client. + /// + /// Main configuration options: + /// - `apiKey`: your OpenAI API key. You can find your API key in the + /// [OpenAI dashboard](https://platform.openai.com/account/api-keys). + /// - `organization`: your OpenAI organization ID (if applicable). + /// + /// Advance configuration options: + /// - `baseUrl`: the base URL to use. Defaults to OpenAI's API URL. You can + /// override this to use a different API URL, or to use a proxy. + /// - `globalHeaders`: global headers to send with every request. You can use + /// this to set custom headers, or to override the default headers. + /// - `client`: the HTTP client to use. You can set your own HTTP client if + /// you need further customization (e.g. to use a Socks5 proxy). + OpenAIClient({ + final String apiKey = '', + final String? organization, + final String? baseUrl, + final Map? globalHeaders, + final http.Client? client, + }) : super( + bearerToken: apiKey, + baseUrl: baseUrl, + headers: { + if (organization != null) 'OpenAI-Organization': organization, + ...?globalHeaders, + }, + client: client ?? 
createDefaultHttpClient(), + ); + + // ------------------------------------------ + // METHOD: onStreamedResponse + // ------------------------------------------ + + /// Middleware for HTTP streamed responses (user can override) + Future onStreamedResponse( + final http.StreamedResponse response, + ) { + return Future.value(response); + } + + // ------------------------------------------ + // METHOD: createCompletionStream + // ------------------------------------------ + + /// Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. We recommend most users use our Chat completions API. [Learn more](https://platform.openai.com/docs/deprecations/2023-07-06-gpt-and-embeddings) Related guide: [Legacy Completions](https://platform.openai.com/docs/guides/gpt/completions-api) + /// + /// `request`: Request object for the Create completion endpoint. + /// + /// `POST` `https://api.openai.com/v1/completions` + Stream createCompletionStream({ + required final CreateCompletionRequest request, + }) async* { + final r = await _requestStream( + baseUrl: 'https://api.openai.com/v1', + path: '/completions', + method: g.HttpMethod.post, + requestType: 'application/json', + responseType: 'application/json', + body: request.copyWith(stream: true), + ); + yield* r.stream + .transform(const _OpenAIStreamTransformer()) // + .map((final d) => CreateCompletionResponse.fromJson(json.decode(d))); + } + + // ------------------------------------------ + // METHOD: createChatCompletionStream + // ------------------------------------------ + + /// Creates a model response for the given chat conversation. + /// + /// `request`: Request object for the Create chat completion endpoint. 
+ /// + /// `POST` `https://api.openai.com/v1/chat/completions` + Stream createChatCompletionStream({ + required final CreateChatCompletionRequest request, + }) async* { + final r = await _requestStream( + baseUrl: 'https://api.openai.com/v1', + path: '/chat/completions', + method: g.HttpMethod.post, + requestType: 'application/json', + responseType: 'application/json', + body: request.copyWith(stream: true), + ); + yield* r.stream + .transform(const _OpenAIStreamTransformer()) // + .map( + (final d) => + CreateChatCompletionStreamResponse.fromJson(json.decode(d)), + ); + } + + // ------------------------------------------ + // METHOD: _request + // ------------------------------------------ + + /// Reusable request stream method + Future _requestStream({ + required String baseUrl, + required final String path, + required final g.HttpMethod method, + final Map queryParams = const {}, + final Map headerParams = const {}, + final String requestType = '', + final String responseType = '', + final Object? body, + }) async { + // Override with the user provided baseUrl + baseUrl = this.baseUrl ?? 
baseUrl; + + // Ensure a baseUrl is provided + assert( + baseUrl.isNotEmpty, + 'baseUrl is required, but none defined in spec or provided by user', + ); + + // Ensure query parameters are strings or iterable of strings + final query = queryParams.map((final key, final value) { + if (value is Iterable) { + return MapEntry(key, value.map((final v) => v.toString())); + } else { + return MapEntry(key, value.toString()); + } + }); + + // Build the request URI + Uri uri = Uri.parse(baseUrl + path); + if (query.isNotEmpty) { + uri = uri.replace(queryParameters: query); + } + + // Build the headers + final Map headers = {...headerParams}; + + // Add bearer token to request headers + if (bearerToken.isNotEmpty) { + headers['authorization'] = 'Bearer $bearerToken'; + } + + // Define the request type being sent to server + if (requestType.isNotEmpty) { + headers['content-type'] = requestType; + } + + // Define the response type expected to receive from server + if (responseType.isNotEmpty) { + headers['accept'] = responseType; + } + + // Add global headers + headers.addAll(this.headers); + + // Build the request object + late http.StreamedResponse response; + try { + http.Request request = http.Request(method.name, uri); + try { + if (body != null) { + request.body = json.encode(body); + } + } catch (e) { + // Handle request encoding error + throw g.OpenAIClientException( + uri: uri, + method: method, + message: 'Could not encode: ${body.runtimeType}', + body: e, + ); + } + + // Add request headers + request.headers.addAll(headers); + + // Handle user request middleware + request = await onRequest(request) as http.Request; + + // Submit request + response = await client.send(request); + + // Handle user response middleware + response = await onStreamedResponse(response); + } catch (e) { + // Handle request and response errors + throw g.OpenAIClientException( + uri: uri, + method: method, + message: 'Response error', + body: e, + ); + } + + // Check for successful response + 
if ((response.statusCode ~/ 100) == 2) { + return response; + } + + // Handle unsuccessful response + throw g.OpenAIClientException( + uri: uri, + method: method, + message: 'Unsuccessful response', + code: response.statusCode, + body: (await http.Response.fromStream(response)).body, + ); + } +} + +class _OpenAIStreamTransformer + extends StreamTransformerBase, String> { + const _OpenAIStreamTransformer(); + + @override + Stream bind(final Stream> stream) { + return stream // + .transform(utf8.decoder) // + .transform(const LineSplitter()) // + .where((final i) => i.startsWith('data: ') && !i.endsWith('[DONE]')) + .map((final item) => item.substring(6)); + } +} diff --git a/packages/openai_dart/lib/src/generated/client.dart b/packages/openai_dart/lib/src/generated/client.dart new file mode 100644 index 00000000..3d2b60d9 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/client.dart @@ -0,0 +1,570 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +import 'dart:io' as io; +import 'dart:convert'; +import 'package:http/http.dart' as http; +import 'dart:typed_data'; +import 'package:http/retry.dart'; +import 'schema/schema.dart'; + +/// Enum of HTTP methods +enum HttpMethod { get, put, post, delete, options, head, patch, trace } + +// ========================================== +// CLASS: OpenAIClientException +// ========================================== + +/// HTTP exception handler for OpenAIClient +class OpenAIClientException implements io.HttpException { + OpenAIClientException({ + required this.message, + required this.uri, + required this.method, + this.code, + this.body, + }); + + @override + final String message; + @override + final Uri uri; + final HttpMethod method; + final int? code; + final Object? body; + + @override + String toString() { + Object? data; + try { + data = body is String ? 
jsonDecode(body as String) : body.toString(); + } catch (e) { + data = body.toString(); + } + final s = JsonEncoder.withIndent(' ').convert({ + 'uri': uri.toString(), + 'method': method.name.toUpperCase(), + 'code': code, + 'message': message, + 'body': data, + }); + return 'OpenAIClientException($s)'; + } +} + +// ========================================== +// CLASS: OpenAIClient +// ========================================== + +/// Client for OpenAI API (v.2.0.0) +/// +/// The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. +class OpenAIClient { + /// Creates a new OpenAIClient instance. + /// + /// - [OpenAIClient.baseUrl] Override base URL (default: server url defined in spec) + /// - [OpenAIClient.headers] Global headers to be sent with every request + /// - [OpenAIClient.client] Override HTTP client to use for requests + OpenAIClient({ + this.bearerToken = '', + this.baseUrl, + this.headers = const {}, + http.Client? client, + }) : assert( + baseUrl == null || baseUrl.startsWith('http'), + 'baseUrl must start with http', + ), + assert( + baseUrl == null || !baseUrl.endsWith('/'), + 'baseUrl must not end with /', + ), + client = RetryClient(client ?? http.Client()); + + /// Override base URL (default: server url defined in spec) + final String? 
baseUrl;

  /// Global headers sent with every request.
  ///
  /// These are merged last, so they take precedence over the computed
  /// `authorization`, `content-type` and `accept` headers below.
  final Map headers;

  /// HTTP client used to issue all requests.
  final http.Client client;

  /// Bearer token used for authentication (an empty string disables auth).
  String bearerToken;

  // ------------------------------------------
  // METHOD: endSession
  // ------------------------------------------

  /// Close the HTTP client and end session.
  ///
  /// After calling this, the client can no longer issue requests.
  void endSession() => client.close();

  // ------------------------------------------
  // METHOD: onRequest
  // ------------------------------------------

  /// Middleware for HTTP requests (user can override)
  ///
  /// The request can be of type [http.Request] or [http.MultipartRequest]
  Future onRequest(http.BaseRequest request) {
    return Future.value(request);
  }

  // ------------------------------------------
  // METHOD: onResponse
  // ------------------------------------------

  /// Middleware for HTTP responses (user can override)
  Future onResponse(http.Response response) {
    return Future.value(response);
  }

  // ------------------------------------------
  // METHOD: _request
  // ------------------------------------------

  /// Reusable request method shared by every endpoint wrapper.
  ///
  /// Throws an [OpenAIClientException] when the body cannot be encoded,
  /// when the request/response pipeline fails, or when the server returns
  /// a non-2xx status code.
  Future _request({
    required String baseUrl,
    required String path,
    required HttpMethod method,
    Map queryParams = const {},
    Map headerParams = const {},
    bool isMultipart = false,
    String requestType = '',
    String responseType = '',
    Object? body,
  }) async {
    // A user-provided baseUrl always overrides the spec default.
    baseUrl = this.baseUrl ?? baseUrl;

    // Ensure a baseUrl is provided
    assert(
      baseUrl.isNotEmpty,
      'baseUrl is required, but none defined in spec or provided by user',
    );

    // Uri.replace requires query values to be String or Iterable of String.
    queryParams = queryParams.map((key, value) {
      if (value is Iterable) {
        return MapEntry(key, value.map((v) => v.toString()));
      } else {
        return MapEntry(key, value.toString());
      }
    });

    // Build the request URI
    Uri uri = Uri.parse(baseUrl + path);
    if (queryParams.isNotEmpty) {
      uri = uri.replace(queryParameters: queryParams);
    }

    // Build the headers
    Map headers = {...headerParams};

    // Add bearer token to request headers
    if (bearerToken.isNotEmpty) {
      headers['authorization'] = 'Bearer $bearerToken';
    }

    // Define the request type being sent to server
    if (requestType.isNotEmpty) {
      headers['content-type'] = requestType;
    }

    // Define the response type expected to receive from server
    if (responseType.isNotEmpty) {
      headers['accept'] = responseType;
    }

    // Global headers are merged last so the user can override anything above.
    headers.addAll(this.headers);

    // Build the request object
    late http.Response response;
    try {
      http.BaseRequest request;
      if (isMultipart) {
        // Handle multipart request.
        // FIX: use a typed local instead of re-assigning a cast back into the
        // `http.BaseRequest` variable — that re-assignment does not promote
        // the static type, so `files` would not be accessible on it.
        final multipartRequest = http.MultipartRequest(method.name, uri);
        if (body is List) {
          multipartRequest.files.addAll(body.cast());
        } else {
          multipartRequest.files.add(body as http.MultipartFile);
        }
        request = multipartRequest;
      } else {
        // Handle normal (JSON) request with a typed local for `body` access.
        final jsonRequest = http.Request(method.name, uri);
        try {
          if (body != null) {
            jsonRequest.body = json.encode(body);
          }
        } catch (e) {
          // Handle request encoding error
          throw OpenAIClientException(
            uri: uri,
            method: method,
            message: 'Could not encode: ${body.runtimeType}',
            body: e,
          );
        }
        request = jsonRequest;
      }

      // Add request headers
      request.headers.addAll(headers);

      // Handle user request middleware
      request = await onRequest(request);

      // Submit request
      response = await http.Response.fromStream(await client.send(request));

      // Handle user response middleware
      response = await onResponse(response);
    } on OpenAIClientException {
      // FIX: preserve the precise error (e.g. the body-encoding failure above)
      // instead of re-wrapping it as a generic 'Response error'.
      rethrow;
    } catch (e) {
      // Handle request and response errors
      throw OpenAIClientException(
        uri: uri,
        method: method,
        message: 'Response error',
        body: e,
      );
    }

    // Any 2xx status code counts as success.
    if ((response.statusCode ~/ 100) == 2) {
      return response;
    }

    // Handle unsuccessful response
    throw OpenAIClientException(
      uri: uri,
      method: method,
      message: 'Unsuccessful response',
      code: response.statusCode,
      body: response.body,
    );
  }

  // ------------------------------------------
  // METHOD: createChatCompletion
  // ------------------------------------------

  /// Creates a model response for the given chat conversation.
  ///
  /// `request`: Request object for the Create chat completion endpoint.
  ///
  /// `POST` `https://api.openai.com/v1/chat/completions`
  Future createChatCompletion({
    required CreateChatCompletionRequest request,
  }) async {
    final r = await _request(
      baseUrl: 'https://api.openai.com/v1',
      path: '/chat/completions',
      method: HttpMethod.post,
      isMultipart: false,
      requestType: 'application/json',
      responseType: 'application/json',
      body: request,
    );
    return CreateChatCompletionResponse.fromJson(json.decode(r.body));
  }

  // ------------------------------------------
  // METHOD: createCompletion
  // ------------------------------------------

  /// Creates a completion for the provided prompt and parameters.
  ///
  /// `request`: Request object for the Create completion endpoint.
+ /// + /// `POST` `https://api.openai.com/v1/completions` + Future createCompletion({ + required CreateCompletionRequest request, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/completions', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return CreateCompletionResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: createEmbedding + // ------------------------------------------ + + /// Creates an embedding vector representing the input text. + /// + /// `request`: Request object for the Create embedding endpoint. + /// + /// `POST` `https://api.openai.com/v1/embeddings` + Future createEmbedding({ + required CreateEmbeddingRequest request, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/embeddings', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return CreateEmbeddingResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: listPaginatedFineTuningJobs + // ------------------------------------------ + + /// List your organization's fine-tuning jobs. + /// + /// `after`: Identifier for the last job from the previous pagination request. + /// + /// `limit`: Number of fine-tuning jobs to retrieve. + /// + /// `GET` `https://api.openai.com/v1/fine_tuning/jobs` + Future listPaginatedFineTuningJobs({ + String? 
after, + int limit = 20, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + queryParams: { + if (after != null) 'after': after, + 'limit': limit, + }, + ); + return ListPaginatedFineTuningJobsResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: createFineTuningJob + // ------------------------------------------ + + /// Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning). + /// + /// `request`: Request object for the Create fine-tuning job endpoint. + /// + /// `POST` `https://api.openai.com/v1/fine_tuning/jobs` + Future createFineTuningJob({ + required CreateFineTuningJobRequest request, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return FineTuningJob.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: retrieveFineTuningJob + // ------------------------------------------ + + /// Get info about a fine-tuning job. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning). + /// + /// `fineTuningJobId`: The ID of the fine-tuning job. 
+ /// + /// `GET` `https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}` + Future retrieveFineTuningJob({ + required String fineTuningJobId, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs/$fineTuningJobId', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return FineTuningJob.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: listFineTuningEvents + // ------------------------------------------ + + /// Get status updates for a fine-tuning job. + /// + /// `fineTuningJobId`: The ID of the fine-tuning job to get events for. + /// + /// `after`: Identifier for the last event from the previous pagination request. + /// + /// `limit`: Number of events to retrieve. + /// + /// `GET` `https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/events` + Future listFineTuningEvents({ + required String fineTuningJobId, + String? after, + int limit = 20, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs/$fineTuningJobId/events', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + queryParams: { + if (after != null) 'after': after, + 'limit': limit, + }, + ); + return ListFineTuningJobEventsResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: cancelFineTuningJob + // ------------------------------------------ + + /// Immediately cancel a fine-tune job. + /// + /// `fineTuningJobId`: The ID of the fine-tuning job to cancel. 
+ /// + /// `POST` `https://api.openai.com/v1/fine_tuning/jobs/{fine_tuning_job_id}/cancel` + Future cancelFineTuningJob({ + required String fineTuningJobId, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/fine_tuning/jobs/$fineTuningJobId/cancel', + method: HttpMethod.post, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return FineTuningJob.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: createImage + // ------------------------------------------ + + /// Creates an image given a prompt. + /// + /// `request`: Request object for the Create image endpoint. + /// + /// `POST` `https://api.openai.com/v1/images/generations` + Future createImage({ + required CreateImageRequest request, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/images/generations', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return ImagesResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: listModels + // ------------------------------------------ + + /// Lists the currently available models, and provides basic information about each one such as the owner and availability. + /// + /// `GET` `https://api.openai.com/v1/models` + Future listModels() async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/models', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return ListModelsResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: retrieveModel + // ------------------------------------------ + + /// Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
+ /// + /// `model`: The ID of the model to use for this request + /// + /// `GET` `https://api.openai.com/v1/models/{model}` + Future retrieveModel({ + required String model, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/models/$model', + method: HttpMethod.get, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return Model.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: deleteModel + // ------------------------------------------ + + /// Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + /// + /// `model`: The model to delete + /// + /// `DELETE` `https://api.openai.com/v1/models/{model}` + Future deleteModel({ + required String model, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/models/$model', + method: HttpMethod.delete, + isMultipart: false, + requestType: '', + responseType: 'application/json', + ); + return DeleteModelResponse.fromJson(json.decode(r.body)); + } + + // ------------------------------------------ + // METHOD: createModeration + // ------------------------------------------ + + /// Classifies if text violates OpenAI's Content Policy. + /// + /// `request`: Request object for the Create moderation endpoint. 
+ /// + /// `POST` `https://api.openai.com/v1/moderations` + Future createModeration({ + required CreateModerationRequest request, + }) async { + final r = await _request( + baseUrl: 'https://api.openai.com/v1', + path: '/moderations', + method: HttpMethod.post, + isMultipart: false, + requestType: 'application/json', + responseType: 'application/json', + body: request, + ); + return CreateModerationResponse.fromJson(json.decode(r.body)); + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_finish_reason.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_finish_reason.dart new file mode 100644 index 00000000..bb90b96e --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_finish_reason.dart @@ -0,0 +1,24 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: ChatCompletionFinishReason +// ========================================== + +/// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, +/// `length` if the maximum number of tokens specified in the request was reached, +/// `content_filter` if content was omitted due to a flag from our content filters, +/// or `function_call` if the model called a function. 
enum ChatCompletionFinishReason {
  // Wire values are declared via @JsonValue; Dart names are lowerCamelCase.
  @JsonValue('stop')
  stop,
  @JsonValue('length')
  length,
  @JsonValue('function_call')
  functionCall,
  @JsonValue('content_filter')
  contentFilter,
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_function_call_option.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_function_call_option.dart
new file mode 100644
index 00000000..1e057f06
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_function_call_option.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionFunctionCallOption
// ==========================================

/// Forces the model to call the specified function.
@freezed
class ChatCompletionFunctionCallOption with _$ChatCompletionFunctionCallOption {
  const ChatCompletionFunctionCallOption._();

  /// Factory constructor for ChatCompletionFunctionCallOption
  const factory ChatCompletionFunctionCallOption({
    /// The name of the function to call.
    required String name,
  }) = _ChatCompletionFunctionCallOption;

  /// Object construction from a JSON representation
  factory ChatCompletionFunctionCallOption.fromJson(Map json) =>
      _$ChatCompletionFunctionCallOptionFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = ['name'];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'name': name,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_function_parameters.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_function_parameters.dart
new file mode 100644
index 00000000..3573e169
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_function_parameters.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// TYPE: ChatCompletionFunctionParameters
// ==========================================

/// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
///
/// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
typedef ChatCompletionFunctionParameters = Map;
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_functions.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_functions.dart
new file mode 100644
index 00000000..6e18c088
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_functions.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionFunctions
// ==========================================

/// A function that the model may call.
@freezed
class ChatCompletionFunctions with _$ChatCompletionFunctions {
  const ChatCompletionFunctions._();

  /// Factory constructor for ChatCompletionFunctions
  const factory ChatCompletionFunctions({
    /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
    required String name,

    /// A description of what the function does, used by the model to choose when and how to call the function.
    @JsonKey(includeIfNull: false) String? description,

    /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
    ///
    /// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
    required ChatCompletionFunctionParameters parameters,
  }) = _ChatCompletionFunctions;

  /// Object construction from a JSON representation
  factory ChatCompletionFunctions.fromJson(Map json) =>
      _$ChatCompletionFunctionsFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'name',
    'description',
    'parameters'
  ];

  /// Perform validations on the schema property values
  String?
validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    // Keys use the wire (snake_case) names; values are the raw Dart objects.
    return {
      'name': name,
      'description': description,
      'parameters': parameters,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart
new file mode 100644
index 00000000..54b56f96
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionMessage
// ==========================================

/// A message in a chat conversation.
@freezed
class ChatCompletionMessage with _$ChatCompletionMessage {
  const ChatCompletionMessage._();

  /// Factory constructor for ChatCompletionMessage
  const factory ChatCompletionMessage({
    /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
    required ChatCompletionMessageRole role,

    /// The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls.
    required String? content,

    /// The name and arguments of a function that should be called, as generated by the model.
    @JsonKey(name: 'function_call', includeIfNull: false)
    ChatCompletionMessageFunctionCall? functionCall,

    /// The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
    @JsonKey(includeIfNull: false) String? name,
  }) = _ChatCompletionMessage;

  /// Object construction from a JSON representation
  factory ChatCompletionMessage.fromJson(Map json) =>
      _$ChatCompletionMessageFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'role',
    'content',
    'function_call',
    'name'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'role': role,
      'content': content,
      'function_call': functionCall,
      'name': name,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_function_call.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_function_call.dart
new file mode 100644
index 00000000..e91cfb1a
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_function_call.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionMessageFunctionCall
// ==========================================

/// The name and arguments of a function that should be called, as generated by the model.
@freezed
class ChatCompletionMessageFunctionCall
    with _$ChatCompletionMessageFunctionCall {
  const ChatCompletionMessageFunctionCall._();

  /// Factory constructor for ChatCompletionMessageFunctionCall
  const factory ChatCompletionMessageFunctionCall({
    /// The name of the function to call.
    required String name,

    /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
    required String arguments,
  }) = _ChatCompletionMessageFunctionCall;

  /// Object construction from a JSON representation
  factory ChatCompletionMessageFunctionCall.fromJson(Map json) =>
      _$ChatCompletionMessageFunctionCallFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = ['name', 'arguments'];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'name': name,
      'arguments': arguments,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_message_role.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_role.dart
new file mode 100644
index 00000000..145f6392
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_message_role.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// ENUM: ChatCompletionMessageRole
// ==========================================

/// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
enum ChatCompletionMessageRole {
  // Wire values are declared via @JsonValue; Dart names match them here.
  @JsonValue('system')
  system,
  @JsonValue('user')
  user,
  @JsonValue('assistant')
  assistant,
  @JsonValue('function')
  function,
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_response_choice.dart
new file mode 100644
index 00000000..a61724f2
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_response_choice.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionResponseChoice
// ==========================================

/// A choice the model generated for the input prompt.
@freezed
class ChatCompletionResponseChoice with _$ChatCompletionResponseChoice {
  const ChatCompletionResponseChoice._();

  /// Factory constructor for ChatCompletionResponseChoice
  const factory ChatCompletionResponseChoice({
    /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
    /// `length` if the maximum number of tokens specified in the request was reached,
    /// `content_filter` if content was omitted due to a flag from our content filters,
    /// or `function_call` if the model called a function.
    @JsonKey(name: 'finish_reason')
    required ChatCompletionFinishReason finishReason,

    /// The index of the choice in the list of choices.
    required int index,

    /// A message in a chat conversation.
    required ChatCompletionMessage message,
  }) = _ChatCompletionResponseChoice;

  /// Object construction from a JSON representation
  factory ChatCompletionResponseChoice.fromJson(Map json) =>
      _$ChatCompletionResponseChoiceFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'finish_reason',
    'index',
    'message'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'finish_reason': finishReason,
      'index': index,
      'message': message,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart
new file mode 100644
index 00000000..3b79b005
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_choice.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionStreamResponseChoice
// ==========================================

/// A choice the model generated for the input prompt.
@freezed
class ChatCompletionStreamResponseChoice
    with _$ChatCompletionStreamResponseChoice {
  const ChatCompletionStreamResponseChoice._();

  /// Factory constructor for ChatCompletionStreamResponseChoice
  const factory ChatCompletionStreamResponseChoice({
    /// A chat completion delta generated by streamed model responses.
    required ChatCompletionStreamResponseDelta delta,

    /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
    /// `length` if the maximum number of tokens specified in the request was reached,
    /// `content_filter` if content was omitted due to a flag from our content filters,
    /// or `function_call` if the model called a function.
    // Unknown wire values decode to null rather than throwing, so new
    // finish reasons added by the API do not break streaming.
    @JsonKey(
      name: 'finish_reason',
      unknownEnumValue: JsonKey.nullForUndefinedEnumValue,
    )
    required ChatCompletionFinishReason? finishReason,

    /// The index of the choice in the list of choices.
    required int index,
  }) = _ChatCompletionStreamResponseChoice;

  /// Object construction from a JSON representation
  factory ChatCompletionStreamResponseChoice.fromJson(Map json) =>
      _$ChatCompletionStreamResponseChoiceFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = ['delta', 'finish_reason', 'index'];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'delta': delta,
      'finish_reason': finishReason,
      'index': index,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart
new file mode 100644
index 00000000..adad615d
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/chat_completion_stream_response_delta.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: ChatCompletionStreamResponseDelta
// ==========================================

/// A chat completion delta generated by streamed model responses.
@freezed
class ChatCompletionStreamResponseDelta
    with _$ChatCompletionStreamResponseDelta {
  const ChatCompletionStreamResponseDelta._();

  /// Factory constructor for ChatCompletionStreamResponseDelta
  const factory ChatCompletionStreamResponseDelta({
    /// The contents of the chunk message.
    @JsonKey(includeIfNull: false) String? content,

    /// The name and arguments of a function that should be called, as generated by the model.
    @JsonKey(name: 'function_call', includeIfNull: false)
    ChatCompletionMessageFunctionCall? functionCall,

    /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
    // Unknown wire values decode to null rather than throwing, so new roles
    // added by the API do not break streaming.
    @JsonKey(
      includeIfNull: false,
      unknownEnumValue: JsonKey.nullForUndefinedEnumValue,
    )
    ChatCompletionMessageRole? role,
  }) = _ChatCompletionStreamResponseDelta;

  /// Object construction from a JSON representation
  factory ChatCompletionStreamResponseDelta.fromJson(Map json) =>
      _$ChatCompletionStreamResponseDeltaFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'content',
    'function_call',
    'role'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'content': content,
      'function_call': functionCall,
      'role': role,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_choice.dart b/packages/openai_dart/lib/src/generated/schema/completion_choice.dart
new file mode 100644
index 00000000..2d87c8c6
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/completion_choice.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: CompletionChoice
// ==========================================

/// A choice the model generated for the input prompt.
@freezed
class CompletionChoice with _$CompletionChoice {
  const CompletionChoice._();

  /// Factory constructor for CompletionChoice
  const factory CompletionChoice({
    /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
    /// `length` if the maximum number of tokens specified in the request was reached,
    /// or `content_filter` if content was omitted due to a flag from our content filters.
    // Unknown wire values decode to null rather than throwing.
    @JsonKey(
      name: 'finish_reason',
      unknownEnumValue: JsonKey.nullForUndefinedEnumValue,
    )
    required CompletionFinishReason? finishReason,

    /// The index of the choice in the list of generated choices.
    required int index,

    /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
    required CompletionLogprobs? logprobs,

    /// The text of the completion.
    required String text,
  }) = _CompletionChoice;

  /// Object construction from a JSON representation
  factory CompletionChoice.fromJson(Map json) =>
      _$CompletionChoiceFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'finish_reason',
    'index',
    'logprobs',
    'text'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'finish_reason': finishReason,
      'index': index,
      'logprobs': logprobs,
      'text': text,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_finish_reason.dart b/packages/openai_dart/lib/src/generated/schema/completion_finish_reason.dart
new file mode 100644
index 00000000..e72a97f4
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/completion_finish_reason.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// ENUM: CompletionFinishReason
// ==========================================

/// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
/// `length` if the maximum number of tokens specified in the request was reached,
/// or `content_filter` if content was omitted due to a flag from our content filters.
enum CompletionFinishReason {
  // Wire values are declared via @JsonValue; Dart names are lowerCamelCase.
  @JsonValue('stop')
  stop,
  @JsonValue('length')
  length,
  @JsonValue('content_filter')
  contentFilter,
}
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_logprobs.dart b/packages/openai_dart/lib/src/generated/schema/completion_logprobs.dart
new file mode 100644
index 00000000..f2a3fdd0
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/completion_logprobs.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: CompletionLogprobs
// ==========================================

/// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
@freezed
class CompletionLogprobs with _$CompletionLogprobs {
  const CompletionLogprobs._();

  /// Factory constructor for CompletionLogprobs
  const factory CompletionLogprobs({
    /// The offset of the token from the beginning of the prompt.
    @JsonKey(name: 'text_offset', includeIfNull: false) List? textOffset,

    /// The log probabilities of tokens in the completion.
    @JsonKey(name: 'token_logprobs', includeIfNull: false)
    List? tokenLogprobs,

    /// The tokens generated by the model converted back to text.
    @JsonKey(includeIfNull: false) List? tokens,

    /// The log probabilities of the `logprobs` most likely tokens.
    // FIX: the extracted source read `List?>?`, which does not parse;
    // reconstructed as a nullable list of per-token maps (token -> logprob),
    // matching the OpenAI completions `top_logprobs` response field.
    @JsonKey(name: 'top_logprobs', includeIfNull: false)
    List<Map<String, double>?>? topLogprobs,
  }) = _CompletionLogprobs;

  /// Object construction from a JSON representation
  factory CompletionLogprobs.fromJson(Map json) =>
      _$CompletionLogprobsFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'text_offset',
    'token_logprobs',
    'tokens',
    'top_logprobs'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'text_offset': textOffset,
      'token_logprobs': tokenLogprobs,
      'tokens': tokens,
      'top_logprobs': topLogprobs,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/completion_usage.dart b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart
new file mode 100644
index 00000000..17826175
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/completion_usage.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: CompletionUsage
// ==========================================

/// Usage statistics for the completion request.
@freezed
class CompletionUsage with _$CompletionUsage {
  const CompletionUsage._();

  /// Factory constructor for CompletionUsage
  const factory CompletionUsage({
    /// Number of tokens in the generated completion.
    @JsonKey(name: 'completion_tokens') required int? completionTokens,

    /// Number of tokens in the prompt.
    @JsonKey(name: 'prompt_tokens') required int promptTokens,

    /// Total number of tokens used in the request (prompt + completion).
    @JsonKey(name: 'total_tokens') required int totalTokens,
  }) = _CompletionUsage;

  /// Object construction from a JSON representation
  factory CompletionUsage.fromJson(Map json) =>
      _$CompletionUsageFromJson(json);

  /// List of all property names of schema
  static const List propertyNames = [
    'completion_tokens',
    'prompt_tokens',
    'total_tokens'
  ];

  /// Perform validations on the schema property values
  String? validateSchema() {
    // No constraints beyond the type system for this schema; always valid.
    return null;
  }

  /// Map representation of object (not serialized)
  Map toMap() {
    return {
      'completion_tokens': completionTokens,
      'prompt_tokens': promptTokens,
      'total_tokens': totalTokens,
    };
  }
}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart
new file mode 100644
index 00000000..60bb293d
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_request.dart
// coverage:ignore-file
// GENERATED CODE - DO NOT MODIFY BY HAND
// ignore_for_file: type=lint
// ignore_for_file: invalid_annotation_target
part of open_a_i_schema;

// ==========================================
// CLASS: CreateChatCompletionRequest
// ==========================================

/// Request object for the Create chat completion endpoint.
@freezed
class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
  const CreateChatCompletionRequest._();

  /// Factory constructor for CreateChatCompletionRequest
  const factory CreateChatCompletionRequest({
    /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.
    @_ChatCompletionModelConverter() required ChatCompletionModel model,

    /// A list of messages comprising the conversation so far.
[Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + required List messages, + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + @Default(0.0) + double? frequencyPenalty, + + /// Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + @JsonKey(name: 'function_call', includeIfNull: false) dynamic functionCall, + + /// A list of functions the model may generate JSON inputs for. + @JsonKey(includeIfNull: false) List? functions, + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? logitBias, + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion. 
+ /// + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + + /// How many chat completion choices to generate for each input message. + @JsonKey(includeIfNull: false) @Default(1) int? n, + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + @Default(0.0) + double? presencePenalty, + + /// Up to 4 sequences where the API will stop generating further tokens. + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + ChatCompletionStop? stop, + + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) @Default(false) bool? stream, + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) String? user, + }) = _CreateChatCompletionRequest; + + /// Object construction from a JSON representation + factory CreateChatCompletionRequest.fromJson(Map json) => + _$CreateChatCompletionRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'messages', + 'frequency_penalty', + 'function_call', + 'functions', + 'logit_bias', + 'max_tokens', + 'n', + 'presence_penalty', + 'stop', + 'stream', + 'temperature', + 'top_p', + 'user' + ]; + + /// Validation constants + static const frequencyPenaltyDefaultValue = 0.0; + static const frequencyPenaltyMinValue = -2.0; + static const frequencyPenaltyMaxValue = 2.0; + static const nDefaultValue = 1; + static const nMinValue = 1; + static const nMaxValue = 128; + static const presencePenaltyDefaultValue = 0.0; + static const presencePenaltyMinValue = -2.0; + static const presencePenaltyMaxValue = 2.0; + static const temperatureDefaultValue = 1.0; + static const temperatureMinValue = 0.0; + static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (frequencyPenalty != null && + frequencyPenalty! < frequencyPenaltyMinValue) { + return "The value of 'frequencyPenalty' cannot be < $frequencyPenaltyMinValue"; + } + if (frequencyPenalty != null && + frequencyPenalty! 
> frequencyPenaltyMaxValue) { + return "The value of 'frequencyPenalty' cannot be > $frequencyPenaltyMaxValue"; + } + if (n != null && n! < nMinValue) { + return "The value of 'n' cannot be < $nMinValue"; + } + if (n != null && n! > nMaxValue) { + return "The value of 'n' cannot be > $nMaxValue"; + } + if (presencePenalty != null && presencePenalty! < presencePenaltyMinValue) { + return "The value of 'presencePenalty' cannot be < $presencePenaltyMinValue"; + } + if (presencePenalty != null && presencePenalty! > presencePenaltyMaxValue) { + return "The value of 'presencePenalty' cannot be > $presencePenaltyMaxValue"; + } + if (temperature != null && temperature! < temperatureMinValue) { + return "The value of 'temperature' cannot be < $temperatureMinValue"; + } + if (temperature != null && temperature! > temperatureMaxValue) { + return "The value of 'temperature' cannot be > $temperatureMaxValue"; + } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! 
> topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'messages': messages, + 'frequency_penalty': frequencyPenalty, + 'function_call': functionCall, + 'functions': functions, + 'logit_bias': logitBias, + 'max_tokens': maxTokens, + 'n': n, + 'presence_penalty': presencePenalty, + 'stop': stop, + 'stream': stream, + 'temperature': temperature, + 'top_p': topP, + 'user': user, + }; + } +} + +// ========================================== +// ENUM: ChatCompletionModels +// ========================================== + +/// No Description +enum ChatCompletionModels { + @JsonValue('gpt-4') + gpt4, + @JsonValue('gpt-4-0314') + gpt40314, + @JsonValue('gpt-4-0613') + gpt40613, + @JsonValue('gpt-4-32k') + gpt432k, + @JsonValue('gpt-4-32k-0314') + gpt432k0314, + @JsonValue('gpt-4-32k-0613') + gpt432k0613, + @JsonValue('gpt-3.5-turbo') + gpt35Turbo, + @JsonValue('gpt-3.5-turbo-16k') + gpt35Turbo16k, + @JsonValue('gpt-3.5-turbo-0301') + gpt35Turbo0301, + @JsonValue('gpt-3.5-turbo-0613') + gpt35Turbo0613, + @JsonValue('gpt-3.5-turbo-16k-0613') + gpt35Turbo16k0613, +} + +// ========================================== +// CLASS: ChatCompletionModel +// ========================================== + +/// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
+@freezed +sealed class ChatCompletionModel with _$ChatCompletionModel { + const ChatCompletionModel._(); + + const factory ChatCompletionModel.string( + String value, + ) = _UnionChatCompletionModelString; + + const factory ChatCompletionModel.enumeration( + ChatCompletionModels value, + ) = _UnionChatCompletionModelEnum; + + /// Object construction from a JSON representation + factory ChatCompletionModel.fromJson(Map json) => + _$ChatCompletionModelFromJson(json); +} + +/// Custom JSON converter for [ChatCompletionModel] +class _ChatCompletionModelConverter + implements JsonConverter { + const _ChatCompletionModelConverter(); + + @override + ChatCompletionModel fromJson(Object? data) { + if (data is String && _$ChatCompletionModelsEnumMap.values.contains(data)) { + return ChatCompletionModel.enumeration( + _$ChatCompletionModelsEnumMap.keys.elementAt( + _$ChatCompletionModelsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is String) { + return ChatCompletionModel.string(data); + } + throw Exception('Unexpected value for ChatCompletionModel: $data'); + } + + @override + Object? toJson(ChatCompletionModel data) { + return switch (data) { + _UnionChatCompletionModelString(value: final v) => v, + _UnionChatCompletionModelEnum(value: final v) => + _$ChatCompletionModelsEnumMap[v]!, + }; + } +} +// ========================================== +// CLASS: ChatCompletionStop +// ========================================== + +/// Up to 4 sequences where the API will stop generating further tokens. 
+@freezed +sealed class ChatCompletionStop with _$ChatCompletionStop { + const ChatCompletionStop._(); + + const factory ChatCompletionStop.string( + String value, + ) = _UnionChatCompletionStopString; + + const factory ChatCompletionStop.arrayString( + List value, + ) = _UnionChatCompletionStopArrayString; + + /// Object construction from a JSON representation + factory ChatCompletionStop.fromJson(Map json) => + _$ChatCompletionStopFromJson(json); +} + +/// Custom JSON converter for [ChatCompletionStop] +class _ChatCompletionStopConverter + implements JsonConverter { + const _ChatCompletionStopConverter(); + + @override + ChatCompletionStop fromJson(Object? data) { + if (data is String) { + return ChatCompletionStop.string(data); + } + if (data is List) { + return ChatCompletionStop.arrayString(data); + } + throw Exception('Unexpected value for ChatCompletionStop: $data'); + } + + @override + Object? toJson(ChatCompletionStop data) { + return switch (data) { + _UnionChatCompletionStopString(value: final v) => v, + _UnionChatCompletionStopArrayString(value: final v) => v, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart new file mode 100644 index 00000000..761a884e --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_response.dart @@ -0,0 +1,67 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: CreateChatCompletionResponse +// ========================================== + +/// Represents a chat completion response returned by model, based on the provided input. 
+@freezed +class CreateChatCompletionResponse with _$CreateChatCompletionResponse { + const CreateChatCompletionResponse._(); + + /// Factory constructor for CreateChatCompletionResponse + const factory CreateChatCompletionResponse({ + /// A unique identifier for the chat completion. + required String id, + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + required List choices, + + /// The Unix timestamp (in seconds) of when the chat completion was created. + required int created, + + /// The model used for the chat completion. + required String model, + + /// The object type, which is always `chat.completion`. + required String object, + + /// Usage statistics for the completion request. + @JsonKey(includeIfNull: false) CompletionUsage? usage, + }) = _CreateChatCompletionResponse; + + /// Object construction from a JSON representation + factory CreateChatCompletionResponse.fromJson(Map json) => + _$CreateChatCompletionResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'choices', + 'created', + 'model', + 'object', + 'usage' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'choices': choices, + 'created': created, + 'model': model, + 'object': object, + 'usage': usage, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart new file mode 100644 index 00000000..587715ec --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/create_chat_completion_stream_response.dart @@ -0,0 +1,64 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: CreateChatCompletionStreamResponse +// ========================================== + +/// Represents a streamed chunk of a chat completion response returned by model, based on the provided input. +@freezed +class CreateChatCompletionStreamResponse + with _$CreateChatCompletionStreamResponse { + const CreateChatCompletionStreamResponse._(); + + /// Factory constructor for CreateChatCompletionStreamResponse + const factory CreateChatCompletionStreamResponse({ + /// A unique identifier for the chat completion. Each chunk has the same ID. + required String id, + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + required List choices, + + /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + required int created, + + /// The model to generate the completion. + required String model, + + /// The object type, which is always `chat.completion.chunk`. 
+ required String object, + }) = _CreateChatCompletionStreamResponse; + + /// Object construction from a JSON representation + factory CreateChatCompletionStreamResponse.fromJson( + Map json) => + _$CreateChatCompletionStreamResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'choices', + 'created', + 'model', + 'object' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'choices': choices, + 'created': created, + 'model': model, + 'object': object, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart new file mode 100644 index 00000000..ae545379 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/create_completion_request.dart @@ -0,0 +1,418 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: CreateCompletionRequest +// ========================================== + +/// Request object for the Create completion endpoint. +@freezed +class CreateCompletionRequest with _$CreateCompletionRequest { + const CreateCompletionRequest._(); + + /// Factory constructor for CreateCompletionRequest + const factory CreateCompletionRequest({ + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @_CompletionModelConverter() required CompletionModel model, + + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
+ /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @_CompletionPromptConverter() required CompletionPrompt? prompt, + + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(name: 'best_of', includeIfNull: false) int? bestOf, + + /// Echo back the prompt in addition to the completion + @JsonKey(includeIfNull: false) @Default(false) bool? echo, + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + @Default(0.0) + double? frequencyPenalty, + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? logitBias, + + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + /// + /// The maximum value for `logprobs` is 5. + @JsonKey(includeIfNull: false) int? logprobs, + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) + @Default(16) + int? maxTokens, + + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(includeIfNull: false) @Default(1) int? n, + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + @Default(0.0) + double? 
presencePenalty, + + /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + CompletionStop? stop, + + /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) @Default(false) bool? stream, + + /// The suffix that comes after a completion of inserted text. + @JsonKey(includeIfNull: false) String? suffix, + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) @Default(1.0) double? temperature, + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) @Default(1.0) double? topP, + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) String? 
user, + }) = _CreateCompletionRequest; + + /// Object construction from a JSON representation + factory CreateCompletionRequest.fromJson(Map json) => + _$CreateCompletionRequestFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'model', + 'prompt', + 'best_of', + 'echo', + 'frequency_penalty', + 'logit_bias', + 'logprobs', + 'max_tokens', + 'n', + 'presence_penalty', + 'stop', + 'stream', + 'suffix', + 'temperature', + 'top_p', + 'user' + ]; + + /// Validation constants + static const bestOfMinValue = 0; + static const bestOfMaxValue = 20; + static const frequencyPenaltyDefaultValue = 0.0; + static const frequencyPenaltyMinValue = -2.0; + static const frequencyPenaltyMaxValue = 2.0; + static const logprobsMinValue = 0; + static const logprobsMaxValue = 5; + static const maxTokensDefaultValue = 16; + static const maxTokensMinValue = 0; + static const nDefaultValue = 1; + static const nMinValue = 1; + static const nMaxValue = 128; + static const presencePenaltyDefaultValue = 0.0; + static const presencePenaltyMinValue = -2.0; + static const presencePenaltyMaxValue = 2.0; + static const temperatureDefaultValue = 1.0; + static const temperatureMinValue = 0.0; + static const temperatureMaxValue = 2.0; + static const topPDefaultValue = 1.0; + static const topPMinValue = 0.0; + static const topPMaxValue = 1.0; + + /// Perform validations on the schema property values + String? validateSchema() { + if (bestOf != null && bestOf! < bestOfMinValue) { + return "The value of 'bestOf' cannot be < $bestOfMinValue"; + } + if (bestOf != null && bestOf! > bestOfMaxValue) { + return "The value of 'bestOf' cannot be > $bestOfMaxValue"; + } + if (frequencyPenalty != null && + frequencyPenalty! < frequencyPenaltyMinValue) { + return "The value of 'frequencyPenalty' cannot be < $frequencyPenaltyMinValue"; + } + if (frequencyPenalty != null && + frequencyPenalty! 
> frequencyPenaltyMaxValue) { + return "The value of 'frequencyPenalty' cannot be > $frequencyPenaltyMaxValue"; + } + if (logprobs != null && logprobs! < logprobsMinValue) { + return "The value of 'logprobs' cannot be < $logprobsMinValue"; + } + if (logprobs != null && logprobs! > logprobsMaxValue) { + return "The value of 'logprobs' cannot be > $logprobsMaxValue"; + } + if (maxTokens != null && maxTokens! < maxTokensMinValue) { + return "The value of 'maxTokens' cannot be < $maxTokensMinValue"; + } + if (n != null && n! < nMinValue) { + return "The value of 'n' cannot be < $nMinValue"; + } + if (n != null && n! > nMaxValue) { + return "The value of 'n' cannot be > $nMaxValue"; + } + if (presencePenalty != null && presencePenalty! < presencePenaltyMinValue) { + return "The value of 'presencePenalty' cannot be < $presencePenaltyMinValue"; + } + if (presencePenalty != null && presencePenalty! > presencePenaltyMaxValue) { + return "The value of 'presencePenalty' cannot be > $presencePenaltyMaxValue"; + } + if (temperature != null && temperature! < temperatureMinValue) { + return "The value of 'temperature' cannot be < $temperatureMinValue"; + } + if (temperature != null && temperature! > temperatureMaxValue) { + return "The value of 'temperature' cannot be > $temperatureMaxValue"; + } + if (topP != null && topP! < topPMinValue) { + return "The value of 'topP' cannot be < $topPMinValue"; + } + if (topP != null && topP! 
> topPMaxValue) { + return "The value of 'topP' cannot be > $topPMaxValue"; + } + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'model': model, + 'prompt': prompt, + 'best_of': bestOf, + 'echo': echo, + 'frequency_penalty': frequencyPenalty, + 'logit_bias': logitBias, + 'logprobs': logprobs, + 'max_tokens': maxTokens, + 'n': n, + 'presence_penalty': presencePenalty, + 'stop': stop, + 'stream': stream, + 'suffix': suffix, + 'temperature': temperature, + 'top_p': topP, + 'user': user, + }; + } +} + +// ========================================== +// ENUM: CompletionModels +// ========================================== + +/// Available completion models. Mind that the list may not be exhaustive nor up-to-date. +enum CompletionModels { + @JsonValue('babbage-002') + babbage002, + @JsonValue('davinci-002') + davinci002, + @JsonValue('gpt-3.5-turbo-instruct') + gpt35TurboInstruct, + @JsonValue('text-davinci-003') + textDavinci003, + @JsonValue('text-davinci-002') + textDavinci002, + @JsonValue('text-davinci-001') + textDavinci001, + @JsonValue('code-davinci-002') + codeDavinci002, + @JsonValue('text-curie-001') + textCurie001, + @JsonValue('text-babbage-001') + textBabbage001, + @JsonValue('text-ada-001') + textAda001, +} + +// ========================================== +// CLASS: CompletionModel +// ========================================== + +/// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. 
+@freezed +sealed class CompletionModel with _$CompletionModel { + const CompletionModel._(); + + const factory CompletionModel.string( + String value, + ) = _UnionCompletionModelString; + + const factory CompletionModel.enumeration( + CompletionModels value, + ) = _UnionCompletionModelEnum; + + /// Object construction from a JSON representation + factory CompletionModel.fromJson(Map json) => + _$CompletionModelFromJson(json); +} + +/// Custom JSON converter for [CompletionModel] +class _CompletionModelConverter + implements JsonConverter { + const _CompletionModelConverter(); + + @override + CompletionModel fromJson(Object? data) { + if (data is String && _$CompletionModelsEnumMap.values.contains(data)) { + return CompletionModel.enumeration( + _$CompletionModelsEnumMap.keys.elementAt( + _$CompletionModelsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is String) { + return CompletionModel.string(data); + } + throw Exception('Unexpected value for CompletionModel: $data'); + } + + @override + Object? toJson(CompletionModel data) { + return switch (data) { + _UnionCompletionModelString(value: final v) => v, + _UnionCompletionModelEnum(value: final v) => + _$CompletionModelsEnumMap[v]!, + }; + } +} +// ========================================== +// CLASS: CompletionPrompt +// ========================================== + +/// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. +/// +/// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
+@freezed
+sealed class CompletionPrompt with _$CompletionPrompt {
+  const CompletionPrompt._();
+
+  const factory CompletionPrompt.string(
+    String value,
+  ) = _UnionCompletionPromptString;
+
+  const factory CompletionPrompt.arrayString(
+    List<String> value,
+  ) = _UnionCompletionPromptArrayString;
+
+  const factory CompletionPrompt.arrayInteger(
+    List<int> value,
+  ) = _UnionCompletionPromptArrayInteger;
+
+  const factory CompletionPrompt.array(
+    List<List<int>> value,
+  ) = _UnionCompletionPromptArray;
+
+  /// Object construction from a JSON representation
+  factory CompletionPrompt.fromJson(Map<String, dynamic> json) =>
+      _$CompletionPromptFromJson(json);
+}
+
+/// Custom JSON converter for [CompletionPrompt]
+class _CompletionPromptConverter
+    implements JsonConverter<CompletionPrompt?, Object?> {
+  const _CompletionPromptConverter();
+
+  @override
+  CompletionPrompt? fromJson(Object? data) {
+    if (data == null) {
+      return null;
+    }
+    if (data is String) {
+      return CompletionPrompt.string(data);
+    }
+    if (data is List<String>) {
+      return CompletionPrompt.arrayString(data);
+    }
+    if (data is List<int>) {
+      return CompletionPrompt.arrayInteger(data);
+    }
+    if (data is List<List<int>>) {
+      return CompletionPrompt.array(data);
+    }
+    return CompletionPrompt.string('<|endoftext|>');
+  }
+
+  @override
+  Object? toJson(CompletionPrompt? data) {
+    return switch (data) {
+      _UnionCompletionPromptString(value: final v) => v,
+      _UnionCompletionPromptArrayString(value: final v) => v,
+      _UnionCompletionPromptArrayInteger(value: final v) => v,
+      _UnionCompletionPromptArray(value: final v) => v,
+      null => null,
+    };
+  }
+}
+// ==========================================
+// CLASS: CompletionStop
+// ==========================================
+
+/// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+@freezed
+sealed class CompletionStop with _$CompletionStop {
+  const CompletionStop._();
+
+  const factory CompletionStop.string(
+    String value,
+  ) = _UnionCompletionStopString;
+
+  const factory CompletionStop.arrayString(
+    List<String> value,
+  ) = _UnionCompletionStopArrayString;
+
+  /// Object construction from a JSON representation
+  factory CompletionStop.fromJson(Map<String, dynamic> json) =>
+      _$CompletionStopFromJson(json);
+}
+
+/// Custom JSON converter for [CompletionStop]
+class _CompletionStopConverter
+    implements JsonConverter<CompletionStop?, Object?> {
+  const _CompletionStopConverter();
+
+  @override
+  CompletionStop? fromJson(Object? data) {
+    if (data == null) {
+      return null;
+    }
+    if (data is String) {
+      return CompletionStop.string(data);
+    }
+    if (data is List<String>) {
+      return CompletionStop.arrayString(data);
+    }
+    throw Exception('Unexpected value for CompletionStop: $data');
+  }
+
+  @override
+  Object? toJson(CompletionStop? data) {
+    return switch (data) {
+      _UnionCompletionStopString(value: final v) => v,
+      _UnionCompletionStopArrayString(value: final v) => v,
+      null => null,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_completion_response.dart b/packages/openai_dart/lib/src/generated/schema/create_completion_response.dart
new file mode 100644
index 00000000..7ce076a1
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_completion_response.dart
@@ -0,0 +1,67 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateCompletionResponse
+// ==========================================
+
+/// Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint).
+@freezed
+class CreateCompletionResponse with _$CreateCompletionResponse {
+  const CreateCompletionResponse._();
+
+  /// Factory constructor for CreateCompletionResponse
+  const factory CreateCompletionResponse({
+    /// A unique identifier for the completion.
+    required String id,
+
+    /// The list of completion choices the model generated for the input prompt.
+    required List<CompletionChoice> choices,
+
+    /// The Unix timestamp (in seconds) of when the completion was created.
+    required int created,
+
+    /// The model used for completion.
+    required String model,
+
+    /// The object type, which is always "text_completion"
+    required String object,
+
+    /// Usage statistics for the completion request.
+    @JsonKey(includeIfNull: false) CompletionUsage? usage,
+  }) = _CreateCompletionResponse;
+
+  /// Object construction from a JSON representation
+  factory CreateCompletionResponse.fromJson(Map<String, dynamic> json) =>
+      _$CreateCompletionResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'id',
+    'choices',
+    'created',
+    'model',
+    'object',
+    'usage'
+  ];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'id': id,
+      'choices': choices,
+      'created': created,
+      'model': model,
+      'object': object,
+      'usage': usage,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart
new file mode 100644
index 00000000..1cd86bc5
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_request.dart
@@ -0,0 +1,193 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateEmbeddingRequest
+// ==========================================
+
+/// Request object for the Create embedding endpoint.
+@freezed
+class CreateEmbeddingRequest with _$CreateEmbeddingRequest {
+  const CreateEmbeddingRequest._();
+
+  /// Factory constructor for CreateEmbeddingRequest
+  const factory CreateEmbeddingRequest({
+    /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+    @_EmbeddingModelConverter() required EmbeddingModel model,
+
+    /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
+    @_EmbeddingInputConverter() required EmbeddingInput input,
+
+    /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
+    @JsonKey(name: 'encoding_format')
+    @Default(EmbeddingEncodingFormat.float)
+    EmbeddingEncodingFormat encodingFormat,
+
+    /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+    @JsonKey(includeIfNull: false) String? user,
+  }) = _CreateEmbeddingRequest;
+
+  /// Object construction from a JSON representation
+  factory CreateEmbeddingRequest.fromJson(Map<String, dynamic> json) =>
+      _$CreateEmbeddingRequestFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'model',
+    'input',
+    'encoding_format',
+    'user'
+  ];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'model': model,
+      'input': input,
+      'encoding_format': encodingFormat,
+      'user': user,
+    };
+  }
+}
+
+// ==========================================
+// ENUM: EmbeddingModels
+// ==========================================
+
+/// No Description
+enum EmbeddingModels {
+  @JsonValue('text-embedding-ada-002')
+  textEmbeddingAda002,
+}
+
+// ==========================================
+// CLASS: EmbeddingModel
+// ==========================================
+
+/// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+@freezed
+sealed class EmbeddingModel with _$EmbeddingModel {
+  const EmbeddingModel._();
+
+  const factory EmbeddingModel.string(
+    String value,
+  ) = _UnionEmbeddingModelString;
+
+  const factory EmbeddingModel.enumeration(
+    EmbeddingModels value,
+  ) = _UnionEmbeddingModelEnum;
+
+  /// Object construction from a JSON representation
+  factory EmbeddingModel.fromJson(Map<String, dynamic> json) =>
+      _$EmbeddingModelFromJson(json);
+}
+
+/// Custom JSON converter for [EmbeddingModel]
+class _EmbeddingModelConverter
+    implements JsonConverter<EmbeddingModel, Object?> {
+  const _EmbeddingModelConverter();
+
+  @override
+  EmbeddingModel fromJson(Object? data) {
+    if (data is String && _$EmbeddingModelsEnumMap.values.contains(data)) {
+      return EmbeddingModel.enumeration(
+        _$EmbeddingModelsEnumMap.keys.elementAt(
+          _$EmbeddingModelsEnumMap.values.toList().indexOf(data),
+        ),
+      );
+    }
+    if (data is String) {
+      return EmbeddingModel.string(data);
+    }
+    throw Exception('Unexpected value for EmbeddingModel: $data');
+  }
+
+  @override
+  Object? toJson(EmbeddingModel data) {
+    return switch (data) {
+      _UnionEmbeddingModelString(value: final v) => v,
+      _UnionEmbeddingModelEnum(value: final v) => _$EmbeddingModelsEnumMap[v]!,
+    };
+  }
+}
+// ==========================================
+// CLASS: EmbeddingInput
+// ==========================================
+
+/// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
+@freezed
+sealed class EmbeddingInput with _$EmbeddingInput {
+  const EmbeddingInput._();
+
+  const factory EmbeddingInput.string(
+    String value,
+  ) = _UnionEmbeddingInputString;
+
+  const factory EmbeddingInput.arrayString(
+    List<String> value,
+  ) = _UnionEmbeddingInputArrayString;
+
+  const factory EmbeddingInput.arrayInteger(
+    List<int> value,
+  ) = _UnionEmbeddingInputArrayInteger;
+
+  const factory EmbeddingInput.array(
+    List<List<int>> value,
+  ) = _UnionEmbeddingInputArray;
+
+  /// Object construction from a JSON representation
+  factory EmbeddingInput.fromJson(Map<String, dynamic> json) =>
+      _$EmbeddingInputFromJson(json);
+}
+
+/// Custom JSON converter for [EmbeddingInput]
+class _EmbeddingInputConverter
+    implements JsonConverter<EmbeddingInput, Object?> {
+  const _EmbeddingInputConverter();
+
+  @override
+  EmbeddingInput fromJson(Object? data) {
+    if (data is String) {
+      return EmbeddingInput.string(data);
+    }
+    if (data is List<String>) {
+      return EmbeddingInput.arrayString(data);
+    }
+    if (data is List<int>) {
+      return EmbeddingInput.arrayInteger(data);
+    }
+    if (data is List<List<int>>) {
+      return EmbeddingInput.array(data);
+    }
+    throw Exception('Unexpected value for EmbeddingInput: $data');
+  }
+
+  @override
+  Object? toJson(EmbeddingInput data) {
+    return switch (data) {
+      _UnionEmbeddingInputString(value: final v) => v,
+      _UnionEmbeddingInputArrayString(value: final v) => v,
+      _UnionEmbeddingInputArrayInteger(value: final v) => v,
+      _UnionEmbeddingInputArray(value: final v) => v,
+    };
+  }
+}
+// ==========================================
+// ENUM: EmbeddingEncodingFormat
+// ==========================================
+
+/// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).
+enum EmbeddingEncodingFormat {
+  @JsonValue('float')
+  float,
+  @JsonValue('base64')
+  base64,
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_embedding_response.dart b/packages/openai_dart/lib/src/generated/schema/create_embedding_response.dart
new file mode 100644
index 00000000..dd3a5d0d
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_embedding_response.dart
@@ -0,0 +1,57 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateEmbeddingResponse
+// ==========================================
+
+/// Represents an embedding vector returned by embedding endpoint.
+@freezed
+class CreateEmbeddingResponse with _$CreateEmbeddingResponse {
+  const CreateEmbeddingResponse._();
+
+  /// Factory constructor for CreateEmbeddingResponse
+  const factory CreateEmbeddingResponse({
+    /// The list of embeddings generated by the model.
+    required List<Embedding> data,
+
+    /// The name of the model used to generate the embedding.
+    required String model,
+
+    /// The object type, which is always "list".
+    required String object,
+
+    /// The usage information for the request.
+    required EmbeddingUsage usage,
+  }) = _CreateEmbeddingResponse;
+
+  /// Object construction from a JSON representation
+  factory CreateEmbeddingResponse.fromJson(Map<String, dynamic> json) =>
+      _$CreateEmbeddingResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'data',
+    'model',
+    'object',
+    'usage'
+  ];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'data': data,
+      'model': model,
+      'object': object,
+      'usage': usage,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart
new file mode 100644
index 00000000..c9e628f7
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_fine_tuning_job_request.dart
@@ -0,0 +1,159 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateFineTuningJobRequest
+// ==========================================
+
+/// Request object for the Create fine-tuning job endpoint.
+@freezed
+class CreateFineTuningJobRequest with _$CreateFineTuningJobRequest {
+  const CreateFineTuningJobRequest._();
+
+  /// Factory constructor for CreateFineTuningJobRequest
+  const factory CreateFineTuningJobRequest({
+    /// The name of the model to fine-tune. You can select one of the
+    /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+    @_FineTuningModelConverter() required FineTuningModel model,
+
+    /// The ID of an uploaded file that contains training data.
+    ///
+    /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file.
+    ///
+    /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.
+    ///
+    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+    @JsonKey(name: 'training_file') required String trainingFile,
+
+    /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+    @JsonKey(includeIfNull: false)
+    FineTuningJobHyperparameters? hyperparameters,
+
+    /// A string of up to 18 characters that will be added to your fine-tuned model name.
+    ///
+    /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.
+    @JsonKey(includeIfNull: false) String? suffix,
+
+    /// The ID of an uploaded file that contains validation data.
+    ///
+    /// If you provide this file, the data is used to generate validation
+    /// metrics periodically during fine-tuning. These metrics can be viewed in
+    /// the fine-tuning results file.
+    /// The same data should not be present in both train and validation files.
+    ///
+    /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.
+    ///
+    /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+    @JsonKey(name: 'validation_file', includeIfNull: false)
+    String? validationFile,
+  }) = _CreateFineTuningJobRequest;
+
+  /// Object construction from a JSON representation
+  factory CreateFineTuningJobRequest.fromJson(Map<String, dynamic> json) =>
+      _$CreateFineTuningJobRequestFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'model',
+    'training_file',
+    'hyperparameters',
+    'suffix',
+    'validation_file'
+  ];
+
+  /// Validation constants
+  static const suffixMinLengthValue = 1;
+  static const suffixMaxLengthValue = 40;
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    if (suffix != null && suffix!.length < suffixMinLengthValue) {
+      return "The value of 'suffix' cannot be < $suffixMinLengthValue characters";
+    }
+    if (suffix != null && suffix!.length > suffixMaxLengthValue) {
+      return "The length of 'suffix' cannot be > $suffixMaxLengthValue characters";
+    }
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'model': model,
+      'training_file': trainingFile,
+      'hyperparameters': hyperparameters,
+      'suffix': suffix,
+      'validation_file': validationFile,
+    };
+  }
+}
+
+// ==========================================
+// ENUM: FineTuningModels
+// ==========================================
+
+/// No Description
+enum FineTuningModels {
+  @JsonValue('babbage-002')
+  babbage002,
+  @JsonValue('davinci-002')
+  davinci002,
+  @JsonValue('gpt-3.5-turbo')
+  gpt35Turbo,
+}
+
+// ==========================================
+// CLASS: FineTuningModel
+// ==========================================
+
+/// The name of the model to fine-tune. You can select one of the
+/// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned).
+@freezed
+sealed class FineTuningModel with _$FineTuningModel {
+  const FineTuningModel._();
+
+  const factory FineTuningModel.string(
+    String value,
+  ) = _UnionFineTuningModelString;
+
+  const factory FineTuningModel.enumeration(
+    FineTuningModels value,
+  ) = _UnionFineTuningModelEnum;
+
+  /// Object construction from a JSON representation
+  factory FineTuningModel.fromJson(Map<String, dynamic> json) =>
+      _$FineTuningModelFromJson(json);
+}
+
+/// Custom JSON converter for [FineTuningModel]
+class _FineTuningModelConverter
+    implements JsonConverter<FineTuningModel, Object?> {
+  const _FineTuningModelConverter();
+
+  @override
+  FineTuningModel fromJson(Object? data) {
+    if (data is String && _$FineTuningModelsEnumMap.values.contains(data)) {
+      return FineTuningModel.enumeration(
+        _$FineTuningModelsEnumMap.keys.elementAt(
+          _$FineTuningModelsEnumMap.values.toList().indexOf(data),
+        ),
+      );
+    }
+    if (data is String) {
+      return FineTuningModel.string(data);
+    }
+    throw Exception('Unexpected value for FineTuningModel: $data');
+  }
+
+  @override
+  Object? toJson(FineTuningModel data) {
+    return switch (data) {
+      _UnionFineTuningModelString(value: final v) => v,
+      _UnionFineTuningModelEnum(value: final v) =>
+        _$FineTuningModelsEnumMap[v]!,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_image_request.dart b/packages/openai_dart/lib/src/generated/schema/create_image_request.dart
new file mode 100644
index 00000000..d9e2c9b2
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_image_request.dart
@@ -0,0 +1,110 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateImageRequest
+// ==========================================
+
+/// Request object for the Create image endpoint.
+@freezed
+class CreateImageRequest with _$CreateImageRequest {
+  const CreateImageRequest._();
+
+  /// Factory constructor for CreateImageRequest
+  const factory CreateImageRequest({
+    /// A text description of the desired image(s). The maximum length is 1000 characters.
+    required String prompt,
+
+    /// The number of images to generate. Must be between 1 and 10.
+    @JsonKey(includeIfNull: false) @Default(1) int? n,
+
+    /// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
+    @JsonKey(
+      name: 'response_format',
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue,
+    )
+    @Default(ImageResponseFormat.url)
+    ImageResponseFormat? responseFormat,
+
+    /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
+    @JsonKey(
+      includeIfNull: false,
+      unknownEnumValue: JsonKey.nullForUndefinedEnumValue,
+    )
+    @Default(ImageSize.v1024x1024)
+    ImageSize? size,
+
+    /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+    @JsonKey(includeIfNull: false) String? user,
+  }) = _CreateImageRequest;
+
+  /// Object construction from a JSON representation
+  factory CreateImageRequest.fromJson(Map<String, dynamic> json) =>
+      _$CreateImageRequestFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = [
+    'prompt',
+    'n',
+    'response_format',
+    'size',
+    'user'
+  ];
+
+  /// Validation constants
+  static const nDefaultValue = 1;
+  static const nMinValue = 1;
+  static const nMaxValue = 10;
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    if (n != null && n! < nMinValue) {
+      return "The value of 'n' cannot be < $nMinValue";
+    }
+    if (n != null && n! > nMaxValue) {
+      return "The value of 'n' cannot be > $nMaxValue";
+    }
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'prompt': prompt,
+      'n': n,
+      'response_format': responseFormat,
+      'size': size,
+      'user': user,
+    };
+  }
+}
+
+// ==========================================
+// ENUM: ImageResponseFormat
+// ==========================================
+
+/// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
+enum ImageResponseFormat {
+  @JsonValue('url')
+  url,
+  @JsonValue('b64_json')
+  b64Json,
+}
+
+// ==========================================
+// ENUM: ImageSize
+// ==========================================
+
+/// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
+enum ImageSize {
+  @JsonValue('256x256')
+  v256x256,
+  @JsonValue('512x512')
+  v512x512,
+  @JsonValue('1024x1024')
+  v1024x1024,
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_moderation_request.dart b/packages/openai_dart/lib/src/generated/schema/create_moderation_request.dart
new file mode 100644
index 00000000..c939b1f2
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_moderation_request.dart
@@ -0,0 +1,164 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateModerationRequest
+// ==========================================
+
+/// Request object for the Create moderation endpoint.
+@freezed
+class CreateModerationRequest with _$CreateModerationRequest {
+  const CreateModerationRequest._();
+
+  /// Factory constructor for CreateModerationRequest
+  const factory CreateModerationRequest({
+    /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
+    ///
+    /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+    @_ModerationModelConverter()
+    @Default(
+      ModerationModel.string('text-moderation-latest'),
+    )
+    ModerationModel model,
+
+    /// The input text to classify
+    @_ModerationInputConverter() required ModerationInput input,
+  }) = _CreateModerationRequest;
+
+  /// Object construction from a JSON representation
+  factory CreateModerationRequest.fromJson(Map<String, dynamic> json) =>
+      _$CreateModerationRequestFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['model', 'input'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'model': model,
+      'input': input,
+    };
+  }
+}
+
+// ==========================================
+// ENUM: ModerationModels
+// ==========================================
+
+/// No Description
+enum ModerationModels {
+  @JsonValue('text-moderation-latest')
+  textModerationLatest,
+  @JsonValue('text-moderation-stable')
+  textModerationStable,
+}
+
+// ==========================================
+// CLASS: ModerationModel
+// ==========================================
+
+/// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
+///
+/// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
+@freezed
+sealed class ModerationModel with _$ModerationModel {
+  const ModerationModel._();
+
+  const factory ModerationModel.string(
+    String value,
+  ) = _UnionModerationModelString;
+
+  const factory ModerationModel.enumeration(
+    ModerationModels value,
+  ) = _UnionModerationModelEnum;
+
+  /// Object construction from a JSON representation
+  factory ModerationModel.fromJson(Map<String, dynamic> json) =>
+      _$ModerationModelFromJson(json);
+}
+
+/// Custom JSON converter for [ModerationModel]
+class _ModerationModelConverter
+    implements JsonConverter<ModerationModel, Object?> {
+  const _ModerationModelConverter();
+
+  @override
+  ModerationModel fromJson(Object? data) {
+    if (data is String && _$ModerationModelsEnumMap.values.contains(data)) {
+      return ModerationModel.enumeration(
+        _$ModerationModelsEnumMap.keys.elementAt(
+          _$ModerationModelsEnumMap.values.toList().indexOf(data),
+        ),
+      );
+    }
+    if (data is String) {
+      return ModerationModel.string(data);
+    }
+    return ModerationModel.enumeration(
+      ModerationModels.textModerationLatest,
+    );
+  }
+
+  @override
+  Object? toJson(ModerationModel data) {
+    return switch (data) {
+      _UnionModerationModelString(value: final v) => v,
+      _UnionModerationModelEnum(value: final v) =>
+        _$ModerationModelsEnumMap[v]!,
+    };
+  }
+}
+// ==========================================
+// CLASS: ModerationInput
+// ==========================================
+
+/// The input text to classify
+@freezed
+sealed class ModerationInput with _$ModerationInput {
+  const ModerationInput._();
+
+  const factory ModerationInput.string(
+    String value,
+  ) = _UnionModerationInputString;
+
+  const factory ModerationInput.arrayString(
+    List<String> value,
+  ) = _UnionModerationInputArrayString;
+
+  /// Object construction from a JSON representation
+  factory ModerationInput.fromJson(Map<String, dynamic> json) =>
+      _$ModerationInputFromJson(json);
+}
+
+/// Custom JSON converter for [ModerationInput]
+class _ModerationInputConverter
+    implements JsonConverter<ModerationInput, Object?> {
+  const _ModerationInputConverter();
+
+  @override
+  ModerationInput fromJson(Object? data) {
+    if (data is String) {
+      return ModerationInput.string(data);
+    }
+    if (data is List<String>) {
+      return ModerationInput.arrayString(data);
+    }
+    throw Exception('Unexpected value for ModerationInput: $data');
+  }
+
+  @override
+  Object? toJson(ModerationInput data) {
+    return switch (data) {
+      _UnionModerationInputString(value: final v) => v,
+      _UnionModerationInputArrayString(value: final v) => v,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart b/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart
new file mode 100644
index 00000000..eae24da3
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/create_moderation_response.dart
@@ -0,0 +1,48 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: CreateModerationResponse
+// ==========================================
+
+/// Represents policy compliance report by OpenAI's content moderation model against a given input.
+@freezed
+class CreateModerationResponse with _$CreateModerationResponse {
+  const CreateModerationResponse._();
+
+  /// Factory constructor for CreateModerationResponse
+  const factory CreateModerationResponse({
+    /// The unique identifier for the moderation request.
+    required String id,
+
+    /// The model used to generate the moderation results.
+    required String model,
+
+    /// A list of moderation objects.
+    required List<Moderation> results,
+  }) = _CreateModerationResponse;
+
+  /// Object construction from a JSON representation
+  factory CreateModerationResponse.fromJson(Map<String, dynamic> json) =>
+      _$CreateModerationResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['id', 'model', 'results'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'id': id,
+      'model': model,
+      'results': results,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/delete_model_response.dart b/packages/openai_dart/lib/src/generated/schema/delete_model_response.dart
new file mode 100644
index 00000000..9b69e5f1
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/delete_model_response.dart
@@ -0,0 +1,48 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: DeleteModelResponse
+// ==========================================
+
+/// Represents a deleted response returned by the Delete model endpoint.
+@freezed
+class DeleteModelResponse with _$DeleteModelResponse {
+  const DeleteModelResponse._();
+
+  /// Factory constructor for DeleteModelResponse
+  const factory DeleteModelResponse({
+    /// The model identifier.
+    required String id,
+
+    /// Whether the model was deleted.
+    required bool deleted,
+
+    /// The object type, which is always "model".
+    required String object,
+  }) = _DeleteModelResponse;
+
+  /// Object construction from a JSON representation
+  factory DeleteModelResponse.fromJson(Map<String, dynamic> json) =>
+      _$DeleteModelResponseFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['id', 'deleted', 'object'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'id': id,
+      'deleted': deleted,
+      'object': object,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/embedding.dart b/packages/openai_dart/lib/src/generated/schema/embedding.dart
new file mode 100644
index 00000000..16ba732f
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/embedding.dart
@@ -0,0 +1,48 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: Embedding
+// ==========================================
+
+/// Represents an embedding vector returned by embedding endpoint.
+@freezed
+class Embedding with _$Embedding {
+  const Embedding._();
+
+  /// Factory constructor for Embedding
+  const factory Embedding({
+    /// The index of the embedding in the list of embeddings.
+    required int index,
+
+    /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+    required List<double> embedding,
+
+    /// The object type, which is always "embedding".
+    required String object,
+  }) = _Embedding;
+
+  /// Object construction from a JSON representation
+  factory Embedding.fromJson(Map<String, dynamic> json) =>
+      _$EmbeddingFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['index', 'embedding', 'object'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'index': index,
+      'embedding': embedding,
+      'object': object,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/embedding_usage.dart b/packages/openai_dart/lib/src/generated/schema/embedding_usage.dart
new file mode 100644
index 00000000..4fe8b81a
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/embedding_usage.dart
@@ -0,0 +1,44 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: EmbeddingUsage
+// ==========================================
+
+/// The usage information for the request.
+@freezed
+class EmbeddingUsage with _$EmbeddingUsage {
+  const EmbeddingUsage._();
+
+  /// Factory constructor for EmbeddingUsage
+  const factory EmbeddingUsage({
+    /// The number of tokens used by the prompt.
+    @JsonKey(name: 'prompt_tokens') required int promptTokens,
+
+    /// The total number of tokens used by the request.
+    @JsonKey(name: 'total_tokens') required int totalTokens,
+  }) = _EmbeddingUsage;
+
+  /// Object construction from a JSON representation
+  factory EmbeddingUsage.fromJson(Map<String, dynamic> json) =>
+      _$EmbeddingUsageFromJson(json);
+
+  /// List of all property names of schema
+  static const List<String> propertyNames = ['prompt_tokens', 'total_tokens'];
+
+  /// Perform validations on the schema property values
+  String? validateSchema() {
+    return null;
+  }
+
+  /// Map representation of object (not serialized)
+  Map<String, dynamic> toMap() {
+    return {
+      'prompt_tokens': promptTokens,
+      'total_tokens': totalTokens,
+    };
+  }
+}
diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job.dart
new file mode 100644
index 00000000..39ef4585
--- /dev/null
+++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job.dart
@@ -0,0 +1,107 @@
+// coverage:ignore-file
+// GENERATED CODE - DO NOT MODIFY BY HAND
+// ignore_for_file: type=lint
+// ignore_for_file: invalid_annotation_target
+part of open_a_i_schema;
+
+// ==========================================
+// CLASS: FineTuningJob
+// ==========================================
+
+/// The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.
+@freezed
+class FineTuningJob with _$FineTuningJob {
+  const FineTuningJob._();
+
+  /// Factory constructor for FineTuningJob
+  const factory FineTuningJob({
+    /// The object identifier, which can be referenced in the API endpoints.
+    required String id,
+
+    /// The Unix timestamp (in seconds) for when the fine-tuning job was created.
+    @JsonKey(name: 'created_at') required int createdAt,
+
+    /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.
+    required FineTuningJobError? error,
+
+    /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running.
+    @JsonKey(name: 'fine_tuned_model') required String? fineTunedModel,
+
+    /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running.
+    @JsonKey(name: 'finished_at') required int? finishedAt,
+
+    /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details.
+ required FineTuningJobHyperparameters hyperparameters, + + /// The base model that is being fine-tuned. + required String model, + + /// The object type, which is always "fine_tuning.job". + required String object, + + /// The organization that owns the fine-tuning job. + @JsonKey(name: 'organization_id') required String organizationId, + + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'result_files') required List resultFiles, + + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + required FineTuningJobStatus status, + + /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'trained_tokens') required int? trainedTokens, + + /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'training_file') required String trainingFile, + + /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'validation_file') required String? 
validationFile, + }) = _FineTuningJob; + + /// Object construction from a JSON representation + factory FineTuningJob.fromJson(Map json) => + _$FineTuningJobFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'created_at', + 'error', + 'fine_tuned_model', + 'finished_at', + 'hyperparameters', + 'model', + 'object', + 'organization_id', + 'result_files', + 'status', + 'trained_tokens', + 'training_file', + 'validation_file' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'created_at': createdAt, + 'error': error, + 'fine_tuned_model': fineTunedModel, + 'finished_at': finishedAt, + 'hyperparameters': hyperparameters, + 'model': model, + 'object': object, + 'organization_id': organizationId, + 'result_files': resultFiles, + 'status': status, + 'trained_tokens': trainedTokens, + 'training_file': trainingFile, + 'validation_file': validationFile, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_error.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_error.dart new file mode 100644 index 00000000..aeb382d9 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_error.dart @@ -0,0 +1,48 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FineTuningJobError +// ========================================== + +/// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. 
+@freezed +class FineTuningJobError with _$FineTuningJobError { + const FineTuningJobError._(); + + /// Factory constructor for FineTuningJobError + const factory FineTuningJobError({ + /// A machine-readable error code. + required String code, + + /// A human-readable error message. + required String message, + + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + required String? param, + }) = _FineTuningJobError; + + /// Object construction from a JSON representation + factory FineTuningJobError.fromJson(Map json) => + _$FineTuningJobErrorFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['code', 'message', 'param']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'code': code, + 'message': message, + 'param': param, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_event.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_event.dart new file mode 100644 index 00000000..b8fac5f9 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_event.dart @@ -0,0 +1,76 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FineTuningJobEvent +// ========================================== + +/// Fine-tuning job event object. +@freezed +class FineTuningJobEvent with _$FineTuningJobEvent { + const FineTuningJobEvent._(); + + /// Factory constructor for FineTuningJobEvent + const factory FineTuningJobEvent({ + /// The event identifier, which can be referenced in the API endpoints. 
+ required String id, + + /// The Unix timestamp (in seconds) for when the event was created. + @JsonKey(name: 'created_at') required int createdAt, + + /// The log level of the event. + required FineTuningJobEventLevel level, + + /// The message of the event. + required String message, + + /// The object type, which is always "fine_tuning.job.event". + required String object, + }) = _FineTuningJobEvent; + + /// Object construction from a JSON representation + factory FineTuningJobEvent.fromJson(Map json) => + _$FineTuningJobEventFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'created_at', + 'level', + 'message', + 'object' + ]; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'created_at': createdAt, + 'level': level, + 'message': message, + 'object': object, + }; + } +} + +// ========================================== +// ENUM: FineTuningJobEventLevel +// ========================================== + +/// The log level of the event. +enum FineTuningJobEventLevel { + @JsonValue('info') + info, + @JsonValue('warn') + warn, + @JsonValue('error') + error, +} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart new file mode 100644 index 00000000..961d24b8 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_hyperparameters.dart @@ -0,0 +1,109 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: FineTuningJobHyperparameters +// ========================================== + +/// The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. +@freezed +class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters { + const FineTuningJobHyperparameters._(); + + /// Factory constructor for FineTuningJobHyperparameters + const factory FineTuningJobHyperparameters({ + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. + @_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + required FineTuningNEpochs nEpochs, + }) = _FineTuningJobHyperparameters; + + /// Object construction from a JSON representation + factory FineTuningJobHyperparameters.fromJson(Map json) => + _$FineTuningJobHyperparametersFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['n_epochs']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'n_epochs': nEpochs, + }; + } +} + +// ========================================== +// ENUM: FineTuningNEpochsOptions +// ========================================== + +/// No Description +enum FineTuningNEpochsOptions { + @JsonValue('auto') + auto, +} + +// ========================================== +// CLASS: FineTuningNEpochs +// ========================================== + +/// The number of epochs to train the model for. An epoch refers to one +/// full cycle through the training dataset. 
+@freezed +sealed class FineTuningNEpochs with _$FineTuningNEpochs { + const FineTuningNEpochs._(); + + const factory FineTuningNEpochs.enumeration( + FineTuningNEpochsOptions value, + ) = _UnionFineTuningNEpochsEnum; + + const factory FineTuningNEpochs.integer( + int value, + ) = _UnionFineTuningNEpochsInteger; + + /// Object construction from a JSON representation + factory FineTuningNEpochs.fromJson(Map json) => + _$FineTuningNEpochsFromJson(json); +} + +/// Custom JSON converter for [FineTuningNEpochs] +class _FineTuningNEpochsConverter + implements JsonConverter { + const _FineTuningNEpochsConverter(); + + @override + FineTuningNEpochs fromJson(Object? data) { + if (data is String && + _$FineTuningNEpochsOptionsEnumMap.values.contains(data)) { + return FineTuningNEpochs.enumeration( + _$FineTuningNEpochsOptionsEnumMap.keys.elementAt( + _$FineTuningNEpochsOptionsEnumMap.values.toList().indexOf(data), + ), + ); + } + if (data is int) { + return FineTuningNEpochs.integer(data); + } + return FineTuningNEpochs.enumeration( + FineTuningNEpochsOptions.auto, + ); + } + + @override + Object? 
toJson(FineTuningNEpochs data) { + return switch (data) { + _UnionFineTuningNEpochsEnum(value: final v) => + _$FineTuningNEpochsOptionsEnumMap[v]!, + _UnionFineTuningNEpochsInteger(value: final v) => v, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_status.dart b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_status.dart new file mode 100644 index 00000000..205794b8 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/fine_tuning_job_status.dart @@ -0,0 +1,25 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// ENUM: FineTuningJobStatus +// ========================================== + +/// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. +enum FineTuningJobStatus { + @JsonValue('validating_files') + validatingFiles, + @JsonValue('queued') + queued, + @JsonValue('running') + running, + @JsonValue('succeeded') + succeeded, + @JsonValue('failed') + failed, + @JsonValue('cancelled') + cancelled, +} diff --git a/packages/openai_dart/lib/src/generated/schema/image.dart b/packages/openai_dart/lib/src/generated/schema/image.dart new file mode 100644 index 00000000..345abdd4 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/image.dart @@ -0,0 +1,43 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: Image +// ========================================== + +/// Represents the url or the content of an image generated by the OpenAI API. 
+@freezed +class Image with _$Image { + const Image._(); + + /// Factory constructor for Image + const factory Image({ + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @JsonKey(name: 'b64_json', includeIfNull: false) String? b64Json, + + /// The URL of the generated image, if `response_format` is `url` (default). + @JsonKey(includeIfNull: false) String? url, + }) = _Image; + + /// Object construction from a JSON representation + factory Image.fromJson(Map json) => _$ImageFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['b64_json', 'url']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'b64_json': b64Json, + 'url': url, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/images_response.dart b/packages/openai_dart/lib/src/generated/schema/images_response.dart new file mode 100644 index 00000000..37829602 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/images_response.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ImagesResponse +// ========================================== + +/// Represents a generated image returned by the images endpoint. +@freezed +class ImagesResponse with _$ImagesResponse { + const ImagesResponse._(); + + /// Factory constructor for ImagesResponse + const factory ImagesResponse({ + /// The Unix timestamp (in seconds) when the image was created. + required int created, + + /// The list of images generated by the model. 
+ required List data, + }) = _ImagesResponse; + + /// Object construction from a JSON representation + factory ImagesResponse.fromJson(Map json) => + _$ImagesResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['created', 'data']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'created': created, + 'data': data, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_events_response.dart b/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_events_response.dart new file mode 100644 index 00000000..2006cb97 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/list_fine_tuning_job_events_response.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ListFineTuningJobEventsResponse +// ========================================== + +/// Represents a list of fine-tuning job events. +@freezed +class ListFineTuningJobEventsResponse with _$ListFineTuningJobEventsResponse { + const ListFineTuningJobEventsResponse._(); + + /// Factory constructor for ListFineTuningJobEventsResponse + const factory ListFineTuningJobEventsResponse({ + /// The list of fine-tuning job events. + required List data, + + /// The object type, which is always "list". 
+ required String object, + }) = _ListFineTuningJobEventsResponse; + + /// Object construction from a JSON representation + factory ListFineTuningJobEventsResponse.fromJson(Map json) => + _$ListFineTuningJobEventsResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['data', 'object']; + + /// Perform validations on the schema property values + String? validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'object': object, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/list_models_response.dart b/packages/openai_dart/lib/src/generated/schema/list_models_response.dart new file mode 100644 index 00000000..01c72b51 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/list_models_response.dart @@ -0,0 +1,44 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ListModelsResponse +// ========================================== + +/// Represents a list of models returned by the List models endpoint. +@freezed +class ListModelsResponse with _$ListModelsResponse { + const ListModelsResponse._(); + + /// Factory constructor for ListModelsResponse + const factory ListModelsResponse({ + /// The object type, which is always "list". + required String object, + + /// The list of models. + required List data, + }) = _ListModelsResponse; + + /// Object construction from a JSON representation + factory ListModelsResponse.fromJson(Map json) => + _$ListModelsResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['object', 'data']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'object': object, + 'data': data, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/list_paginated_fine_tuning_jobs_response.dart b/packages/openai_dart/lib/src/generated/schema/list_paginated_fine_tuning_jobs_response.dart new file mode 100644 index 00000000..53c9aa7e --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/list_paginated_fine_tuning_jobs_response.dart @@ -0,0 +1,50 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ListPaginatedFineTuningJobsResponse +// ========================================== + +/// Represents a list of fine-tuning jobs. +@freezed +class ListPaginatedFineTuningJobsResponse + with _$ListPaginatedFineTuningJobsResponse { + const ListPaginatedFineTuningJobsResponse._(); + + /// Factory constructor for ListPaginatedFineTuningJobsResponse + const factory ListPaginatedFineTuningJobsResponse({ + /// The list of fine-tuning jobs. + required List data, + + /// Whether there are more fine-tuning jobs to retrieve. + @JsonKey(name: 'has_more') required bool hasMore, + + /// The object type, which is always "list". + required String object, + }) = _ListPaginatedFineTuningJobsResponse; + + /// Object construction from a JSON representation + factory ListPaginatedFineTuningJobsResponse.fromJson( + Map json) => + _$ListPaginatedFineTuningJobsResponseFromJson(json); + + /// List of all property names of schema + static const List propertyNames = ['data', 'has_more', 'object']; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'data': data, + 'has_more': hasMore, + 'object': object, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/model.dart b/packages/openai_dart/lib/src/generated/schema/model.dart new file mode 100644 index 00000000..66a22842 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/model.dart @@ -0,0 +1,56 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: Model +// ========================================== + +/// Describes an OpenAI model offering that can be used with the API. +@freezed +class Model with _$Model { + const Model._(); + + /// Factory constructor for Model + const factory Model({ + /// The model identifier, which can be referenced in the API endpoints. + required String id, + + /// The Unix timestamp (in seconds) when the model was created. + required int created, + + /// The object type, which is always "model". + required String object, + + /// The organization that owns the model. + @JsonKey(name: 'owned_by') required String ownedBy, + }) = _Model; + + /// Object construction from a JSON representation + factory Model.fromJson(Map json) => _$ModelFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'id', + 'created', + 'object', + 'owned_by' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'id': id, + 'created': created, + 'object': object, + 'owned_by': ownedBy, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/moderation.dart b/packages/openai_dart/lib/src/generated/schema/moderation.dart new file mode 100644 index 00000000..9162fb03 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/moderation.dart @@ -0,0 +1,53 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: Moderation +// ========================================== + +/// Represents policy compliance report by OpenAI's content moderation model against a given input. +@freezed +class Moderation with _$Moderation { + const Moderation._(); + + /// Factory constructor for Moderation + const factory Moderation({ + /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + required bool flagged, + + /// A list of the categories, and whether they are flagged or not. + required ModerationCategories categories, + + /// A list of the categories along with their scores as predicted by model. + @JsonKey(name: 'category_scores') + required ModerationCategoriesScores categoryScores, + }) = _Moderation; + + /// Object construction from a JSON representation + factory Moderation.fromJson(Map json) => + _$ModerationFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'flagged', + 'categories', + 'category_scores' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'flagged': flagged, + 'categories': categories, + 'category_scores': categoryScores, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/moderation_categories.dart b/packages/openai_dart/lib/src/generated/schema/moderation_categories.dart new file mode 100644 index 00000000..493364ef --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/moderation_categories.dart @@ -0,0 +1,93 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ModerationCategories +// ========================================== + +/// A list of the categories, and whether they are flagged or not. +@freezed +class ModerationCategories with _$ModerationCategories { + const ModerationCategories._(); + + /// Factory constructor for ModerationCategories + const factory ModerationCategories({ + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + required bool hate, + + /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @JsonKey(name: 'hate/threatening') required bool hateThreatening, + + /// Content that expresses, incites, or promotes harassing language towards any target. + required bool harassment, + + /// Harassment content that also includes violence or serious harm towards any target. 
+ @JsonKey(name: 'harassment/threatening') + required bool harassmentThreatening, + + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm') required bool selfHarm, + + /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm/intent') required bool selfHarmIntent, + + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @JsonKey(name: 'self-harm/instructions') required bool selfHarmInstructions, + + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + required bool sexual, + + /// Sexual content that includes an individual who is under 18 years old. + @JsonKey(name: 'sexual/minors') required bool sexualMinors, + + /// Content that depicts death, violence, or physical injury. + required bool violence, + + /// Content that depicts death, violence, or physical injury in graphic detail. + @JsonKey(name: 'violence/graphic') required bool violenceGraphic, + }) = _ModerationCategories; + + /// Object construction from a JSON representation + factory ModerationCategories.fromJson(Map json) => + _$ModerationCategoriesFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'hate', + 'hate/threatening', + 'harassment', + 'harassment/threatening', + 'self-harm', + 'self-harm/intent', + 'self-harm/instructions', + 'sexual', + 'sexual/minors', + 'violence', + 'violence/graphic' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'hate': hate, + 'hate/threatening': hateThreatening, + 'harassment': harassment, + 'harassment/threatening': harassmentThreatening, + 'self-harm': selfHarm, + 'self-harm/intent': selfHarmIntent, + 'self-harm/instructions': selfHarmInstructions, + 'sexual': sexual, + 'sexual/minors': sexualMinors, + 'violence': violence, + 'violence/graphic': violenceGraphic, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/moderation_categories_scores.dart b/packages/openai_dart/lib/src/generated/schema/moderation_categories_scores.dart new file mode 100644 index 00000000..ee2a6848 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/moderation_categories_scores.dart @@ -0,0 +1,94 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target +part of open_a_i_schema; + +// ========================================== +// CLASS: ModerationCategoriesScores +// ========================================== + +/// A list of the categories along with their scores as predicted by model. +@freezed +class ModerationCategoriesScores with _$ModerationCategoriesScores { + const ModerationCategoriesScores._(); + + /// Factory constructor for ModerationCategoriesScores + const factory ModerationCategoriesScores({ + /// The score for the category 'hate'. + required double hate, + + /// The score for the category 'hate/threatening'. + @JsonKey(name: 'hate/threatening') required double hateThreatening, + + /// The score for the category 'harassment'. + required double harassment, + + /// The score for the category 'harassment/threatening'. + @JsonKey(name: 'harassment/threatening') + required double harassmentThreatening, + + /// The score for the category 'self-harm'. 
+ @JsonKey(name: 'self-harm') required double selfHarm, + + /// The score for the category 'self-harm/intent'. + @JsonKey(name: 'self-harm/intent') required double selfHarmIntent, + + /// The score for the category 'self-harm/instructions'. + @JsonKey(name: 'self-harm/instructions') + required double selfHarmInstructions, + + /// The score for the category 'sexual'. + required double sexual, + + /// The score for the category 'sexual/minors'. + @JsonKey(name: 'sexual/minors') required double sexualMinors, + + /// The score for the category 'violence'. + required double violence, + + /// The score for the category 'violence/graphic'. + @JsonKey(name: 'violence/graphic') required double violenceGraphic, + }) = _ModerationCategoriesScores; + + /// Object construction from a JSON representation + factory ModerationCategoriesScores.fromJson(Map json) => + _$ModerationCategoriesScoresFromJson(json); + + /// List of all property names of schema + static const List propertyNames = [ + 'hate', + 'hate/threatening', + 'harassment', + 'harassment/threatening', + 'self-harm', + 'self-harm/intent', + 'self-harm/instructions', + 'sexual', + 'sexual/minors', + 'violence', + 'violence/graphic' + ]; + + /// Perform validations on the schema property values + String? 
validateSchema() { + return null; + } + + /// Map representation of object (not serialized) + Map toMap() { + return { + 'hate': hate, + 'hate/threatening': hateThreatening, + 'harassment': harassment, + 'harassment/threatening': harassmentThreatening, + 'self-harm': selfHarm, + 'self-harm/intent': selfHarmIntent, + 'self-harm/instructions': selfHarmInstructions, + 'sexual': sexual, + 'sexual/minors': sexualMinors, + 'violence': violence, + 'violence/graphic': violenceGraphic, + }; + } +} diff --git a/packages/openai_dart/lib/src/generated/schema/schema.dart b/packages/openai_dart/lib/src/generated/schema/schema.dart new file mode 100644 index 00000000..00ec3d79 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/schema.dart @@ -0,0 +1,54 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: invalid_annotation_target + +library open_a_i_schema; + +import 'package:freezed_annotation/freezed_annotation.dart'; + +part 'schema.g.dart'; +part 'schema.freezed.dart'; + +part 'create_completion_request.dart'; +part 'create_completion_response.dart'; +part 'completion_choice.dart'; +part 'completion_finish_reason.dart'; +part 'completion_logprobs.dart'; +part 'create_chat_completion_request.dart'; +part 'chat_completion_message.dart'; +part 'chat_completion_message_role.dart'; +part 'chat_completion_message_function_call.dart'; +part 'chat_completion_functions.dart'; +part 'chat_completion_function_parameters.dart'; +part 'chat_completion_function_call_option.dart'; +part 'create_chat_completion_response.dart'; +part 'chat_completion_response_choice.dart'; +part 'chat_completion_finish_reason.dart'; +part 'create_chat_completion_stream_response.dart'; +part 'chat_completion_stream_response_choice.dart'; +part 'chat_completion_stream_response_delta.dart'; +part 'completion_usage.dart'; +part 'create_embedding_request.dart'; +part 'create_embedding_response.dart'; +part 'embedding.dart'; +part 
'embedding_usage.dart'; +part 'create_fine_tuning_job_request.dart'; +part 'fine_tuning_job.dart'; +part 'fine_tuning_job_status.dart'; +part 'fine_tuning_job_error.dart'; +part 'fine_tuning_job_hyperparameters.dart'; +part 'list_paginated_fine_tuning_jobs_response.dart'; +part 'list_fine_tuning_job_events_response.dart'; +part 'fine_tuning_job_event.dart'; +part 'create_image_request.dart'; +part 'images_response.dart'; +part 'image.dart'; +part 'model.dart'; +part 'list_models_response.dart'; +part 'delete_model_response.dart'; +part 'create_moderation_request.dart'; +part 'create_moderation_response.dart'; +part 'moderation.dart'; +part 'moderation_categories.dart'; +part 'moderation_categories_scores.dart'; diff --git a/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart new file mode 100644 index 00000000..29a01cf5 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/schema.freezed.dart @@ -0,0 +1,15701 @@ +// coverage:ignore-file +// GENERATED CODE - DO NOT MODIFY BY HAND +// ignore_for_file: type=lint +// ignore_for_file: unused_element, deprecated_member_use, deprecated_member_use_from_same_package, use_function_type_syntax_for_parameters, unnecessary_const, avoid_init_to_null, invalid_override_different_default_values_named, prefer_expression_function_bodies, annotate_overrides, invalid_annotation_target, unnecessary_question_mark + +part of 'schema.dart'; + +// ************************************************************************** +// FreezedGenerator +// ************************************************************************** + +T _$identity(T value) => value; + +final _privateConstructorUsedError = UnsupportedError( + 'It seems like you constructed your class using `MyClass._()`. 
This constructor is only meant to be used by freezed and you are not supposed to need it nor use it.\nPlease check the documentation here for more information: https://github.com/rrousselGit/freezed#custom-getters-and-methods'); + +CreateCompletionRequest _$CreateCompletionRequestFromJson( + Map json) { + return _CreateCompletionRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateCompletionRequest { + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @_CompletionModelConverter() + CompletionModel get model => throw _privateConstructorUsedError; + + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @_CompletionPromptConverter() + CompletionPrompt? get prompt => throw _privateConstructorUsedError; + + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(name: 'best_of', includeIfNull: false) + int? get bestOf => throw _privateConstructorUsedError; + + /// Echo back the prompt in addition to the completion + @JsonKey(includeIfNull: false) + bool? get echo => throw _privateConstructorUsedError; + + /// Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? get frequencyPenalty => throw _privateConstructorUsedError; + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? get logitBias => throw _privateConstructorUsedError; + + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + /// + /// The maximum value for `logprobs` is 5. + @JsonKey(includeIfNull: false) + int? get logprobs => throw _privateConstructorUsedError; + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion. 
+ /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) + int? get maxTokens => throw _privateConstructorUsedError; + + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(includeIfNull: false) + int? get n => throw _privateConstructorUsedError; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? get presencePenalty => throw _privateConstructorUsedError; + + /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + CompletionStop? get stop => throw _privateConstructorUsedError; + + /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) + bool? get stream => throw _privateConstructorUsedError; + + /// The suffix that comes after a completion of inserted text. + @JsonKey(includeIfNull: false) + String? 
get suffix => throw _privateConstructorUsedError; + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) + double? get temperature => throw _privateConstructorUsedError; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? get user => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateCompletionRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateCompletionRequestCopyWith<$Res> { + factory $CreateCompletionRequestCopyWith(CreateCompletionRequest value, + $Res Function(CreateCompletionRequest) then) = + _$CreateCompletionRequestCopyWithImpl<$Res, CreateCompletionRequest>; + @useResult + $Res call( + {@_CompletionModelConverter() CompletionModel model, + @_CompletionPromptConverter() CompletionPrompt? prompt, + @JsonKey(name: 'best_of', includeIfNull: false) int? bestOf, + @JsonKey(includeIfNull: false) bool? echo, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? frequencyPenalty, + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? 
logitBias, + @JsonKey(includeIfNull: false) int? logprobs, + @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(includeIfNull: false) int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? presencePenalty, + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + CompletionStop? stop, + @JsonKey(includeIfNull: false) bool? stream, + @JsonKey(includeIfNull: false) String? suffix, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(includeIfNull: false) String? user}); + + $CompletionModelCopyWith<$Res> get model; + $CompletionPromptCopyWith<$Res>? get prompt; + $CompletionStopCopyWith<$Res>? get stop; +} + +/// @nodoc +class _$CreateCompletionRequestCopyWithImpl<$Res, + $Val extends CreateCompletionRequest> + implements $CreateCompletionRequestCopyWith<$Res> { + _$CreateCompletionRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? prompt = freezed, + Object? bestOf = freezed, + Object? echo = freezed, + Object? frequencyPenalty = freezed, + Object? logitBias = freezed, + Object? logprobs = freezed, + Object? maxTokens = freezed, + Object? n = freezed, + Object? presencePenalty = freezed, + Object? stop = freezed, + Object? stream = freezed, + Object? suffix = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? user = freezed, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as CompletionModel, + prompt: freezed == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as CompletionPrompt?, + bestOf: freezed == bestOf + ? 
_value.bestOf + : bestOf // ignore: cast_nullable_to_non_nullable + as int?, + echo: freezed == echo + ? _value.echo + : echo // ignore: cast_nullable_to_non_nullable + as bool?, + frequencyPenalty: freezed == frequencyPenalty + ? _value.frequencyPenalty + : frequencyPenalty // ignore: cast_nullable_to_non_nullable + as double?, + logitBias: freezed == logitBias + ? _value.logitBias + : logitBias // ignore: cast_nullable_to_non_nullable + as Map?, + logprobs: freezed == logprobs + ? _value.logprobs + : logprobs // ignore: cast_nullable_to_non_nullable + as int?, + maxTokens: freezed == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int?, + n: freezed == n + ? _value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + presencePenalty: freezed == presencePenalty + ? _value.presencePenalty + : presencePenalty // ignore: cast_nullable_to_non_nullable + as double?, + stop: freezed == stop + ? _value.stop + : stop // ignore: cast_nullable_to_non_nullable + as CompletionStop?, + stream: freezed == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool?, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + user: freezed == user + ? _value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $CompletionModelCopyWith<$Res> get model { + return $CompletionModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $CompletionPromptCopyWith<$Res>? 
get prompt { + if (_value.prompt == null) { + return null; + } + + return $CompletionPromptCopyWith<$Res>(_value.prompt!, (value) { + return _then(_value.copyWith(prompt: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $CompletionStopCopyWith<$Res>? get stop { + if (_value.stop == null) { + return null; + } + + return $CompletionStopCopyWith<$Res>(_value.stop!, (value) { + return _then(_value.copyWith(stop: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateCompletionRequestImplCopyWith<$Res> + implements $CreateCompletionRequestCopyWith<$Res> { + factory _$$CreateCompletionRequestImplCopyWith( + _$CreateCompletionRequestImpl value, + $Res Function(_$CreateCompletionRequestImpl) then) = + __$$CreateCompletionRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_CompletionModelConverter() CompletionModel model, + @_CompletionPromptConverter() CompletionPrompt? prompt, + @JsonKey(name: 'best_of', includeIfNull: false) int? bestOf, + @JsonKey(includeIfNull: false) bool? echo, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? frequencyPenalty, + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? logitBias, + @JsonKey(includeIfNull: false) int? logprobs, + @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(includeIfNull: false) int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? presencePenalty, + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + CompletionStop? stop, + @JsonKey(includeIfNull: false) bool? stream, + @JsonKey(includeIfNull: false) String? suffix, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(includeIfNull: false) String? user}); + + @override + $CompletionModelCopyWith<$Res> get model; + @override + $CompletionPromptCopyWith<$Res>? get prompt; + @override + $CompletionStopCopyWith<$Res>? 
get stop; +} + +/// @nodoc +class __$$CreateCompletionRequestImplCopyWithImpl<$Res> + extends _$CreateCompletionRequestCopyWithImpl<$Res, + _$CreateCompletionRequestImpl> + implements _$$CreateCompletionRequestImplCopyWith<$Res> { + __$$CreateCompletionRequestImplCopyWithImpl( + _$CreateCompletionRequestImpl _value, + $Res Function(_$CreateCompletionRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? prompt = freezed, + Object? bestOf = freezed, + Object? echo = freezed, + Object? frequencyPenalty = freezed, + Object? logitBias = freezed, + Object? logprobs = freezed, + Object? maxTokens = freezed, + Object? n = freezed, + Object? presencePenalty = freezed, + Object? stop = freezed, + Object? stream = freezed, + Object? suffix = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? user = freezed, + }) { + return _then(_$CreateCompletionRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as CompletionModel, + prompt: freezed == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as CompletionPrompt?, + bestOf: freezed == bestOf + ? _value.bestOf + : bestOf // ignore: cast_nullable_to_non_nullable + as int?, + echo: freezed == echo + ? _value.echo + : echo // ignore: cast_nullable_to_non_nullable + as bool?, + frequencyPenalty: freezed == frequencyPenalty + ? _value.frequencyPenalty + : frequencyPenalty // ignore: cast_nullable_to_non_nullable + as double?, + logitBias: freezed == logitBias + ? _value._logitBias + : logitBias // ignore: cast_nullable_to_non_nullable + as Map?, + logprobs: freezed == logprobs + ? _value.logprobs + : logprobs // ignore: cast_nullable_to_non_nullable + as int?, + maxTokens: freezed == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int?, + n: freezed == n + ? 
_value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + presencePenalty: freezed == presencePenalty + ? _value.presencePenalty + : presencePenalty // ignore: cast_nullable_to_non_nullable + as double?, + stop: freezed == stop + ? _value.stop + : stop // ignore: cast_nullable_to_non_nullable + as CompletionStop?, + stream: freezed == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool?, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + user: freezed == user + ? _value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateCompletionRequestImpl extends _CreateCompletionRequest { + const _$CreateCompletionRequestImpl( + {@_CompletionModelConverter() required this.model, + @_CompletionPromptConverter() required this.prompt, + @JsonKey(name: 'best_of', includeIfNull: false) this.bestOf, + @JsonKey(includeIfNull: false) this.echo = false, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + this.frequencyPenalty = 0.0, + @JsonKey(name: 'logit_bias', includeIfNull: false) + final Map? 
logitBias, + @JsonKey(includeIfNull: false) this.logprobs, + @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens = 16, + @JsonKey(includeIfNull: false) this.n = 1, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + this.presencePenalty = 0.0, + @_CompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, + @JsonKey(includeIfNull: false) this.stream = false, + @JsonKey(includeIfNull: false) this.suffix, + @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, + @JsonKey(includeIfNull: false) this.user}) + : _logitBias = logitBias, + super._(); + + factory _$CreateCompletionRequestImpl.fromJson(Map json) => + _$$CreateCompletionRequestImplFromJson(json); + + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @override + @_CompletionModelConverter() + final CompletionModel model; + + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @override + @_CompletionPromptConverter() + final CompletionPrompt? prompt; + + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
+ @override + @JsonKey(name: 'best_of', includeIfNull: false) + final int? bestOf; + + /// Echo back the prompt in addition to the completion + @override + @JsonKey(includeIfNull: false) + final bool? echo; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @override + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + final double? frequencyPenalty; + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + final Map? _logitBias; + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @override + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? get logitBias { + final value = _logitBias; + if (value == null) return null; + if (_logitBias is EqualUnmodifiableMapView) return _logitBias; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(value); + } + + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + /// + /// The maximum value for `logprobs` is 5. + @override + @JsonKey(includeIfNull: false) + final int? logprobs; + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override + @JsonKey(name: 'max_tokens', includeIfNull: false) + final int? maxTokens; + + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @override + @JsonKey(includeIfNull: false) + final int? n; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @override + @JsonKey(name: 'presence_penalty', includeIfNull: false) + final double? presencePenalty; + + /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @override + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + final CompletionStop? stop; + + /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override + @JsonKey(includeIfNull: false) + final bool? stream; + + /// The suffix that comes after a completion of inserted text. + @override + @JsonKey(includeIfNull: false) + final String? suffix; + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override + @JsonKey(includeIfNull: false) + final String? user; + + @override + String toString() { + return 'CreateCompletionRequest(model: $model, prompt: $prompt, bestOf: $bestOf, echo: $echo, frequencyPenalty: $frequencyPenalty, logitBias: $logitBias, logprobs: $logprobs, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, stop: $stop, stream: $stream, suffix: $suffix, temperature: $temperature, topP: $topP, user: $user)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateCompletionRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.bestOf, bestOf) || other.bestOf == bestOf) && + (identical(other.echo, echo) || other.echo == echo) && + (identical(other.frequencyPenalty, frequencyPenalty) || + other.frequencyPenalty == frequencyPenalty) && + const DeepCollectionEquality() + .equals(other._logitBias, _logitBias) && + (identical(other.logprobs, logprobs) || + other.logprobs == logprobs) && + (identical(other.maxTokens, maxTokens) || + other.maxTokens == maxTokens) && + (identical(other.n, n) || other.n == n) && + (identical(other.presencePenalty, presencePenalty) || + other.presencePenalty == presencePenalty) && + (identical(other.stop, stop) || other.stop == stop) && + (identical(other.stream, stream) || other.stream == stream) && + (identical(other.suffix, suffix) || other.suffix == suffix) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.user, user) || other.user == user)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + prompt, + bestOf, + echo, + frequencyPenalty, + const 
DeepCollectionEquality().hash(_logitBias), + logprobs, + maxTokens, + n, + presencePenalty, + stop, + stream, + suffix, + temperature, + topP, + user); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> + get copyWith => __$$CreateCompletionRequestImplCopyWithImpl< + _$CreateCompletionRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateCompletionRequestImplToJson( + this, + ); + } +} + +abstract class _CreateCompletionRequest extends CreateCompletionRequest { + const factory _CreateCompletionRequest( + {@_CompletionModelConverter() required final CompletionModel model, + @_CompletionPromptConverter() required final CompletionPrompt? prompt, + @JsonKey(name: 'best_of', includeIfNull: false) final int? bestOf, + @JsonKey(includeIfNull: false) final bool? echo, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + final double? frequencyPenalty, + @JsonKey(name: 'logit_bias', includeIfNull: false) + final Map? logitBias, + @JsonKey(includeIfNull: false) final int? logprobs, + @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, + @JsonKey(includeIfNull: false) final int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + final double? presencePenalty, + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + final CompletionStop? stop, + @JsonKey(includeIfNull: false) final bool? stream, + @JsonKey(includeIfNull: false) final String? suffix, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @JsonKey(includeIfNull: false) + final String? user}) = _$CreateCompletionRequestImpl; + const _CreateCompletionRequest._() : super._(); + + factory _CreateCompletionRequest.fromJson(Map json) = + _$CreateCompletionRequestImpl.fromJson; + + @override + + /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @_CompletionModelConverter() + CompletionModel get model; + @override + + /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + /// + /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + @_CompletionPromptConverter() + CompletionPrompt? get prompt; + @override + + /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + /// + /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(name: 'best_of', includeIfNull: false) + int? get bestOf; + @override + + /// Echo back the prompt in addition to the completion + @JsonKey(includeIfNull: false) + bool? get echo; + @override + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? get frequencyPenalty; + @override + + /// Modify the likelihood of specified tokens appearing in the completion. 
+ /// + /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// + /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? get logitBias; + @override + + /// Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + /// + /// The maximum value for `logprobs` is 5. + @JsonKey(includeIfNull: false) + int? get logprobs; + @override + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion. + /// + /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) + int? get maxTokens; + @override + + /// How many completions to generate for each prompt. + /// + /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + @JsonKey(includeIfNull: false) + int? 
get n; + @override + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? get presencePenalty; + @override + + /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + @_CompletionStopConverter() + @JsonKey(includeIfNull: false) + CompletionStop? get stop; + @override + + /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) + bool? get stream; + @override + + /// The suffix that comes after a completion of inserted text. + @JsonKey(includeIfNull: false) + String? get suffix; + @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) + double? get temperature; + @override + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? 
get topP; + @override + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? get user; + @override + @JsonKey(ignore: true) + _$$CreateCompletionRequestImplCopyWith<_$CreateCompletionRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CompletionModel _$CompletionModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionCompletionModelString.fromJson(json); + case 'enumeration': + return _UnionCompletionModelEnum.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'CompletionModel', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$CompletionModel { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(CompletionModels value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(CompletionModels value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(CompletionModels value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionModelString value) string, + required TResult Function(_UnionCompletionModelEnum value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionModelString value)? string, + TResult? Function(_UnionCompletionModelEnum value)? 
enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionModelString value)? string, + TResult Function(_UnionCompletionModelEnum value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionModelCopyWith<$Res> { + factory $CompletionModelCopyWith( + CompletionModel value, $Res Function(CompletionModel) then) = + _$CompletionModelCopyWithImpl<$Res, CompletionModel>; +} + +/// @nodoc +class _$CompletionModelCopyWithImpl<$Res, $Val extends CompletionModel> + implements $CompletionModelCopyWith<$Res> { + _$CompletionModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionCompletionModelStringImplCopyWith<$Res> { + factory _$$UnionCompletionModelStringImplCopyWith( + _$UnionCompletionModelStringImpl value, + $Res Function(_$UnionCompletionModelStringImpl) then) = + __$$UnionCompletionModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionCompletionModelStringImplCopyWithImpl<$Res> + extends _$CompletionModelCopyWithImpl<$Res, + _$UnionCompletionModelStringImpl> + implements _$$UnionCompletionModelStringImplCopyWith<$Res> { + __$$UnionCompletionModelStringImplCopyWithImpl( + _$UnionCompletionModelStringImpl _value, + $Res Function(_$UnionCompletionModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionModelStringImpl( + null == value + ? 
_value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionModelStringImpl extends _UnionCompletionModelString { + const _$UnionCompletionModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionCompletionModelStringImpl.fromJson( + Map json) => + _$$UnionCompletionModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionModel.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionModelStringImplCopyWith<_$UnionCompletionModelStringImpl> + get copyWith => __$$UnionCompletionModelStringImplCopyWithImpl< + _$UnionCompletionModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(CompletionModels value) enumeration, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(CompletionModels value)? enumeration, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(CompletionModels value)? 
enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionModelString value) string, + required TResult Function(_UnionCompletionModelEnum value) enumeration, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionModelString value)? string, + TResult? Function(_UnionCompletionModelEnum value)? enumeration, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionModelString value)? string, + TResult Function(_UnionCompletionModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionModelStringImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionModelString extends CompletionModel { + const factory _UnionCompletionModelString(final String value) = + _$UnionCompletionModelStringImpl; + const _UnionCompletionModelString._() : super._(); + + factory _UnionCompletionModelString.fromJson(Map json) = + _$UnionCompletionModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionCompletionModelStringImplCopyWith<_$UnionCompletionModelStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionCompletionModelEnumImplCopyWith<$Res> { + factory _$$UnionCompletionModelEnumImplCopyWith( + _$UnionCompletionModelEnumImpl value, + $Res Function(_$UnionCompletionModelEnumImpl) then) = + __$$UnionCompletionModelEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({CompletionModels value}); +} + +/// @nodoc +class __$$UnionCompletionModelEnumImplCopyWithImpl<$Res> + extends _$CompletionModelCopyWithImpl<$Res, _$UnionCompletionModelEnumImpl> + implements 
_$$UnionCompletionModelEnumImplCopyWith<$Res> { + __$$UnionCompletionModelEnumImplCopyWithImpl( + _$UnionCompletionModelEnumImpl _value, + $Res Function(_$UnionCompletionModelEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionModelEnumImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as CompletionModels, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionModelEnumImpl extends _UnionCompletionModelEnum { + const _$UnionCompletionModelEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 'enumeration', + super._(); + + factory _$UnionCompletionModelEnumImpl.fromJson(Map json) => + _$$UnionCompletionModelEnumImplFromJson(json); + + @override + final CompletionModels value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionModel.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionModelEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionModelEnumImplCopyWith<_$UnionCompletionModelEnumImpl> + get copyWith => __$$UnionCompletionModelEnumImplCopyWithImpl< + _$UnionCompletionModelEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(CompletionModels value) enumeration, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(CompletionModels value)? 
enumeration, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(CompletionModels value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionModelString value) string, + required TResult Function(_UnionCompletionModelEnum value) enumeration, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionModelString value)? string, + TResult? Function(_UnionCompletionModelEnum value)? enumeration, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionModelString value)? string, + TResult Function(_UnionCompletionModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionModelEnumImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionModelEnum extends CompletionModel { + const factory _UnionCompletionModelEnum(final CompletionModels value) = + _$UnionCompletionModelEnumImpl; + const _UnionCompletionModelEnum._() : super._(); + + factory _UnionCompletionModelEnum.fromJson(Map json) = + _$UnionCompletionModelEnumImpl.fromJson; + + @override + CompletionModels get value; + @JsonKey(ignore: true) + _$$UnionCompletionModelEnumImplCopyWith<_$UnionCompletionModelEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CompletionPrompt _$CompletionPromptFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionCompletionPromptString.fromJson(json); + case 'arrayString': + return _UnionCompletionPromptArrayString.fromJson(json); + case 
'arrayInteger': + return _UnionCompletionPromptArrayInteger.fromJson(json); + case 'array': + return _UnionCompletionPromptArray.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'CompletionPrompt', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$CompletionPrompt { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionPromptString value) string, + required TResult Function(_UnionCompletionPromptArrayString value) + arrayString, + required TResult Function(_UnionCompletionPromptArrayInteger value) + arrayInteger, + required TResult Function(_UnionCompletionPromptArray value) array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionPromptString value)? string, + TResult? Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult? Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult? Function(_UnionCompletionPromptArray value)? 
array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionPromptString value)? string, + TResult Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult Function(_UnionCompletionPromptArray value)? array, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionPromptCopyWith<$Res> { + factory $CompletionPromptCopyWith( + CompletionPrompt value, $Res Function(CompletionPrompt) then) = + _$CompletionPromptCopyWithImpl<$Res, CompletionPrompt>; +} + +/// @nodoc +class _$CompletionPromptCopyWithImpl<$Res, $Val extends CompletionPrompt> + implements $CompletionPromptCopyWith<$Res> { + _$CompletionPromptCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionCompletionPromptStringImplCopyWith<$Res> { + factory _$$UnionCompletionPromptStringImplCopyWith( + _$UnionCompletionPromptStringImpl value, + $Res Function(_$UnionCompletionPromptStringImpl) then) = + __$$UnionCompletionPromptStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionCompletionPromptStringImplCopyWithImpl<$Res> + extends _$CompletionPromptCopyWithImpl<$Res, + _$UnionCompletionPromptStringImpl> + implements _$$UnionCompletionPromptStringImplCopyWith<$Res> { + __$$UnionCompletionPromptStringImplCopyWithImpl( + _$UnionCompletionPromptStringImpl _value, + $Res Function(_$UnionCompletionPromptStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionPromptStringImpl( + null == value + ? 
_value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionPromptStringImpl extends _UnionCompletionPromptString { + const _$UnionCompletionPromptStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionCompletionPromptStringImpl.fromJson( + Map json) => + _$$UnionCompletionPromptStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionPrompt.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionPromptStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionPromptStringImplCopyWith<_$UnionCompletionPromptStringImpl> + get copyWith => __$$UnionCompletionPromptStringImplCopyWithImpl< + _$UnionCompletionPromptStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? 
arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionPromptString value) string, + required TResult Function(_UnionCompletionPromptArrayString value) + arrayString, + required TResult Function(_UnionCompletionPromptArrayInteger value) + arrayInteger, + required TResult Function(_UnionCompletionPromptArray value) array, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionPromptString value)? string, + TResult? Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult? Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult? Function(_UnionCompletionPromptArray value)? array, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionPromptString value)? string, + TResult Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult Function(_UnionCompletionPromptArray value)? 
array, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionPromptStringImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionPromptString extends CompletionPrompt { + const factory _UnionCompletionPromptString(final String value) = + _$UnionCompletionPromptStringImpl; + const _UnionCompletionPromptString._() : super._(); + + factory _UnionCompletionPromptString.fromJson(Map json) = + _$UnionCompletionPromptStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionCompletionPromptStringImplCopyWith<_$UnionCompletionPromptStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionCompletionPromptArrayStringImplCopyWith<$Res> { + factory _$$UnionCompletionPromptArrayStringImplCopyWith( + _$UnionCompletionPromptArrayStringImpl value, + $Res Function(_$UnionCompletionPromptArrayStringImpl) then) = + __$$UnionCompletionPromptArrayStringImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionCompletionPromptArrayStringImplCopyWithImpl<$Res> + extends _$CompletionPromptCopyWithImpl<$Res, + _$UnionCompletionPromptArrayStringImpl> + implements _$$UnionCompletionPromptArrayStringImplCopyWith<$Res> { + __$$UnionCompletionPromptArrayStringImplCopyWithImpl( + _$UnionCompletionPromptArrayStringImpl _value, + $Res Function(_$UnionCompletionPromptArrayStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionPromptArrayStringImpl( + null == value + ? 
_value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionPromptArrayStringImpl + extends _UnionCompletionPromptArrayString { + const _$UnionCompletionPromptArrayStringImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'arrayString', + super._(); + + factory _$UnionCompletionPromptArrayStringImpl.fromJson( + Map json) => + _$$UnionCompletionPromptArrayStringImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionPrompt.arrayString(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionPromptArrayStringImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionPromptArrayStringImplCopyWith< + _$UnionCompletionPromptArrayStringImpl> + get copyWith => __$$UnionCompletionPromptArrayStringImplCopyWithImpl< + _$UnionCompletionPromptArrayStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return arrayString(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? 
Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return arrayString?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionPromptString value) string, + required TResult Function(_UnionCompletionPromptArrayString value) + arrayString, + required TResult Function(_UnionCompletionPromptArrayInteger value) + arrayInteger, + required TResult Function(_UnionCompletionPromptArray value) array, + }) { + return arrayString(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionPromptString value)? string, + TResult? Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult? Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult? Function(_UnionCompletionPromptArray value)? array, + }) { + return arrayString?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionPromptString value)? string, + TResult Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult Function(_UnionCompletionPromptArray value)? 
array, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionPromptArrayStringImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionPromptArrayString extends CompletionPrompt { + const factory _UnionCompletionPromptArrayString(final List value) = + _$UnionCompletionPromptArrayStringImpl; + const _UnionCompletionPromptArrayString._() : super._(); + + factory _UnionCompletionPromptArrayString.fromJson( + Map json) = + _$UnionCompletionPromptArrayStringImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionCompletionPromptArrayStringImplCopyWith< + _$UnionCompletionPromptArrayStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionCompletionPromptArrayIntegerImplCopyWith<$Res> { + factory _$$UnionCompletionPromptArrayIntegerImplCopyWith( + _$UnionCompletionPromptArrayIntegerImpl value, + $Res Function(_$UnionCompletionPromptArrayIntegerImpl) then) = + __$$UnionCompletionPromptArrayIntegerImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionCompletionPromptArrayIntegerImplCopyWithImpl<$Res> + extends _$CompletionPromptCopyWithImpl<$Res, + _$UnionCompletionPromptArrayIntegerImpl> + implements _$$UnionCompletionPromptArrayIntegerImplCopyWith<$Res> { + __$$UnionCompletionPromptArrayIntegerImplCopyWithImpl( + _$UnionCompletionPromptArrayIntegerImpl _value, + $Res Function(_$UnionCompletionPromptArrayIntegerImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionPromptArrayIntegerImpl( + null == value + ? 
_value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionPromptArrayIntegerImpl + extends _UnionCompletionPromptArrayInteger { + const _$UnionCompletionPromptArrayIntegerImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'arrayInteger', + super._(); + + factory _$UnionCompletionPromptArrayIntegerImpl.fromJson( + Map json) => + _$$UnionCompletionPromptArrayIntegerImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionPrompt.arrayInteger(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionPromptArrayIntegerImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionPromptArrayIntegerImplCopyWith< + _$UnionCompletionPromptArrayIntegerImpl> + get copyWith => __$$UnionCompletionPromptArrayIntegerImplCopyWithImpl< + _$UnionCompletionPromptArrayIntegerImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return arrayInteger(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? 
Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return arrayInteger?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) { + if (arrayInteger != null) { + return arrayInteger(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionPromptString value) string, + required TResult Function(_UnionCompletionPromptArrayString value) + arrayString, + required TResult Function(_UnionCompletionPromptArrayInteger value) + arrayInteger, + required TResult Function(_UnionCompletionPromptArray value) array, + }) { + return arrayInteger(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionPromptString value)? string, + TResult? Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult? Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult? Function(_UnionCompletionPromptArray value)? array, + }) { + return arrayInteger?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionPromptString value)? string, + TResult Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult Function(_UnionCompletionPromptArray value)? 
array, + required TResult orElse(), + }) { + if (arrayInteger != null) { + return arrayInteger(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionPromptArrayIntegerImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionPromptArrayInteger extends CompletionPrompt { + const factory _UnionCompletionPromptArrayInteger(final List value) = + _$UnionCompletionPromptArrayIntegerImpl; + const _UnionCompletionPromptArrayInteger._() : super._(); + + factory _UnionCompletionPromptArrayInteger.fromJson( + Map json) = + _$UnionCompletionPromptArrayIntegerImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionCompletionPromptArrayIntegerImplCopyWith< + _$UnionCompletionPromptArrayIntegerImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionCompletionPromptArrayImplCopyWith<$Res> { + factory _$$UnionCompletionPromptArrayImplCopyWith( + _$UnionCompletionPromptArrayImpl value, + $Res Function(_$UnionCompletionPromptArrayImpl) then) = + __$$UnionCompletionPromptArrayImplCopyWithImpl<$Res>; + @useResult + $Res call({List> value}); +} + +/// @nodoc +class __$$UnionCompletionPromptArrayImplCopyWithImpl<$Res> + extends _$CompletionPromptCopyWithImpl<$Res, + _$UnionCompletionPromptArrayImpl> + implements _$$UnionCompletionPromptArrayImplCopyWith<$Res> { + __$$UnionCompletionPromptArrayImplCopyWithImpl( + _$UnionCompletionPromptArrayImpl _value, + $Res Function(_$UnionCompletionPromptArrayImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionPromptArrayImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List>, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionPromptArrayImpl extends _UnionCompletionPromptArray { + const _$UnionCompletionPromptArrayImpl(final List> value, + {final String? 
$type}) + : _value = value, + $type = $type ?? 'array', + super._(); + + factory _$UnionCompletionPromptArrayImpl.fromJson( + Map json) => + _$$UnionCompletionPromptArrayImplFromJson(json); + + final List> _value; + @override + List> get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionPrompt.array(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionPromptArrayImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionPromptArrayImplCopyWith<_$UnionCompletionPromptArrayImpl> + get copyWith => __$$UnionCompletionPromptArrayImplCopyWithImpl< + _$UnionCompletionPromptArrayImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return array(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return array?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? 
array, + required TResult orElse(), + }) { + if (array != null) { + return array(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionPromptString value) string, + required TResult Function(_UnionCompletionPromptArrayString value) + arrayString, + required TResult Function(_UnionCompletionPromptArrayInteger value) + arrayInteger, + required TResult Function(_UnionCompletionPromptArray value) array, + }) { + return array(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionPromptString value)? string, + TResult? Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult? Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult? Function(_UnionCompletionPromptArray value)? array, + }) { + return array?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionPromptString value)? string, + TResult Function(_UnionCompletionPromptArrayString value)? arrayString, + TResult Function(_UnionCompletionPromptArrayInteger value)? arrayInteger, + TResult Function(_UnionCompletionPromptArray value)? 
array, + required TResult orElse(), + }) { + if (array != null) { + return array(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionPromptArrayImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionPromptArray extends CompletionPrompt { + const factory _UnionCompletionPromptArray(final List> value) = + _$UnionCompletionPromptArrayImpl; + const _UnionCompletionPromptArray._() : super._(); + + factory _UnionCompletionPromptArray.fromJson(Map json) = + _$UnionCompletionPromptArrayImpl.fromJson; + + @override + List> get value; + @JsonKey(ignore: true) + _$$UnionCompletionPromptArrayImplCopyWith<_$UnionCompletionPromptArrayImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CompletionStop _$CompletionStopFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionCompletionStopString.fromJson(json); + case 'arrayString': + return _UnionCompletionStopArrayString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'CompletionStop', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$CompletionStop { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? 
arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionStopString value) string, + required TResult Function(_UnionCompletionStopArrayString value) + arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionStopString value)? string, + TResult? Function(_UnionCompletionStopArrayString value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionStopString value)? string, + TResult Function(_UnionCompletionStopArrayString value)? arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionStopCopyWith<$Res> { + factory $CompletionStopCopyWith( + CompletionStop value, $Res Function(CompletionStop) then) = + _$CompletionStopCopyWithImpl<$Res, CompletionStop>; +} + +/// @nodoc +class _$CompletionStopCopyWithImpl<$Res, $Val extends CompletionStop> + implements $CompletionStopCopyWith<$Res> { + _$CompletionStopCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionCompletionStopStringImplCopyWith<$Res> { + factory _$$UnionCompletionStopStringImplCopyWith( + _$UnionCompletionStopStringImpl value, + $Res Function(_$UnionCompletionStopStringImpl) then) = + __$$UnionCompletionStopStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionCompletionStopStringImplCopyWithImpl<$Res> + extends _$CompletionStopCopyWithImpl<$Res, _$UnionCompletionStopStringImpl> + implements _$$UnionCompletionStopStringImplCopyWith<$Res> { + __$$UnionCompletionStopStringImplCopyWithImpl( + _$UnionCompletionStopStringImpl 
_value, + $Res Function(_$UnionCompletionStopStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionStopStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionStopStringImpl extends _UnionCompletionStopString { + const _$UnionCompletionStopStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionCompletionStopStringImpl.fromJson(Map json) => + _$$UnionCompletionStopStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionStop.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionStopStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionStopStringImplCopyWith<_$UnionCompletionStopStringImpl> + get copyWith => __$$UnionCompletionStopStringImplCopyWithImpl< + _$UnionCompletionStopStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? 
arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionStopString value) string, + required TResult Function(_UnionCompletionStopArrayString value) + arrayString, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionStopString value)? string, + TResult? Function(_UnionCompletionStopArrayString value)? arrayString, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionStopString value)? string, + TResult Function(_UnionCompletionStopArrayString value)? arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionStopStringImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionStopString extends CompletionStop { + const factory _UnionCompletionStopString(final String value) = + _$UnionCompletionStopStringImpl; + const _UnionCompletionStopString._() : super._(); + + factory _UnionCompletionStopString.fromJson(Map json) = + _$UnionCompletionStopStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionCompletionStopStringImplCopyWith<_$UnionCompletionStopStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionCompletionStopArrayStringImplCopyWith<$Res> { + factory _$$UnionCompletionStopArrayStringImplCopyWith( + _$UnionCompletionStopArrayStringImpl value, + $Res Function(_$UnionCompletionStopArrayStringImpl) then) = + __$$UnionCompletionStopArrayStringImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionCompletionStopArrayStringImplCopyWithImpl<$Res> + extends _$CompletionStopCopyWithImpl<$Res, + 
_$UnionCompletionStopArrayStringImpl> + implements _$$UnionCompletionStopArrayStringImplCopyWith<$Res> { + __$$UnionCompletionStopArrayStringImplCopyWithImpl( + _$UnionCompletionStopArrayStringImpl _value, + $Res Function(_$UnionCompletionStopArrayStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionCompletionStopArrayStringImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionCompletionStopArrayStringImpl + extends _UnionCompletionStopArrayString { + const _$UnionCompletionStopArrayStringImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'arrayString', + super._(); + + factory _$UnionCompletionStopArrayStringImpl.fromJson( + Map json) => + _$$UnionCompletionStopArrayStringImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'CompletionStop.arrayString(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionCompletionStopArrayStringImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionCompletionStopArrayStringImplCopyWith< + _$UnionCompletionStopArrayStringImpl> + get copyWith => __$$UnionCompletionStopArrayStringImplCopyWithImpl< + _$UnionCompletionStopArrayStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + 
TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) { + return arrayString(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) { + return arrayString?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionCompletionStopString value) string, + required TResult Function(_UnionCompletionStopArrayString value) + arrayString, + }) { + return arrayString(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionCompletionStopString value)? string, + TResult? Function(_UnionCompletionStopArrayString value)? arrayString, + }) { + return arrayString?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionCompletionStopString value)? string, + TResult Function(_UnionCompletionStopArrayString value)? 
arrayString, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionCompletionStopArrayStringImplToJson( + this, + ); + } +} + +abstract class _UnionCompletionStopArrayString extends CompletionStop { + const factory _UnionCompletionStopArrayString(final List value) = + _$UnionCompletionStopArrayStringImpl; + const _UnionCompletionStopArrayString._() : super._(); + + factory _UnionCompletionStopArrayString.fromJson(Map json) = + _$UnionCompletionStopArrayStringImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionCompletionStopArrayStringImplCopyWith< + _$UnionCompletionStopArrayStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateCompletionResponse _$CreateCompletionResponseFromJson( + Map json) { + return _CreateCompletionResponse.fromJson(json); +} + +/// @nodoc +mixin _$CreateCompletionResponse { + /// A unique identifier for the completion. + String get id => throw _privateConstructorUsedError; + + /// The list of completion choices the model generated for the input prompt. + List get choices => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) of when the completion was created. + int get created => throw _privateConstructorUsedError; + + /// The model used for completion. + String get model => throw _privateConstructorUsedError; + + /// The object type, which is always "text_completion" + String get object => throw _privateConstructorUsedError; + + /// Usage statistics for the completion request. + @JsonKey(includeIfNull: false) + CompletionUsage? 
get usage => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateCompletionResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateCompletionResponseCopyWith<$Res> { + factory $CreateCompletionResponseCopyWith(CreateCompletionResponse value, + $Res Function(CreateCompletionResponse) then) = + _$CreateCompletionResponseCopyWithImpl<$Res, CreateCompletionResponse>; + @useResult + $Res call( + {String id, + List choices, + int created, + String model, + String object, + @JsonKey(includeIfNull: false) CompletionUsage? usage}); + + $CompletionUsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class _$CreateCompletionResponseCopyWithImpl<$Res, + $Val extends CreateCompletionResponse> + implements $CreateCompletionResponseCopyWith<$Res> { + _$CreateCompletionResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? choices = null, + Object? created = null, + Object? model = null, + Object? object = null, + Object? usage = freezed, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + choices: null == choices + ? _value.choices + : choices // ignore: cast_nullable_to_non_nullable + as List, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usage: freezed == usage + ? 
_value.usage + : usage // ignore: cast_nullable_to_non_nullable + as CompletionUsage?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $CompletionUsageCopyWith<$Res>? get usage { + if (_value.usage == null) { + return null; + } + + return $CompletionUsageCopyWith<$Res>(_value.usage!, (value) { + return _then(_value.copyWith(usage: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateCompletionResponseImplCopyWith<$Res> + implements $CreateCompletionResponseCopyWith<$Res> { + factory _$$CreateCompletionResponseImplCopyWith( + _$CreateCompletionResponseImpl value, + $Res Function(_$CreateCompletionResponseImpl) then) = + __$$CreateCompletionResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + List choices, + int created, + String model, + String object, + @JsonKey(includeIfNull: false) CompletionUsage? usage}); + + @override + $CompletionUsageCopyWith<$Res>? get usage; +} + +/// @nodoc +class __$$CreateCompletionResponseImplCopyWithImpl<$Res> + extends _$CreateCompletionResponseCopyWithImpl<$Res, + _$CreateCompletionResponseImpl> + implements _$$CreateCompletionResponseImplCopyWith<$Res> { + __$$CreateCompletionResponseImplCopyWithImpl( + _$CreateCompletionResponseImpl _value, + $Res Function(_$CreateCompletionResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? choices = null, + Object? created = null, + Object? model = null, + Object? object = null, + Object? usage = freezed, + }) { + return _then(_$CreateCompletionResponseImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + choices: null == choices + ? _value._choices + : choices // ignore: cast_nullable_to_non_nullable + as List, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + model: null == model + ? 
_value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usage: freezed == usage + ? _value.usage + : usage // ignore: cast_nullable_to_non_nullable + as CompletionUsage?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateCompletionResponseImpl extends _CreateCompletionResponse { + const _$CreateCompletionResponseImpl( + {required this.id, + required final List choices, + required this.created, + required this.model, + required this.object, + @JsonKey(includeIfNull: false) this.usage}) + : _choices = choices, + super._(); + + factory _$CreateCompletionResponseImpl.fromJson(Map json) => + _$$CreateCompletionResponseImplFromJson(json); + + /// A unique identifier for the completion. + @override + final String id; + + /// The list of completion choices the model generated for the input prompt. + final List _choices; + + /// The list of completion choices the model generated for the input prompt. + @override + List get choices { + if (_choices is EqualUnmodifiableListView) return _choices; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_choices); + } + + /// The Unix timestamp (in seconds) of when the completion was created. + @override + final int created; + + /// The model used for completion. + @override + final String model; + + /// The object type, which is always "text_completion" + @override + final String object; + + /// Usage statistics for the completion request. + @override + @JsonKey(includeIfNull: false) + final CompletionUsage? 
usage; + + @override + String toString() { + return 'CreateCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, object: $object, usage: $usage)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateCompletionResponseImpl && + (identical(other.id, id) || other.id == id) && + const DeepCollectionEquality().equals(other._choices, _choices) && + (identical(other.created, created) || other.created == created) && + (identical(other.model, model) || other.model == model) && + (identical(other.object, object) || other.object == object) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + id, + const DeepCollectionEquality().hash(_choices), + created, + model, + object, + usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> + get copyWith => __$$CreateCompletionResponseImplCopyWithImpl< + _$CreateCompletionResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateCompletionResponseImplToJson( + this, + ); + } +} + +abstract class _CreateCompletionResponse extends CreateCompletionResponse { + const factory _CreateCompletionResponse( + {required final String id, + required final List choices, + required final int created, + required final String model, + required final String object, + @JsonKey(includeIfNull: false) final CompletionUsage? usage}) = + _$CreateCompletionResponseImpl; + const _CreateCompletionResponse._() : super._(); + + factory _CreateCompletionResponse.fromJson(Map json) = + _$CreateCompletionResponseImpl.fromJson; + + @override + + /// A unique identifier for the completion. + String get id; + @override + + /// The list of completion choices the model generated for the input prompt. 
+ List get choices; + @override + + /// The Unix timestamp (in seconds) of when the completion was created. + int get created; + @override + + /// The model used for completion. + String get model; + @override + + /// The object type, which is always "text_completion" + String get object; + @override + + /// Usage statistics for the completion request. + @JsonKey(includeIfNull: false) + CompletionUsage? get usage; + @override + @JsonKey(ignore: true) + _$$CreateCompletionResponseImplCopyWith<_$CreateCompletionResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CompletionChoice _$CompletionChoiceFromJson(Map json) { + return _CompletionChoice.fromJson(json); +} + +/// @nodoc +mixin _$CompletionChoice { + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// or `content_filter` if content was omitted due to a flag from our content filters. + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CompletionFinishReason? get finishReason => + throw _privateConstructorUsedError; + + /// The index of the choice in the list of generated choices. + int get index => throw _privateConstructorUsedError; + + /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + CompletionLogprobs? get logprobs => throw _privateConstructorUsedError; + + /// The text of the completion. 
+ String get text => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CompletionChoiceCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionChoiceCopyWith<$Res> { + factory $CompletionChoiceCopyWith( + CompletionChoice value, $Res Function(CompletionChoice) then) = + _$CompletionChoiceCopyWithImpl<$Res, CompletionChoice>; + @useResult + $Res call( + {@JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CompletionFinishReason? finishReason, + int index, + CompletionLogprobs? logprobs, + String text}); + + $CompletionLogprobsCopyWith<$Res>? get logprobs; +} + +/// @nodoc +class _$CompletionChoiceCopyWithImpl<$Res, $Val extends CompletionChoice> + implements $CompletionChoiceCopyWith<$Res> { + _$CompletionChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? finishReason = freezed, + Object? index = null, + Object? logprobs = freezed, + Object? text = null, + }) { + return _then(_value.copyWith( + finishReason: freezed == finishReason + ? _value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as CompletionFinishReason?, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + logprobs: freezed == logprobs + ? _value.logprobs + : logprobs // ignore: cast_nullable_to_non_nullable + as CompletionLogprobs?, + text: null == text + ? _value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $CompletionLogprobsCopyWith<$Res>? 
get logprobs { + if (_value.logprobs == null) { + return null; + } + + return $CompletionLogprobsCopyWith<$Res>(_value.logprobs!, (value) { + return _then(_value.copyWith(logprobs: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CompletionChoiceImplCopyWith<$Res> + implements $CompletionChoiceCopyWith<$Res> { + factory _$$CompletionChoiceImplCopyWith(_$CompletionChoiceImpl value, + $Res Function(_$CompletionChoiceImpl) then) = + __$$CompletionChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CompletionFinishReason? finishReason, + int index, + CompletionLogprobs? logprobs, + String text}); + + @override + $CompletionLogprobsCopyWith<$Res>? get logprobs; +} + +/// @nodoc +class __$$CompletionChoiceImplCopyWithImpl<$Res> + extends _$CompletionChoiceCopyWithImpl<$Res, _$CompletionChoiceImpl> + implements _$$CompletionChoiceImplCopyWith<$Res> { + __$$CompletionChoiceImplCopyWithImpl(_$CompletionChoiceImpl _value, + $Res Function(_$CompletionChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? finishReason = freezed, + Object? index = null, + Object? logprobs = freezed, + Object? text = null, + }) { + return _then(_$CompletionChoiceImpl( + finishReason: freezed == finishReason + ? _value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as CompletionFinishReason?, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + logprobs: freezed == logprobs + ? _value.logprobs + : logprobs // ignore: cast_nullable_to_non_nullable + as CompletionLogprobs?, + text: null == text + ? 
_value.text + : text // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionChoiceImpl extends _CompletionChoice { + const _$CompletionChoiceImpl( + {@JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + required this.finishReason, + required this.index, + required this.logprobs, + required this.text}) + : super._(); + + factory _$CompletionChoiceImpl.fromJson(Map json) => + _$$CompletionChoiceImplFromJson(json); + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// or `content_filter` if content was omitted due to a flag from our content filters. + @override + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final CompletionFinishReason? finishReason; + + /// The index of the choice in the list of generated choices. + @override + final int index; + + /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + @override + final CompletionLogprobs? logprobs; + + /// The text of the completion. 
+ @override + final String text; + + @override + String toString() { + return 'CompletionChoice(finishReason: $finishReason, index: $index, logprobs: $logprobs, text: $text)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionChoiceImpl && + (identical(other.finishReason, finishReason) || + other.finishReason == finishReason) && + (identical(other.index, index) || other.index == index) && + (identical(other.logprobs, logprobs) || + other.logprobs == logprobs) && + (identical(other.text, text) || other.text == text)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, finishReason, index, logprobs, text); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => + __$$CompletionChoiceImplCopyWithImpl<_$CompletionChoiceImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CompletionChoiceImplToJson( + this, + ); + } +} + +abstract class _CompletionChoice extends CompletionChoice { + const factory _CompletionChoice( + {@JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + required final CompletionFinishReason? finishReason, + required final int index, + required final CompletionLogprobs? logprobs, + required final String text}) = _$CompletionChoiceImpl; + const _CompletionChoice._() : super._(); + + factory _CompletionChoice.fromJson(Map json) = + _$CompletionChoiceImpl.fromJson; + + @override + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// or `content_filter` if content was omitted due to a flag from our content filters. 
+ @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + CompletionFinishReason? get finishReason; + @override + + /// The index of the choice in the list of generated choices. + int get index; + @override + + /// The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + CompletionLogprobs? get logprobs; + @override + + /// The text of the completion. + String get text; + @override + @JsonKey(ignore: true) + _$$CompletionChoiceImplCopyWith<_$CompletionChoiceImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CompletionLogprobs _$CompletionLogprobsFromJson(Map json) { + return _CompletionLogprobs.fromJson(json); +} + +/// @nodoc +mixin _$CompletionLogprobs { + /// The offset of the token from the beginning of the prompt. + @JsonKey(name: 'text_offset', includeIfNull: false) + List? get textOffset => throw _privateConstructorUsedError; + + /// The log probabilities of tokens in the completion. + @JsonKey(name: 'token_logprobs', includeIfNull: false) + List? get tokenLogprobs => throw _privateConstructorUsedError; + + /// The tokens generated by the model converted back to text. + @JsonKey(includeIfNull: false) + List? get tokens => throw _privateConstructorUsedError; + + /// The log probabilities of the `logprobs` most likely tokens. + @JsonKey(name: 'top_logprobs', includeIfNull: false) + List?>? 
get topLogprobs => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CompletionLogprobsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionLogprobsCopyWith<$Res> { + factory $CompletionLogprobsCopyWith( + CompletionLogprobs value, $Res Function(CompletionLogprobs) then) = + _$CompletionLogprobsCopyWithImpl<$Res, CompletionLogprobs>; + @useResult + $Res call( + {@JsonKey(name: 'text_offset', includeIfNull: false) + List? textOffset, + @JsonKey(name: 'token_logprobs', includeIfNull: false) + List? tokenLogprobs, + @JsonKey(includeIfNull: false) List? tokens, + @JsonKey(name: 'top_logprobs', includeIfNull: false) + List?>? topLogprobs}); +} + +/// @nodoc +class _$CompletionLogprobsCopyWithImpl<$Res, $Val extends CompletionLogprobs> + implements $CompletionLogprobsCopyWith<$Res> { + _$CompletionLogprobsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? textOffset = freezed, + Object? tokenLogprobs = freezed, + Object? tokens = freezed, + Object? topLogprobs = freezed, + }) { + return _then(_value.copyWith( + textOffset: freezed == textOffset + ? _value.textOffset + : textOffset // ignore: cast_nullable_to_non_nullable + as List?, + tokenLogprobs: freezed == tokenLogprobs + ? _value.tokenLogprobs + : tokenLogprobs // ignore: cast_nullable_to_non_nullable + as List?, + tokens: freezed == tokens + ? _value.tokens + : tokens // ignore: cast_nullable_to_non_nullable + as List?, + topLogprobs: freezed == topLogprobs + ? 
_value.topLogprobs + : topLogprobs // ignore: cast_nullable_to_non_nullable + as List?>?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CompletionLogprobsImplCopyWith<$Res> + implements $CompletionLogprobsCopyWith<$Res> { + factory _$$CompletionLogprobsImplCopyWith(_$CompletionLogprobsImpl value, + $Res Function(_$CompletionLogprobsImpl) then) = + __$$CompletionLogprobsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'text_offset', includeIfNull: false) + List? textOffset, + @JsonKey(name: 'token_logprobs', includeIfNull: false) + List? tokenLogprobs, + @JsonKey(includeIfNull: false) List? tokens, + @JsonKey(name: 'top_logprobs', includeIfNull: false) + List?>? topLogprobs}); +} + +/// @nodoc +class __$$CompletionLogprobsImplCopyWithImpl<$Res> + extends _$CompletionLogprobsCopyWithImpl<$Res, _$CompletionLogprobsImpl> + implements _$$CompletionLogprobsImplCopyWith<$Res> { + __$$CompletionLogprobsImplCopyWithImpl(_$CompletionLogprobsImpl _value, + $Res Function(_$CompletionLogprobsImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? textOffset = freezed, + Object? tokenLogprobs = freezed, + Object? tokens = freezed, + Object? topLogprobs = freezed, + }) { + return _then(_$CompletionLogprobsImpl( + textOffset: freezed == textOffset + ? _value._textOffset + : textOffset // ignore: cast_nullable_to_non_nullable + as List?, + tokenLogprobs: freezed == tokenLogprobs + ? _value._tokenLogprobs + : tokenLogprobs // ignore: cast_nullable_to_non_nullable + as List?, + tokens: freezed == tokens + ? _value._tokens + : tokens // ignore: cast_nullable_to_non_nullable + as List?, + topLogprobs: freezed == topLogprobs + ? 
_value._topLogprobs + : topLogprobs // ignore: cast_nullable_to_non_nullable + as List?>?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionLogprobsImpl extends _CompletionLogprobs { + const _$CompletionLogprobsImpl( + {@JsonKey(name: 'text_offset', includeIfNull: false) + final List? textOffset, + @JsonKey(name: 'token_logprobs', includeIfNull: false) + final List? tokenLogprobs, + @JsonKey(includeIfNull: false) final List? tokens, + @JsonKey(name: 'top_logprobs', includeIfNull: false) + final List?>? topLogprobs}) + : _textOffset = textOffset, + _tokenLogprobs = tokenLogprobs, + _tokens = tokens, + _topLogprobs = topLogprobs, + super._(); + + factory _$CompletionLogprobsImpl.fromJson(Map json) => + _$$CompletionLogprobsImplFromJson(json); + + /// The offset of the token from the beginning of the prompt. + final List? _textOffset; + + /// The offset of the token from the beginning of the prompt. + @override + @JsonKey(name: 'text_offset', includeIfNull: false) + List? get textOffset { + final value = _textOffset; + if (value == null) return null; + if (_textOffset is EqualUnmodifiableListView) return _textOffset; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// The log probabilities of tokens in the completion. + final List? _tokenLogprobs; + + /// The log probabilities of tokens in the completion. + @override + @JsonKey(name: 'token_logprobs', includeIfNull: false) + List? get tokenLogprobs { + final value = _tokenLogprobs; + if (value == null) return null; + if (_tokenLogprobs is EqualUnmodifiableListView) return _tokenLogprobs; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// The tokens generated by the model converted back to text. + final List? _tokens; + + /// The tokens generated by the model converted back to text. + @override + @JsonKey(includeIfNull: false) + List? 
get tokens { + final value = _tokens; + if (value == null) return null; + if (_tokens is EqualUnmodifiableListView) return _tokens; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// The log probabilities of the `logprobs` most likely tokens. + final List?>? _topLogprobs; + + /// The log probabilities of the `logprobs` most likely tokens. + @override + @JsonKey(name: 'top_logprobs', includeIfNull: false) + List?>? get topLogprobs { + final value = _topLogprobs; + if (value == null) return null; + if (_topLogprobs is EqualUnmodifiableListView) return _topLogprobs; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + @override + String toString() { + return 'CompletionLogprobs(textOffset: $textOffset, tokenLogprobs: $tokenLogprobs, tokens: $tokens, topLogprobs: $topLogprobs)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionLogprobsImpl && + const DeepCollectionEquality() + .equals(other._textOffset, _textOffset) && + const DeepCollectionEquality() + .equals(other._tokenLogprobs, _tokenLogprobs) && + const DeepCollectionEquality().equals(other._tokens, _tokens) && + const DeepCollectionEquality() + .equals(other._topLogprobs, _topLogprobs)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + const DeepCollectionEquality().hash(_textOffset), + const DeepCollectionEquality().hash(_tokenLogprobs), + const DeepCollectionEquality().hash(_tokens), + const DeepCollectionEquality().hash(_topLogprobs)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => + __$$CompletionLogprobsImplCopyWithImpl<_$CompletionLogprobsImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CompletionLogprobsImplToJson( + this, + ); + } +} + +abstract class 
_CompletionLogprobs extends CompletionLogprobs { + const factory _CompletionLogprobs( + {@JsonKey(name: 'text_offset', includeIfNull: false) + final List? textOffset, + @JsonKey(name: 'token_logprobs', includeIfNull: false) + final List? tokenLogprobs, + @JsonKey(includeIfNull: false) final List? tokens, + @JsonKey(name: 'top_logprobs', includeIfNull: false) + final List?>? topLogprobs}) = + _$CompletionLogprobsImpl; + const _CompletionLogprobs._() : super._(); + + factory _CompletionLogprobs.fromJson(Map json) = + _$CompletionLogprobsImpl.fromJson; + + @override + + /// The offset of the token from the beginning of the prompt. + @JsonKey(name: 'text_offset', includeIfNull: false) + List? get textOffset; + @override + + /// The log probabilities of tokens in the completion. + @JsonKey(name: 'token_logprobs', includeIfNull: false) + List? get tokenLogprobs; + @override + + /// The tokens generated by the model converted back to text. + @JsonKey(includeIfNull: false) + List? get tokens; + @override + + /// The log probabilities of the `logprobs` most likely tokens. + @JsonKey(name: 'top_logprobs', includeIfNull: false) + List?>? get topLogprobs; + @override + @JsonKey(ignore: true) + _$$CompletionLogprobsImplCopyWith<_$CompletionLogprobsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateChatCompletionRequest _$CreateChatCompletionRequestFromJson( + Map json) { + return _CreateChatCompletionRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateChatCompletionRequest { + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + @_ChatCompletionModelConverter() + ChatCompletionModel get model => throw _privateConstructorUsedError; + + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
+ List get messages => + throw _privateConstructorUsedError; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? get frequencyPenalty => throw _privateConstructorUsedError; + + /// Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + @JsonKey(name: 'function_call', includeIfNull: false) + dynamic get functionCall => throw _privateConstructorUsedError; + + /// A list of functions the model may generate JSON inputs for. + @JsonKey(includeIfNull: false) + List? get functions => + throw _privateConstructorUsedError; + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? get logitBias => throw _privateConstructorUsedError; + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion. 
+ /// + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) + int? get maxTokens => throw _privateConstructorUsedError; + + /// How many chat completion choices to generate for each input message. + @JsonKey(includeIfNull: false) + int? get n => throw _privateConstructorUsedError; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? get presencePenalty => throw _privateConstructorUsedError; + + /// Up to 4 sequences where the API will stop generating further tokens. + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + ChatCompletionStop? get stop => throw _privateConstructorUsedError; + + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) + bool? get stream => throw _privateConstructorUsedError; + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) + double? 
get temperature => throw _privateConstructorUsedError; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP => throw _privateConstructorUsedError; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? get user => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateChatCompletionRequestCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateChatCompletionRequestCopyWith<$Res> { + factory $CreateChatCompletionRequestCopyWith( + CreateChatCompletionRequest value, + $Res Function(CreateChatCompletionRequest) then) = + _$CreateChatCompletionRequestCopyWithImpl<$Res, + CreateChatCompletionRequest>; + @useResult + $Res call( + {@_ChatCompletionModelConverter() ChatCompletionModel model, + List messages, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? frequencyPenalty, + @JsonKey(name: 'function_call', includeIfNull: false) + dynamic functionCall, + @JsonKey(includeIfNull: false) List? functions, + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? logitBias, + @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(includeIfNull: false) int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? presencePenalty, + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + ChatCompletionStop? stop, + @JsonKey(includeIfNull: false) bool? 
stream, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(includeIfNull: false) String? user}); + + $ChatCompletionModelCopyWith<$Res> get model; + $ChatCompletionStopCopyWith<$Res>? get stop; +} + +/// @nodoc +class _$CreateChatCompletionRequestCopyWithImpl<$Res, + $Val extends CreateChatCompletionRequest> + implements $CreateChatCompletionRequestCopyWith<$Res> { + _$CreateChatCompletionRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? frequencyPenalty = freezed, + Object? functionCall = freezed, + Object? functions = freezed, + Object? logitBias = freezed, + Object? maxTokens = freezed, + Object? n = freezed, + Object? presencePenalty = freezed, + Object? stop = freezed, + Object? stream = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? user = freezed, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as ChatCompletionModel, + messages: null == messages + ? _value.messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + frequencyPenalty: freezed == frequencyPenalty + ? _value.frequencyPenalty + : frequencyPenalty // ignore: cast_nullable_to_non_nullable + as double?, + functionCall: freezed == functionCall + ? _value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as dynamic, + functions: freezed == functions + ? _value.functions + : functions // ignore: cast_nullable_to_non_nullable + as List?, + logitBias: freezed == logitBias + ? _value.logitBias + : logitBias // ignore: cast_nullable_to_non_nullable + as Map?, + maxTokens: freezed == maxTokens + ? 
_value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int?, + n: freezed == n + ? _value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + presencePenalty: freezed == presencePenalty + ? _value.presencePenalty + : presencePenalty // ignore: cast_nullable_to_non_nullable + as double?, + stop: freezed == stop + ? _value.stop + : stop // ignore: cast_nullable_to_non_nullable + as ChatCompletionStop?, + stream: freezed == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + user: freezed == user + ? _value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionModelCopyWith<$Res> get model { + return $ChatCompletionModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionStopCopyWith<$Res>? get stop { + if (_value.stop == null) { + return null; + } + + return $ChatCompletionStopCopyWith<$Res>(_value.stop!, (value) { + return _then(_value.copyWith(stop: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateChatCompletionRequestImplCopyWith<$Res> + implements $CreateChatCompletionRequestCopyWith<$Res> { + factory _$$CreateChatCompletionRequestImplCopyWith( + _$CreateChatCompletionRequestImpl value, + $Res Function(_$CreateChatCompletionRequestImpl) then) = + __$$CreateChatCompletionRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_ChatCompletionModelConverter() ChatCompletionModel model, + List messages, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? 
frequencyPenalty, + @JsonKey(name: 'function_call', includeIfNull: false) + dynamic functionCall, + @JsonKey(includeIfNull: false) List? functions, + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? logitBias, + @JsonKey(name: 'max_tokens', includeIfNull: false) int? maxTokens, + @JsonKey(includeIfNull: false) int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? presencePenalty, + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + ChatCompletionStop? stop, + @JsonKey(includeIfNull: false) bool? stream, + @JsonKey(includeIfNull: false) double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) double? topP, + @JsonKey(includeIfNull: false) String? user}); + + @override + $ChatCompletionModelCopyWith<$Res> get model; + @override + $ChatCompletionStopCopyWith<$Res>? get stop; +} + +/// @nodoc +class __$$CreateChatCompletionRequestImplCopyWithImpl<$Res> + extends _$CreateChatCompletionRequestCopyWithImpl<$Res, + _$CreateChatCompletionRequestImpl> + implements _$$CreateChatCompletionRequestImplCopyWith<$Res> { + __$$CreateChatCompletionRequestImplCopyWithImpl( + _$CreateChatCompletionRequestImpl _value, + $Res Function(_$CreateChatCompletionRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? messages = null, + Object? frequencyPenalty = freezed, + Object? functionCall = freezed, + Object? functions = freezed, + Object? logitBias = freezed, + Object? maxTokens = freezed, + Object? n = freezed, + Object? presencePenalty = freezed, + Object? stop = freezed, + Object? stream = freezed, + Object? temperature = freezed, + Object? topP = freezed, + Object? user = freezed, + }) { + return _then(_$CreateChatCompletionRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as ChatCompletionModel, + messages: null == messages + ? 
_value._messages + : messages // ignore: cast_nullable_to_non_nullable + as List, + frequencyPenalty: freezed == frequencyPenalty + ? _value.frequencyPenalty + : frequencyPenalty // ignore: cast_nullable_to_non_nullable + as double?, + functionCall: freezed == functionCall + ? _value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as dynamic, + functions: freezed == functions + ? _value._functions + : functions // ignore: cast_nullable_to_non_nullable + as List?, + logitBias: freezed == logitBias + ? _value._logitBias + : logitBias // ignore: cast_nullable_to_non_nullable + as Map?, + maxTokens: freezed == maxTokens + ? _value.maxTokens + : maxTokens // ignore: cast_nullable_to_non_nullable + as int?, + n: freezed == n + ? _value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + presencePenalty: freezed == presencePenalty + ? _value.presencePenalty + : presencePenalty // ignore: cast_nullable_to_non_nullable + as double?, + stop: freezed == stop + ? _value.stop + : stop // ignore: cast_nullable_to_non_nullable + as ChatCompletionStop?, + stream: freezed == stream + ? _value.stream + : stream // ignore: cast_nullable_to_non_nullable + as bool?, + temperature: freezed == temperature + ? _value.temperature + : temperature // ignore: cast_nullable_to_non_nullable + as double?, + topP: freezed == topP + ? _value.topP + : topP // ignore: cast_nullable_to_non_nullable + as double?, + user: freezed == user + ? 
_value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest { + const _$CreateChatCompletionRequestImpl( + {@_ChatCompletionModelConverter() required this.model, + required final List messages, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + this.frequencyPenalty = 0.0, + @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, + @JsonKey(includeIfNull: false) + final List? functions, + @JsonKey(name: 'logit_bias', includeIfNull: false) + final Map? logitBias, + @JsonKey(name: 'max_tokens', includeIfNull: false) this.maxTokens, + @JsonKey(includeIfNull: false) this.n = 1, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + this.presencePenalty = 0.0, + @_ChatCompletionStopConverter() @JsonKey(includeIfNull: false) this.stop, + @JsonKey(includeIfNull: false) this.stream = false, + @JsonKey(includeIfNull: false) this.temperature = 1.0, + @JsonKey(name: 'top_p', includeIfNull: false) this.topP = 1.0, + @JsonKey(includeIfNull: false) this.user}) + : _messages = messages, + _functions = functions, + _logitBias = logitBias, + super._(); + + factory _$CreateChatCompletionRequestImpl.fromJson( + Map json) => + _$$CreateChatCompletionRequestImplFromJson(json); + + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + @override + @_ChatCompletionModelConverter() + final ChatCompletionModel model; + + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + final List _messages; + + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). 
+ @override + List get messages { + if (_messages is EqualUnmodifiableListView) return _messages; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_messages); + } + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @override + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + final double? frequencyPenalty; + + /// Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + @override + @JsonKey(name: 'function_call', includeIfNull: false) + final dynamic functionCall; + + /// A list of functions the model may generate JSON inputs for. + final List? _functions; + + /// A list of functions the model may generate JSON inputs for. + @override + @JsonKey(includeIfNull: false) + List? get functions { + final value = _functions; + if (value == null) return null; + if (_functions is EqualUnmodifiableListView) return _functions; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(value); + } + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + final Map? _logitBias; + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @override + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? get logitBias { + final value = _logitBias; + if (value == null) return null; + if (_logitBias is EqualUnmodifiableMapView) return _logitBias; + // ignore: implicit_dynamic_type + return EqualUnmodifiableMapView(value); + } + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override + @JsonKey(name: 'max_tokens', includeIfNull: false) + final int? maxTokens; + + /// How many chat completion choices to generate for each input message. + @override + @JsonKey(includeIfNull: false) + final int? n; + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @override + @JsonKey(name: 'presence_penalty', includeIfNull: false) + final double? presencePenalty; + + /// Up to 4 sequences where the API will stop generating further tokens. + @override + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + final ChatCompletionStop? stop; + + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @override + @JsonKey(includeIfNull: false) + final bool? stream; + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @override + @JsonKey(includeIfNull: false) + final double? temperature; + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @override + @JsonKey(name: 'top_p', includeIfNull: false) + final double? topP; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override + @JsonKey(includeIfNull: false) + final String? 
user; + + @override + String toString() { + return 'CreateChatCompletionRequest(model: $model, messages: $messages, frequencyPenalty: $frequencyPenalty, functionCall: $functionCall, functions: $functions, logitBias: $logitBias, maxTokens: $maxTokens, n: $n, presencePenalty: $presencePenalty, stop: $stop, stream: $stream, temperature: $temperature, topP: $topP, user: $user)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateChatCompletionRequestImpl && + (identical(other.model, model) || other.model == model) && + const DeepCollectionEquality().equals(other._messages, _messages) && + (identical(other.frequencyPenalty, frequencyPenalty) || + other.frequencyPenalty == frequencyPenalty) && + const DeepCollectionEquality() + .equals(other.functionCall, functionCall) && + const DeepCollectionEquality() + .equals(other._functions, _functions) && + const DeepCollectionEquality() + .equals(other._logitBias, _logitBias) && + (identical(other.maxTokens, maxTokens) || + other.maxTokens == maxTokens) && + (identical(other.n, n) || other.n == n) && + (identical(other.presencePenalty, presencePenalty) || + other.presencePenalty == presencePenalty) && + (identical(other.stop, stop) || other.stop == stop) && + (identical(other.stream, stream) || other.stream == stream) && + (identical(other.temperature, temperature) || + other.temperature == temperature) && + (identical(other.topP, topP) || other.topP == topP) && + (identical(other.user, user) || other.user == user)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + model, + const DeepCollectionEquality().hash(_messages), + frequencyPenalty, + const DeepCollectionEquality().hash(functionCall), + const DeepCollectionEquality().hash(_functions), + const DeepCollectionEquality().hash(_logitBias), + maxTokens, + n, + presencePenalty, + stop, + stream, + temperature, + topP, + user); + + 
@JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> + get copyWith => __$$CreateChatCompletionRequestImplCopyWithImpl< + _$CreateChatCompletionRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateChatCompletionRequestImplToJson( + this, + ); + } +} + +abstract class _CreateChatCompletionRequest + extends CreateChatCompletionRequest { + const factory _CreateChatCompletionRequest( + {@_ChatCompletionModelConverter() + required final ChatCompletionModel model, + required final List messages, + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + final double? frequencyPenalty, + @JsonKey(name: 'function_call', includeIfNull: false) + final dynamic functionCall, + @JsonKey(includeIfNull: false) + final List? functions, + @JsonKey(name: 'logit_bias', includeIfNull: false) + final Map? logitBias, + @JsonKey(name: 'max_tokens', includeIfNull: false) final int? maxTokens, + @JsonKey(includeIfNull: false) final int? n, + @JsonKey(name: 'presence_penalty', includeIfNull: false) + final double? presencePenalty, + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + final ChatCompletionStop? stop, + @JsonKey(includeIfNull: false) final bool? stream, + @JsonKey(includeIfNull: false) final double? temperature, + @JsonKey(name: 'top_p', includeIfNull: false) final double? topP, + @JsonKey(includeIfNull: false) + final String? user}) = _$CreateChatCompletionRequestImpl; + const _CreateChatCompletionRequest._() : super._(); + + factory _CreateChatCompletionRequest.fromJson(Map json) = + _$CreateChatCompletionRequestImpl.fromJson; + + @override + + /// ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
+ @_ChatCompletionModelConverter() + ChatCompletionModel get model; + @override + + /// A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + List get messages; + @override + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'frequency_penalty', includeIfNull: false) + double? get frequencyPenalty; + @override + + /// Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + @JsonKey(name: 'function_call', includeIfNull: false) + dynamic get functionCall; + @override + + /// A list of functions the model may generate JSON inputs for. + @JsonKey(includeIfNull: false) + List? get functions; + @override + + /// Modify the likelihood of specified tokens appearing in the completion. + /// + /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + @JsonKey(name: 'logit_bias', includeIfNull: false) + Map? 
get logitBias; + @override + + /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion. + /// + /// The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @JsonKey(name: 'max_tokens', includeIfNull: false) + int? get maxTokens; + @override + + /// How many chat completion choices to generate for each input message. + @JsonKey(includeIfNull: false) + int? get n; + @override + + /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// + /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + @JsonKey(name: 'presence_penalty', includeIfNull: false) + double? get presencePenalty; + @override + + /// Up to 4 sequences where the API will stop generating further tokens. + @_ChatCompletionStopConverter() + @JsonKey(includeIfNull: false) + ChatCompletionStop? get stop; + @override + + /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + @JsonKey(includeIfNull: false) + bool? get stream; + @override + + /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + /// + /// We generally recommend altering this or `top_p` but not both. + @JsonKey(includeIfNull: false) + double? 
get temperature; + @override + + /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + /// + /// We generally recommend altering this or `temperature` but not both. + @JsonKey(name: 'top_p', includeIfNull: false) + double? get topP; + @override + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? get user; + @override + @JsonKey(ignore: true) + _$$CreateChatCompletionRequestImplCopyWith<_$CreateChatCompletionRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionModel _$ChatCompletionModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionChatCompletionModelString.fromJson(json); + case 'enumeration': + return _UnionChatCompletionModelEnum.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'ChatCompletionModel', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ChatCompletionModel { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ChatCompletionModels value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(ChatCompletionModels value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ChatCompletionModels value)? 
enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionChatCompletionModelString value) string, + required TResult Function(_UnionChatCompletionModelEnum value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionChatCompletionModelString value)? string, + TResult? Function(_UnionChatCompletionModelEnum value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionChatCompletionModelString value)? string, + TResult Function(_UnionChatCompletionModelEnum value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionModelCopyWith<$Res> { + factory $ChatCompletionModelCopyWith( + ChatCompletionModel value, $Res Function(ChatCompletionModel) then) = + _$ChatCompletionModelCopyWithImpl<$Res, ChatCompletionModel>; +} + +/// @nodoc +class _$ChatCompletionModelCopyWithImpl<$Res, $Val extends ChatCompletionModel> + implements $ChatCompletionModelCopyWith<$Res> { + _$ChatCompletionModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionChatCompletionModelStringImplCopyWith<$Res> { + factory _$$UnionChatCompletionModelStringImplCopyWith( + _$UnionChatCompletionModelStringImpl value, + $Res Function(_$UnionChatCompletionModelStringImpl) then) = + __$$UnionChatCompletionModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionChatCompletionModelStringImplCopyWithImpl<$Res> + extends _$ChatCompletionModelCopyWithImpl<$Res, + _$UnionChatCompletionModelStringImpl> + implements 
_$$UnionChatCompletionModelStringImplCopyWith<$Res> { + __$$UnionChatCompletionModelStringImplCopyWithImpl( + _$UnionChatCompletionModelStringImpl _value, + $Res Function(_$UnionChatCompletionModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionChatCompletionModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionChatCompletionModelStringImpl + extends _UnionChatCompletionModelString { + const _$UnionChatCompletionModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionChatCompletionModelStringImpl.fromJson( + Map json) => + _$$UnionChatCompletionModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ChatCompletionModel.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionChatCompletionModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionChatCompletionModelStringImplCopyWith< + _$UnionChatCompletionModelStringImpl> + get copyWith => __$$UnionChatCompletionModelStringImplCopyWithImpl< + _$UnionChatCompletionModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ChatCompletionModels value) enumeration, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? 
Function(ChatCompletionModels value)? enumeration, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ChatCompletionModels value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionChatCompletionModelString value) string, + required TResult Function(_UnionChatCompletionModelEnum value) enumeration, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionChatCompletionModelString value)? string, + TResult? Function(_UnionChatCompletionModelEnum value)? enumeration, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionChatCompletionModelString value)? string, + TResult Function(_UnionChatCompletionModelEnum value)? 
enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionChatCompletionModelStringImplToJson( + this, + ); + } +} + +abstract class _UnionChatCompletionModelString extends ChatCompletionModel { + const factory _UnionChatCompletionModelString(final String value) = + _$UnionChatCompletionModelStringImpl; + const _UnionChatCompletionModelString._() : super._(); + + factory _UnionChatCompletionModelString.fromJson(Map json) = + _$UnionChatCompletionModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionChatCompletionModelStringImplCopyWith< + _$UnionChatCompletionModelStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionChatCompletionModelEnumImplCopyWith<$Res> { + factory _$$UnionChatCompletionModelEnumImplCopyWith( + _$UnionChatCompletionModelEnumImpl value, + $Res Function(_$UnionChatCompletionModelEnumImpl) then) = + __$$UnionChatCompletionModelEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({ChatCompletionModels value}); +} + +/// @nodoc +class __$$UnionChatCompletionModelEnumImplCopyWithImpl<$Res> + extends _$ChatCompletionModelCopyWithImpl<$Res, + _$UnionChatCompletionModelEnumImpl> + implements _$$UnionChatCompletionModelEnumImplCopyWith<$Res> { + __$$UnionChatCompletionModelEnumImplCopyWithImpl( + _$UnionChatCompletionModelEnumImpl _value, + $Res Function(_$UnionChatCompletionModelEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionChatCompletionModelEnumImpl( + null == value + ? 
_value.value + : value // ignore: cast_nullable_to_non_nullable + as ChatCompletionModels, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionChatCompletionModelEnumImpl extends _UnionChatCompletionModelEnum { + const _$UnionChatCompletionModelEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 'enumeration', + super._(); + + factory _$UnionChatCompletionModelEnumImpl.fromJson( + Map json) => + _$$UnionChatCompletionModelEnumImplFromJson(json); + + @override + final ChatCompletionModels value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ChatCompletionModel.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionChatCompletionModelEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionChatCompletionModelEnumImplCopyWith< + _$UnionChatCompletionModelEnumImpl> + get copyWith => __$$UnionChatCompletionModelEnumImplCopyWithImpl< + _$UnionChatCompletionModelEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ChatCompletionModels value) enumeration, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(ChatCompletionModels value)? enumeration, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ChatCompletionModels value)? 
enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionChatCompletionModelString value) string, + required TResult Function(_UnionChatCompletionModelEnum value) enumeration, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionChatCompletionModelString value)? string, + TResult? Function(_UnionChatCompletionModelEnum value)? enumeration, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionChatCompletionModelString value)? string, + TResult Function(_UnionChatCompletionModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionChatCompletionModelEnumImplToJson( + this, + ); + } +} + +abstract class _UnionChatCompletionModelEnum extends ChatCompletionModel { + const factory _UnionChatCompletionModelEnum( + final ChatCompletionModels value) = _$UnionChatCompletionModelEnumImpl; + const _UnionChatCompletionModelEnum._() : super._(); + + factory _UnionChatCompletionModelEnum.fromJson(Map json) = + _$UnionChatCompletionModelEnumImpl.fromJson; + + @override + ChatCompletionModels get value; + @JsonKey(ignore: true) + _$$UnionChatCompletionModelEnumImplCopyWith< + _$UnionChatCompletionModelEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionStop _$ChatCompletionStopFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionChatCompletionStopString.fromJson(json); + case 'arrayString': + return _UnionChatCompletionStopArrayString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'ChatCompletionStop', + 'Invalid union type 
"${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ChatCompletionStop { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionChatCompletionStopString value) string, + required TResult Function(_UnionChatCompletionStopArrayString value) + arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionChatCompletionStopString value)? string, + TResult? Function(_UnionChatCompletionStopArrayString value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionChatCompletionStopString value)? string, + TResult Function(_UnionChatCompletionStopArrayString value)? 
arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionStopCopyWith<$Res> { + factory $ChatCompletionStopCopyWith( + ChatCompletionStop value, $Res Function(ChatCompletionStop) then) = + _$ChatCompletionStopCopyWithImpl<$Res, ChatCompletionStop>; +} + +/// @nodoc +class _$ChatCompletionStopCopyWithImpl<$Res, $Val extends ChatCompletionStop> + implements $ChatCompletionStopCopyWith<$Res> { + _$ChatCompletionStopCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionChatCompletionStopStringImplCopyWith<$Res> { + factory _$$UnionChatCompletionStopStringImplCopyWith( + _$UnionChatCompletionStopStringImpl value, + $Res Function(_$UnionChatCompletionStopStringImpl) then) = + __$$UnionChatCompletionStopStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionChatCompletionStopStringImplCopyWithImpl<$Res> + extends _$ChatCompletionStopCopyWithImpl<$Res, + _$UnionChatCompletionStopStringImpl> + implements _$$UnionChatCompletionStopStringImplCopyWith<$Res> { + __$$UnionChatCompletionStopStringImplCopyWithImpl( + _$UnionChatCompletionStopStringImpl _value, + $Res Function(_$UnionChatCompletionStopStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionChatCompletionStopStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionChatCompletionStopStringImpl + extends _UnionChatCompletionStopString { + const _$UnionChatCompletionStopStringImpl(this.value, {final String? $type}) + : $type = $type ?? 
'string', + super._(); + + factory _$UnionChatCompletionStopStringImpl.fromJson( + Map json) => + _$$UnionChatCompletionStopStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ChatCompletionStop.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionChatCompletionStopStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionChatCompletionStopStringImplCopyWith< + _$UnionChatCompletionStopStringImpl> + get copyWith => __$$UnionChatCompletionStopStringImplCopyWithImpl< + _$UnionChatCompletionStopStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionChatCompletionStopString value) string, + required TResult Function(_UnionChatCompletionStopArrayString value) + arrayString, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionChatCompletionStopString value)? string, + TResult? 
Function(_UnionChatCompletionStopArrayString value)? arrayString, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionChatCompletionStopString value)? string, + TResult Function(_UnionChatCompletionStopArrayString value)? arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionChatCompletionStopStringImplToJson( + this, + ); + } +} + +abstract class _UnionChatCompletionStopString extends ChatCompletionStop { + const factory _UnionChatCompletionStopString(final String value) = + _$UnionChatCompletionStopStringImpl; + const _UnionChatCompletionStopString._() : super._(); + + factory _UnionChatCompletionStopString.fromJson(Map json) = + _$UnionChatCompletionStopStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionChatCompletionStopStringImplCopyWith< + _$UnionChatCompletionStopStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionChatCompletionStopArrayStringImplCopyWith<$Res> { + factory _$$UnionChatCompletionStopArrayStringImplCopyWith( + _$UnionChatCompletionStopArrayStringImpl value, + $Res Function(_$UnionChatCompletionStopArrayStringImpl) then) = + __$$UnionChatCompletionStopArrayStringImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionChatCompletionStopArrayStringImplCopyWithImpl<$Res> + extends _$ChatCompletionStopCopyWithImpl<$Res, + _$UnionChatCompletionStopArrayStringImpl> + implements _$$UnionChatCompletionStopArrayStringImplCopyWith<$Res> { + __$$UnionChatCompletionStopArrayStringImplCopyWithImpl( + _$UnionChatCompletionStopArrayStringImpl _value, + $Res Function(_$UnionChatCompletionStopArrayStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? 
value = null,
  }) {
    return _then(_$UnionChatCompletionStopArrayStringImpl(
      null == value
          ? _value._value
          : value // ignore: cast_nullable_to_non_nullable
              as List<String>,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$UnionChatCompletionStopArrayStringImpl
    extends _UnionChatCompletionStopArrayString {
  const _$UnionChatCompletionStopArrayStringImpl(final List<String> value,
      {final String? $type})
      : _value = value,
        $type = $type ?? 'arrayString',
        super._();

  factory _$UnionChatCompletionStopArrayStringImpl.fromJson(
          Map<String, dynamic> json) =>
      _$$UnionChatCompletionStopArrayStringImplFromJson(json);

  // Backing list; exposed through [value] as an unmodifiable view.
  final List<String> _value;
  @override
  List<String> get value {
    if (_value is EqualUnmodifiableListView) return _value;
    // ignore: implicit_dynamic_type
    return EqualUnmodifiableListView(_value);
  }

  @JsonKey(name: 'runtimeType')
  final String $type;

  @override
  String toString() {
    return 'ChatCompletionStop.arrayString(value: $value)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$UnionChatCompletionStopArrayStringImpl &&
            const DeepCollectionEquality().equals(other._value, _value));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode =>
      Object.hash(runtimeType, const DeepCollectionEquality().hash(_value));

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$UnionChatCompletionStopArrayStringImplCopyWith<
          _$UnionChatCompletionStopArrayStringImpl>
      get copyWith => __$$UnionChatCompletionStopArrayStringImplCopyWithImpl<
          _$UnionChatCompletionStopArrayStringImpl>(this, _$identity);

  @override
  @optionalTypeArgs
  TResult when<TResult extends Object?>({
    required TResult Function(String value) string,
    required TResult Function(List<String> value) arrayString,
  }) {
    return arrayString(value);
  }

  @override
  @optionalTypeArgs
  TResult? whenOrNull<TResult extends Object?>({
    TResult? Function(String value)? string,
    TResult? Function(List<String> value)? arrayString,
  }) {
    return arrayString?.call(value);
  }

  @override
  @optionalTypeArgs
  TResult maybeWhen<TResult extends Object?>({
    TResult Function(String value)? string,
    TResult Function(List<String> value)? arrayString,
    required TResult orElse(),
  }) {
    if (arrayString != null) {
      return arrayString(value);
    }
    return orElse();
  }

  @override
  @optionalTypeArgs
  TResult map<TResult extends Object?>({
    required TResult Function(_UnionChatCompletionStopString value) string,
    required TResult Function(_UnionChatCompletionStopArrayString value)
        arrayString,
  }) {
    return arrayString(this);
  }

  @override
  @optionalTypeArgs
  TResult? mapOrNull<TResult extends Object?>({
    TResult? Function(_UnionChatCompletionStopString value)? string,
    TResult? Function(_UnionChatCompletionStopArrayString value)? arrayString,
  }) {
    return arrayString?.call(this);
  }

  @override
  @optionalTypeArgs
  TResult maybeMap<TResult extends Object?>({
    TResult Function(_UnionChatCompletionStopString value)? string,
    TResult Function(_UnionChatCompletionStopArrayString value)? arrayString,
    required TResult orElse(),
  }) {
    if (arrayString != null) {
      return arrayString(this);
    }
    return orElse();
  }

  @override
  Map<String, dynamic> toJson() {
    return _$$UnionChatCompletionStopArrayStringImplToJson(
      this,
    );
  }
}

abstract class _UnionChatCompletionStopArrayString extends ChatCompletionStop {
  const factory _UnionChatCompletionStopArrayString(final List<String> value) =
      _$UnionChatCompletionStopArrayStringImpl;
  const _UnionChatCompletionStopArrayString._() : super._();

  factory _UnionChatCompletionStopArrayString.fromJson(
          Map<String, dynamic> json) =
      _$UnionChatCompletionStopArrayStringImpl.fromJson;

  @override
  List<String> get value;
  @JsonKey(ignore: true)
  _$$UnionChatCompletionStopArrayStringImplCopyWith<
          _$UnionChatCompletionStopArrayStringImpl>
      get copyWith => throw _privateConstructorUsedError;
}

ChatCompletionMessage _$ChatCompletionMessageFromJson(
    Map<String, dynamic> json) {
  return _ChatCompletionMessage.fromJson(json);
}

/// @nodoc
mixin _$ChatCompletionMessage {
  /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
  ChatCompletionMessageRole get role => throw _privateConstructorUsedError;

  /// The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls.
  String? get content => throw _privateConstructorUsedError;

  /// The name and arguments of a function that should be called, as generated by the model.
  @JsonKey(name: 'function_call', includeIfNull: false)
  ChatCompletionMessageFunctionCall? get functionCall =>
      throw _privateConstructorUsedError;

  /// The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
  @JsonKey(includeIfNull: false)
  String?
get name => throw _privateConstructorUsedError;

  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)
  $ChatCompletionMessageCopyWith<ChatCompletionMessage> get copyWith =>
      throw _privateConstructorUsedError;
}

/// @nodoc
abstract class $ChatCompletionMessageCopyWith<$Res> {
  factory $ChatCompletionMessageCopyWith(ChatCompletionMessage value,
          $Res Function(ChatCompletionMessage) then) =
      _$ChatCompletionMessageCopyWithImpl<$Res, ChatCompletionMessage>;
  @useResult
  $Res call(
      {ChatCompletionMessageRole role,
      String? content,
      @JsonKey(name: 'function_call', includeIfNull: false)
      ChatCompletionMessageFunctionCall? functionCall,
      @JsonKey(includeIfNull: false) String? name});

  $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall;
}

/// @nodoc
class _$ChatCompletionMessageCopyWithImpl<$Res,
        $Val extends ChatCompletionMessage>
    implements $ChatCompletionMessageCopyWith<$Res> {
  _$ChatCompletionMessageCopyWithImpl(this._value, this._then);

  // ignore: unused_field
  final $Val _value;
  // ignore: unused_field
  final $Res Function($Val) _then;

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? role = null,
    Object? content = freezed,
    Object? functionCall = freezed,
    Object? name = freezed,
  }) {
    return _then(_value.copyWith(
      role: null == role
          ? _value.role
          : role // ignore: cast_nullable_to_non_nullable
              as ChatCompletionMessageRole,
      content: freezed == content
          ? _value.content
          : content // ignore: cast_nullable_to_non_nullable
              as String?,
      functionCall: freezed == functionCall
          ? _value.functionCall
          : functionCall // ignore: cast_nullable_to_non_nullable
              as ChatCompletionMessageFunctionCall?,
      name: freezed == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String?,
    ) as $Val);
  }

  @override
  @pragma('vm:prefer-inline')
  $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall {
    if (_value.functionCall == null) {
      return null;
    }

    return $ChatCompletionMessageFunctionCallCopyWith<$Res>(
        _value.functionCall!, (value) {
      return _then(_value.copyWith(functionCall: value) as $Val);
    });
  }
}

/// @nodoc
abstract class _$$ChatCompletionMessageImplCopyWith<$Res>
    implements $ChatCompletionMessageCopyWith<$Res> {
  factory _$$ChatCompletionMessageImplCopyWith(
          _$ChatCompletionMessageImpl value,
          $Res Function(_$ChatCompletionMessageImpl) then) =
      __$$ChatCompletionMessageImplCopyWithImpl<$Res>;
  @override
  @useResult
  $Res call(
      {ChatCompletionMessageRole role,
      String? content,
      @JsonKey(name: 'function_call', includeIfNull: false)
      ChatCompletionMessageFunctionCall? functionCall,
      @JsonKey(includeIfNull: false) String? name});

  @override
  $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall;
}

/// @nodoc
class __$$ChatCompletionMessageImplCopyWithImpl<$Res>
    extends _$ChatCompletionMessageCopyWithImpl<$Res,
        _$ChatCompletionMessageImpl>
    implements _$$ChatCompletionMessageImplCopyWith<$Res> {
  __$$ChatCompletionMessageImplCopyWithImpl(_$ChatCompletionMessageImpl _value,
      $Res Function(_$ChatCompletionMessageImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? role = null,
    Object? content = freezed,
    Object? functionCall = freezed,
    Object? name = freezed,
  }) {
    return _then(_$ChatCompletionMessageImpl(
      role: null == role
          ? _value.role
          : role // ignore: cast_nullable_to_non_nullable
              as ChatCompletionMessageRole,
      content: freezed == content
          ? _value.content
          : content // ignore: cast_nullable_to_non_nullable
              as String?,
      functionCall: freezed == functionCall
          ? _value.functionCall
          : functionCall // ignore: cast_nullable_to_non_nullable
              as ChatCompletionMessageFunctionCall?,
      name: freezed == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String?,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$ChatCompletionMessageImpl extends _ChatCompletionMessage {
  const _$ChatCompletionMessageImpl(
      {required this.role,
      required this.content,
      @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall,
      @JsonKey(includeIfNull: false) this.name})
      : super._();

  factory _$ChatCompletionMessageImpl.fromJson(Map<String, dynamic> json) =>
      _$$ChatCompletionMessageImplFromJson(json);

  /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
  @override
  final ChatCompletionMessageRole role;

  /// The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls.
  @override
  final String? content;

  /// The name and arguments of a function that should be called, as generated by the model.
  @override
  @JsonKey(name: 'function_call', includeIfNull: false)
  final ChatCompletionMessageFunctionCall? functionCall;

  /// The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
  @override
  @JsonKey(includeIfNull: false)
  final String? name;

  @override
  String toString() {
    return 'ChatCompletionMessage(role: $role, content: $content, functionCall: $functionCall, name: $name)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$ChatCompletionMessageImpl &&
            (identical(other.role, role) || other.role == role) &&
            (identical(other.content, content) || other.content == content) &&
            (identical(other.functionCall, functionCall) ||
                other.functionCall == functionCall) &&
            (identical(other.name, name) || other.name == name));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode =>
      Object.hash(runtimeType, role, content, functionCall, name);

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$ChatCompletionMessageImplCopyWith<_$ChatCompletionMessageImpl>
      get copyWith => __$$ChatCompletionMessageImplCopyWithImpl<
          _$ChatCompletionMessageImpl>(this, _$identity);

  @override
  Map<String, dynamic> toJson() {
    return _$$ChatCompletionMessageImplToJson(
      this,
    );
  }
}

abstract class _ChatCompletionMessage extends ChatCompletionMessage {
  const factory _ChatCompletionMessage(
      {required final ChatCompletionMessageRole role,
      required final String? content,
      @JsonKey(name: 'function_call', includeIfNull: false)
      final ChatCompletionMessageFunctionCall? functionCall,
      @JsonKey(includeIfNull: false)
      final String? name}) = _$ChatCompletionMessageImpl;
  const _ChatCompletionMessage._() : super._();

  factory _ChatCompletionMessage.fromJson(Map<String, dynamic> json) =
      _$ChatCompletionMessageImpl.fromJson;

  @override

  /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`.
  ChatCompletionMessageRole get role;
  @override

  /// The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls.
  String? get content;
  @override

  /// The name and arguments of a function that should be called, as generated by the model.
  @JsonKey(name: 'function_call', includeIfNull: false)
  ChatCompletionMessageFunctionCall? get functionCall;
  @override

  /// The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
  @JsonKey(includeIfNull: false)
  String? get name;
  @override
  @JsonKey(ignore: true)
  _$$ChatCompletionMessageImplCopyWith<_$ChatCompletionMessageImpl>
      get copyWith => throw _privateConstructorUsedError;
}

ChatCompletionMessageFunctionCall _$ChatCompletionMessageFunctionCallFromJson(
    Map<String, dynamic> json) {
  return _ChatCompletionMessageFunctionCall.fromJson(json);
}

/// @nodoc
mixin _$ChatCompletionMessageFunctionCall {
  /// The name of the function to call.
  String get name => throw _privateConstructorUsedError;

  /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
String get arguments => throw _privateConstructorUsedError;

  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)
  $ChatCompletionMessageFunctionCallCopyWith<ChatCompletionMessageFunctionCall>
      get copyWith => throw _privateConstructorUsedError;
}

/// @nodoc
abstract class $ChatCompletionMessageFunctionCallCopyWith<$Res> {
  factory $ChatCompletionMessageFunctionCallCopyWith(
          ChatCompletionMessageFunctionCall value,
          $Res Function(ChatCompletionMessageFunctionCall) then) =
      _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res,
          ChatCompletionMessageFunctionCall>;
  @useResult
  $Res call({String name, String arguments});
}

/// @nodoc
class _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res,
        $Val extends ChatCompletionMessageFunctionCall>
    implements $ChatCompletionMessageFunctionCallCopyWith<$Res> {
  _$ChatCompletionMessageFunctionCallCopyWithImpl(this._value, this._then);

  // ignore: unused_field
  final $Val _value;
  // ignore: unused_field
  final $Res Function($Val) _then;

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? name = null,
    Object? arguments = null,
  }) {
    return _then(_value.copyWith(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
      arguments: null == arguments
          ? _value.arguments
          : arguments // ignore: cast_nullable_to_non_nullable
              as String,
    ) as $Val);
  }
}

/// @nodoc
abstract class _$$ChatCompletionMessageFunctionCallImplCopyWith<$Res>
    implements $ChatCompletionMessageFunctionCallCopyWith<$Res> {
  factory _$$ChatCompletionMessageFunctionCallImplCopyWith(
          _$ChatCompletionMessageFunctionCallImpl value,
          $Res Function(_$ChatCompletionMessageFunctionCallImpl) then) =
      __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res>;
  @override
  @useResult
  $Res call({String name, String arguments});
}

/// @nodoc
class __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<$Res>
    extends _$ChatCompletionMessageFunctionCallCopyWithImpl<$Res,
        _$ChatCompletionMessageFunctionCallImpl>
    implements _$$ChatCompletionMessageFunctionCallImplCopyWith<$Res> {
  __$$ChatCompletionMessageFunctionCallImplCopyWithImpl(
      _$ChatCompletionMessageFunctionCallImpl _value,
      $Res Function(_$ChatCompletionMessageFunctionCallImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? name = null,
    Object? arguments = null,
  }) {
    return _then(_$ChatCompletionMessageFunctionCallImpl(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
      arguments: null == arguments
          ? _value.arguments
          : arguments // ignore: cast_nullable_to_non_nullable
              as String,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$ChatCompletionMessageFunctionCallImpl
    extends _ChatCompletionMessageFunctionCall {
  const _$ChatCompletionMessageFunctionCallImpl(
      {required this.name, required this.arguments})
      : super._();

  factory _$ChatCompletionMessageFunctionCallImpl.fromJson(
          Map<String, dynamic> json) =>
      _$$ChatCompletionMessageFunctionCallImplFromJson(json);

  /// The name of the function to call.
  @override
  final String name;

  /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
  @override
  final String arguments;

  @override
  String toString() {
    return 'ChatCompletionMessageFunctionCall(name: $name, arguments: $arguments)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$ChatCompletionMessageFunctionCallImpl &&
            (identical(other.name, name) || other.name == name) &&
            (identical(other.arguments, arguments) ||
                other.arguments == arguments));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode => Object.hash(runtimeType, name, arguments);

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$ChatCompletionMessageFunctionCallImplCopyWith<
          _$ChatCompletionMessageFunctionCallImpl>
      get copyWith => __$$ChatCompletionMessageFunctionCallImplCopyWithImpl<
          _$ChatCompletionMessageFunctionCallImpl>(this, _$identity);

  @override
  Map<String, dynamic> toJson() {
    return _$$ChatCompletionMessageFunctionCallImplToJson(
      this,
    );
  }
}

abstract class _ChatCompletionMessageFunctionCall
    extends ChatCompletionMessageFunctionCall {
  const factory _ChatCompletionMessageFunctionCall(
      {required final String name,
      required final String arguments}) = _$ChatCompletionMessageFunctionCallImpl;
  const _ChatCompletionMessageFunctionCall._() : super._();

  factory _ChatCompletionMessageFunctionCall.fromJson(
          Map<String, dynamic> json) =
      _$ChatCompletionMessageFunctionCallImpl.fromJson;

  @override

  /// The name of the function to call.
  String get name;
  @override

  /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
  String get arguments;
  @override
  @JsonKey(ignore: true)
  _$$ChatCompletionMessageFunctionCallImplCopyWith<
          _$ChatCompletionMessageFunctionCallImpl>
      get copyWith => throw _privateConstructorUsedError;
}

ChatCompletionFunctions _$ChatCompletionFunctionsFromJson(
    Map<String, dynamic> json) {
  return _ChatCompletionFunctions.fromJson(json);
}

/// @nodoc
mixin _$ChatCompletionFunctions {
  /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
  String get name => throw _privateConstructorUsedError;

  /// A description of what the function does, used by the model to choose when and how to call the function.
  @JsonKey(includeIfNull: false)
  String? get description => throw _privateConstructorUsedError;

  /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
  ///
  /// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
  Map<String, dynamic> get parameters => throw _privateConstructorUsedError;

  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)
  $ChatCompletionFunctionsCopyWith<ChatCompletionFunctions> get copyWith =>
      throw _privateConstructorUsedError;
}

/// @nodoc
abstract class $ChatCompletionFunctionsCopyWith<$Res> {
  factory $ChatCompletionFunctionsCopyWith(ChatCompletionFunctions value,
          $Res Function(ChatCompletionFunctions) then) =
      _$ChatCompletionFunctionsCopyWithImpl<$Res, ChatCompletionFunctions>;
  @useResult
  $Res call(
      {String name,
      @JsonKey(includeIfNull: false) String?
description,
      Map<String, dynamic> parameters});
}

/// @nodoc
class _$ChatCompletionFunctionsCopyWithImpl<$Res,
        $Val extends ChatCompletionFunctions>
    implements $ChatCompletionFunctionsCopyWith<$Res> {
  _$ChatCompletionFunctionsCopyWithImpl(this._value, this._then);

  // ignore: unused_field
  final $Val _value;
  // ignore: unused_field
  final $Res Function($Val) _then;

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? name = null,
    Object? description = freezed,
    Object? parameters = null,
  }) {
    return _then(_value.copyWith(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
      description: freezed == description
          ? _value.description
          : description // ignore: cast_nullable_to_non_nullable
              as String?,
      parameters: null == parameters
          ? _value.parameters
          : parameters // ignore: cast_nullable_to_non_nullable
              as Map<String, dynamic>,
    ) as $Val);
  }
}

/// @nodoc
abstract class _$$ChatCompletionFunctionsImplCopyWith<$Res>
    implements $ChatCompletionFunctionsCopyWith<$Res> {
  factory _$$ChatCompletionFunctionsImplCopyWith(
          _$ChatCompletionFunctionsImpl value,
          $Res Function(_$ChatCompletionFunctionsImpl) then) =
      __$$ChatCompletionFunctionsImplCopyWithImpl<$Res>;
  @override
  @useResult
  $Res call(
      {String name,
      @JsonKey(includeIfNull: false) String? description,
      Map<String, dynamic> parameters});
}

/// @nodoc
class __$$ChatCompletionFunctionsImplCopyWithImpl<$Res>
    extends _$ChatCompletionFunctionsCopyWithImpl<$Res,
        _$ChatCompletionFunctionsImpl>
    implements _$$ChatCompletionFunctionsImplCopyWith<$Res> {
  __$$ChatCompletionFunctionsImplCopyWithImpl(
      _$ChatCompletionFunctionsImpl _value,
      $Res Function(_$ChatCompletionFunctionsImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? name = null,
    Object? description = freezed,
    Object? parameters = null,
  }) {
    return _then(_$ChatCompletionFunctionsImpl(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
      description: freezed == description
          ? _value.description
          : description // ignore: cast_nullable_to_non_nullable
              as String?,
      parameters: null == parameters
          ? _value._parameters
          : parameters // ignore: cast_nullable_to_non_nullable
              as Map<String, dynamic>,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$ChatCompletionFunctionsImpl extends _ChatCompletionFunctions {
  const _$ChatCompletionFunctionsImpl(
      {required this.name,
      @JsonKey(includeIfNull: false) this.description,
      required final Map<String, dynamic> parameters})
      : _parameters = parameters,
        super._();

  factory _$ChatCompletionFunctionsImpl.fromJson(Map<String, dynamic> json) =>
      _$$ChatCompletionFunctionsImplFromJson(json);

  /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
  @override
  final String name;

  /// A description of what the function does, used by the model to choose when and how to call the function.
  @override
  @JsonKey(includeIfNull: false)
  final String? description;

  /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
  ///
  /// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
  final Map<String, dynamic> _parameters;

  /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
  ///
  /// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
  @override
  Map<String, dynamic> get parameters {
    if (_parameters is EqualUnmodifiableMapView) return _parameters;
    // ignore: implicit_dynamic_type
    return EqualUnmodifiableMapView(_parameters);
  }

  @override
  String toString() {
    return 'ChatCompletionFunctions(name: $name, description: $description, parameters: $parameters)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$ChatCompletionFunctionsImpl &&
            (identical(other.name, name) || other.name == name) &&
            (identical(other.description, description) ||
                other.description == description) &&
            const DeepCollectionEquality()
                .equals(other._parameters, _parameters));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode => Object.hash(runtimeType, name, description,
      const DeepCollectionEquality().hash(_parameters));

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$ChatCompletionFunctionsImplCopyWith<_$ChatCompletionFunctionsImpl>
      get copyWith => __$$ChatCompletionFunctionsImplCopyWithImpl<
          _$ChatCompletionFunctionsImpl>(this, _$identity);

  @override
  Map<String, dynamic> toJson() {
    return _$$ChatCompletionFunctionsImplToJson(
      this,
    );
  }
}

abstract class _ChatCompletionFunctions extends ChatCompletionFunctions {
  const factory _ChatCompletionFunctions(
      {required final String name,
      @JsonKey(includeIfNull: false) final String? description,
      required final Map<String, dynamic> parameters}) =
      _$ChatCompletionFunctionsImpl;
  const _ChatCompletionFunctions._() : super._();

  factory _ChatCompletionFunctions.fromJson(Map<String, dynamic> json) =
      _$ChatCompletionFunctionsImpl.fromJson;

  @override

  /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
  String get name;
  @override

  /// A description of what the function does, used by the model to choose when and how to call the function.
  @JsonKey(includeIfNull: false)
  String? get description;
  @override

  /// The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
  ///
  /// To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`.
  Map<String, dynamic> get parameters;
  @override
  @JsonKey(ignore: true)
  _$$ChatCompletionFunctionsImplCopyWith<_$ChatCompletionFunctionsImpl>
      get copyWith => throw _privateConstructorUsedError;
}

ChatCompletionFunctionCallOption _$ChatCompletionFunctionCallOptionFromJson(
    Map<String, dynamic> json) {
  return _ChatCompletionFunctionCallOption.fromJson(json);
}

/// @nodoc
mixin _$ChatCompletionFunctionCallOption {
  /// The name of the function to call.
  String get name => throw _privateConstructorUsedError;

  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)
  $ChatCompletionFunctionCallOptionCopyWith<ChatCompletionFunctionCallOption>
      get copyWith => throw _privateConstructorUsedError;
}

/// @nodoc
abstract class $ChatCompletionFunctionCallOptionCopyWith<$Res> {
  factory $ChatCompletionFunctionCallOptionCopyWith(
          ChatCompletionFunctionCallOption value,
          $Res Function(ChatCompletionFunctionCallOption) then) =
      _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res,
          ChatCompletionFunctionCallOption>;
  @useResult
  $Res call({String name});
}

/// @nodoc
class _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res,
        $Val extends ChatCompletionFunctionCallOption>
    implements $ChatCompletionFunctionCallOptionCopyWith<$Res> {
  _$ChatCompletionFunctionCallOptionCopyWithImpl(this._value, this._then);

  // ignore: unused_field
  final $Val _value;
  // ignore: unused_field
  final $Res Function($Val) _then;

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object?
name = null,
  }) {
    return _then(_value.copyWith(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
    ) as $Val);
  }
}

/// @nodoc
abstract class _$$ChatCompletionFunctionCallOptionImplCopyWith<$Res>
    implements $ChatCompletionFunctionCallOptionCopyWith<$Res> {
  factory _$$ChatCompletionFunctionCallOptionImplCopyWith(
          _$ChatCompletionFunctionCallOptionImpl value,
          $Res Function(_$ChatCompletionFunctionCallOptionImpl) then) =
      __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res>;
  @override
  @useResult
  $Res call({String name});
}

/// @nodoc
class __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<$Res>
    extends _$ChatCompletionFunctionCallOptionCopyWithImpl<$Res,
        _$ChatCompletionFunctionCallOptionImpl>
    implements _$$ChatCompletionFunctionCallOptionImplCopyWith<$Res> {
  __$$ChatCompletionFunctionCallOptionImplCopyWithImpl(
      _$ChatCompletionFunctionCallOptionImpl _value,
      $Res Function(_$ChatCompletionFunctionCallOptionImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? name = null,
  }) {
    return _then(_$ChatCompletionFunctionCallOptionImpl(
      name: null == name
          ? _value.name
          : name // ignore: cast_nullable_to_non_nullable
              as String,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$ChatCompletionFunctionCallOptionImpl
    extends _ChatCompletionFunctionCallOption {
  const _$ChatCompletionFunctionCallOptionImpl({required this.name})
      : super._();

  factory _$ChatCompletionFunctionCallOptionImpl.fromJson(
          Map<String, dynamic> json) =>
      _$$ChatCompletionFunctionCallOptionImplFromJson(json);

  /// The name of the function to call.
  @override
  final String name;

  @override
  String toString() {
    return 'ChatCompletionFunctionCallOption(name: $name)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$ChatCompletionFunctionCallOptionImpl &&
            (identical(other.name, name) || other.name == name));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode => Object.hash(runtimeType, name);

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$ChatCompletionFunctionCallOptionImplCopyWith<
          _$ChatCompletionFunctionCallOptionImpl>
      get copyWith => __$$ChatCompletionFunctionCallOptionImplCopyWithImpl<
          _$ChatCompletionFunctionCallOptionImpl>(this, _$identity);

  @override
  Map<String, dynamic> toJson() {
    return _$$ChatCompletionFunctionCallOptionImplToJson(
      this,
    );
  }
}

abstract class _ChatCompletionFunctionCallOption
    extends ChatCompletionFunctionCallOption {
  const factory _ChatCompletionFunctionCallOption(
      {required final String name}) = _$ChatCompletionFunctionCallOptionImpl;
  const _ChatCompletionFunctionCallOption._() : super._();

  factory _ChatCompletionFunctionCallOption.fromJson(
          Map<String, dynamic> json) =
      _$ChatCompletionFunctionCallOptionImpl.fromJson;

  @override

  /// The name of the function to call.
  String get name;
  @override
  @JsonKey(ignore: true)
  _$$ChatCompletionFunctionCallOptionImplCopyWith<
          _$ChatCompletionFunctionCallOptionImpl>
      get copyWith => throw _privateConstructorUsedError;
}

CreateChatCompletionResponse _$CreateChatCompletionResponseFromJson(
    Map<String, dynamic> json) {
  return _CreateChatCompletionResponse.fromJson(json);
}

/// @nodoc
mixin _$CreateChatCompletionResponse {
  /// A unique identifier for the chat completion.
  String get id => throw _privateConstructorUsedError;

  /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
List<ChatCompletionResponseChoice> get choices =>
      throw _privateConstructorUsedError;

  /// The Unix timestamp (in seconds) of when the chat completion was created.
  int get created => throw _privateConstructorUsedError;

  /// The model used for the chat completion.
  String get model => throw _privateConstructorUsedError;

  /// The object type, which is always `chat.completion`.
  String get object => throw _privateConstructorUsedError;

  /// Usage statistics for the completion request.
  @JsonKey(includeIfNull: false)
  CompletionUsage? get usage => throw _privateConstructorUsedError;

  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)
  $CreateChatCompletionResponseCopyWith<CreateChatCompletionResponse>
      get copyWith => throw _privateConstructorUsedError;
}

/// @nodoc
abstract class $CreateChatCompletionResponseCopyWith<$Res> {
  factory $CreateChatCompletionResponseCopyWith(
          CreateChatCompletionResponse value,
          $Res Function(CreateChatCompletionResponse) then) =
      _$CreateChatCompletionResponseCopyWithImpl<$Res,
          CreateChatCompletionResponse>;
  @useResult
  $Res call(
      {String id,
      List<ChatCompletionResponseChoice> choices,
      int created,
      String model,
      String object,
      @JsonKey(includeIfNull: false) CompletionUsage? usage});

  $CompletionUsageCopyWith<$Res>? get usage;
}

/// @nodoc
class _$CreateChatCompletionResponseCopyWithImpl<$Res,
        $Val extends CreateChatCompletionResponse>
    implements $CreateChatCompletionResponseCopyWith<$Res> {
  _$CreateChatCompletionResponseCopyWithImpl(this._value, this._then);

  // ignore: unused_field
  final $Val _value;
  // ignore: unused_field
  final $Res Function($Val) _then;

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? id = null,
    Object? choices = null,
    Object? created = null,
    Object? model = null,
    Object? object = null,
    Object? usage = freezed,
  }) {
    return _then(_value.copyWith(
      id: null == id
          ? _value.id
          : id // ignore: cast_nullable_to_non_nullable
              as String,
      choices: null == choices
          ? _value.choices
          : choices // ignore: cast_nullable_to_non_nullable
              as List<ChatCompletionResponseChoice>,
      created: null == created
          ? _value.created
          : created // ignore: cast_nullable_to_non_nullable
              as int,
      model: null == model
          ? _value.model
          : model // ignore: cast_nullable_to_non_nullable
              as String,
      object: null == object
          ? _value.object
          : object // ignore: cast_nullable_to_non_nullable
              as String,
      usage: freezed == usage
          ? _value.usage
          : usage // ignore: cast_nullable_to_non_nullable
              as CompletionUsage?,
    ) as $Val);
  }

  @override
  @pragma('vm:prefer-inline')
  $CompletionUsageCopyWith<$Res>? get usage {
    if (_value.usage == null) {
      return null;
    }

    return $CompletionUsageCopyWith<$Res>(_value.usage!, (value) {
      return _then(_value.copyWith(usage: value) as $Val);
    });
  }
}

/// @nodoc
abstract class _$$CreateChatCompletionResponseImplCopyWith<$Res>
    implements $CreateChatCompletionResponseCopyWith<$Res> {
  factory _$$CreateChatCompletionResponseImplCopyWith(
          _$CreateChatCompletionResponseImpl value,
          $Res Function(_$CreateChatCompletionResponseImpl) then) =
      __$$CreateChatCompletionResponseImplCopyWithImpl<$Res>;
  @override
  @useResult
  $Res call(
      {String id,
      List<ChatCompletionResponseChoice> choices,
      int created,
      String model,
      String object,
      @JsonKey(includeIfNull: false) CompletionUsage? usage});

  @override
  $CompletionUsageCopyWith<$Res>? get usage;
}

/// @nodoc
class __$$CreateChatCompletionResponseImplCopyWithImpl<$Res>
    extends _$CreateChatCompletionResponseCopyWithImpl<$Res,
        _$CreateChatCompletionResponseImpl>
    implements _$$CreateChatCompletionResponseImplCopyWith<$Res> {
  __$$CreateChatCompletionResponseImplCopyWithImpl(
      _$CreateChatCompletionResponseImpl _value,
      $Res Function(_$CreateChatCompletionResponseImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? id = null,
    Object? choices = null,
    Object? created = null,
    Object? model = null,
    Object? object = null,
    Object? usage = freezed,
  }) {
    return _then(_$CreateChatCompletionResponseImpl(
      id: null == id
          ? _value.id
          : id // ignore: cast_nullable_to_non_nullable
              as String,
      choices: null == choices
          ? _value._choices
          : choices // ignore: cast_nullable_to_non_nullable
              as List<ChatCompletionResponseChoice>,
      created: null == created
          ? _value.created
          : created // ignore: cast_nullable_to_non_nullable
              as int,
      model: null == model
          ? _value.model
          : model // ignore: cast_nullable_to_non_nullable
              as String,
      object: null == object
          ? _value.object
          : object // ignore: cast_nullable_to_non_nullable
              as String,
      usage: freezed == usage
          ? _value.usage
          : usage // ignore: cast_nullable_to_non_nullable
              as CompletionUsage?,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$CreateChatCompletionResponseImpl extends _CreateChatCompletionResponse {
  const _$CreateChatCompletionResponseImpl(
      {required this.id,
      required final List<ChatCompletionResponseChoice> choices,
      required this.created,
      required this.model,
      required this.object,
      @JsonKey(includeIfNull: false) this.usage})
      : _choices = choices,
        super._();

  factory _$CreateChatCompletionResponseImpl.fromJson(
          Map<String, dynamic> json) =>
      _$$CreateChatCompletionResponseImplFromJson(json);

  /// A unique identifier for the chat completion.
  @override
  final String id;

  /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
  final List<ChatCompletionResponseChoice> _choices;

  /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
  @override
  List<ChatCompletionResponseChoice> get choices {
    if (_choices is EqualUnmodifiableListView) return _choices;
    // ignore: implicit_dynamic_type
    return EqualUnmodifiableListView(_choices);
  }

  /// The Unix timestamp (in seconds) of when the chat completion was created.
  @override
  final int created;

  /// The model used for the chat completion.
  @override
  final String model;

  /// The object type, which is always `chat.completion`.
  @override
  final String object;

  /// Usage statistics for the completion request.
  @override
  @JsonKey(includeIfNull: false)
  final CompletionUsage? usage;

  @override
  String toString() {
    return 'CreateChatCompletionResponse(id: $id, choices: $choices, created: $created, model: $model, object: $object, usage: $usage)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$CreateChatCompletionResponseImpl &&
            (identical(other.id, id) || other.id == id) &&
            const DeepCollectionEquality().equals(other._choices, _choices) &&
            (identical(other.created, created) || other.created == created) &&
            (identical(other.model, model) || other.model == model) &&
            (identical(other.object, object) || other.object == object) &&
            (identical(other.usage, usage) || other.usage == usage));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode => Object.hash(
      runtimeType,
      id,
      const DeepCollectionEquality().hash(_choices),
      created,
      model,
      object,
      usage);

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$CreateChatCompletionResponseImplCopyWith<
          _$CreateChatCompletionResponseImpl>
      get copyWith => __$$CreateChatCompletionResponseImplCopyWithImpl<
          _$CreateChatCompletionResponseImpl>(this, _$identity);

  @override
  Map<String, dynamic> toJson() {
    return _$$CreateChatCompletionResponseImplToJson(
      this,
    );
  }
}

abstract class _CreateChatCompletionResponse
    extends CreateChatCompletionResponse {
  const factory _CreateChatCompletionResponse(
      {required final String id,
      required final List<ChatCompletionResponseChoice> choices,
      required final int created,
      required final String model,
      required final String object,
      @JsonKey(includeIfNull: false) final CompletionUsage?
usage}) = + _$CreateChatCompletionResponseImpl; + const _CreateChatCompletionResponse._() : super._(); + + factory _CreateChatCompletionResponse.fromJson(Map json) = + _$CreateChatCompletionResponseImpl.fromJson; + + @override + + /// A unique identifier for the chat completion. + String get id; + @override + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + List get choices; + @override + + /// The Unix timestamp (in seconds) of when the chat completion was created. + int get created; + @override + + /// The model used for the chat completion. + String get model; + @override + + /// The object type, which is always `chat.completion`. + String get object; + @override + + /// Usage statistics for the completion request. + @JsonKey(includeIfNull: false) + CompletionUsage? get usage; + @override + @JsonKey(ignore: true) + _$$CreateChatCompletionResponseImplCopyWith< + _$CreateChatCompletionResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionResponseChoice _$ChatCompletionResponseChoiceFromJson( + Map json) { + return _ChatCompletionResponseChoice.fromJson(json); +} + +/// @nodoc +mixin _$ChatCompletionResponseChoice { + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. + @JsonKey(name: 'finish_reason') + ChatCompletionFinishReason get finishReason => + throw _privateConstructorUsedError; + + /// The index of the choice in the list of choices. + int get index => throw _privateConstructorUsedError; + + /// A message in a chat conversation. 
+ ChatCompletionMessage get message => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionResponseChoiceCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionResponseChoiceCopyWith<$Res> { + factory $ChatCompletionResponseChoiceCopyWith( + ChatCompletionResponseChoice value, + $Res Function(ChatCompletionResponseChoice) then) = + _$ChatCompletionResponseChoiceCopyWithImpl<$Res, + ChatCompletionResponseChoice>; + @useResult + $Res call( + {@JsonKey(name: 'finish_reason') ChatCompletionFinishReason finishReason, + int index, + ChatCompletionMessage message}); + + $ChatCompletionMessageCopyWith<$Res> get message; +} + +/// @nodoc +class _$ChatCompletionResponseChoiceCopyWithImpl<$Res, + $Val extends ChatCompletionResponseChoice> + implements $ChatCompletionResponseChoiceCopyWith<$Res> { + _$ChatCompletionResponseChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? finishReason = null, + Object? index = null, + Object? message = null, + }) { + return _then(_value.copyWith( + finishReason: null == finishReason + ? _value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as ChatCompletionFinishReason, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + message: null == message + ? 
_value.message + : message // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessage, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionMessageCopyWith<$Res> get message { + return $ChatCompletionMessageCopyWith<$Res>(_value.message, (value) { + return _then(_value.copyWith(message: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ChatCompletionResponseChoiceImplCopyWith<$Res> + implements $ChatCompletionResponseChoiceCopyWith<$Res> { + factory _$$ChatCompletionResponseChoiceImplCopyWith( + _$ChatCompletionResponseChoiceImpl value, + $Res Function(_$ChatCompletionResponseChoiceImpl) then) = + __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'finish_reason') ChatCompletionFinishReason finishReason, + int index, + ChatCompletionMessage message}); + + @override + $ChatCompletionMessageCopyWith<$Res> get message; +} + +/// @nodoc +class __$$ChatCompletionResponseChoiceImplCopyWithImpl<$Res> + extends _$ChatCompletionResponseChoiceCopyWithImpl<$Res, + _$ChatCompletionResponseChoiceImpl> + implements _$$ChatCompletionResponseChoiceImplCopyWith<$Res> { + __$$ChatCompletionResponseChoiceImplCopyWithImpl( + _$ChatCompletionResponseChoiceImpl _value, + $Res Function(_$ChatCompletionResponseChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? finishReason = null, + Object? index = null, + Object? message = null, + }) { + return _then(_$ChatCompletionResponseChoiceImpl( + finishReason: null == finishReason + ? _value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as ChatCompletionFinishReason, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + message: null == message + ? 
_value.message + : message // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessage, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionResponseChoiceImpl extends _ChatCompletionResponseChoice { + const _$ChatCompletionResponseChoiceImpl( + {@JsonKey(name: 'finish_reason') required this.finishReason, + required this.index, + required this.message}) + : super._(); + + factory _$ChatCompletionResponseChoiceImpl.fromJson( + Map json) => + _$$ChatCompletionResponseChoiceImplFromJson(json); + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. + @override + @JsonKey(name: 'finish_reason') + final ChatCompletionFinishReason finishReason; + + /// The index of the choice in the list of choices. + @override + final int index; + + /// A message in a chat conversation. 
+ @override + final ChatCompletionMessage message; + + @override + String toString() { + return 'ChatCompletionResponseChoice(finishReason: $finishReason, index: $index, message: $message)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionResponseChoiceImpl && + (identical(other.finishReason, finishReason) || + other.finishReason == finishReason) && + (identical(other.index, index) || other.index == index) && + (identical(other.message, message) || other.message == message)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, finishReason, index, message); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionResponseChoiceImplCopyWith< + _$ChatCompletionResponseChoiceImpl> + get copyWith => __$$ChatCompletionResponseChoiceImplCopyWithImpl< + _$ChatCompletionResponseChoiceImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ChatCompletionResponseChoiceImplToJson( + this, + ); + } +} + +abstract class _ChatCompletionResponseChoice + extends ChatCompletionResponseChoice { + const factory _ChatCompletionResponseChoice( + {@JsonKey(name: 'finish_reason') + required final ChatCompletionFinishReason finishReason, + required final int index, + required final ChatCompletionMessage message}) = + _$ChatCompletionResponseChoiceImpl; + const _ChatCompletionResponseChoice._() : super._(); + + factory _ChatCompletionResponseChoice.fromJson(Map json) = + _$ChatCompletionResponseChoiceImpl.fromJson; + + @override + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. 
+ @JsonKey(name: 'finish_reason') + ChatCompletionFinishReason get finishReason; + @override + + /// The index of the choice in the list of choices. + int get index; + @override + + /// A message in a chat conversation. + ChatCompletionMessage get message; + @override + @JsonKey(ignore: true) + _$$ChatCompletionResponseChoiceImplCopyWith< + _$ChatCompletionResponseChoiceImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateChatCompletionStreamResponse _$CreateChatCompletionStreamResponseFromJson( + Map json) { + return _CreateChatCompletionStreamResponse.fromJson(json); +} + +/// @nodoc +mixin _$CreateChatCompletionStreamResponse { + /// A unique identifier for the chat completion. Each chunk has the same ID. + String get id => throw _privateConstructorUsedError; + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + List get choices => + throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + int get created => throw _privateConstructorUsedError; + + /// The model to generate the completion. + String get model => throw _privateConstructorUsedError; + + /// The object type, which is always `chat.completion.chunk`. 
+ String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateChatCompletionStreamResponseCopyWith< + CreateChatCompletionStreamResponse> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateChatCompletionStreamResponseCopyWith<$Res> { + factory $CreateChatCompletionStreamResponseCopyWith( + CreateChatCompletionStreamResponse value, + $Res Function(CreateChatCompletionStreamResponse) then) = + _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, + CreateChatCompletionStreamResponse>; + @useResult + $Res call( + {String id, + List choices, + int created, + String model, + String object}); +} + +/// @nodoc +class _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, + $Val extends CreateChatCompletionStreamResponse> + implements $CreateChatCompletionStreamResponseCopyWith<$Res> { + _$CreateChatCompletionStreamResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? choices = null, + Object? created = null, + Object? model = null, + Object? object = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + choices: null == choices + ? _value.choices + : choices // ignore: cast_nullable_to_non_nullable + as List, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> + implements $CreateChatCompletionStreamResponseCopyWith<$Res> { + factory _$$CreateChatCompletionStreamResponseImplCopyWith( + _$CreateChatCompletionStreamResponseImpl value, + $Res Function(_$CreateChatCompletionStreamResponseImpl) then) = + __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + List choices, + int created, + String model, + String object}); +} + +/// @nodoc +class __$$CreateChatCompletionStreamResponseImplCopyWithImpl<$Res> + extends _$CreateChatCompletionStreamResponseCopyWithImpl<$Res, + _$CreateChatCompletionStreamResponseImpl> + implements _$$CreateChatCompletionStreamResponseImplCopyWith<$Res> { + __$$CreateChatCompletionStreamResponseImplCopyWithImpl( + _$CreateChatCompletionStreamResponseImpl _value, + $Res Function(_$CreateChatCompletionStreamResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? choices = null, + Object? created = null, + Object? model = null, + Object? object = null, + }) { + return _then(_$CreateChatCompletionStreamResponseImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + choices: null == choices + ? _value._choices + : choices // ignore: cast_nullable_to_non_nullable + as List, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateChatCompletionStreamResponseImpl + extends _CreateChatCompletionStreamResponse { + const _$CreateChatCompletionStreamResponseImpl( + {required this.id, + required final List choices, + required this.created, + required this.model, + required this.object}) + : _choices = choices, + super._(); + + factory _$CreateChatCompletionStreamResponseImpl.fromJson( + Map json) => + _$$CreateChatCompletionStreamResponseImplFromJson(json); + + /// A unique identifier for the chat completion. Each chunk has the same ID. + @override + final String id; + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + final List _choices; + + /// A list of chat completion choices. Can be more than one if `n` is greater than 1. + @override + List get choices { + if (_choices is EqualUnmodifiableListView) return _choices; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_choices); + } + + /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + @override + final int created; + + /// The model to generate the completion. + @override + final String model; + + /// The object type, which is always `chat.completion.chunk`. 
+ @override + final String object; + + @override + String toString() { + return 'CreateChatCompletionStreamResponse(id: $id, choices: $choices, created: $created, model: $model, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateChatCompletionStreamResponseImpl && + (identical(other.id, id) || other.id == id) && + const DeepCollectionEquality().equals(other._choices, _choices) && + (identical(other.created, created) || other.created == created) && + (identical(other.model, model) || other.model == model) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, + const DeepCollectionEquality().hash(_choices), created, model, object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateChatCompletionStreamResponseImplCopyWith< + _$CreateChatCompletionStreamResponseImpl> + get copyWith => __$$CreateChatCompletionStreamResponseImplCopyWithImpl< + _$CreateChatCompletionStreamResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateChatCompletionStreamResponseImplToJson( + this, + ); + } +} + +abstract class _CreateChatCompletionStreamResponse + extends CreateChatCompletionStreamResponse { + const factory _CreateChatCompletionStreamResponse( + {required final String id, + required final List choices, + required final int created, + required final String model, + required final String object}) = _$CreateChatCompletionStreamResponseImpl; + const _CreateChatCompletionStreamResponse._() : super._(); + + factory _CreateChatCompletionStreamResponse.fromJson( + Map json) = + _$CreateChatCompletionStreamResponseImpl.fromJson; + + @override + + /// A unique identifier for the chat completion. Each chunk has the same ID. + String get id; + @override + + /// A list of chat completion choices. 
Can be more than one if `n` is greater than 1. + List get choices; + @override + + /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + int get created; + @override + + /// The model to generate the completion. + String get model; + @override + + /// The object type, which is always `chat.completion.chunk`. + String get object; + @override + @JsonKey(ignore: true) + _$$CreateChatCompletionStreamResponseImplCopyWith< + _$CreateChatCompletionStreamResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionStreamResponseChoice _$ChatCompletionStreamResponseChoiceFromJson( + Map json) { + return _ChatCompletionStreamResponseChoice.fromJson(json); +} + +/// @nodoc +mixin _$ChatCompletionStreamResponseChoice { + /// A chat completion delta generated by streamed model responses. + ChatCompletionStreamResponseDelta get delta => + throw _privateConstructorUsedError; + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionFinishReason? get finishReason => + throw _privateConstructorUsedError; + + /// The index of the choice in the list of choices. 
+ int get index => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionStreamResponseChoiceCopyWith< + ChatCompletionStreamResponseChoice> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionStreamResponseChoiceCopyWith<$Res> { + factory $ChatCompletionStreamResponseChoiceCopyWith( + ChatCompletionStreamResponseChoice value, + $Res Function(ChatCompletionStreamResponseChoice) then) = + _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, + ChatCompletionStreamResponseChoice>; + @useResult + $Res call( + {ChatCompletionStreamResponseDelta delta, + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionFinishReason? finishReason, + int index}); + + $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta; +} + +/// @nodoc +class _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, + $Val extends ChatCompletionStreamResponseChoice> + implements $ChatCompletionStreamResponseChoiceCopyWith<$Res> { + _$ChatCompletionStreamResponseChoiceCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? finishReason = freezed, + Object? index = null, + }) { + return _then(_value.copyWith( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as ChatCompletionStreamResponseDelta, + finishReason: freezed == finishReason + ? _value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as ChatCompletionFinishReason?, + index: null == index + ? 
_value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta { + return $ChatCompletionStreamResponseDeltaCopyWith<$Res>(_value.delta, + (value) { + return _then(_value.copyWith(delta: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ChatCompletionStreamResponseChoiceImplCopyWith<$Res> + implements $ChatCompletionStreamResponseChoiceCopyWith<$Res> { + factory _$$ChatCompletionStreamResponseChoiceImplCopyWith( + _$ChatCompletionStreamResponseChoiceImpl value, + $Res Function(_$ChatCompletionStreamResponseChoiceImpl) then) = + __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {ChatCompletionStreamResponseDelta delta, + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionFinishReason? finishReason, + int index}); + + @override + $ChatCompletionStreamResponseDeltaCopyWith<$Res> get delta; +} + +/// @nodoc +class __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl<$Res> + extends _$ChatCompletionStreamResponseChoiceCopyWithImpl<$Res, + _$ChatCompletionStreamResponseChoiceImpl> + implements _$$ChatCompletionStreamResponseChoiceImplCopyWith<$Res> { + __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl( + _$ChatCompletionStreamResponseChoiceImpl _value, + $Res Function(_$ChatCompletionStreamResponseChoiceImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? delta = null, + Object? finishReason = freezed, + Object? index = null, + }) { + return _then(_$ChatCompletionStreamResponseChoiceImpl( + delta: null == delta + ? _value.delta + : delta // ignore: cast_nullable_to_non_nullable + as ChatCompletionStreamResponseDelta, + finishReason: freezed == finishReason + ? 
_value.finishReason + : finishReason // ignore: cast_nullable_to_non_nullable + as ChatCompletionFinishReason?, + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionStreamResponseChoiceImpl + extends _ChatCompletionStreamResponseChoice { + const _$ChatCompletionStreamResponseChoiceImpl( + {required this.delta, + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + required this.finishReason, + required this.index}) + : super._(); + + factory _$ChatCompletionStreamResponseChoiceImpl.fromJson( + Map json) => + _$$ChatCompletionStreamResponseChoiceImplFromJson(json); + + /// A chat completion delta generated by streamed model responses. + @override + final ChatCompletionStreamResponseDelta delta; + + /// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. + @override + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ChatCompletionFinishReason? finishReason; + + /// The index of the choice in the list of choices. 
+ @override + final int index; + + @override + String toString() { + return 'ChatCompletionStreamResponseChoice(delta: $delta, finishReason: $finishReason, index: $index)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionStreamResponseChoiceImpl && + (identical(other.delta, delta) || other.delta == delta) && + (identical(other.finishReason, finishReason) || + other.finishReason == finishReason) && + (identical(other.index, index) || other.index == index)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, delta, finishReason, index); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionStreamResponseChoiceImplCopyWith< + _$ChatCompletionStreamResponseChoiceImpl> + get copyWith => __$$ChatCompletionStreamResponseChoiceImplCopyWithImpl< + _$ChatCompletionStreamResponseChoiceImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ChatCompletionStreamResponseChoiceImplToJson( + this, + ); + } +} + +abstract class _ChatCompletionStreamResponseChoice + extends ChatCompletionStreamResponseChoice { + const factory _ChatCompletionStreamResponseChoice( + {required final ChatCompletionStreamResponseDelta delta, + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + required final ChatCompletionFinishReason? finishReason, + required final int index}) = _$ChatCompletionStreamResponseChoiceImpl; + const _ChatCompletionStreamResponseChoice._() : super._(); + + factory _ChatCompletionStreamResponseChoice.fromJson( + Map json) = + _$ChatCompletionStreamResponseChoiceImpl.fromJson; + + @override + + /// A chat completion delta generated by streamed model responses. + ChatCompletionStreamResponseDelta get delta; + @override + + /// The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, + /// `length` if the maximum number of tokens specified in the request was reached, + /// `content_filter` if content was omitted due to a flag from our content filters, + /// or `function_call` if the model called a function. + @JsonKey( + name: 'finish_reason', + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionFinishReason? get finishReason; + @override + + /// The index of the choice in the list of choices. + int get index; + @override + @JsonKey(ignore: true) + _$$ChatCompletionStreamResponseChoiceImplCopyWith< + _$ChatCompletionStreamResponseChoiceImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ChatCompletionStreamResponseDelta _$ChatCompletionStreamResponseDeltaFromJson( + Map json) { + return _ChatCompletionStreamResponseDelta.fromJson(json); +} + +/// @nodoc +mixin _$ChatCompletionStreamResponseDelta { + /// The contents of the chunk message. + @JsonKey(includeIfNull: false) + String? get content => throw _privateConstructorUsedError; + + /// The name and arguments of a function that should be called, as generated by the model. + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? get functionCall => + throw _privateConstructorUsedError; + + /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionMessageRole? 
get role => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ChatCompletionStreamResponseDeltaCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ChatCompletionStreamResponseDeltaCopyWith<$Res> { + factory $ChatCompletionStreamResponseDeltaCopyWith( + ChatCompletionStreamResponseDelta value, + $Res Function(ChatCompletionStreamResponseDelta) then) = + _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, + ChatCompletionStreamResponseDelta>; + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? content, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionMessageRole? role}); + + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; +} + +/// @nodoc +class _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, + $Val extends ChatCompletionStreamResponseDelta> + implements $ChatCompletionStreamResponseDeltaCopyWith<$Res> { + _$ChatCompletionStreamResponseDeltaCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? content = freezed, + Object? functionCall = freezed, + Object? role = freezed, + }) { + return _then(_value.copyWith( + content: freezed == content + ? _value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + functionCall: freezed == functionCall + ? _value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageFunctionCall?, + role: freezed == role + ? 
_value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall { + if (_value.functionCall == null) { + return null; + } + + return $ChatCompletionMessageFunctionCallCopyWith<$Res>( + _value.functionCall!, (value) { + return _then(_value.copyWith(functionCall: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> + implements $ChatCompletionStreamResponseDeltaCopyWith<$Res> { + factory _$$ChatCompletionStreamResponseDeltaImplCopyWith( + _$ChatCompletionStreamResponseDeltaImpl value, + $Res Function(_$ChatCompletionStreamResponseDeltaImpl) then) = + __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(includeIfNull: false) String? content, + @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? functionCall, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionMessageRole? role}); + + @override + $ChatCompletionMessageFunctionCallCopyWith<$Res>? get functionCall; +} + +/// @nodoc +class __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl<$Res> + extends _$ChatCompletionStreamResponseDeltaCopyWithImpl<$Res, + _$ChatCompletionStreamResponseDeltaImpl> + implements _$$ChatCompletionStreamResponseDeltaImplCopyWith<$Res> { + __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl( + _$ChatCompletionStreamResponseDeltaImpl _value, + $Res Function(_$ChatCompletionStreamResponseDeltaImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? content = freezed, + Object? functionCall = freezed, + Object? role = freezed, + }) { + return _then(_$ChatCompletionStreamResponseDeltaImpl( + content: freezed == content + ? 
_value.content + : content // ignore: cast_nullable_to_non_nullable + as String?, + functionCall: freezed == functionCall + ? _value.functionCall + : functionCall // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageFunctionCall?, + role: freezed == role + ? _value.role + : role // ignore: cast_nullable_to_non_nullable + as ChatCompletionMessageRole?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ChatCompletionStreamResponseDeltaImpl + extends _ChatCompletionStreamResponseDelta { + const _$ChatCompletionStreamResponseDeltaImpl( + {@JsonKey(includeIfNull: false) this.content, + @JsonKey(name: 'function_call', includeIfNull: false) this.functionCall, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.role}) + : super._(); + + factory _$ChatCompletionStreamResponseDeltaImpl.fromJson( + Map json) => + _$$ChatCompletionStreamResponseDeltaImplFromJson(json); + + /// The contents of the chunk message. + @override + @JsonKey(includeIfNull: false) + final String? content; + + /// The name and arguments of a function that should be called, as generated by the model. + @override + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? functionCall; + + /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ChatCompletionMessageRole? 
role; + + @override + String toString() { + return 'ChatCompletionStreamResponseDelta(content: $content, functionCall: $functionCall, role: $role)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ChatCompletionStreamResponseDeltaImpl && + (identical(other.content, content) || other.content == content) && + (identical(other.functionCall, functionCall) || + other.functionCall == functionCall) && + (identical(other.role, role) || other.role == role)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, content, functionCall, role); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ChatCompletionStreamResponseDeltaImplCopyWith< + _$ChatCompletionStreamResponseDeltaImpl> + get copyWith => __$$ChatCompletionStreamResponseDeltaImplCopyWithImpl< + _$ChatCompletionStreamResponseDeltaImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ChatCompletionStreamResponseDeltaImplToJson( + this, + ); + } +} + +abstract class _ChatCompletionStreamResponseDelta + extends ChatCompletionStreamResponseDelta { + const factory _ChatCompletionStreamResponseDelta( + {@JsonKey(includeIfNull: false) final String? content, + @JsonKey(name: 'function_call', includeIfNull: false) + final ChatCompletionMessageFunctionCall? functionCall, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ChatCompletionMessageRole? role}) = + _$ChatCompletionStreamResponseDeltaImpl; + const _ChatCompletionStreamResponseDelta._() : super._(); + + factory _ChatCompletionStreamResponseDelta.fromJson( + Map json) = + _$ChatCompletionStreamResponseDeltaImpl.fromJson; + + @override + + /// The contents of the chunk message. + @JsonKey(includeIfNull: false) + String? get content; + @override + + /// The name and arguments of a function that should be called, as generated by the model. 
+ @JsonKey(name: 'function_call', includeIfNull: false) + ChatCompletionMessageFunctionCall? get functionCall; + @override + + /// The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ChatCompletionMessageRole? get role; + @override + @JsonKey(ignore: true) + _$$ChatCompletionStreamResponseDeltaImplCopyWith< + _$ChatCompletionStreamResponseDeltaImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CompletionUsage _$CompletionUsageFromJson(Map json) { + return _CompletionUsage.fromJson(json); +} + +/// @nodoc +mixin _$CompletionUsage { + /// Number of tokens in the generated completion. + @JsonKey(name: 'completion_tokens') + int? get completionTokens => throw _privateConstructorUsedError; + + /// Number of tokens in the prompt. + @JsonKey(name: 'prompt_tokens') + int get promptTokens => throw _privateConstructorUsedError; + + /// Total number of tokens used in the request (prompt + completion). + @JsonKey(name: 'total_tokens') + int get totalTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CompletionUsageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CompletionUsageCopyWith<$Res> { + factory $CompletionUsageCopyWith( + CompletionUsage value, $Res Function(CompletionUsage) then) = + _$CompletionUsageCopyWithImpl<$Res, CompletionUsage>; + @useResult + $Res call( + {@JsonKey(name: 'completion_tokens') int? 
completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); +} + +/// @nodoc +class _$CompletionUsageCopyWithImpl<$Res, $Val extends CompletionUsage> + implements $CompletionUsageCopyWith<$Res> { + _$CompletionUsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? completionTokens = freezed, + Object? promptTokens = null, + Object? totalTokens = null, + }) { + return _then(_value.copyWith( + completionTokens: freezed == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int?, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CompletionUsageImplCopyWith<$Res> + implements $CompletionUsageCopyWith<$Res> { + factory _$$CompletionUsageImplCopyWith(_$CompletionUsageImpl value, + $Res Function(_$CompletionUsageImpl) then) = + __$$CompletionUsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'completion_tokens') int? completionTokens, + @JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); +} + +/// @nodoc +class __$$CompletionUsageImplCopyWithImpl<$Res> + extends _$CompletionUsageCopyWithImpl<$Res, _$CompletionUsageImpl> + implements _$$CompletionUsageImplCopyWith<$Res> { + __$$CompletionUsageImplCopyWithImpl( + _$CompletionUsageImpl _value, $Res Function(_$CompletionUsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? completionTokens = freezed, + Object? promptTokens = null, + Object? 
totalTokens = null, + }) { + return _then(_$CompletionUsageImpl( + completionTokens: freezed == completionTokens + ? _value.completionTokens + : completionTokens // ignore: cast_nullable_to_non_nullable + as int?, + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CompletionUsageImpl extends _CompletionUsage { + const _$CompletionUsageImpl( + {@JsonKey(name: 'completion_tokens') required this.completionTokens, + @JsonKey(name: 'prompt_tokens') required this.promptTokens, + @JsonKey(name: 'total_tokens') required this.totalTokens}) + : super._(); + + factory _$CompletionUsageImpl.fromJson(Map json) => + _$$CompletionUsageImplFromJson(json); + + /// Number of tokens in the generated completion. + @override + @JsonKey(name: 'completion_tokens') + final int? completionTokens; + + /// Number of tokens in the prompt. + @override + @JsonKey(name: 'prompt_tokens') + final int promptTokens; + + /// Total number of tokens used in the request (prompt + completion). 
+ @override + @JsonKey(name: 'total_tokens') + final int totalTokens; + + @override + String toString() { + return 'CompletionUsage(completionTokens: $completionTokens, promptTokens: $promptTokens, totalTokens: $totalTokens)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CompletionUsageImpl && + (identical(other.completionTokens, completionTokens) || + other.completionTokens == completionTokens) && + (identical(other.promptTokens, promptTokens) || + other.promptTokens == promptTokens) && + (identical(other.totalTokens, totalTokens) || + other.totalTokens == totalTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, completionTokens, promptTokens, totalTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => + __$$CompletionUsageImplCopyWithImpl<_$CompletionUsageImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CompletionUsageImplToJson( + this, + ); + } +} + +abstract class _CompletionUsage extends CompletionUsage { + const factory _CompletionUsage( + {@JsonKey(name: 'completion_tokens') required final int? completionTokens, + @JsonKey(name: 'prompt_tokens') required final int promptTokens, + @JsonKey(name: 'total_tokens') + required final int totalTokens}) = _$CompletionUsageImpl; + const _CompletionUsage._() : super._(); + + factory _CompletionUsage.fromJson(Map json) = + _$CompletionUsageImpl.fromJson; + + @override + + /// Number of tokens in the generated completion. + @JsonKey(name: 'completion_tokens') + int? get completionTokens; + @override + + /// Number of tokens in the prompt. + @JsonKey(name: 'prompt_tokens') + int get promptTokens; + @override + + /// Total number of tokens used in the request (prompt + completion). 
+ @JsonKey(name: 'total_tokens') + int get totalTokens; + @override + @JsonKey(ignore: true) + _$$CompletionUsageImplCopyWith<_$CompletionUsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateEmbeddingRequest _$CreateEmbeddingRequestFromJson( + Map json) { + return _CreateEmbeddingRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateEmbeddingRequest { + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @_EmbeddingModelConverter() + EmbeddingModel get model => throw _privateConstructorUsedError; + + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @_EmbeddingInputConverter() + EmbeddingInput get input => throw _privateConstructorUsedError; + + /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @JsonKey(name: 'encoding_format') + EmbeddingEncodingFormat get encodingFormat => + throw _privateConstructorUsedError; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? 
get user => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateEmbeddingRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateEmbeddingRequestCopyWith<$Res> { + factory $CreateEmbeddingRequestCopyWith(CreateEmbeddingRequest value, + $Res Function(CreateEmbeddingRequest) then) = + _$CreateEmbeddingRequestCopyWithImpl<$Res, CreateEmbeddingRequest>; + @useResult + $Res call( + {@_EmbeddingModelConverter() EmbeddingModel model, + @_EmbeddingInputConverter() EmbeddingInput input, + @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat encodingFormat, + @JsonKey(includeIfNull: false) String? user}); + + $EmbeddingModelCopyWith<$Res> get model; + $EmbeddingInputCopyWith<$Res> get input; +} + +/// @nodoc +class _$CreateEmbeddingRequestCopyWithImpl<$Res, + $Val extends CreateEmbeddingRequest> + implements $CreateEmbeddingRequestCopyWith<$Res> { + _$CreateEmbeddingRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? input = null, + Object? encodingFormat = null, + Object? user = freezed, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as EmbeddingModel, + input: null == input + ? _value.input + : input // ignore: cast_nullable_to_non_nullable + as EmbeddingInput, + encodingFormat: null == encodingFormat + ? _value.encodingFormat + : encodingFormat // ignore: cast_nullable_to_non_nullable + as EmbeddingEncodingFormat, + user: freezed == user + ? 
_value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $EmbeddingModelCopyWith<$Res> get model { + return $EmbeddingModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $EmbeddingInputCopyWith<$Res> get input { + return $EmbeddingInputCopyWith<$Res>(_value.input, (value) { + return _then(_value.copyWith(input: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateEmbeddingRequestImplCopyWith<$Res> + implements $CreateEmbeddingRequestCopyWith<$Res> { + factory _$$CreateEmbeddingRequestImplCopyWith( + _$CreateEmbeddingRequestImpl value, + $Res Function(_$CreateEmbeddingRequestImpl) then) = + __$$CreateEmbeddingRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_EmbeddingModelConverter() EmbeddingModel model, + @_EmbeddingInputConverter() EmbeddingInput input, + @JsonKey(name: 'encoding_format') EmbeddingEncodingFormat encodingFormat, + @JsonKey(includeIfNull: false) String? user}); + + @override + $EmbeddingModelCopyWith<$Res> get model; + @override + $EmbeddingInputCopyWith<$Res> get input; +} + +/// @nodoc +class __$$CreateEmbeddingRequestImplCopyWithImpl<$Res> + extends _$CreateEmbeddingRequestCopyWithImpl<$Res, + _$CreateEmbeddingRequestImpl> + implements _$$CreateEmbeddingRequestImplCopyWith<$Res> { + __$$CreateEmbeddingRequestImplCopyWithImpl( + _$CreateEmbeddingRequestImpl _value, + $Res Function(_$CreateEmbeddingRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? input = null, + Object? encodingFormat = null, + Object? user = freezed, + }) { + return _then(_$CreateEmbeddingRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as EmbeddingModel, + input: null == input + ? 
_value.input + : input // ignore: cast_nullable_to_non_nullable + as EmbeddingInput, + encodingFormat: null == encodingFormat + ? _value.encodingFormat + : encodingFormat // ignore: cast_nullable_to_non_nullable + as EmbeddingEncodingFormat, + user: freezed == user + ? _value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateEmbeddingRequestImpl extends _CreateEmbeddingRequest { + const _$CreateEmbeddingRequestImpl( + {@_EmbeddingModelConverter() required this.model, + @_EmbeddingInputConverter() required this.input, + @JsonKey(name: 'encoding_format') + this.encodingFormat = EmbeddingEncodingFormat.float, + @JsonKey(includeIfNull: false) this.user}) + : super._(); + + factory _$CreateEmbeddingRequestImpl.fromJson(Map json) => + _$$CreateEmbeddingRequestImplFromJson(json); + + /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @override + @_EmbeddingModelConverter() + final EmbeddingModel model; + + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @override + @_EmbeddingInputConverter() + final EmbeddingInput input; + + /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @override + @JsonKey(name: 'encoding_format') + final EmbeddingEncodingFormat encodingFormat; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override + @JsonKey(includeIfNull: false) + final String? user; + + @override + String toString() { + return 'CreateEmbeddingRequest(model: $model, input: $input, encodingFormat: $encodingFormat, user: $user)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateEmbeddingRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.input, input) || other.input == input) && + (identical(other.encodingFormat, encodingFormat) || + other.encodingFormat == encodingFormat) && + (identical(other.user, user) || other.user == user)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, model, input, encodingFormat, user); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> + get copyWith => __$$CreateEmbeddingRequestImplCopyWithImpl< + _$CreateEmbeddingRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateEmbeddingRequestImplToJson( + this, + ); + } +} + +abstract class _CreateEmbeddingRequest extends CreateEmbeddingRequest { + const factory _CreateEmbeddingRequest( + {@_EmbeddingModelConverter() required final EmbeddingModel model, + @_EmbeddingInputConverter() required final EmbeddingInput input, + @JsonKey(name: 'encoding_format') + final EmbeddingEncodingFormat encodingFormat, + @JsonKey(includeIfNull: false) final String? user}) = + _$CreateEmbeddingRequestImpl; + const _CreateEmbeddingRequest._() : super._(); + + factory _CreateEmbeddingRequest.fromJson(Map json) = + _$CreateEmbeddingRequestImpl.fromJson; + + @override + + /// ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + @_EmbeddingModelConverter() + EmbeddingModel get model; + @override + + /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + @_EmbeddingInputConverter() + EmbeddingInput get input; + @override + + /// The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). + @JsonKey(name: 'encoding_format') + EmbeddingEncodingFormat get encodingFormat; + @override + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? 
get user; + @override + @JsonKey(ignore: true) + _$$CreateEmbeddingRequestImplCopyWith<_$CreateEmbeddingRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +EmbeddingModel _$EmbeddingModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionEmbeddingModelString.fromJson(json); + case 'enumeration': + return _UnionEmbeddingModelEnum.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'EmbeddingModel', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$EmbeddingModel { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(EmbeddingModels value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(EmbeddingModels value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(EmbeddingModels value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingModelString value) string, + required TResult Function(_UnionEmbeddingModelEnum value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingModelString value)? string, + TResult? Function(_UnionEmbeddingModelEnum value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingModelString value)? string, + TResult Function(_UnionEmbeddingModelEnum value)? 
enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $EmbeddingModelCopyWith<$Res> { + factory $EmbeddingModelCopyWith( + EmbeddingModel value, $Res Function(EmbeddingModel) then) = + _$EmbeddingModelCopyWithImpl<$Res, EmbeddingModel>; +} + +/// @nodoc +class _$EmbeddingModelCopyWithImpl<$Res, $Val extends EmbeddingModel> + implements $EmbeddingModelCopyWith<$Res> { + _$EmbeddingModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionEmbeddingModelStringImplCopyWith<$Res> { + factory _$$UnionEmbeddingModelStringImplCopyWith( + _$UnionEmbeddingModelStringImpl value, + $Res Function(_$UnionEmbeddingModelStringImpl) then) = + __$$UnionEmbeddingModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionEmbeddingModelStringImplCopyWithImpl<$Res> + extends _$EmbeddingModelCopyWithImpl<$Res, _$UnionEmbeddingModelStringImpl> + implements _$$UnionEmbeddingModelStringImplCopyWith<$Res> { + __$$UnionEmbeddingModelStringImplCopyWithImpl( + _$UnionEmbeddingModelStringImpl _value, + $Res Function(_$UnionEmbeddingModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingModelStringImpl extends _UnionEmbeddingModelString { + const _$UnionEmbeddingModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 
'string', + super._(); + + factory _$UnionEmbeddingModelStringImpl.fromJson(Map json) => + _$$UnionEmbeddingModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingModel.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingModelStringImplCopyWith<_$UnionEmbeddingModelStringImpl> + get copyWith => __$$UnionEmbeddingModelStringImplCopyWithImpl< + _$UnionEmbeddingModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(EmbeddingModels value) enumeration, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(EmbeddingModels value)? enumeration, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(EmbeddingModels value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingModelString value) string, + required TResult Function(_UnionEmbeddingModelEnum value) enumeration, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingModelString value)? string, + TResult? Function(_UnionEmbeddingModelEnum value)? 
enumeration, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingModelString value)? string, + TResult Function(_UnionEmbeddingModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingModelStringImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingModelString extends EmbeddingModel { + const factory _UnionEmbeddingModelString(final String value) = + _$UnionEmbeddingModelStringImpl; + const _UnionEmbeddingModelString._() : super._(); + + factory _UnionEmbeddingModelString.fromJson(Map json) = + _$UnionEmbeddingModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingModelStringImplCopyWith<_$UnionEmbeddingModelStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionEmbeddingModelEnumImplCopyWith<$Res> { + factory _$$UnionEmbeddingModelEnumImplCopyWith( + _$UnionEmbeddingModelEnumImpl value, + $Res Function(_$UnionEmbeddingModelEnumImpl) then) = + __$$UnionEmbeddingModelEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({EmbeddingModels value}); +} + +/// @nodoc +class __$$UnionEmbeddingModelEnumImplCopyWithImpl<$Res> + extends _$EmbeddingModelCopyWithImpl<$Res, _$UnionEmbeddingModelEnumImpl> + implements _$$UnionEmbeddingModelEnumImplCopyWith<$Res> { + __$$UnionEmbeddingModelEnumImplCopyWithImpl( + _$UnionEmbeddingModelEnumImpl _value, + $Res Function(_$UnionEmbeddingModelEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingModelEnumImpl( + null == value + ? 
_value.value + : value // ignore: cast_nullable_to_non_nullable + as EmbeddingModels, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingModelEnumImpl extends _UnionEmbeddingModelEnum { + const _$UnionEmbeddingModelEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 'enumeration', + super._(); + + factory _$UnionEmbeddingModelEnumImpl.fromJson(Map json) => + _$$UnionEmbeddingModelEnumImplFromJson(json); + + @override + final EmbeddingModels value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingModel.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingModelEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingModelEnumImplCopyWith<_$UnionEmbeddingModelEnumImpl> + get copyWith => __$$UnionEmbeddingModelEnumImplCopyWithImpl< + _$UnionEmbeddingModelEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(EmbeddingModels value) enumeration, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(EmbeddingModels value)? enumeration, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(EmbeddingModels value)? 
enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingModelString value) string, + required TResult Function(_UnionEmbeddingModelEnum value) enumeration, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingModelString value)? string, + TResult? Function(_UnionEmbeddingModelEnum value)? enumeration, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingModelString value)? string, + TResult Function(_UnionEmbeddingModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingModelEnumImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingModelEnum extends EmbeddingModel { + const factory _UnionEmbeddingModelEnum(final EmbeddingModels value) = + _$UnionEmbeddingModelEnumImpl; + const _UnionEmbeddingModelEnum._() : super._(); + + factory _UnionEmbeddingModelEnum.fromJson(Map json) = + _$UnionEmbeddingModelEnumImpl.fromJson; + + @override + EmbeddingModels get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingModelEnumImplCopyWith<_$UnionEmbeddingModelEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +EmbeddingInput _$EmbeddingInputFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionEmbeddingInputString.fromJson(json); + case 'arrayString': + return _UnionEmbeddingInputArrayString.fromJson(json); + case 'arrayInteger': + return _UnionEmbeddingInputArrayInteger.fromJson(json); + case 'array': + return _UnionEmbeddingInputArray.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'EmbeddingInput', + 'Invalid 
union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$EmbeddingInput { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingInputString value) string, + required TResult Function(_UnionEmbeddingInputArrayString value) + arrayString, + required TResult Function(_UnionEmbeddingInputArrayInteger value) + arrayInteger, + required TResult Function(_UnionEmbeddingInputArray value) array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingInputString value)? string, + TResult? Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult? Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult? Function(_UnionEmbeddingInputArray value)? array, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingInputString value)? string, + TResult Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult Function(_UnionEmbeddingInputArrayInteger value)? 
arrayInteger, + TResult Function(_UnionEmbeddingInputArray value)? array, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $EmbeddingInputCopyWith<$Res> { + factory $EmbeddingInputCopyWith( + EmbeddingInput value, $Res Function(EmbeddingInput) then) = + _$EmbeddingInputCopyWithImpl<$Res, EmbeddingInput>; +} + +/// @nodoc +class _$EmbeddingInputCopyWithImpl<$Res, $Val extends EmbeddingInput> + implements $EmbeddingInputCopyWith<$Res> { + _$EmbeddingInputCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionEmbeddingInputStringImplCopyWith<$Res> { + factory _$$UnionEmbeddingInputStringImplCopyWith( + _$UnionEmbeddingInputStringImpl value, + $Res Function(_$UnionEmbeddingInputStringImpl) then) = + __$$UnionEmbeddingInputStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionEmbeddingInputStringImplCopyWithImpl<$Res> + extends _$EmbeddingInputCopyWithImpl<$Res, _$UnionEmbeddingInputStringImpl> + implements _$$UnionEmbeddingInputStringImplCopyWith<$Res> { + __$$UnionEmbeddingInputStringImplCopyWithImpl( + _$UnionEmbeddingInputStringImpl _value, + $Res Function(_$UnionEmbeddingInputStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingInputStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingInputStringImpl extends _UnionEmbeddingInputString { + const _$UnionEmbeddingInputStringImpl(this.value, {final String? $type}) + : $type = $type ?? 
'string', + super._(); + + factory _$UnionEmbeddingInputStringImpl.fromJson(Map json) => + _$$UnionEmbeddingInputStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingInput.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingInputStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingInputStringImplCopyWith<_$UnionEmbeddingInputStringImpl> + get copyWith => __$$UnionEmbeddingInputStringImplCopyWithImpl< + _$UnionEmbeddingInputStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? 
array, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingInputString value) string, + required TResult Function(_UnionEmbeddingInputArrayString value) + arrayString, + required TResult Function(_UnionEmbeddingInputArrayInteger value) + arrayInteger, + required TResult Function(_UnionEmbeddingInputArray value) array, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingInputString value)? string, + TResult? Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult? Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult? Function(_UnionEmbeddingInputArray value)? array, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingInputString value)? string, + TResult Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult Function(_UnionEmbeddingInputArray value)? 
array, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingInputStringImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingInputString extends EmbeddingInput { + const factory _UnionEmbeddingInputString(final String value) = + _$UnionEmbeddingInputStringImpl; + const _UnionEmbeddingInputString._() : super._(); + + factory _UnionEmbeddingInputString.fromJson(Map json) = + _$UnionEmbeddingInputStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingInputStringImplCopyWith<_$UnionEmbeddingInputStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionEmbeddingInputArrayStringImplCopyWith<$Res> { + factory _$$UnionEmbeddingInputArrayStringImplCopyWith( + _$UnionEmbeddingInputArrayStringImpl value, + $Res Function(_$UnionEmbeddingInputArrayStringImpl) then) = + __$$UnionEmbeddingInputArrayStringImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionEmbeddingInputArrayStringImplCopyWithImpl<$Res> + extends _$EmbeddingInputCopyWithImpl<$Res, + _$UnionEmbeddingInputArrayStringImpl> + implements _$$UnionEmbeddingInputArrayStringImplCopyWith<$Res> { + __$$UnionEmbeddingInputArrayStringImplCopyWithImpl( + _$UnionEmbeddingInputArrayStringImpl _value, + $Res Function(_$UnionEmbeddingInputArrayStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingInputArrayStringImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingInputArrayStringImpl + extends _UnionEmbeddingInputArrayString { + const _$UnionEmbeddingInputArrayStringImpl(final List value, + {final String? 
$type}) + : _value = value, + $type = $type ?? 'arrayString', + super._(); + + factory _$UnionEmbeddingInputArrayStringImpl.fromJson( + Map json) => + _$$UnionEmbeddingInputArrayStringImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingInput.arrayString(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingInputArrayStringImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingInputArrayStringImplCopyWith< + _$UnionEmbeddingInputArrayStringImpl> + get copyWith => __$$UnionEmbeddingInputArrayStringImplCopyWithImpl< + _$UnionEmbeddingInputArrayStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return arrayString(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return arrayString?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? 
arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingInputString value) string, + required TResult Function(_UnionEmbeddingInputArrayString value) + arrayString, + required TResult Function(_UnionEmbeddingInputArrayInteger value) + arrayInteger, + required TResult Function(_UnionEmbeddingInputArray value) array, + }) { + return arrayString(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingInputString value)? string, + TResult? Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult? Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult? Function(_UnionEmbeddingInputArray value)? array, + }) { + return arrayString?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingInputString value)? string, + TResult Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult Function(_UnionEmbeddingInputArray value)? 
array, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingInputArrayStringImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingInputArrayString extends EmbeddingInput { + const factory _UnionEmbeddingInputArrayString(final List value) = + _$UnionEmbeddingInputArrayStringImpl; + const _UnionEmbeddingInputArrayString._() : super._(); + + factory _UnionEmbeddingInputArrayString.fromJson(Map json) = + _$UnionEmbeddingInputArrayStringImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingInputArrayStringImplCopyWith< + _$UnionEmbeddingInputArrayStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionEmbeddingInputArrayIntegerImplCopyWith<$Res> { + factory _$$UnionEmbeddingInputArrayIntegerImplCopyWith( + _$UnionEmbeddingInputArrayIntegerImpl value, + $Res Function(_$UnionEmbeddingInputArrayIntegerImpl) then) = + __$$UnionEmbeddingInputArrayIntegerImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class __$$UnionEmbeddingInputArrayIntegerImplCopyWithImpl<$Res> + extends _$EmbeddingInputCopyWithImpl<$Res, + _$UnionEmbeddingInputArrayIntegerImpl> + implements _$$UnionEmbeddingInputArrayIntegerImplCopyWith<$Res> { + __$$UnionEmbeddingInputArrayIntegerImplCopyWithImpl( + _$UnionEmbeddingInputArrayIntegerImpl _value, + $Res Function(_$UnionEmbeddingInputArrayIntegerImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingInputArrayIntegerImpl( + null == value + ? 
_value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingInputArrayIntegerImpl + extends _UnionEmbeddingInputArrayInteger { + const _$UnionEmbeddingInputArrayIntegerImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'arrayInteger', + super._(); + + factory _$UnionEmbeddingInputArrayIntegerImpl.fromJson( + Map json) => + _$$UnionEmbeddingInputArrayIntegerImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingInput.arrayInteger(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingInputArrayIntegerImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingInputArrayIntegerImplCopyWith< + _$UnionEmbeddingInputArrayIntegerImpl> + get copyWith => __$$UnionEmbeddingInputArrayIntegerImplCopyWithImpl< + _$UnionEmbeddingInputArrayIntegerImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return arrayInteger(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? 
arrayInteger, + TResult? Function(List> value)? array, + }) { + return arrayInteger?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? array, + required TResult orElse(), + }) { + if (arrayInteger != null) { + return arrayInteger(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingInputString value) string, + required TResult Function(_UnionEmbeddingInputArrayString value) + arrayString, + required TResult Function(_UnionEmbeddingInputArrayInteger value) + arrayInteger, + required TResult Function(_UnionEmbeddingInputArray value) array, + }) { + return arrayInteger(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingInputString value)? string, + TResult? Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult? Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult? Function(_UnionEmbeddingInputArray value)? array, + }) { + return arrayInteger?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingInputString value)? string, + TResult Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult Function(_UnionEmbeddingInputArray value)? 
array, + required TResult orElse(), + }) { + if (arrayInteger != null) { + return arrayInteger(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingInputArrayIntegerImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingInputArrayInteger extends EmbeddingInput { + const factory _UnionEmbeddingInputArrayInteger(final List value) = + _$UnionEmbeddingInputArrayIntegerImpl; + const _UnionEmbeddingInputArrayInteger._() : super._(); + + factory _UnionEmbeddingInputArrayInteger.fromJson(Map json) = + _$UnionEmbeddingInputArrayIntegerImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingInputArrayIntegerImplCopyWith< + _$UnionEmbeddingInputArrayIntegerImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionEmbeddingInputArrayImplCopyWith<$Res> { + factory _$$UnionEmbeddingInputArrayImplCopyWith( + _$UnionEmbeddingInputArrayImpl value, + $Res Function(_$UnionEmbeddingInputArrayImpl) then) = + __$$UnionEmbeddingInputArrayImplCopyWithImpl<$Res>; + @useResult + $Res call({List> value}); +} + +/// @nodoc +class __$$UnionEmbeddingInputArrayImplCopyWithImpl<$Res> + extends _$EmbeddingInputCopyWithImpl<$Res, _$UnionEmbeddingInputArrayImpl> + implements _$$UnionEmbeddingInputArrayImplCopyWith<$Res> { + __$$UnionEmbeddingInputArrayImplCopyWithImpl( + _$UnionEmbeddingInputArrayImpl _value, + $Res Function(_$UnionEmbeddingInputArrayImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionEmbeddingInputArrayImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List>, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionEmbeddingInputArrayImpl extends _UnionEmbeddingInputArray { + const _$UnionEmbeddingInputArrayImpl(final List> value, + {final String? $type}) + : _value = value, + $type = $type ?? 
'array', + super._(); + + factory _$UnionEmbeddingInputArrayImpl.fromJson(Map json) => + _$$UnionEmbeddingInputArrayImplFromJson(json); + + final List> _value; + @override + List> get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'EmbeddingInput.array(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionEmbeddingInputArrayImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionEmbeddingInputArrayImplCopyWith<_$UnionEmbeddingInputArrayImpl> + get copyWith => __$$UnionEmbeddingInputArrayImplCopyWithImpl< + _$UnionEmbeddingInputArrayImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + required TResult Function(List value) arrayInteger, + required TResult Function(List> value) array, + }) { + return array(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + TResult? Function(List value)? arrayInteger, + TResult? Function(List> value)? array, + }) { + return array?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + TResult Function(List value)? arrayInteger, + TResult Function(List> value)? 
array, + required TResult orElse(), + }) { + if (array != null) { + return array(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionEmbeddingInputString value) string, + required TResult Function(_UnionEmbeddingInputArrayString value) + arrayString, + required TResult Function(_UnionEmbeddingInputArrayInteger value) + arrayInteger, + required TResult Function(_UnionEmbeddingInputArray value) array, + }) { + return array(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionEmbeddingInputString value)? string, + TResult? Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult? Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult? Function(_UnionEmbeddingInputArray value)? array, + }) { + return array?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionEmbeddingInputString value)? string, + TResult Function(_UnionEmbeddingInputArrayString value)? arrayString, + TResult Function(_UnionEmbeddingInputArrayInteger value)? arrayInteger, + TResult Function(_UnionEmbeddingInputArray value)? 
array, + required TResult orElse(), + }) { + if (array != null) { + return array(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionEmbeddingInputArrayImplToJson( + this, + ); + } +} + +abstract class _UnionEmbeddingInputArray extends EmbeddingInput { + const factory _UnionEmbeddingInputArray(final List> value) = + _$UnionEmbeddingInputArrayImpl; + const _UnionEmbeddingInputArray._() : super._(); + + factory _UnionEmbeddingInputArray.fromJson(Map json) = + _$UnionEmbeddingInputArrayImpl.fromJson; + + @override + List> get value; + @JsonKey(ignore: true) + _$$UnionEmbeddingInputArrayImplCopyWith<_$UnionEmbeddingInputArrayImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateEmbeddingResponse _$CreateEmbeddingResponseFromJson( + Map json) { + return _CreateEmbeddingResponse.fromJson(json); +} + +/// @nodoc +mixin _$CreateEmbeddingResponse { + /// The list of embeddings generated by the model. + List get data => throw _privateConstructorUsedError; + + /// The name of the model used to generate the embedding. + String get model => throw _privateConstructorUsedError; + + /// The object type, which is always "list". + String get object => throw _privateConstructorUsedError; + + /// The usage information for the request. 
+ EmbeddingUsage get usage => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateEmbeddingResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateEmbeddingResponseCopyWith<$Res> { + factory $CreateEmbeddingResponseCopyWith(CreateEmbeddingResponse value, + $Res Function(CreateEmbeddingResponse) then) = + _$CreateEmbeddingResponseCopyWithImpl<$Res, CreateEmbeddingResponse>; + @useResult + $Res call( + {List data, + String model, + String object, + EmbeddingUsage usage}); + + $EmbeddingUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class _$CreateEmbeddingResponseCopyWithImpl<$Res, + $Val extends CreateEmbeddingResponse> + implements $CreateEmbeddingResponseCopyWith<$Res> { + _$CreateEmbeddingResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? model = null, + Object? object = null, + Object? usage = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usage: null == usage + ? 
_value.usage + : usage // ignore: cast_nullable_to_non_nullable + as EmbeddingUsage, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $EmbeddingUsageCopyWith<$Res> get usage { + return $EmbeddingUsageCopyWith<$Res>(_value.usage, (value) { + return _then(_value.copyWith(usage: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateEmbeddingResponseImplCopyWith<$Res> + implements $CreateEmbeddingResponseCopyWith<$Res> { + factory _$$CreateEmbeddingResponseImplCopyWith( + _$CreateEmbeddingResponseImpl value, + $Res Function(_$CreateEmbeddingResponseImpl) then) = + __$$CreateEmbeddingResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {List data, + String model, + String object, + EmbeddingUsage usage}); + + @override + $EmbeddingUsageCopyWith<$Res> get usage; +} + +/// @nodoc +class __$$CreateEmbeddingResponseImplCopyWithImpl<$Res> + extends _$CreateEmbeddingResponseCopyWithImpl<$Res, + _$CreateEmbeddingResponseImpl> + implements _$$CreateEmbeddingResponseImplCopyWith<$Res> { + __$$CreateEmbeddingResponseImplCopyWithImpl( + _$CreateEmbeddingResponseImpl _value, + $Res Function(_$CreateEmbeddingResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? model = null, + Object? object = null, + Object? usage = null, + }) { + return _then(_$CreateEmbeddingResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + usage: null == usage + ? 
_value.usage + : usage // ignore: cast_nullable_to_non_nullable + as EmbeddingUsage, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateEmbeddingResponseImpl extends _CreateEmbeddingResponse { + const _$CreateEmbeddingResponseImpl( + {required final List data, + required this.model, + required this.object, + required this.usage}) + : _data = data, + super._(); + + factory _$CreateEmbeddingResponseImpl.fromJson(Map json) => + _$$CreateEmbeddingResponseImplFromJson(json); + + /// The list of embeddings generated by the model. + final List _data; + + /// The list of embeddings generated by the model. + @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + /// The name of the model used to generate the embedding. + @override + final String model; + + /// The object type, which is always "list". + @override + final String object; + + /// The usage information for the request. 
+ @override + final EmbeddingUsage usage; + + @override + String toString() { + return 'CreateEmbeddingResponse(data: $data, model: $model, object: $object, usage: $usage)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateEmbeddingResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.model, model) || other.model == model) && + (identical(other.object, object) || other.object == object) && + (identical(other.usage, usage) || other.usage == usage)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, + const DeepCollectionEquality().hash(_data), model, object, usage); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> + get copyWith => __$$CreateEmbeddingResponseImplCopyWithImpl< + _$CreateEmbeddingResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateEmbeddingResponseImplToJson( + this, + ); + } +} + +abstract class _CreateEmbeddingResponse extends CreateEmbeddingResponse { + const factory _CreateEmbeddingResponse( + {required final List data, + required final String model, + required final String object, + required final EmbeddingUsage usage}) = _$CreateEmbeddingResponseImpl; + const _CreateEmbeddingResponse._() : super._(); + + factory _CreateEmbeddingResponse.fromJson(Map json) = + _$CreateEmbeddingResponseImpl.fromJson; + + @override + + /// The list of embeddings generated by the model. + List get data; + @override + + /// The name of the model used to generate the embedding. + String get model; + @override + + /// The object type, which is always "list". + String get object; + @override + + /// The usage information for the request. 
+ EmbeddingUsage get usage; + @override + @JsonKey(ignore: true) + _$$CreateEmbeddingResponseImplCopyWith<_$CreateEmbeddingResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Embedding _$EmbeddingFromJson(Map json) { + return _Embedding.fromJson(json); +} + +/// @nodoc +mixin _$Embedding { + /// The index of the embedding in the list of embeddings. + int get index => throw _privateConstructorUsedError; + + /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + List get embedding => throw _privateConstructorUsedError; + + /// The object type, which is always "embedding". + String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $EmbeddingCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $EmbeddingCopyWith<$Res> { + factory $EmbeddingCopyWith(Embedding value, $Res Function(Embedding) then) = + _$EmbeddingCopyWithImpl<$Res, Embedding>; + @useResult + $Res call({int index, List embedding, String object}); +} + +/// @nodoc +class _$EmbeddingCopyWithImpl<$Res, $Val extends Embedding> + implements $EmbeddingCopyWith<$Res> { + _$EmbeddingCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? embedding = null, + Object? object = null, + }) { + return _then(_value.copyWith( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + embedding: null == embedding + ? _value.embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$EmbeddingImplCopyWith<$Res> + implements $EmbeddingCopyWith<$Res> { + factory _$$EmbeddingImplCopyWith( + _$EmbeddingImpl value, $Res Function(_$EmbeddingImpl) then) = + __$$EmbeddingImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int index, List embedding, String object}); +} + +/// @nodoc +class __$$EmbeddingImplCopyWithImpl<$Res> + extends _$EmbeddingCopyWithImpl<$Res, _$EmbeddingImpl> + implements _$$EmbeddingImplCopyWith<$Res> { + __$$EmbeddingImplCopyWithImpl( + _$EmbeddingImpl _value, $Res Function(_$EmbeddingImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? index = null, + Object? embedding = null, + Object? object = null, + }) { + return _then(_$EmbeddingImpl( + index: null == index + ? _value.index + : index // ignore: cast_nullable_to_non_nullable + as int, + embedding: null == embedding + ? _value._embedding + : embedding // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$EmbeddingImpl extends _Embedding { + const _$EmbeddingImpl( + {required this.index, + required final List embedding, + required this.object}) + : _embedding = embedding, + super._(); + + factory _$EmbeddingImpl.fromJson(Map json) => + _$$EmbeddingImplFromJson(json); + + /// The index of the embedding in the list of embeddings. + @override + final int index; + + /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + final List _embedding; + + /// The embedding vector, which is a list of floats. 
The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + @override + List get embedding { + if (_embedding is EqualUnmodifiableListView) return _embedding; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_embedding); + } + + /// The object type, which is always "embedding". + @override + final String object; + + @override + String toString() { + return 'Embedding(index: $index, embedding: $embedding, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$EmbeddingImpl && + (identical(other.index, index) || other.index == index) && + const DeepCollectionEquality() + .equals(other._embedding, _embedding) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, index, + const DeepCollectionEquality().hash(_embedding), object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => + __$$EmbeddingImplCopyWithImpl<_$EmbeddingImpl>(this, _$identity); + + @override + Map toJson() { + return _$$EmbeddingImplToJson( + this, + ); + } +} + +abstract class _Embedding extends Embedding { + const factory _Embedding( + {required final int index, + required final List embedding, + required final String object}) = _$EmbeddingImpl; + const _Embedding._() : super._(); + + factory _Embedding.fromJson(Map json) = + _$EmbeddingImpl.fromJson; + + @override + + /// The index of the embedding in the list of embeddings. + int get index; + @override + + /// The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + List get embedding; + @override + + /// The object type, which is always "embedding". 
+ String get object; + @override + @JsonKey(ignore: true) + _$$EmbeddingImplCopyWith<_$EmbeddingImpl> get copyWith => + throw _privateConstructorUsedError; +} + +EmbeddingUsage _$EmbeddingUsageFromJson(Map json) { + return _EmbeddingUsage.fromJson(json); +} + +/// @nodoc +mixin _$EmbeddingUsage { + /// The number of tokens used by the prompt. + @JsonKey(name: 'prompt_tokens') + int get promptTokens => throw _privateConstructorUsedError; + + /// The total number of tokens used by the request. + @JsonKey(name: 'total_tokens') + int get totalTokens => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $EmbeddingUsageCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $EmbeddingUsageCopyWith<$Res> { + factory $EmbeddingUsageCopyWith( + EmbeddingUsage value, $Res Function(EmbeddingUsage) then) = + _$EmbeddingUsageCopyWithImpl<$Res, EmbeddingUsage>; + @useResult + $Res call( + {@JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); +} + +/// @nodoc +class _$EmbeddingUsageCopyWithImpl<$Res, $Val extends EmbeddingUsage> + implements $EmbeddingUsageCopyWith<$Res> { + _$EmbeddingUsageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? promptTokens = null, + Object? totalTokens = null, + }) { + return _then(_value.copyWith( + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? 
_value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$EmbeddingUsageImplCopyWith<$Res> + implements $EmbeddingUsageCopyWith<$Res> { + factory _$$EmbeddingUsageImplCopyWith(_$EmbeddingUsageImpl value, + $Res Function(_$EmbeddingUsageImpl) then) = + __$$EmbeddingUsageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'prompt_tokens') int promptTokens, + @JsonKey(name: 'total_tokens') int totalTokens}); +} + +/// @nodoc +class __$$EmbeddingUsageImplCopyWithImpl<$Res> + extends _$EmbeddingUsageCopyWithImpl<$Res, _$EmbeddingUsageImpl> + implements _$$EmbeddingUsageImplCopyWith<$Res> { + __$$EmbeddingUsageImplCopyWithImpl( + _$EmbeddingUsageImpl _value, $Res Function(_$EmbeddingUsageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? promptTokens = null, + Object? totalTokens = null, + }) { + return _then(_$EmbeddingUsageImpl( + promptTokens: null == promptTokens + ? _value.promptTokens + : promptTokens // ignore: cast_nullable_to_non_nullable + as int, + totalTokens: null == totalTokens + ? _value.totalTokens + : totalTokens // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$EmbeddingUsageImpl extends _EmbeddingUsage { + const _$EmbeddingUsageImpl( + {@JsonKey(name: 'prompt_tokens') required this.promptTokens, + @JsonKey(name: 'total_tokens') required this.totalTokens}) + : super._(); + + factory _$EmbeddingUsageImpl.fromJson(Map json) => + _$$EmbeddingUsageImplFromJson(json); + + /// The number of tokens used by the prompt. + @override + @JsonKey(name: 'prompt_tokens') + final int promptTokens; + + /// The total number of tokens used by the request. 
+ @override + @JsonKey(name: 'total_tokens') + final int totalTokens; + + @override + String toString() { + return 'EmbeddingUsage(promptTokens: $promptTokens, totalTokens: $totalTokens)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$EmbeddingUsageImpl && + (identical(other.promptTokens, promptTokens) || + other.promptTokens == promptTokens) && + (identical(other.totalTokens, totalTokens) || + other.totalTokens == totalTokens)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, promptTokens, totalTokens); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => + __$$EmbeddingUsageImplCopyWithImpl<_$EmbeddingUsageImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$EmbeddingUsageImplToJson( + this, + ); + } +} + +abstract class _EmbeddingUsage extends EmbeddingUsage { + const factory _EmbeddingUsage( + {@JsonKey(name: 'prompt_tokens') required final int promptTokens, + @JsonKey(name: 'total_tokens') required final int totalTokens}) = + _$EmbeddingUsageImpl; + const _EmbeddingUsage._() : super._(); + + factory _EmbeddingUsage.fromJson(Map json) = + _$EmbeddingUsageImpl.fromJson; + + @override + + /// The number of tokens used by the prompt. + @JsonKey(name: 'prompt_tokens') + int get promptTokens; + @override + + /// The total number of tokens used by the request. + @JsonKey(name: 'total_tokens') + int get totalTokens; + @override + @JsonKey(ignore: true) + _$$EmbeddingUsageImplCopyWith<_$EmbeddingUsageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateFineTuningJobRequest _$CreateFineTuningJobRequestFromJson( + Map json) { + return _CreateFineTuningJobRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateFineTuningJobRequest { + /// The name of the model to fine-tune. 
You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @_FineTuningModelConverter() + FineTuningModel get model => throw _privateConstructorUsedError; + + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(name: 'training_file') + String get trainingFile => throw _privateConstructorUsedError; + + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(includeIfNull: false) + FineTuningJobHyperparameters? get hyperparameters => + throw _privateConstructorUsedError; + + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + @JsonKey(includeIfNull: false) + String? get suffix => throw _privateConstructorUsedError; + + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation + /// metrics periodically during fine-tuning. These metrics can be viewed in + /// the fine-tuning results file. + /// The same data should not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(name: 'validation_file', includeIfNull: false) + String? 
get validationFile => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateFineTuningJobRequestCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateFineTuningJobRequestCopyWith<$Res> { + factory $CreateFineTuningJobRequestCopyWith(CreateFineTuningJobRequest value, + $Res Function(CreateFineTuningJobRequest) then) = + _$CreateFineTuningJobRequestCopyWithImpl<$Res, + CreateFineTuningJobRequest>; + @useResult + $Res call( + {@_FineTuningModelConverter() FineTuningModel model, + @JsonKey(name: 'training_file') String trainingFile, + @JsonKey(includeIfNull: false) + FineTuningJobHyperparameters? hyperparameters, + @JsonKey(includeIfNull: false) String? suffix, + @JsonKey(name: 'validation_file', includeIfNull: false) + String? validationFile}); + + $FineTuningModelCopyWith<$Res> get model; + $FineTuningJobHyperparametersCopyWith<$Res>? get hyperparameters; +} + +/// @nodoc +class _$CreateFineTuningJobRequestCopyWithImpl<$Res, + $Val extends CreateFineTuningJobRequest> + implements $CreateFineTuningJobRequestCopyWith<$Res> { + _$CreateFineTuningJobRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? trainingFile = null, + Object? hyperparameters = freezed, + Object? suffix = freezed, + Object? validationFile = freezed, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as FineTuningModel, + trainingFile: null == trainingFile + ? _value.trainingFile + : trainingFile // ignore: cast_nullable_to_non_nullable + as String, + hyperparameters: freezed == hyperparameters + ? 
_value.hyperparameters + : hyperparameters // ignore: cast_nullable_to_non_nullable + as FineTuningJobHyperparameters?, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, + validationFile: freezed == validationFile + ? _value.validationFile + : validationFile // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningModelCopyWith<$Res> get model { + return $FineTuningModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningJobHyperparametersCopyWith<$Res>? get hyperparameters { + if (_value.hyperparameters == null) { + return null; + } + + return $FineTuningJobHyperparametersCopyWith<$Res>(_value.hyperparameters!, + (value) { + return _then(_value.copyWith(hyperparameters: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateFineTuningJobRequestImplCopyWith<$Res> + implements $CreateFineTuningJobRequestCopyWith<$Res> { + factory _$$CreateFineTuningJobRequestImplCopyWith( + _$CreateFineTuningJobRequestImpl value, + $Res Function(_$CreateFineTuningJobRequestImpl) then) = + __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_FineTuningModelConverter() FineTuningModel model, + @JsonKey(name: 'training_file') String trainingFile, + @JsonKey(includeIfNull: false) + FineTuningJobHyperparameters? hyperparameters, + @JsonKey(includeIfNull: false) String? suffix, + @JsonKey(name: 'validation_file', includeIfNull: false) + String? validationFile}); + + @override + $FineTuningModelCopyWith<$Res> get model; + @override + $FineTuningJobHyperparametersCopyWith<$Res>? 
get hyperparameters; +} + +/// @nodoc +class __$$CreateFineTuningJobRequestImplCopyWithImpl<$Res> + extends _$CreateFineTuningJobRequestCopyWithImpl<$Res, + _$CreateFineTuningJobRequestImpl> + implements _$$CreateFineTuningJobRequestImplCopyWith<$Res> { + __$$CreateFineTuningJobRequestImplCopyWithImpl( + _$CreateFineTuningJobRequestImpl _value, + $Res Function(_$CreateFineTuningJobRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? trainingFile = null, + Object? hyperparameters = freezed, + Object? suffix = freezed, + Object? validationFile = freezed, + }) { + return _then(_$CreateFineTuningJobRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as FineTuningModel, + trainingFile: null == trainingFile + ? _value.trainingFile + : trainingFile // ignore: cast_nullable_to_non_nullable + as String, + hyperparameters: freezed == hyperparameters + ? _value.hyperparameters + : hyperparameters // ignore: cast_nullable_to_non_nullable + as FineTuningJobHyperparameters?, + suffix: freezed == suffix + ? _value.suffix + : suffix // ignore: cast_nullable_to_non_nullable + as String?, + validationFile: freezed == validationFile + ? 
_value.validationFile + : validationFile // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateFineTuningJobRequestImpl extends _CreateFineTuningJobRequest { + const _$CreateFineTuningJobRequestImpl( + {@_FineTuningModelConverter() required this.model, + @JsonKey(name: 'training_file') required this.trainingFile, + @JsonKey(includeIfNull: false) this.hyperparameters, + @JsonKey(includeIfNull: false) this.suffix, + @JsonKey(name: 'validation_file', includeIfNull: false) + this.validationFile}) + : super._(); + + factory _$CreateFineTuningJobRequestImpl.fromJson( + Map json) => + _$$CreateFineTuningJobRequestImplFromJson(json); + + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @override + @_FineTuningModelConverter() + final FineTuningModel model; + + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override + @JsonKey(name: 'training_file') + final String trainingFile; + + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override + @JsonKey(includeIfNull: false) + final FineTuningJobHyperparameters? hyperparameters; + + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + @override + @JsonKey(includeIfNull: false) + final String? 
suffix; + + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation + /// metrics periodically during fine-tuning. These metrics can be viewed in + /// the fine-tuning results file. + /// The same data should not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override + @JsonKey(name: 'validation_file', includeIfNull: false) + final String? validationFile; + + @override + String toString() { + return 'CreateFineTuningJobRequest(model: $model, trainingFile: $trainingFile, hyperparameters: $hyperparameters, suffix: $suffix, validationFile: $validationFile)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateFineTuningJobRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.trainingFile, trainingFile) || + other.trainingFile == trainingFile) && + (identical(other.hyperparameters, hyperparameters) || + other.hyperparameters == hyperparameters) && + (identical(other.suffix, suffix) || other.suffix == suffix) && + (identical(other.validationFile, validationFile) || + other.validationFile == validationFile)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, model, trainingFile, + hyperparameters, suffix, validationFile); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> + get copyWith => __$$CreateFineTuningJobRequestImplCopyWithImpl< + _$CreateFineTuningJobRequestImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateFineTuningJobRequestImplToJson( + this, + ); + } +} + 
+abstract class _CreateFineTuningJobRequest extends CreateFineTuningJobRequest { + const factory _CreateFineTuningJobRequest( + {@_FineTuningModelConverter() required final FineTuningModel model, + @JsonKey(name: 'training_file') required final String trainingFile, + @JsonKey(includeIfNull: false) + final FineTuningJobHyperparameters? hyperparameters, + @JsonKey(includeIfNull: false) final String? suffix, + @JsonKey(name: 'validation_file', includeIfNull: false) + final String? validationFile}) = _$CreateFineTuningJobRequestImpl; + const _CreateFineTuningJobRequest._() : super._(); + + factory _CreateFineTuningJobRequest.fromJson(Map json) = + _$CreateFineTuningJobRequestImpl.fromJson; + + @override + + /// The name of the model to fine-tune. You can select one of the + /// [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + @_FineTuningModelConverter() + FineTuningModel get model; + @override + + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(name: 'training_file') + String get trainingFile; + @override + + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(includeIfNull: false) + FineTuningJobHyperparameters? get hyperparameters; + @override + + /// A string of up to 18 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + @JsonKey(includeIfNull: false) + String? 
get suffix; + @override + + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation + /// metrics periodically during fine-tuning. These metrics can be viewed in + /// the fine-tuning results file. + /// The same data should not be present in both train and validation files. + /// + /// Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @JsonKey(name: 'validation_file', includeIfNull: false) + String? get validationFile; + @override + @JsonKey(ignore: true) + _$$CreateFineTuningJobRequestImplCopyWith<_$CreateFineTuningJobRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +FineTuningModel _$FineTuningModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionFineTuningModelString.fromJson(json); + case 'enumeration': + return _UnionFineTuningModelEnum.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'FineTuningModel', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$FineTuningModel { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(FineTuningModels value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(FineTuningModels value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(FineTuningModels value)? 
enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionFineTuningModelString value) string, + required TResult Function(_UnionFineTuningModelEnum value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionFineTuningModelString value)? string, + TResult? Function(_UnionFineTuningModelEnum value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionFineTuningModelString value)? string, + TResult Function(_UnionFineTuningModelEnum value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningModelCopyWith<$Res> { + factory $FineTuningModelCopyWith( + FineTuningModel value, $Res Function(FineTuningModel) then) = + _$FineTuningModelCopyWithImpl<$Res, FineTuningModel>; +} + +/// @nodoc +class _$FineTuningModelCopyWithImpl<$Res, $Val extends FineTuningModel> + implements $FineTuningModelCopyWith<$Res> { + _$FineTuningModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionFineTuningModelStringImplCopyWith<$Res> { + factory _$$UnionFineTuningModelStringImplCopyWith( + _$UnionFineTuningModelStringImpl value, + $Res Function(_$UnionFineTuningModelStringImpl) then) = + __$$UnionFineTuningModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionFineTuningModelStringImplCopyWithImpl<$Res> + extends _$FineTuningModelCopyWithImpl<$Res, + _$UnionFineTuningModelStringImpl> + implements _$$UnionFineTuningModelStringImplCopyWith<$Res> { + __$$UnionFineTuningModelStringImplCopyWithImpl( + 
_$UnionFineTuningModelStringImpl _value, + $Res Function(_$UnionFineTuningModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionFineTuningModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionFineTuningModelStringImpl extends _UnionFineTuningModelString { + const _$UnionFineTuningModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionFineTuningModelStringImpl.fromJson( + Map json) => + _$$UnionFineTuningModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'FineTuningModel.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionFineTuningModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionFineTuningModelStringImplCopyWith<_$UnionFineTuningModelStringImpl> + get copyWith => __$$UnionFineTuningModelStringImplCopyWithImpl< + _$UnionFineTuningModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(FineTuningModels value) enumeration, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(FineTuningModels value)? enumeration, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? 
string, + TResult Function(FineTuningModels value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionFineTuningModelString value) string, + required TResult Function(_UnionFineTuningModelEnum value) enumeration, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionFineTuningModelString value)? string, + TResult? Function(_UnionFineTuningModelEnum value)? enumeration, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionFineTuningModelString value)? string, + TResult Function(_UnionFineTuningModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionFineTuningModelStringImplToJson( + this, + ); + } +} + +abstract class _UnionFineTuningModelString extends FineTuningModel { + const factory _UnionFineTuningModelString(final String value) = + _$UnionFineTuningModelStringImpl; + const _UnionFineTuningModelString._() : super._(); + + factory _UnionFineTuningModelString.fromJson(Map json) = + _$UnionFineTuningModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionFineTuningModelStringImplCopyWith<_$UnionFineTuningModelStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionFineTuningModelEnumImplCopyWith<$Res> { + factory _$$UnionFineTuningModelEnumImplCopyWith( + _$UnionFineTuningModelEnumImpl value, + $Res Function(_$UnionFineTuningModelEnumImpl) then) = + __$$UnionFineTuningModelEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({FineTuningModels value}); +} + +/// @nodoc +class __$$UnionFineTuningModelEnumImplCopyWithImpl<$Res> + extends 
_$FineTuningModelCopyWithImpl<$Res, _$UnionFineTuningModelEnumImpl> + implements _$$UnionFineTuningModelEnumImplCopyWith<$Res> { + __$$UnionFineTuningModelEnumImplCopyWithImpl( + _$UnionFineTuningModelEnumImpl _value, + $Res Function(_$UnionFineTuningModelEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionFineTuningModelEnumImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as FineTuningModels, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionFineTuningModelEnumImpl extends _UnionFineTuningModelEnum { + const _$UnionFineTuningModelEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 'enumeration', + super._(); + + factory _$UnionFineTuningModelEnumImpl.fromJson(Map json) => + _$$UnionFineTuningModelEnumImplFromJson(json); + + @override + final FineTuningModels value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'FineTuningModel.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionFineTuningModelEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionFineTuningModelEnumImplCopyWith<_$UnionFineTuningModelEnumImpl> + get copyWith => __$$UnionFineTuningModelEnumImplCopyWithImpl< + _$UnionFineTuningModelEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(FineTuningModels value) enumeration, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? 
string, + TResult? Function(FineTuningModels value)? enumeration, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(FineTuningModels value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionFineTuningModelString value) string, + required TResult Function(_UnionFineTuningModelEnum value) enumeration, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionFineTuningModelString value)? string, + TResult? Function(_UnionFineTuningModelEnum value)? enumeration, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionFineTuningModelString value)? string, + TResult Function(_UnionFineTuningModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionFineTuningModelEnumImplToJson( + this, + ); + } +} + +abstract class _UnionFineTuningModelEnum extends FineTuningModel { + const factory _UnionFineTuningModelEnum(final FineTuningModels value) = + _$UnionFineTuningModelEnumImpl; + const _UnionFineTuningModelEnum._() : super._(); + + factory _UnionFineTuningModelEnum.fromJson(Map json) = + _$UnionFineTuningModelEnumImpl.fromJson; + + @override + FineTuningModels get value; + @JsonKey(ignore: true) + _$$UnionFineTuningModelEnumImplCopyWith<_$UnionFineTuningModelEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +FineTuningJob _$FineTuningJobFromJson(Map json) { + return _FineTuningJob.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJob { + /// The object identifier, which can be referenced in the API endpoints. 
+ String get id => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; + + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + FineTuningJobError? get error => throw _privateConstructorUsedError; + + /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'fine_tuned_model') + String? get fineTunedModel => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'finished_at') + int? get finishedAt => throw _privateConstructorUsedError; + + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + FineTuningJobHyperparameters get hyperparameters => + throw _privateConstructorUsedError; + + /// The base model that is being fine-tuned. + String get model => throw _privateConstructorUsedError; + + /// The object type, which is always "fine_tuning.job". + String get object => throw _privateConstructorUsedError; + + /// The organization that owns the fine-tuning job. + @JsonKey(name: 'organization_id') + String get organizationId => throw _privateConstructorUsedError; + + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'result_files') + List get resultFiles => throw _privateConstructorUsedError; + + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. 
+ FineTuningJobStatus get status => throw _privateConstructorUsedError; + + /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'trained_tokens') + int? get trainedTokens => throw _privateConstructorUsedError; + + /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'training_file') + String get trainingFile => throw _privateConstructorUsedError; + + /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'validation_file') + String? get validationFile => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobCopyWith<$Res> { + factory $FineTuningJobCopyWith( + FineTuningJob value, $Res Function(FineTuningJob) then) = + _$FineTuningJobCopyWithImpl<$Res, FineTuningJob>; + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + FineTuningJobError? error, + @JsonKey(name: 'fine_tuned_model') String? fineTunedModel, + @JsonKey(name: 'finished_at') int? finishedAt, + FineTuningJobHyperparameters hyperparameters, + String model, + String object, + @JsonKey(name: 'organization_id') String organizationId, + @JsonKey(name: 'result_files') List resultFiles, + FineTuningJobStatus status, + @JsonKey(name: 'trained_tokens') int? trainedTokens, + @JsonKey(name: 'training_file') String trainingFile, + @JsonKey(name: 'validation_file') String? validationFile}); + + $FineTuningJobErrorCopyWith<$Res>? 
get error; + $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters; +} + +/// @nodoc +class _$FineTuningJobCopyWithImpl<$Res, $Val extends FineTuningJob> + implements $FineTuningJobCopyWith<$Res> { + _$FineTuningJobCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? error = freezed, + Object? fineTunedModel = freezed, + Object? finishedAt = freezed, + Object? hyperparameters = null, + Object? model = null, + Object? object = null, + Object? organizationId = null, + Object? resultFiles = null, + Object? status = null, + Object? trainedTokens = freezed, + Object? trainingFile = null, + Object? validationFile = freezed, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + error: freezed == error + ? _value.error + : error // ignore: cast_nullable_to_non_nullable + as FineTuningJobError?, + fineTunedModel: freezed == fineTunedModel + ? _value.fineTunedModel + : fineTunedModel // ignore: cast_nullable_to_non_nullable + as String?, + finishedAt: freezed == finishedAt + ? _value.finishedAt + : finishedAt // ignore: cast_nullable_to_non_nullable + as int?, + hyperparameters: null == hyperparameters + ? _value.hyperparameters + : hyperparameters // ignore: cast_nullable_to_non_nullable + as FineTuningJobHyperparameters, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + organizationId: null == organizationId + ? 
_value.organizationId + : organizationId // ignore: cast_nullable_to_non_nullable + as String, + resultFiles: null == resultFiles + ? _value.resultFiles + : resultFiles // ignore: cast_nullable_to_non_nullable + as List, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as FineTuningJobStatus, + trainedTokens: freezed == trainedTokens + ? _value.trainedTokens + : trainedTokens // ignore: cast_nullable_to_non_nullable + as int?, + trainingFile: null == trainingFile + ? _value.trainingFile + : trainingFile // ignore: cast_nullable_to_non_nullable + as String, + validationFile: freezed == validationFile + ? _value.validationFile + : validationFile // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningJobErrorCopyWith<$Res>? get error { + if (_value.error == null) { + return null; + } + + return $FineTuningJobErrorCopyWith<$Res>(_value.error!, (value) { + return _then(_value.copyWith(error: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters { + return $FineTuningJobHyperparametersCopyWith<$Res>(_value.hyperparameters, + (value) { + return _then(_value.copyWith(hyperparameters: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$FineTuningJobImplCopyWith<$Res> + implements $FineTuningJobCopyWith<$Res> { + factory _$$FineTuningJobImplCopyWith( + _$FineTuningJobImpl value, $Res Function(_$FineTuningJobImpl) then) = + __$$FineTuningJobImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + FineTuningJobError? error, + @JsonKey(name: 'fine_tuned_model') String? fineTunedModel, + @JsonKey(name: 'finished_at') int? 
finishedAt, + FineTuningJobHyperparameters hyperparameters, + String model, + String object, + @JsonKey(name: 'organization_id') String organizationId, + @JsonKey(name: 'result_files') List resultFiles, + FineTuningJobStatus status, + @JsonKey(name: 'trained_tokens') int? trainedTokens, + @JsonKey(name: 'training_file') String trainingFile, + @JsonKey(name: 'validation_file') String? validationFile}); + + @override + $FineTuningJobErrorCopyWith<$Res>? get error; + @override + $FineTuningJobHyperparametersCopyWith<$Res> get hyperparameters; +} + +/// @nodoc +class __$$FineTuningJobImplCopyWithImpl<$Res> + extends _$FineTuningJobCopyWithImpl<$Res, _$FineTuningJobImpl> + implements _$$FineTuningJobImplCopyWith<$Res> { + __$$FineTuningJobImplCopyWithImpl( + _$FineTuningJobImpl _value, $Res Function(_$FineTuningJobImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? error = freezed, + Object? fineTunedModel = freezed, + Object? finishedAt = freezed, + Object? hyperparameters = null, + Object? model = null, + Object? object = null, + Object? organizationId = null, + Object? resultFiles = null, + Object? status = null, + Object? trainedTokens = freezed, + Object? trainingFile = null, + Object? validationFile = freezed, + }) { + return _then(_$FineTuningJobImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + error: freezed == error + ? _value.error + : error // ignore: cast_nullable_to_non_nullable + as FineTuningJobError?, + fineTunedModel: freezed == fineTunedModel + ? _value.fineTunedModel + : fineTunedModel // ignore: cast_nullable_to_non_nullable + as String?, + finishedAt: freezed == finishedAt + ? 
_value.finishedAt + : finishedAt // ignore: cast_nullable_to_non_nullable + as int?, + hyperparameters: null == hyperparameters + ? _value.hyperparameters + : hyperparameters // ignore: cast_nullable_to_non_nullable + as FineTuningJobHyperparameters, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + organizationId: null == organizationId + ? _value.organizationId + : organizationId // ignore: cast_nullable_to_non_nullable + as String, + resultFiles: null == resultFiles + ? _value._resultFiles + : resultFiles // ignore: cast_nullable_to_non_nullable + as List, + status: null == status + ? _value.status + : status // ignore: cast_nullable_to_non_nullable + as FineTuningJobStatus, + trainedTokens: freezed == trainedTokens + ? _value.trainedTokens + : trainedTokens // ignore: cast_nullable_to_non_nullable + as int?, + trainingFile: null == trainingFile + ? _value.trainingFile + : trainingFile // ignore: cast_nullable_to_non_nullable + as String, + validationFile: freezed == validationFile + ? 
_value.validationFile + : validationFile // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobImpl extends _FineTuningJob { + const _$FineTuningJobImpl( + {required this.id, + @JsonKey(name: 'created_at') required this.createdAt, + required this.error, + @JsonKey(name: 'fine_tuned_model') required this.fineTunedModel, + @JsonKey(name: 'finished_at') required this.finishedAt, + required this.hyperparameters, + required this.model, + required this.object, + @JsonKey(name: 'organization_id') required this.organizationId, + @JsonKey(name: 'result_files') required final List resultFiles, + required this.status, + @JsonKey(name: 'trained_tokens') required this.trainedTokens, + @JsonKey(name: 'training_file') required this.trainingFile, + @JsonKey(name: 'validation_file') required this.validationFile}) + : _resultFiles = resultFiles, + super._(); + + factory _$FineTuningJobImpl.fromJson(Map json) => + _$$FineTuningJobImplFromJson(json); + + /// The object identifier, which can be referenced in the API endpoints. + @override + final String id; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; + + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + @override + final FineTuningJobError? error; + + /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + @override + @JsonKey(name: 'fine_tuned_model') + final String? fineTunedModel; + + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @override + @JsonKey(name: 'finished_at') + final int? finishedAt; + + /// The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + @override + final FineTuningJobHyperparameters hyperparameters; + + /// The base model that is being fine-tuned. + @override + final String model; + + /// The object type, which is always "fine_tuning.job". + @override + final String object; + + /// The organization that owns the fine-tuning job. + @override + @JsonKey(name: 'organization_id') + final String organizationId; + + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + final List _resultFiles; + + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override + @JsonKey(name: 'result_files') + List get resultFiles { + if (_resultFiles is EqualUnmodifiableListView) return _resultFiles; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_resultFiles); + } + + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + @override + final FineTuningJobStatus status; + + /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @override + @JsonKey(name: 'trained_tokens') + final int? trainedTokens; + + /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @override + @JsonKey(name: 'training_file') + final String trainingFile; + + /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). 
+ @override + @JsonKey(name: 'validation_file') + final String? validationFile; + + @override + String toString() { + return 'FineTuningJob(id: $id, createdAt: $createdAt, error: $error, fineTunedModel: $fineTunedModel, finishedAt: $finishedAt, hyperparameters: $hyperparameters, model: $model, object: $object, organizationId: $organizationId, resultFiles: $resultFiles, status: $status, trainedTokens: $trainedTokens, trainingFile: $trainingFile, validationFile: $validationFile)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.error, error) || other.error == error) && + (identical(other.fineTunedModel, fineTunedModel) || + other.fineTunedModel == fineTunedModel) && + (identical(other.finishedAt, finishedAt) || + other.finishedAt == finishedAt) && + (identical(other.hyperparameters, hyperparameters) || + other.hyperparameters == hyperparameters) && + (identical(other.model, model) || other.model == model) && + (identical(other.object, object) || other.object == object) && + (identical(other.organizationId, organizationId) || + other.organizationId == organizationId) && + const DeepCollectionEquality() + .equals(other._resultFiles, _resultFiles) && + (identical(other.status, status) || other.status == status) && + (identical(other.trainedTokens, trainedTokens) || + other.trainedTokens == trainedTokens) && + (identical(other.trainingFile, trainingFile) || + other.trainingFile == trainingFile) && + (identical(other.validationFile, validationFile) || + other.validationFile == validationFile)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + id, + createdAt, + error, + fineTunedModel, + finishedAt, + hyperparameters, + model, + object, + organizationId, + const 
DeepCollectionEquality().hash(_resultFiles), + status, + trainedTokens, + trainingFile, + validationFile); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => + __$$FineTuningJobImplCopyWithImpl<_$FineTuningJobImpl>(this, _$identity); + + @override + Map toJson() { + return _$$FineTuningJobImplToJson( + this, + ); + } +} + +abstract class _FineTuningJob extends FineTuningJob { + const factory _FineTuningJob( + {required final String id, + @JsonKey(name: 'created_at') required final int createdAt, + required final FineTuningJobError? error, + @JsonKey(name: 'fine_tuned_model') required final String? fineTunedModel, + @JsonKey(name: 'finished_at') required final int? finishedAt, + required final FineTuningJobHyperparameters hyperparameters, + required final String model, + required final String object, + @JsonKey(name: 'organization_id') required final String organizationId, + @JsonKey(name: 'result_files') required final List resultFiles, + required final FineTuningJobStatus status, + @JsonKey(name: 'trained_tokens') required final int? trainedTokens, + @JsonKey(name: 'training_file') required final String trainingFile, + @JsonKey(name: 'validation_file') + required final String? validationFile}) = _$FineTuningJobImpl; + const _FineTuningJob._() : super._(); + + factory _FineTuningJob.fromJson(Map json) = + _$FineTuningJobImpl.fromJson; + + @override + + /// The object identifier, which can be referenced in the API endpoints. + String get id; + @override + + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + @JsonKey(name: 'created_at') + int get createdAt; + @override + + /// For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + FineTuningJobError? get error; + @override + + /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. 
+ @JsonKey(name: 'fine_tuned_model') + String? get fineTunedModel; + @override + + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'finished_at') + int? get finishedAt; + @override + + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + FineTuningJobHyperparameters get hyperparameters; + @override + + /// The base model that is being fine-tuned. + String get model; + @override + + /// The object type, which is always "fine_tuning.job". + String get object; + @override + + /// The organization that owns the fine-tuning job. + @JsonKey(name: 'organization_id') + String get organizationId; + @override + + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'result_files') + List get resultFiles; + @override + + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + FineTuningJobStatus get status; + @override + + /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + @JsonKey(name: 'trained_tokens') + int? get trainedTokens; + @override + + /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'training_file') + String get trainingFile; + @override + + /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + @JsonKey(name: 'validation_file') + String? 
get validationFile; + @override + @JsonKey(ignore: true) + _$$FineTuningJobImplCopyWith<_$FineTuningJobImpl> get copyWith => + throw _privateConstructorUsedError; +} + +FineTuningJobError _$FineTuningJobErrorFromJson(Map json) { + return _FineTuningJobError.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJobError { + /// A machine-readable error code. + String get code => throw _privateConstructorUsedError; + + /// A human-readable error message. + String get message => throw _privateConstructorUsedError; + + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + String? get param => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobErrorCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobErrorCopyWith<$Res> { + factory $FineTuningJobErrorCopyWith( + FineTuningJobError value, $Res Function(FineTuningJobError) then) = + _$FineTuningJobErrorCopyWithImpl<$Res, FineTuningJobError>; + @useResult + $Res call({String code, String message, String? param}); +} + +/// @nodoc +class _$FineTuningJobErrorCopyWithImpl<$Res, $Val extends FineTuningJobError> + implements $FineTuningJobErrorCopyWith<$Res> { + _$FineTuningJobErrorCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = null, + Object? message = null, + Object? param = freezed, + }) { + return _then(_value.copyWith( + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? 
_value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FineTuningJobErrorImplCopyWith<$Res> + implements $FineTuningJobErrorCopyWith<$Res> { + factory _$$FineTuningJobErrorImplCopyWith(_$FineTuningJobErrorImpl value, + $Res Function(_$FineTuningJobErrorImpl) then) = + __$$FineTuningJobErrorImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String code, String message, String? param}); +} + +/// @nodoc +class __$$FineTuningJobErrorImplCopyWithImpl<$Res> + extends _$FineTuningJobErrorCopyWithImpl<$Res, _$FineTuningJobErrorImpl> + implements _$$FineTuningJobErrorImplCopyWith<$Res> { + __$$FineTuningJobErrorImplCopyWithImpl(_$FineTuningJobErrorImpl _value, + $Res Function(_$FineTuningJobErrorImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? code = null, + Object? message = null, + Object? param = freezed, + }) { + return _then(_$FineTuningJobErrorImpl( + code: null == code + ? _value.code + : code // ignore: cast_nullable_to_non_nullable + as String, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + param: freezed == param + ? _value.param + : param // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobErrorImpl extends _FineTuningJobError { + const _$FineTuningJobErrorImpl( + {required this.code, required this.message, required this.param}) + : super._(); + + factory _$FineTuningJobErrorImpl.fromJson(Map json) => + _$$FineTuningJobErrorImplFromJson(json); + + /// A machine-readable error code. + @override + final String code; + + /// A human-readable error message. + @override + final String message; + + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + @override + final String? 
param; + + @override + String toString() { + return 'FineTuningJobError(code: $code, message: $message, param: $param)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobErrorImpl && + (identical(other.code, code) || other.code == code) && + (identical(other.message, message) || other.message == message) && + (identical(other.param, param) || other.param == param)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, code, message, param); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => + __$$FineTuningJobErrorImplCopyWithImpl<_$FineTuningJobErrorImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$FineTuningJobErrorImplToJson( + this, + ); + } +} + +abstract class _FineTuningJobError extends FineTuningJobError { + const factory _FineTuningJobError( + {required final String code, + required final String message, + required final String? param}) = _$FineTuningJobErrorImpl; + const _FineTuningJobError._() : super._(); + + factory _FineTuningJobError.fromJson(Map json) = + _$FineTuningJobErrorImpl.fromJson; + + @override + + /// A machine-readable error code. + String get code; + @override + + /// A human-readable error message. + String get message; + @override + + /// The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + String? 
get param; + @override + @JsonKey(ignore: true) + _$$FineTuningJobErrorImplCopyWith<_$FineTuningJobErrorImpl> get copyWith => + throw _privateConstructorUsedError; +} + +FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson( + Map json) { + return _FineTuningJobHyperparameters.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJobHyperparameters { + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. + @_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobHyperparametersCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobHyperparametersCopyWith<$Res> { + factory $FineTuningJobHyperparametersCopyWith( + FineTuningJobHyperparameters value, + $Res Function(FineTuningJobHyperparameters) then) = + _$FineTuningJobHyperparametersCopyWithImpl<$Res, + FineTuningJobHyperparameters>; + @useResult + $Res call( + {@_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + FineTuningNEpochs nEpochs}); + + $FineTuningNEpochsCopyWith<$Res> get nEpochs; +} + +/// @nodoc +class _$FineTuningJobHyperparametersCopyWithImpl<$Res, + $Val extends FineTuningJobHyperparameters> + implements $FineTuningJobHyperparametersCopyWith<$Res> { + _$FineTuningJobHyperparametersCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? nEpochs = null, + }) { + return _then(_value.copyWith( + nEpochs: null == nEpochs + ? 
_value.nEpochs + : nEpochs // ignore: cast_nullable_to_non_nullable + as FineTuningNEpochs, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $FineTuningNEpochsCopyWith<$Res> get nEpochs { + return $FineTuningNEpochsCopyWith<$Res>(_value.nEpochs, (value) { + return _then(_value.copyWith(nEpochs: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$FineTuningJobHyperparametersImplCopyWith<$Res> + implements $FineTuningJobHyperparametersCopyWith<$Res> { + factory _$$FineTuningJobHyperparametersImplCopyWith( + _$FineTuningJobHyperparametersImpl value, + $Res Function(_$FineTuningJobHyperparametersImpl) then) = + __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + FineTuningNEpochs nEpochs}); + + @override + $FineTuningNEpochsCopyWith<$Res> get nEpochs; +} + +/// @nodoc +class __$$FineTuningJobHyperparametersImplCopyWithImpl<$Res> + extends _$FineTuningJobHyperparametersCopyWithImpl<$Res, + _$FineTuningJobHyperparametersImpl> + implements _$$FineTuningJobHyperparametersImplCopyWith<$Res> { + __$$FineTuningJobHyperparametersImplCopyWithImpl( + _$FineTuningJobHyperparametersImpl _value, + $Res Function(_$FineTuningJobHyperparametersImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? nEpochs = null, + }) { + return _then(_$FineTuningJobHyperparametersImpl( + nEpochs: null == nEpochs + ? 
_value.nEpochs + : nEpochs // ignore: cast_nullable_to_non_nullable + as FineTuningNEpochs, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters { + const _$FineTuningJobHyperparametersImpl( + {@_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + required this.nEpochs}) + : super._(); + + factory _$FineTuningJobHyperparametersImpl.fromJson( + Map json) => + _$$FineTuningJobHyperparametersImplFromJson(json); + + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. + @override + @_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + final FineTuningNEpochs nEpochs; + + @override + String toString() { + return 'FineTuningJobHyperparameters(nEpochs: $nEpochs)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobHyperparametersImpl && + (identical(other.nEpochs, nEpochs) || other.nEpochs == nEpochs)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, nEpochs); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobHyperparametersImplCopyWith< + _$FineTuningJobHyperparametersImpl> + get copyWith => __$$FineTuningJobHyperparametersImplCopyWithImpl< + _$FineTuningJobHyperparametersImpl>(this, _$identity); + + @override + Map toJson() { + return _$$FineTuningJobHyperparametersImplToJson( + this, + ); + } +} + +abstract class _FineTuningJobHyperparameters + extends FineTuningJobHyperparameters { + const factory _FineTuningJobHyperparameters( + {@_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + required final FineTuningNEpochs nEpochs}) = + _$FineTuningJobHyperparametersImpl; + const _FineTuningJobHyperparameters._() : super._(); + + factory _FineTuningJobHyperparameters.fromJson(Map json) = + _$FineTuningJobHyperparametersImpl.fromJson; 
+ + @override + + /// The number of epochs to train the model for. An epoch refers to one + /// full cycle through the training dataset. + @_FineTuningNEpochsConverter() + @JsonKey(name: 'n_epochs') + FineTuningNEpochs get nEpochs; + @override + @JsonKey(ignore: true) + _$$FineTuningJobHyperparametersImplCopyWith< + _$FineTuningJobHyperparametersImpl> + get copyWith => throw _privateConstructorUsedError; +} + +FineTuningNEpochs _$FineTuningNEpochsFromJson(Map json) { + switch (json['runtimeType']) { + case 'enumeration': + return _UnionFineTuningNEpochsEnum.fromJson(json); + case 'integer': + return _UnionFineTuningNEpochsInteger.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'FineTuningNEpochs', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$FineTuningNEpochs { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(FineTuningNEpochsOptions value) enumeration, + required TResult Function(int value) integer, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(FineTuningNEpochsOptions value)? enumeration, + TResult? Function(int value)? integer, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(FineTuningNEpochsOptions value)? enumeration, + TResult Function(int value)? integer, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionFineTuningNEpochsEnum value) enumeration, + required TResult Function(_UnionFineTuningNEpochsInteger value) integer, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionFineTuningNEpochsEnum value)? enumeration, + TResult? Function(_UnionFineTuningNEpochsInteger value)? 
integer, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionFineTuningNEpochsEnum value)? enumeration, + TResult Function(_UnionFineTuningNEpochsInteger value)? integer, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningNEpochsCopyWith<$Res> { + factory $FineTuningNEpochsCopyWith( + FineTuningNEpochs value, $Res Function(FineTuningNEpochs) then) = + _$FineTuningNEpochsCopyWithImpl<$Res, FineTuningNEpochs>; +} + +/// @nodoc +class _$FineTuningNEpochsCopyWithImpl<$Res, $Val extends FineTuningNEpochs> + implements $FineTuningNEpochsCopyWith<$Res> { + _$FineTuningNEpochsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionFineTuningNEpochsEnumImplCopyWith<$Res> { + factory _$$UnionFineTuningNEpochsEnumImplCopyWith( + _$UnionFineTuningNEpochsEnumImpl value, + $Res Function(_$UnionFineTuningNEpochsEnumImpl) then) = + __$$UnionFineTuningNEpochsEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({FineTuningNEpochsOptions value}); +} + +/// @nodoc +class __$$UnionFineTuningNEpochsEnumImplCopyWithImpl<$Res> + extends _$FineTuningNEpochsCopyWithImpl<$Res, + _$UnionFineTuningNEpochsEnumImpl> + implements _$$UnionFineTuningNEpochsEnumImplCopyWith<$Res> { + __$$UnionFineTuningNEpochsEnumImplCopyWithImpl( + _$UnionFineTuningNEpochsEnumImpl _value, + $Res Function(_$UnionFineTuningNEpochsEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionFineTuningNEpochsEnumImpl( + null == value + ? 
_value.value + : value // ignore: cast_nullable_to_non_nullable + as FineTuningNEpochsOptions, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionFineTuningNEpochsEnumImpl extends _UnionFineTuningNEpochsEnum { + const _$UnionFineTuningNEpochsEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 'enumeration', + super._(); + + factory _$UnionFineTuningNEpochsEnumImpl.fromJson( + Map json) => + _$$UnionFineTuningNEpochsEnumImplFromJson(json); + + @override + final FineTuningNEpochsOptions value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'FineTuningNEpochs.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionFineTuningNEpochsEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionFineTuningNEpochsEnumImplCopyWith<_$UnionFineTuningNEpochsEnumImpl> + get copyWith => __$$UnionFineTuningNEpochsEnumImplCopyWithImpl< + _$UnionFineTuningNEpochsEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(FineTuningNEpochsOptions value) enumeration, + required TResult Function(int value) integer, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(FineTuningNEpochsOptions value)? enumeration, + TResult? Function(int value)? integer, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(FineTuningNEpochsOptions value)? enumeration, + TResult Function(int value)? 
integer, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionFineTuningNEpochsEnum value) enumeration, + required TResult Function(_UnionFineTuningNEpochsInteger value) integer, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionFineTuningNEpochsEnum value)? enumeration, + TResult? Function(_UnionFineTuningNEpochsInteger value)? integer, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionFineTuningNEpochsEnum value)? enumeration, + TResult Function(_UnionFineTuningNEpochsInteger value)? integer, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionFineTuningNEpochsEnumImplToJson( + this, + ); + } +} + +abstract class _UnionFineTuningNEpochsEnum extends FineTuningNEpochs { + const factory _UnionFineTuningNEpochsEnum( + final FineTuningNEpochsOptions value) = _$UnionFineTuningNEpochsEnumImpl; + const _UnionFineTuningNEpochsEnum._() : super._(); + + factory _UnionFineTuningNEpochsEnum.fromJson(Map json) = + _$UnionFineTuningNEpochsEnumImpl.fromJson; + + @override + FineTuningNEpochsOptions get value; + @JsonKey(ignore: true) + _$$UnionFineTuningNEpochsEnumImplCopyWith<_$UnionFineTuningNEpochsEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionFineTuningNEpochsIntegerImplCopyWith<$Res> { + factory _$$UnionFineTuningNEpochsIntegerImplCopyWith( + _$UnionFineTuningNEpochsIntegerImpl value, + $Res Function(_$UnionFineTuningNEpochsIntegerImpl) then) = + __$$UnionFineTuningNEpochsIntegerImplCopyWithImpl<$Res>; + @useResult + $Res call({int value}); +} + +/// @nodoc +class 
__$$UnionFineTuningNEpochsIntegerImplCopyWithImpl<$Res>
    extends _$FineTuningNEpochsCopyWithImpl<$Res,
        _$UnionFineTuningNEpochsIntegerImpl>
    implements _$$UnionFineTuningNEpochsIntegerImplCopyWith<$Res> {
  __$$UnionFineTuningNEpochsIntegerImplCopyWithImpl(
      _$UnionFineTuningNEpochsIntegerImpl _value,
      $Res Function(_$UnionFineTuningNEpochsIntegerImpl) _then)
      : super(_value, _then);

  @pragma('vm:prefer-inline')
  @override
  $Res call({
    Object? value = null,
  }) {
    return _then(_$UnionFineTuningNEpochsIntegerImpl(
      null == value
          ? _value.value
          : value // ignore: cast_nullable_to_non_nullable
              as int,
    ));
  }
}

/// @nodoc
@JsonSerializable()
class _$UnionFineTuningNEpochsIntegerImpl
    extends _UnionFineTuningNEpochsInteger {
  const _$UnionFineTuningNEpochsIntegerImpl(this.value, {final String? $type})
      : $type = $type ?? 'integer',
        super._();

  factory _$UnionFineTuningNEpochsIntegerImpl.fromJson(
          Map<String, dynamic> json) =>
      _$$UnionFineTuningNEpochsIntegerImplFromJson(json);

  @override
  final int value;

  @JsonKey(name: 'runtimeType')
  final String $type;

  @override
  String toString() {
    return 'FineTuningNEpochs.integer(value: $value)';
  }

  @override
  bool operator ==(dynamic other) {
    return identical(this, other) ||
        (other.runtimeType == runtimeType &&
            other is _$UnionFineTuningNEpochsIntegerImpl &&
            (identical(other.value, value) || other.value == value));
  }

  @JsonKey(ignore: true)
  @override
  int get hashCode => Object.hash(runtimeType, value);

  @JsonKey(ignore: true)
  @override
  @pragma('vm:prefer-inline')
  _$$UnionFineTuningNEpochsIntegerImplCopyWith<
          _$UnionFineTuningNEpochsIntegerImpl>
      get copyWith => __$$UnionFineTuningNEpochsIntegerImplCopyWithImpl<
          _$UnionFineTuningNEpochsIntegerImpl>(this, _$identity);

  @override
  @optionalTypeArgs
  TResult when<TResult extends Object?>({
    required TResult Function(FineTuningNEpochsOptions value) enumeration,
    required TResult Function(int value) integer,
  }) {
    return integer(value);
  }

  @override
  @optionalTypeArgs
  TResult? whenOrNull<TResult extends Object?>({
    TResult? Function(FineTuningNEpochsOptions value)? enumeration,
    TResult? Function(int value)? integer,
  }) {
    return integer?.call(value);
  }

  @override
  @optionalTypeArgs
  TResult maybeWhen<TResult extends Object?>({
    TResult Function(FineTuningNEpochsOptions value)? enumeration,
    TResult Function(int value)? integer,
    required TResult orElse(),
  }) {
    if (integer != null) {
      return integer(value);
    }
    return orElse();
  }

  @override
  @optionalTypeArgs
  TResult map<TResult extends Object?>({
    required TResult Function(_UnionFineTuningNEpochsEnum value) enumeration,
    required TResult Function(_UnionFineTuningNEpochsInteger value) integer,
  }) {
    return integer(this);
  }

  @override
  @optionalTypeArgs
  TResult? mapOrNull<TResult extends Object?>({
    TResult? Function(_UnionFineTuningNEpochsEnum value)? enumeration,
    TResult? Function(_UnionFineTuningNEpochsInteger value)? integer,
  }) {
    return integer?.call(this);
  }

  @override
  @optionalTypeArgs
  TResult maybeMap<TResult extends Object?>({
    TResult Function(_UnionFineTuningNEpochsEnum value)? enumeration,
    TResult Function(_UnionFineTuningNEpochsInteger value)? integer,
    required TResult orElse(),
  }) {
    if (integer != null) {
      return integer(this);
    }
    return orElse();
  }

  @override
  Map<String, dynamic> toJson() {
    return _$$UnionFineTuningNEpochsIntegerImplToJson(
      this,
    );
  }
}

abstract class _UnionFineTuningNEpochsInteger extends FineTuningNEpochs {
  const factory _UnionFineTuningNEpochsInteger(final int value) =
      _$UnionFineTuningNEpochsIntegerImpl;
  const _UnionFineTuningNEpochsInteger._() : super._();

  factory _UnionFineTuningNEpochsInteger.fromJson(Map<String, dynamic> json) =
      _$UnionFineTuningNEpochsIntegerImpl.fromJson;

  @override
  int get value;
  @JsonKey(ignore: true)
  _$$UnionFineTuningNEpochsIntegerImplCopyWith<
          _$UnionFineTuningNEpochsIntegerImpl>
      get copyWith => throw _privateConstructorUsedError;
}

ListPaginatedFineTuningJobsResponse
    _$ListPaginatedFineTuningJobsResponseFromJson(Map<String, dynamic> json) {
  return _ListPaginatedFineTuningJobsResponse.fromJson(json);
}

/// @nodoc
mixin _$ListPaginatedFineTuningJobsResponse {
  /// The list of fine-tuning jobs.
  List<FineTuningJob> get data => throw _privateConstructorUsedError;

  /// Whether there are more fine-tuning jobs to retrieve.
  @JsonKey(name: 'has_more')
  bool get hasMore => throw _privateConstructorUsedError;

  /// The object type, which is always "list".
+ String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListPaginatedFineTuningJobsResponseCopyWith< + ListPaginatedFineTuningJobsResponse> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListPaginatedFineTuningJobsResponseCopyWith<$Res> { + factory $ListPaginatedFineTuningJobsResponseCopyWith( + ListPaginatedFineTuningJobsResponse value, + $Res Function(ListPaginatedFineTuningJobsResponse) then) = + _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, + ListPaginatedFineTuningJobsResponse>; + @useResult + $Res call( + {List data, + @JsonKey(name: 'has_more') bool hasMore, + String object}); +} + +/// @nodoc +class _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, + $Val extends ListPaginatedFineTuningJobsResponse> + implements $ListPaginatedFineTuningJobsResponseCopyWith<$Res> { + _$ListPaginatedFineTuningJobsResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? hasMore = null, + Object? object = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListPaginatedFineTuningJobsResponseImplCopyWith<$Res> + implements $ListPaginatedFineTuningJobsResponseCopyWith<$Res> { + factory _$$ListPaginatedFineTuningJobsResponseImplCopyWith( + _$ListPaginatedFineTuningJobsResponseImpl value, + $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) then) = + __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {List data, + @JsonKey(name: 'has_more') bool hasMore, + String object}); +} + +/// @nodoc +class __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl<$Res> + extends _$ListPaginatedFineTuningJobsResponseCopyWithImpl<$Res, + _$ListPaginatedFineTuningJobsResponseImpl> + implements _$$ListPaginatedFineTuningJobsResponseImplCopyWith<$Res> { + __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl( + _$ListPaginatedFineTuningJobsResponseImpl _value, + $Res Function(_$ListPaginatedFineTuningJobsResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? hasMore = null, + Object? object = null, + }) { + return _then(_$ListPaginatedFineTuningJobsResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + hasMore: null == hasMore + ? _value.hasMore + : hasMore // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ListPaginatedFineTuningJobsResponseImpl + extends _ListPaginatedFineTuningJobsResponse { + const _$ListPaginatedFineTuningJobsResponseImpl( + {required final List data, + @JsonKey(name: 'has_more') required this.hasMore, + required this.object}) + : _data = data, + super._(); + + factory _$ListPaginatedFineTuningJobsResponseImpl.fromJson( + Map json) => + _$$ListPaginatedFineTuningJobsResponseImplFromJson(json); + + /// The list of fine-tuning jobs. + final List _data; + + /// The list of fine-tuning jobs. + @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + /// Whether there are more fine-tuning jobs to retrieve. + @override + @JsonKey(name: 'has_more') + final bool hasMore; + + /// The object type, which is always "list". + @override + final String object; + + @override + String toString() { + return 'ListPaginatedFineTuningJobsResponse(data: $data, hasMore: $hasMore, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListPaginatedFineTuningJobsResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.hasMore, hasMore) || other.hasMore == hasMore) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, const DeepCollectionEquality().hash(_data), hasMore, object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ListPaginatedFineTuningJobsResponseImplCopyWith< + _$ListPaginatedFineTuningJobsResponseImpl> + get copyWith => __$$ListPaginatedFineTuningJobsResponseImplCopyWithImpl< + _$ListPaginatedFineTuningJobsResponseImpl>(this, _$identity); 
+ + @override + Map toJson() { + return _$$ListPaginatedFineTuningJobsResponseImplToJson( + this, + ); + } +} + +abstract class _ListPaginatedFineTuningJobsResponse + extends ListPaginatedFineTuningJobsResponse { + const factory _ListPaginatedFineTuningJobsResponse( + {required final List data, + @JsonKey(name: 'has_more') required final bool hasMore, + required final String object}) = + _$ListPaginatedFineTuningJobsResponseImpl; + const _ListPaginatedFineTuningJobsResponse._() : super._(); + + factory _ListPaginatedFineTuningJobsResponse.fromJson( + Map json) = + _$ListPaginatedFineTuningJobsResponseImpl.fromJson; + + @override + + /// The list of fine-tuning jobs. + List get data; + @override + + /// Whether there are more fine-tuning jobs to retrieve. + @JsonKey(name: 'has_more') + bool get hasMore; + @override + + /// The object type, which is always "list". + String get object; + @override + @JsonKey(ignore: true) + _$$ListPaginatedFineTuningJobsResponseImplCopyWith< + _$ListPaginatedFineTuningJobsResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ListFineTuningJobEventsResponse _$ListFineTuningJobEventsResponseFromJson( + Map json) { + return _ListFineTuningJobEventsResponse.fromJson(json); +} + +/// @nodoc +mixin _$ListFineTuningJobEventsResponse { + /// The list of fine-tuning job events. + List get data => throw _privateConstructorUsedError; + + /// The object type, which is always "list". 
+ String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListFineTuningJobEventsResponseCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListFineTuningJobEventsResponseCopyWith<$Res> { + factory $ListFineTuningJobEventsResponseCopyWith( + ListFineTuningJobEventsResponse value, + $Res Function(ListFineTuningJobEventsResponse) then) = + _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, + ListFineTuningJobEventsResponse>; + @useResult + $Res call({List data, String object}); +} + +/// @nodoc +class _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, + $Val extends ListFineTuningJobEventsResponse> + implements $ListFineTuningJobEventsResponseCopyWith<$Res> { + _$ListFineTuningJobEventsResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? object = null, + }) { + return _then(_value.copyWith( + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListFineTuningJobEventsResponseImplCopyWith<$Res> + implements $ListFineTuningJobEventsResponseCopyWith<$Res> { + factory _$$ListFineTuningJobEventsResponseImplCopyWith( + _$ListFineTuningJobEventsResponseImpl value, + $Res Function(_$ListFineTuningJobEventsResponseImpl) then) = + __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({List data, String object}); +} + +/// @nodoc +class __$$ListFineTuningJobEventsResponseImplCopyWithImpl<$Res> + extends _$ListFineTuningJobEventsResponseCopyWithImpl<$Res, + _$ListFineTuningJobEventsResponseImpl> + implements _$$ListFineTuningJobEventsResponseImplCopyWith<$Res> { + __$$ListFineTuningJobEventsResponseImplCopyWithImpl( + _$ListFineTuningJobEventsResponseImpl _value, + $Res Function(_$ListFineTuningJobEventsResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? data = null, + Object? object = null, + }) { + return _then(_$ListFineTuningJobEventsResponseImpl( + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ListFineTuningJobEventsResponseImpl + extends _ListFineTuningJobEventsResponse { + const _$ListFineTuningJobEventsResponseImpl( + {required final List data, required this.object}) + : _data = data, + super._(); + + factory _$ListFineTuningJobEventsResponseImpl.fromJson( + Map json) => + _$$ListFineTuningJobEventsResponseImplFromJson(json); + + /// The list of fine-tuning job events. + final List _data; + + /// The list of fine-tuning job events. 
+ @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + /// The object type, which is always "list". + @override + final String object; + + @override + String toString() { + return 'ListFineTuningJobEventsResponse(data: $data, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListFineTuningJobEventsResponseImpl && + const DeepCollectionEquality().equals(other._data, _data) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, const DeepCollectionEquality().hash(_data), object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ListFineTuningJobEventsResponseImplCopyWith< + _$ListFineTuningJobEventsResponseImpl> + get copyWith => __$$ListFineTuningJobEventsResponseImplCopyWithImpl< + _$ListFineTuningJobEventsResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ListFineTuningJobEventsResponseImplToJson( + this, + ); + } +} + +abstract class _ListFineTuningJobEventsResponse + extends ListFineTuningJobEventsResponse { + const factory _ListFineTuningJobEventsResponse( + {required final List data, + required final String object}) = _$ListFineTuningJobEventsResponseImpl; + const _ListFineTuningJobEventsResponse._() : super._(); + + factory _ListFineTuningJobEventsResponse.fromJson(Map json) = + _$ListFineTuningJobEventsResponseImpl.fromJson; + + @override + + /// The list of fine-tuning job events. + List get data; + @override + + /// The object type, which is always "list". 
+ String get object; + @override + @JsonKey(ignore: true) + _$$ListFineTuningJobEventsResponseImplCopyWith< + _$ListFineTuningJobEventsResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +FineTuningJobEvent _$FineTuningJobEventFromJson(Map json) { + return _FineTuningJobEvent.fromJson(json); +} + +/// @nodoc +mixin _$FineTuningJobEvent { + /// The event identifier, which can be referenced in the API endpoints. + String get id => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) for when the event was created. + @JsonKey(name: 'created_at') + int get createdAt => throw _privateConstructorUsedError; + + /// The log level of the event. + FineTuningJobEventLevel get level => throw _privateConstructorUsedError; + + /// The message of the event. + String get message => throw _privateConstructorUsedError; + + /// The object type, which is always "fine_tuning.job.event". + String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $FineTuningJobEventCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $FineTuningJobEventCopyWith<$Res> { + factory $FineTuningJobEventCopyWith( + FineTuningJobEvent value, $Res Function(FineTuningJobEvent) then) = + _$FineTuningJobEventCopyWithImpl<$Res, FineTuningJobEvent>; + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + FineTuningJobEventLevel level, + String message, + String object}); +} + +/// @nodoc +class _$FineTuningJobEventCopyWithImpl<$Res, $Val extends FineTuningJobEvent> + implements $FineTuningJobEventCopyWith<$Res> { + _$FineTuningJobEventCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? level = null, + Object? 
message = null, + Object? object = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + level: null == level + ? _value.level + : level // ignore: cast_nullable_to_non_nullable + as FineTuningJobEventLevel, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$FineTuningJobEventImplCopyWith<$Res> + implements $FineTuningJobEventCopyWith<$Res> { + factory _$$FineTuningJobEventImplCopyWith(_$FineTuningJobEventImpl value, + $Res Function(_$FineTuningJobEventImpl) then) = + __$$FineTuningJobEventImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + @JsonKey(name: 'created_at') int createdAt, + FineTuningJobEventLevel level, + String message, + String object}); +} + +/// @nodoc +class __$$FineTuningJobEventImplCopyWithImpl<$Res> + extends _$FineTuningJobEventCopyWithImpl<$Res, _$FineTuningJobEventImpl> + implements _$$FineTuningJobEventImplCopyWith<$Res> { + __$$FineTuningJobEventImplCopyWithImpl(_$FineTuningJobEventImpl _value, + $Res Function(_$FineTuningJobEventImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? createdAt = null, + Object? level = null, + Object? message = null, + Object? object = null, + }) { + return _then(_$FineTuningJobEventImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + createdAt: null == createdAt + ? _value.createdAt + : createdAt // ignore: cast_nullable_to_non_nullable + as int, + level: null == level + ? 
_value.level + : level // ignore: cast_nullable_to_non_nullable + as FineTuningJobEventLevel, + message: null == message + ? _value.message + : message // ignore: cast_nullable_to_non_nullable + as String, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$FineTuningJobEventImpl extends _FineTuningJobEvent { + const _$FineTuningJobEventImpl( + {required this.id, + @JsonKey(name: 'created_at') required this.createdAt, + required this.level, + required this.message, + required this.object}) + : super._(); + + factory _$FineTuningJobEventImpl.fromJson(Map json) => + _$$FineTuningJobEventImplFromJson(json); + + /// The event identifier, which can be referenced in the API endpoints. + @override + final String id; + + /// The Unix timestamp (in seconds) for when the event was created. + @override + @JsonKey(name: 'created_at') + final int createdAt; + + /// The log level of the event. + @override + final FineTuningJobEventLevel level; + + /// The message of the event. + @override + final String message; + + /// The object type, which is always "fine_tuning.job.event". 
+ @override + final String object; + + @override + String toString() { + return 'FineTuningJobEvent(id: $id, createdAt: $createdAt, level: $level, message: $message, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$FineTuningJobEventImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.createdAt, createdAt) || + other.createdAt == createdAt) && + (identical(other.level, level) || other.level == level) && + (identical(other.message, message) || other.message == message) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, id, createdAt, level, message, object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => + __$$FineTuningJobEventImplCopyWithImpl<_$FineTuningJobEventImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$FineTuningJobEventImplToJson( + this, + ); + } +} + +abstract class _FineTuningJobEvent extends FineTuningJobEvent { + const factory _FineTuningJobEvent( + {required final String id, + @JsonKey(name: 'created_at') required final int createdAt, + required final FineTuningJobEventLevel level, + required final String message, + required final String object}) = _$FineTuningJobEventImpl; + const _FineTuningJobEvent._() : super._(); + + factory _FineTuningJobEvent.fromJson(Map json) = + _$FineTuningJobEventImpl.fromJson; + + @override + + /// The event identifier, which can be referenced in the API endpoints. + String get id; + @override + + /// The Unix timestamp (in seconds) for when the event was created. + @JsonKey(name: 'created_at') + int get createdAt; + @override + + /// The log level of the event. + FineTuningJobEventLevel get level; + @override + + /// The message of the event. 
+ String get message; + @override + + /// The object type, which is always "fine_tuning.job.event". + String get object; + @override + @JsonKey(ignore: true) + _$$FineTuningJobEventImplCopyWith<_$FineTuningJobEventImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateImageRequest _$CreateImageRequestFromJson(Map json) { + return _CreateImageRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateImageRequest { + /// A text description of the desired image(s). The maximum length is 1000 characters. + String get prompt => throw _privateConstructorUsedError; + + /// The number of images to generate. Must be between 1 and 10. + @JsonKey(includeIfNull: false) + int? get n => throw _privateConstructorUsedError; + + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageResponseFormat? get responseFormat => throw _privateConstructorUsedError; + + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageSize? get size => throw _privateConstructorUsedError; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? 
get user => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateImageRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateImageRequestCopyWith<$Res> { + factory $CreateImageRequestCopyWith( + CreateImageRequest value, $Res Function(CreateImageRequest) then) = + _$CreateImageRequestCopyWithImpl<$Res, CreateImageRequest>; + @useResult + $Res call( + {String prompt, + @JsonKey(includeIfNull: false) int? n, + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageResponseFormat? responseFormat, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageSize? size, + @JsonKey(includeIfNull: false) String? user}); +} + +/// @nodoc +class _$CreateImageRequestCopyWithImpl<$Res, $Val extends CreateImageRequest> + implements $CreateImageRequestCopyWith<$Res> { + _$CreateImageRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? prompt = null, + Object? n = freezed, + Object? responseFormat = freezed, + Object? size = freezed, + Object? user = freezed, + }) { + return _then(_value.copyWith( + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + n: freezed == n + ? _value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as ImageResponseFormat?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as ImageSize?, + user: freezed == user + ? 
_value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateImageRequestImplCopyWith<$Res> + implements $CreateImageRequestCopyWith<$Res> { + factory _$$CreateImageRequestImplCopyWith(_$CreateImageRequestImpl value, + $Res Function(_$CreateImageRequestImpl) then) = + __$$CreateImageRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String prompt, + @JsonKey(includeIfNull: false) int? n, + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageResponseFormat? responseFormat, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageSize? size, + @JsonKey(includeIfNull: false) String? user}); +} + +/// @nodoc +class __$$CreateImageRequestImplCopyWithImpl<$Res> + extends _$CreateImageRequestCopyWithImpl<$Res, _$CreateImageRequestImpl> + implements _$$CreateImageRequestImplCopyWith<$Res> { + __$$CreateImageRequestImplCopyWithImpl(_$CreateImageRequestImpl _value, + $Res Function(_$CreateImageRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? prompt = null, + Object? n = freezed, + Object? responseFormat = freezed, + Object? size = freezed, + Object? user = freezed, + }) { + return _then(_$CreateImageRequestImpl( + prompt: null == prompt + ? _value.prompt + : prompt // ignore: cast_nullable_to_non_nullable + as String, + n: freezed == n + ? _value.n + : n // ignore: cast_nullable_to_non_nullable + as int?, + responseFormat: freezed == responseFormat + ? _value.responseFormat + : responseFormat // ignore: cast_nullable_to_non_nullable + as ImageResponseFormat?, + size: freezed == size + ? _value.size + : size // ignore: cast_nullable_to_non_nullable + as ImageSize?, + user: freezed == user + ? 
_value.user + : user // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateImageRequestImpl extends _CreateImageRequest { + const _$CreateImageRequestImpl( + {required this.prompt, + @JsonKey(includeIfNull: false) this.n = 1, + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.responseFormat = ImageResponseFormat.url, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + this.size = ImageSize.v1024x1024, + @JsonKey(includeIfNull: false) this.user}) + : super._(); + + factory _$CreateImageRequestImpl.fromJson(Map json) => + _$$CreateImageRequestImplFromJson(json); + + /// A text description of the desired image(s). The maximum length is 1000 characters. + @override + final String prompt; + + /// The number of images to generate. Must be between 1 and 10. + @override + @JsonKey(includeIfNull: false) + final int? n; + + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + @override + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ImageResponseFormat? responseFormat; + + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + @override + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ImageSize? size; + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @override + @JsonKey(includeIfNull: false) + final String? 
user; + + @override + String toString() { + return 'CreateImageRequest(prompt: $prompt, n: $n, responseFormat: $responseFormat, size: $size, user: $user)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateImageRequestImpl && + (identical(other.prompt, prompt) || other.prompt == prompt) && + (identical(other.n, n) || other.n == n) && + (identical(other.responseFormat, responseFormat) || + other.responseFormat == responseFormat) && + (identical(other.size, size) || other.size == size) && + (identical(other.user, user) || other.user == user)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, prompt, n, responseFormat, size, user); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => + __$$CreateImageRequestImplCopyWithImpl<_$CreateImageRequestImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$CreateImageRequestImplToJson( + this, + ); + } +} + +abstract class _CreateImageRequest extends CreateImageRequest { + const factory _CreateImageRequest( + {required final String prompt, + @JsonKey(includeIfNull: false) final int? n, + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ImageResponseFormat? responseFormat, + @JsonKey( + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + final ImageSize? size, + @JsonKey(includeIfNull: false) final String? user}) = + _$CreateImageRequestImpl; + const _CreateImageRequest._() : super._(); + + factory _CreateImageRequest.fromJson(Map json) = + _$CreateImageRequestImpl.fromJson; + + @override + + /// A text description of the desired image(s). The maximum length is 1000 characters. + String get prompt; + @override + + /// The number of images to generate. Must be between 1 and 10. 
+ @JsonKey(includeIfNull: false) + int? get n; + @override + + /// The format in which the generated images are returned. Must be one of `url` or `b64_json`. + @JsonKey( + name: 'response_format', + includeIfNull: false, + unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageResponseFormat? get responseFormat; + @override + + /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + @JsonKey( + includeIfNull: false, unknownEnumValue: JsonKey.nullForUndefinedEnumValue) + ImageSize? get size; + @override + + /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + @JsonKey(includeIfNull: false) + String? get user; + @override + @JsonKey(ignore: true) + _$$CreateImageRequestImplCopyWith<_$CreateImageRequestImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ImagesResponse _$ImagesResponseFromJson(Map json) { + return _ImagesResponse.fromJson(json); +} + +/// @nodoc +mixin _$ImagesResponse { + /// The Unix timestamp (in seconds) when the image was created. + int get created => throw _privateConstructorUsedError; + + /// The list of images generated by the model. 
+ List get data => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ImagesResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ImagesResponseCopyWith<$Res> { + factory $ImagesResponseCopyWith( + ImagesResponse value, $Res Function(ImagesResponse) then) = + _$ImagesResponseCopyWithImpl<$Res, ImagesResponse>; + @useResult + $Res call({int created, List data}); +} + +/// @nodoc +class _$ImagesResponseCopyWithImpl<$Res, $Val extends ImagesResponse> + implements $ImagesResponseCopyWith<$Res> { + _$ImagesResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? created = null, + Object? data = null, + }) { + return _then(_value.copyWith( + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + data: null == data + ? _value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ImagesResponseImplCopyWith<$Res> + implements $ImagesResponseCopyWith<$Res> { + factory _$$ImagesResponseImplCopyWith(_$ImagesResponseImpl value, + $Res Function(_$ImagesResponseImpl) then) = + __$$ImagesResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({int created, List data}); +} + +/// @nodoc +class __$$ImagesResponseImplCopyWithImpl<$Res> + extends _$ImagesResponseCopyWithImpl<$Res, _$ImagesResponseImpl> + implements _$$ImagesResponseImplCopyWith<$Res> { + __$$ImagesResponseImplCopyWithImpl( + _$ImagesResponseImpl _value, $Res Function(_$ImagesResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? created = null, + Object? data = null, + }) { + return _then(_$ImagesResponseImpl( + created: null == created + ? 
_value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImagesResponseImpl extends _ImagesResponse { + const _$ImagesResponseImpl( + {required this.created, required final List data}) + : _data = data, + super._(); + + factory _$ImagesResponseImpl.fromJson(Map json) => + _$$ImagesResponseImplFromJson(json); + + /// The Unix timestamp (in seconds) when the image was created. + @override + final int created; + + /// The list of images generated by the model. + final List _data; + + /// The list of images generated by the model. + @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + @override + String toString() { + return 'ImagesResponse(created: $created, data: $data)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImagesResponseImpl && + (identical(other.created, created) || other.created == created) && + const DeepCollectionEquality().equals(other._data, _data)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, created, const DeepCollectionEquality().hash(_data)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => + __$$ImagesResponseImplCopyWithImpl<_$ImagesResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ImagesResponseImplToJson( + this, + ); + } +} + +abstract class _ImagesResponse extends ImagesResponse { + const factory _ImagesResponse( + {required final int created, + required final List data}) = _$ImagesResponseImpl; + const _ImagesResponse._() : super._(); + + factory _ImagesResponse.fromJson(Map json) = + 
_$ImagesResponseImpl.fromJson; + + @override + + /// The Unix timestamp (in seconds) when the image was created. + int get created; + @override + + /// The list of images generated by the model. + List get data; + @override + @JsonKey(ignore: true) + _$$ImagesResponseImplCopyWith<_$ImagesResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Image _$ImageFromJson(Map json) { + return _Image.fromJson(json); +} + +/// @nodoc +mixin _$Image { + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @JsonKey(name: 'b64_json', includeIfNull: false) + String? get b64Json => throw _privateConstructorUsedError; + + /// The URL of the generated image, if `response_format` is `url` (default). + @JsonKey(includeIfNull: false) + String? get url => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ImageCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ImageCopyWith<$Res> { + factory $ImageCopyWith(Image value, $Res Function(Image) then) = + _$ImageCopyWithImpl<$Res, Image>; + @useResult + $Res call( + {@JsonKey(name: 'b64_json', includeIfNull: false) String? b64Json, + @JsonKey(includeIfNull: false) String? url}); +} + +/// @nodoc +class _$ImageCopyWithImpl<$Res, $Val extends Image> + implements $ImageCopyWith<$Res> { + _$ImageCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? b64Json = freezed, + Object? url = freezed, + }) { + return _then(_value.copyWith( + b64Json: freezed == b64Json + ? _value.b64Json + : b64Json // ignore: cast_nullable_to_non_nullable + as String?, + url: freezed == url + ? 
_value.url + : url // ignore: cast_nullable_to_non_nullable + as String?, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ImageImplCopyWith<$Res> implements $ImageCopyWith<$Res> { + factory _$$ImageImplCopyWith( + _$ImageImpl value, $Res Function(_$ImageImpl) then) = + __$$ImageImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@JsonKey(name: 'b64_json', includeIfNull: false) String? b64Json, + @JsonKey(includeIfNull: false) String? url}); +} + +/// @nodoc +class __$$ImageImplCopyWithImpl<$Res> + extends _$ImageCopyWithImpl<$Res, _$ImageImpl> + implements _$$ImageImplCopyWith<$Res> { + __$$ImageImplCopyWithImpl( + _$ImageImpl _value, $Res Function(_$ImageImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? b64Json = freezed, + Object? url = freezed, + }) { + return _then(_$ImageImpl( + b64Json: freezed == b64Json + ? _value.b64Json + : b64Json // ignore: cast_nullable_to_non_nullable + as String?, + url: freezed == url + ? _value.url + : url // ignore: cast_nullable_to_non_nullable + as String?, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ImageImpl extends _Image { + const _$ImageImpl( + {@JsonKey(name: 'b64_json', includeIfNull: false) this.b64Json, + @JsonKey(includeIfNull: false) this.url}) + : super._(); + + factory _$ImageImpl.fromJson(Map json) => + _$$ImageImplFromJson(json); + + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @override + @JsonKey(name: 'b64_json', includeIfNull: false) + final String? b64Json; + + /// The URL of the generated image, if `response_format` is `url` (default). + @override + @JsonKey(includeIfNull: false) + final String? 
url; + + @override + String toString() { + return 'Image(b64Json: $b64Json, url: $url)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ImageImpl && + (identical(other.b64Json, b64Json) || other.b64Json == b64Json) && + (identical(other.url, url) || other.url == url)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, b64Json, url); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ImageImplCopyWith<_$ImageImpl> get copyWith => + __$$ImageImplCopyWithImpl<_$ImageImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ImageImplToJson( + this, + ); + } +} + +abstract class _Image extends Image { + const factory _Image( + {@JsonKey(name: 'b64_json', includeIfNull: false) final String? b64Json, + @JsonKey(includeIfNull: false) final String? url}) = _$ImageImpl; + const _Image._() : super._(); + + factory _Image.fromJson(Map json) = _$ImageImpl.fromJson; + + @override + + /// The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + @JsonKey(name: 'b64_json', includeIfNull: false) + String? get b64Json; + @override + + /// The URL of the generated image, if `response_format` is `url` (default). + @JsonKey(includeIfNull: false) + String? get url; + @override + @JsonKey(ignore: true) + _$$ImageImplCopyWith<_$ImageImpl> get copyWith => + throw _privateConstructorUsedError; +} + +Model _$ModelFromJson(Map json) { + return _Model.fromJson(json); +} + +/// @nodoc +mixin _$Model { + /// The model identifier, which can be referenced in the API endpoints. + String get id => throw _privateConstructorUsedError; + + /// The Unix timestamp (in seconds) when the model was created. + int get created => throw _privateConstructorUsedError; + + /// The object type, which is always "model". + String get object => throw _privateConstructorUsedError; + + /// The organization that owns the model. 
+ @JsonKey(name: 'owned_by') + String get ownedBy => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModelCopyWith get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModelCopyWith<$Res> { + factory $ModelCopyWith(Model value, $Res Function(Model) then) = + _$ModelCopyWithImpl<$Res, Model>; + @useResult + $Res call( + {String id, + int created, + String object, + @JsonKey(name: 'owned_by') String ownedBy}); +} + +/// @nodoc +class _$ModelCopyWithImpl<$Res, $Val extends Model> + implements $ModelCopyWith<$Res> { + _$ModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? created = null, + Object? object = null, + Object? ownedBy = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ownedBy: null == ownedBy + ? 
_value.ownedBy + : ownedBy // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModelImplCopyWith<$Res> implements $ModelCopyWith<$Res> { + factory _$$ModelImplCopyWith( + _$ModelImpl value, $Res Function(_$ModelImpl) then) = + __$$ModelImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {String id, + int created, + String object, + @JsonKey(name: 'owned_by') String ownedBy}); +} + +/// @nodoc +class __$$ModelImplCopyWithImpl<$Res> + extends _$ModelCopyWithImpl<$Res, _$ModelImpl> + implements _$$ModelImplCopyWith<$Res> { + __$$ModelImplCopyWithImpl( + _$ModelImpl _value, $Res Function(_$ModelImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? created = null, + Object? object = null, + Object? ownedBy = null, + }) { + return _then(_$ModelImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + created: null == created + ? _value.created + : created // ignore: cast_nullable_to_non_nullable + as int, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ownedBy: null == ownedBy + ? _value.ownedBy + : ownedBy // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModelImpl extends _Model { + const _$ModelImpl( + {required this.id, + required this.created, + required this.object, + @JsonKey(name: 'owned_by') required this.ownedBy}) + : super._(); + + factory _$ModelImpl.fromJson(Map json) => + _$$ModelImplFromJson(json); + + /// The model identifier, which can be referenced in the API endpoints. + @override + final String id; + + /// The Unix timestamp (in seconds) when the model was created. + @override + final int created; + + /// The object type, which is always "model". + @override + final String object; + + /// The organization that owns the model. 
+ @override + @JsonKey(name: 'owned_by') + final String ownedBy; + + @override + String toString() { + return 'Model(id: $id, created: $created, object: $object, ownedBy: $ownedBy)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModelImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.created, created) || other.created == created) && + (identical(other.object, object) || other.object == object) && + (identical(other.ownedBy, ownedBy) || other.ownedBy == ownedBy)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, created, object, ownedBy); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + __$$ModelImplCopyWithImpl<_$ModelImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModelImplToJson( + this, + ); + } +} + +abstract class _Model extends Model { + const factory _Model( + {required final String id, + required final int created, + required final String object, + @JsonKey(name: 'owned_by') required final String ownedBy}) = _$ModelImpl; + const _Model._() : super._(); + + factory _Model.fromJson(Map json) = _$ModelImpl.fromJson; + + @override + + /// The model identifier, which can be referenced in the API endpoints. + String get id; + @override + + /// The Unix timestamp (in seconds) when the model was created. + int get created; + @override + + /// The object type, which is always "model". + String get object; + @override + + /// The organization that owns the model. 
+ @JsonKey(name: 'owned_by') + String get ownedBy; + @override + @JsonKey(ignore: true) + _$$ModelImplCopyWith<_$ModelImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ListModelsResponse _$ListModelsResponseFromJson(Map json) { + return _ListModelsResponse.fromJson(json); +} + +/// @nodoc +mixin _$ListModelsResponse { + /// The object type, which is always "list". + String get object => throw _privateConstructorUsedError; + + /// The list of models. + List get data => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ListModelsResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ListModelsResponseCopyWith<$Res> { + factory $ListModelsResponseCopyWith( + ListModelsResponse value, $Res Function(ListModelsResponse) then) = + _$ListModelsResponseCopyWithImpl<$Res, ListModelsResponse>; + @useResult + $Res call({String object, List data}); +} + +/// @nodoc +class _$ListModelsResponseCopyWithImpl<$Res, $Val extends ListModelsResponse> + implements $ListModelsResponseCopyWith<$Res> { + _$ListModelsResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? object = null, + Object? data = null, + }) { + return _then(_value.copyWith( + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + data: null == data + ? 
_value.data + : data // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ListModelsResponseImplCopyWith<$Res> + implements $ListModelsResponseCopyWith<$Res> { + factory _$$ListModelsResponseImplCopyWith(_$ListModelsResponseImpl value, + $Res Function(_$ListModelsResponseImpl) then) = + __$$ListModelsResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String object, List data}); +} + +/// @nodoc +class __$$ListModelsResponseImplCopyWithImpl<$Res> + extends _$ListModelsResponseCopyWithImpl<$Res, _$ListModelsResponseImpl> + implements _$$ListModelsResponseImplCopyWith<$Res> { + __$$ListModelsResponseImplCopyWithImpl(_$ListModelsResponseImpl _value, + $Res Function(_$ListModelsResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? object = null, + Object? data = null, + }) { + return _then(_$ListModelsResponseImpl( + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + data: null == data + ? _value._data + : data // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ListModelsResponseImpl extends _ListModelsResponse { + const _$ListModelsResponseImpl( + {required this.object, required final List data}) + : _data = data, + super._(); + + factory _$ListModelsResponseImpl.fromJson(Map json) => + _$$ListModelsResponseImplFromJson(json); + + /// The object type, which is always "list". + @override + final String object; + + /// The list of models. + final List _data; + + /// The list of models. 
+ @override + List get data { + if (_data is EqualUnmodifiableListView) return _data; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_data); + } + + @override + String toString() { + return 'ListModelsResponse(object: $object, data: $data)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ListModelsResponseImpl && + (identical(other.object, object) || other.object == object) && + const DeepCollectionEquality().equals(other._data, _data)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, object, const DeepCollectionEquality().hash(_data)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => + __$$ListModelsResponseImplCopyWithImpl<_$ListModelsResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ListModelsResponseImplToJson( + this, + ); + } +} + +abstract class _ListModelsResponse extends ListModelsResponse { + const factory _ListModelsResponse( + {required final String object, + required final List data}) = _$ListModelsResponseImpl; + const _ListModelsResponse._() : super._(); + + factory _ListModelsResponse.fromJson(Map json) = + _$ListModelsResponseImpl.fromJson; + + @override + + /// The object type, which is always "list". + String get object; + @override + + /// The list of models. + List get data; + @override + @JsonKey(ignore: true) + _$$ListModelsResponseImplCopyWith<_$ListModelsResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +DeleteModelResponse _$DeleteModelResponseFromJson(Map json) { + return _DeleteModelResponse.fromJson(json); +} + +/// @nodoc +mixin _$DeleteModelResponse { + /// The model identifier. + String get id => throw _privateConstructorUsedError; + + /// Whether the model was deleted. 
+ bool get deleted => throw _privateConstructorUsedError; + + /// The object type, which is always "model". + String get object => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $DeleteModelResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $DeleteModelResponseCopyWith<$Res> { + factory $DeleteModelResponseCopyWith( + DeleteModelResponse value, $Res Function(DeleteModelResponse) then) = + _$DeleteModelResponseCopyWithImpl<$Res, DeleteModelResponse>; + @useResult + $Res call({String id, bool deleted, String object}); +} + +/// @nodoc +class _$DeleteModelResponseCopyWithImpl<$Res, $Val extends DeleteModelResponse> + implements $DeleteModelResponseCopyWith<$Res> { + _$DeleteModelResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? deleted = null, + Object? object = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? 
_value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$DeleteModelResponseImplCopyWith<$Res> + implements $DeleteModelResponseCopyWith<$Res> { + factory _$$DeleteModelResponseImplCopyWith(_$DeleteModelResponseImpl value, + $Res Function(_$DeleteModelResponseImpl) then) = + __$$DeleteModelResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String id, bool deleted, String object}); +} + +/// @nodoc +class __$$DeleteModelResponseImplCopyWithImpl<$Res> + extends _$DeleteModelResponseCopyWithImpl<$Res, _$DeleteModelResponseImpl> + implements _$$DeleteModelResponseImplCopyWith<$Res> { + __$$DeleteModelResponseImplCopyWithImpl(_$DeleteModelResponseImpl _value, + $Res Function(_$DeleteModelResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? deleted = null, + Object? object = null, + }) { + return _then(_$DeleteModelResponseImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + deleted: null == deleted + ? _value.deleted + : deleted // ignore: cast_nullable_to_non_nullable + as bool, + object: null == object + ? _value.object + : object // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$DeleteModelResponseImpl extends _DeleteModelResponse { + const _$DeleteModelResponseImpl( + {required this.id, required this.deleted, required this.object}) + : super._(); + + factory _$DeleteModelResponseImpl.fromJson(Map json) => + _$$DeleteModelResponseImplFromJson(json); + + /// The model identifier. + @override + final String id; + + /// Whether the model was deleted. + @override + final bool deleted; + + /// The object type, which is always "model". 
+ @override + final String object; + + @override + String toString() { + return 'DeleteModelResponse(id: $id, deleted: $deleted, object: $object)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$DeleteModelResponseImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.deleted, deleted) || other.deleted == deleted) && + (identical(other.object, object) || other.object == object)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, id, deleted, object); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => + __$$DeleteModelResponseImplCopyWithImpl<_$DeleteModelResponseImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$DeleteModelResponseImplToJson( + this, + ); + } +} + +abstract class _DeleteModelResponse extends DeleteModelResponse { + const factory _DeleteModelResponse( + {required final String id, + required final bool deleted, + required final String object}) = _$DeleteModelResponseImpl; + const _DeleteModelResponse._() : super._(); + + factory _DeleteModelResponse.fromJson(Map json) = + _$DeleteModelResponseImpl.fromJson; + + @override + + /// The model identifier. + String get id; + @override + + /// Whether the model was deleted. + bool get deleted; + @override + + /// The object type, which is always "model". + String get object; + @override + @JsonKey(ignore: true) + _$$DeleteModelResponseImplCopyWith<_$DeleteModelResponseImpl> get copyWith => + throw _privateConstructorUsedError; +} + +CreateModerationRequest _$CreateModerationRequestFromJson( + Map json) { + return _CreateModerationRequest.fromJson(json); +} + +/// @nodoc +mixin _$CreateModerationRequest { + /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. 
+ /// + /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @_ModerationModelConverter() + ModerationModel get model => throw _privateConstructorUsedError; + + /// The input text to classify + @_ModerationInputConverter() + ModerationInput get input => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateModerationRequestCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateModerationRequestCopyWith<$Res> { + factory $CreateModerationRequestCopyWith(CreateModerationRequest value, + $Res Function(CreateModerationRequest) then) = + _$CreateModerationRequestCopyWithImpl<$Res, CreateModerationRequest>; + @useResult + $Res call( + {@_ModerationModelConverter() ModerationModel model, + @_ModerationInputConverter() ModerationInput input}); + + $ModerationModelCopyWith<$Res> get model; + $ModerationInputCopyWith<$Res> get input; +} + +/// @nodoc +class _$CreateModerationRequestCopyWithImpl<$Res, + $Val extends CreateModerationRequest> + implements $CreateModerationRequestCopyWith<$Res> { + _$CreateModerationRequestCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? input = null, + }) { + return _then(_value.copyWith( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as ModerationModel, + input: null == input + ? 
_value.input + : input // ignore: cast_nullable_to_non_nullable + as ModerationInput, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModerationModelCopyWith<$Res> get model { + return $ModerationModelCopyWith<$Res>(_value.model, (value) { + return _then(_value.copyWith(model: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $ModerationInputCopyWith<$Res> get input { + return $ModerationInputCopyWith<$Res>(_value.input, (value) { + return _then(_value.copyWith(input: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$CreateModerationRequestImplCopyWith<$Res> + implements $CreateModerationRequestCopyWith<$Res> { + factory _$$CreateModerationRequestImplCopyWith( + _$CreateModerationRequestImpl value, + $Res Function(_$CreateModerationRequestImpl) then) = + __$$CreateModerationRequestImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {@_ModerationModelConverter() ModerationModel model, + @_ModerationInputConverter() ModerationInput input}); + + @override + $ModerationModelCopyWith<$Res> get model; + @override + $ModerationInputCopyWith<$Res> get input; +} + +/// @nodoc +class __$$CreateModerationRequestImplCopyWithImpl<$Res> + extends _$CreateModerationRequestCopyWithImpl<$Res, + _$CreateModerationRequestImpl> + implements _$$CreateModerationRequestImplCopyWith<$Res> { + __$$CreateModerationRequestImplCopyWithImpl( + _$CreateModerationRequestImpl _value, + $Res Function(_$CreateModerationRequestImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? model = null, + Object? input = null, + }) { + return _then(_$CreateModerationRequestImpl( + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as ModerationModel, + input: null == input + ? 
_value.input + : input // ignore: cast_nullable_to_non_nullable + as ModerationInput, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateModerationRequestImpl extends _CreateModerationRequest { + const _$CreateModerationRequestImpl( + {@_ModerationModelConverter() + this.model = const ModerationModel.string('text-moderation-latest'), + @_ModerationInputConverter() required this.input}) + : super._(); + + factory _$CreateModerationRequestImpl.fromJson(Map json) => + _$$CreateModerationRequestImplFromJson(json); + + /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + /// + /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @override + @JsonKey() + @_ModerationModelConverter() + final ModerationModel model; + + /// The input text to classify + @override + @_ModerationInputConverter() + final ModerationInput input; + + @override + String toString() { + return 'CreateModerationRequest(model: $model, input: $input)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateModerationRequestImpl && + (identical(other.model, model) || other.model == model) && + (identical(other.input, input) || other.input == input)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, model, input); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> + get copyWith => __$$CreateModerationRequestImplCopyWithImpl< + _$CreateModerationRequestImpl>(this, _$identity); + + @override + Map toJson() { + return 
_$$CreateModerationRequestImplToJson( + this, + ); + } +} + +abstract class _CreateModerationRequest extends CreateModerationRequest { + const factory _CreateModerationRequest( + {@_ModerationModelConverter() final ModerationModel model, + @_ModerationInputConverter() required final ModerationInput input}) = + _$CreateModerationRequestImpl; + const _CreateModerationRequest._() : super._(); + + factory _CreateModerationRequest.fromJson(Map json) = + _$CreateModerationRequestImpl.fromJson; + + @override + + /// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + /// + /// The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + @_ModerationModelConverter() + ModerationModel get model; + @override + + /// The input text to classify + @_ModerationInputConverter() + ModerationInput get input; + @override + @JsonKey(ignore: true) + _$$CreateModerationRequestImplCopyWith<_$CreateModerationRequestImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ModerationModel _$ModerationModelFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionModerationModelString.fromJson(json); + case 'enumeration': + return _UnionModerationModelEnum.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'ModerationModel', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ModerationModel { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ModerationModels value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + 
TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(ModerationModels value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ModerationModels value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationModelString value) string, + required TResult Function(_UnionModerationModelEnum value) enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationModelString value)? string, + TResult? Function(_UnionModerationModelEnum value)? enumeration, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationModelString value)? string, + TResult Function(_UnionModerationModelEnum value)? enumeration, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModerationModelCopyWith<$Res> { + factory $ModerationModelCopyWith( + ModerationModel value, $Res Function(ModerationModel) then) = + _$ModerationModelCopyWithImpl<$Res, ModerationModel>; +} + +/// @nodoc +class _$ModerationModelCopyWithImpl<$Res, $Val extends ModerationModel> + implements $ModerationModelCopyWith<$Res> { + _$ModerationModelCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionModerationModelStringImplCopyWith<$Res> { + factory _$$UnionModerationModelStringImplCopyWith( + _$UnionModerationModelStringImpl value, + $Res Function(_$UnionModerationModelStringImpl) then) = + __$$UnionModerationModelStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + 
+/// @nodoc +class __$$UnionModerationModelStringImplCopyWithImpl<$Res> + extends _$ModerationModelCopyWithImpl<$Res, + _$UnionModerationModelStringImpl> + implements _$$UnionModerationModelStringImplCopyWith<$Res> { + __$$UnionModerationModelStringImplCopyWithImpl( + _$UnionModerationModelStringImpl _value, + $Res Function(_$UnionModerationModelStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionModerationModelStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionModerationModelStringImpl extends _UnionModerationModelString { + const _$UnionModerationModelStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionModerationModelStringImpl.fromJson( + Map json) => + _$$UnionModerationModelStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ModerationModel.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionModerationModelStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionModerationModelStringImplCopyWith<_$UnionModerationModelStringImpl> + get copyWith => __$$UnionModerationModelStringImplCopyWithImpl< + _$UnionModerationModelStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ModerationModels value) enumeration, + }) { + return string(value); + } + + @override + 
@optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(ModerationModels value)? enumeration, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ModerationModels value)? enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationModelString value) string, + required TResult Function(_UnionModerationModelEnum value) enumeration, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationModelString value)? string, + TResult? Function(_UnionModerationModelEnum value)? enumeration, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationModelString value)? string, + TResult Function(_UnionModerationModelEnum value)? 
enumeration, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionModerationModelStringImplToJson( + this, + ); + } +} + +abstract class _UnionModerationModelString extends ModerationModel { + const factory _UnionModerationModelString(final String value) = + _$UnionModerationModelStringImpl; + const _UnionModerationModelString._() : super._(); + + factory _UnionModerationModelString.fromJson(Map json) = + _$UnionModerationModelStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionModerationModelStringImplCopyWith<_$UnionModerationModelStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionModerationModelEnumImplCopyWith<$Res> { + factory _$$UnionModerationModelEnumImplCopyWith( + _$UnionModerationModelEnumImpl value, + $Res Function(_$UnionModerationModelEnumImpl) then) = + __$$UnionModerationModelEnumImplCopyWithImpl<$Res>; + @useResult + $Res call({ModerationModels value}); +} + +/// @nodoc +class __$$UnionModerationModelEnumImplCopyWithImpl<$Res> + extends _$ModerationModelCopyWithImpl<$Res, _$UnionModerationModelEnumImpl> + implements _$$UnionModerationModelEnumImplCopyWith<$Res> { + __$$UnionModerationModelEnumImplCopyWithImpl( + _$UnionModerationModelEnumImpl _value, + $Res Function(_$UnionModerationModelEnumImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionModerationModelEnumImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as ModerationModels, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionModerationModelEnumImpl extends _UnionModerationModelEnum { + const _$UnionModerationModelEnumImpl(this.value, {final String? $type}) + : $type = $type ?? 
'enumeration', + super._(); + + factory _$UnionModerationModelEnumImpl.fromJson(Map json) => + _$$UnionModerationModelEnumImplFromJson(json); + + @override + final ModerationModels value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ModerationModel.enumeration(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionModerationModelEnumImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionModerationModelEnumImplCopyWith<_$UnionModerationModelEnumImpl> + get copyWith => __$$UnionModerationModelEnumImplCopyWithImpl< + _$UnionModerationModelEnumImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(ModerationModels value) enumeration, + }) { + return enumeration(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(ModerationModels value)? enumeration, + }) { + return enumeration?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(ModerationModels value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationModelString value) string, + required TResult Function(_UnionModerationModelEnum value) enumeration, + }) { + return enumeration(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationModelString value)? string, + TResult? 
Function(_UnionModerationModelEnum value)? enumeration, + }) { + return enumeration?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationModelString value)? string, + TResult Function(_UnionModerationModelEnum value)? enumeration, + required TResult orElse(), + }) { + if (enumeration != null) { + return enumeration(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionModerationModelEnumImplToJson( + this, + ); + } +} + +abstract class _UnionModerationModelEnum extends ModerationModel { + const factory _UnionModerationModelEnum(final ModerationModels value) = + _$UnionModerationModelEnumImpl; + const _UnionModerationModelEnum._() : super._(); + + factory _UnionModerationModelEnum.fromJson(Map json) = + _$UnionModerationModelEnumImpl.fromJson; + + @override + ModerationModels get value; + @JsonKey(ignore: true) + _$$UnionModerationModelEnumImplCopyWith<_$UnionModerationModelEnumImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ModerationInput _$ModerationInputFromJson(Map json) { + switch (json['runtimeType']) { + case 'string': + return _UnionModerationInputString.fromJson(json); + case 'arrayString': + return _UnionModerationInputArrayString.fromJson(json); + + default: + throw CheckedFromJsonException(json, 'runtimeType', 'ModerationInput', + 'Invalid union type "${json['runtimeType']}"!'); + } +} + +/// @nodoc +mixin _$ModerationInput { + Object get value => throw _privateConstructorUsedError; + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? 
arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationInputString value) string, + required TResult Function(_UnionModerationInputArrayString value) + arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationInputString value)? string, + TResult? Function(_UnionModerationInputArrayString value)? arrayString, + }) => + throw _privateConstructorUsedError; + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationInputString value)? string, + TResult Function(_UnionModerationInputArrayString value)? arrayString, + required TResult orElse(), + }) => + throw _privateConstructorUsedError; + Map toJson() => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModerationInputCopyWith<$Res> { + factory $ModerationInputCopyWith( + ModerationInput value, $Res Function(ModerationInput) then) = + _$ModerationInputCopyWithImpl<$Res, ModerationInput>; +} + +/// @nodoc +class _$ModerationInputCopyWithImpl<$Res, $Val extends ModerationInput> + implements $ModerationInputCopyWith<$Res> { + _$ModerationInputCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; +} + +/// @nodoc +abstract class _$$UnionModerationInputStringImplCopyWith<$Res> { + factory _$$UnionModerationInputStringImplCopyWith( + _$UnionModerationInputStringImpl value, + $Res Function(_$UnionModerationInputStringImpl) then) = + __$$UnionModerationInputStringImplCopyWithImpl<$Res>; + @useResult + $Res call({String value}); +} + +/// @nodoc +class __$$UnionModerationInputStringImplCopyWithImpl<$Res> + extends _$ModerationInputCopyWithImpl<$Res, + _$UnionModerationInputStringImpl> + implements _$$UnionModerationInputStringImplCopyWith<$Res> { + __$$UnionModerationInputStringImplCopyWithImpl( + 
_$UnionModerationInputStringImpl _value, + $Res Function(_$UnionModerationInputStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionModerationInputStringImpl( + null == value + ? _value.value + : value // ignore: cast_nullable_to_non_nullable + as String, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionModerationInputStringImpl extends _UnionModerationInputString { + const _$UnionModerationInputStringImpl(this.value, {final String? $type}) + : $type = $type ?? 'string', + super._(); + + factory _$UnionModerationInputStringImpl.fromJson( + Map json) => + _$$UnionModerationInputStringImplFromJson(json); + + @override + final String value; + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ModerationInput.string(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionModerationInputStringImpl && + (identical(other.value, value) || other.value == value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash(runtimeType, value); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionModerationInputStringImplCopyWith<_$UnionModerationInputStringImpl> + get copyWith => __$$UnionModerationInputStringImplCopyWithImpl< + _$UnionModerationInputStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) { + return string(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) { + return string?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? 
string, + TResult Function(List value)? arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationInputString value) string, + required TResult Function(_UnionModerationInputArrayString value) + arrayString, + }) { + return string(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationInputString value)? string, + TResult? Function(_UnionModerationInputArrayString value)? arrayString, + }) { + return string?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationInputString value)? string, + TResult Function(_UnionModerationInputArrayString value)? arrayString, + required TResult orElse(), + }) { + if (string != null) { + return string(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionModerationInputStringImplToJson( + this, + ); + } +} + +abstract class _UnionModerationInputString extends ModerationInput { + const factory _UnionModerationInputString(final String value) = + _$UnionModerationInputStringImpl; + const _UnionModerationInputString._() : super._(); + + factory _UnionModerationInputString.fromJson(Map json) = + _$UnionModerationInputStringImpl.fromJson; + + @override + String get value; + @JsonKey(ignore: true) + _$$UnionModerationInputStringImplCopyWith<_$UnionModerationInputStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class _$$UnionModerationInputArrayStringImplCopyWith<$Res> { + factory _$$UnionModerationInputArrayStringImplCopyWith( + _$UnionModerationInputArrayStringImpl value, + $Res Function(_$UnionModerationInputArrayStringImpl) then) = + __$$UnionModerationInputArrayStringImplCopyWithImpl<$Res>; + @useResult + $Res call({List value}); +} + +/// @nodoc +class 
__$$UnionModerationInputArrayStringImplCopyWithImpl<$Res> + extends _$ModerationInputCopyWithImpl<$Res, + _$UnionModerationInputArrayStringImpl> + implements _$$UnionModerationInputArrayStringImplCopyWith<$Res> { + __$$UnionModerationInputArrayStringImplCopyWithImpl( + _$UnionModerationInputArrayStringImpl _value, + $Res Function(_$UnionModerationInputArrayStringImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? value = null, + }) { + return _then(_$UnionModerationInputArrayStringImpl( + null == value + ? _value._value + : value // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$UnionModerationInputArrayStringImpl + extends _UnionModerationInputArrayString { + const _$UnionModerationInputArrayStringImpl(final List value, + {final String? $type}) + : _value = value, + $type = $type ?? 'arrayString', + super._(); + + factory _$UnionModerationInputArrayStringImpl.fromJson( + Map json) => + _$$UnionModerationInputArrayStringImplFromJson(json); + + final List _value; + @override + List get value { + if (_value is EqualUnmodifiableListView) return _value; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_value); + } + + @JsonKey(name: 'runtimeType') + final String $type; + + @override + String toString() { + return 'ModerationInput.arrayString(value: $value)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$UnionModerationInputArrayStringImpl && + const DeepCollectionEquality().equals(other._value, _value)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, const DeepCollectionEquality().hash(_value)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$UnionModerationInputArrayStringImplCopyWith< + _$UnionModerationInputArrayStringImpl> + get copyWith => 
__$$UnionModerationInputArrayStringImplCopyWithImpl< + _$UnionModerationInputArrayStringImpl>(this, _$identity); + + @override + @optionalTypeArgs + TResult when({ + required TResult Function(String value) string, + required TResult Function(List value) arrayString, + }) { + return arrayString(value); + } + + @override + @optionalTypeArgs + TResult? whenOrNull({ + TResult? Function(String value)? string, + TResult? Function(List value)? arrayString, + }) { + return arrayString?.call(value); + } + + @override + @optionalTypeArgs + TResult maybeWhen({ + TResult Function(String value)? string, + TResult Function(List value)? arrayString, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(value); + } + return orElse(); + } + + @override + @optionalTypeArgs + TResult map({ + required TResult Function(_UnionModerationInputString value) string, + required TResult Function(_UnionModerationInputArrayString value) + arrayString, + }) { + return arrayString(this); + } + + @override + @optionalTypeArgs + TResult? mapOrNull({ + TResult? Function(_UnionModerationInputString value)? string, + TResult? Function(_UnionModerationInputArrayString value)? arrayString, + }) { + return arrayString?.call(this); + } + + @override + @optionalTypeArgs + TResult maybeMap({ + TResult Function(_UnionModerationInputString value)? string, + TResult Function(_UnionModerationInputArrayString value)? 
arrayString, + required TResult orElse(), + }) { + if (arrayString != null) { + return arrayString(this); + } + return orElse(); + } + + @override + Map toJson() { + return _$$UnionModerationInputArrayStringImplToJson( + this, + ); + } +} + +abstract class _UnionModerationInputArrayString extends ModerationInput { + const factory _UnionModerationInputArrayString(final List value) = + _$UnionModerationInputArrayStringImpl; + const _UnionModerationInputArrayString._() : super._(); + + factory _UnionModerationInputArrayString.fromJson(Map json) = + _$UnionModerationInputArrayStringImpl.fromJson; + + @override + List get value; + @JsonKey(ignore: true) + _$$UnionModerationInputArrayStringImplCopyWith< + _$UnionModerationInputArrayStringImpl> + get copyWith => throw _privateConstructorUsedError; +} + +CreateModerationResponse _$CreateModerationResponseFromJson( + Map json) { + return _CreateModerationResponse.fromJson(json); +} + +/// @nodoc +mixin _$CreateModerationResponse { + /// The unique identifier for the moderation request. + String get id => throw _privateConstructorUsedError; + + /// The model used to generate the moderation results. + String get model => throw _privateConstructorUsedError; + + /// A list of moderation objects. 
+ List get results => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $CreateModerationResponseCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $CreateModerationResponseCopyWith<$Res> { + factory $CreateModerationResponseCopyWith(CreateModerationResponse value, + $Res Function(CreateModerationResponse) then) = + _$CreateModerationResponseCopyWithImpl<$Res, CreateModerationResponse>; + @useResult + $Res call({String id, String model, List results}); +} + +/// @nodoc +class _$CreateModerationResponseCopyWithImpl<$Res, + $Val extends CreateModerationResponse> + implements $CreateModerationResponseCopyWith<$Res> { + _$CreateModerationResponseCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? model = null, + Object? results = null, + }) { + return _then(_value.copyWith( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + results: null == results + ? 
_value.results + : results // ignore: cast_nullable_to_non_nullable + as List, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$CreateModerationResponseImplCopyWith<$Res> + implements $CreateModerationResponseCopyWith<$Res> { + factory _$$CreateModerationResponseImplCopyWith( + _$CreateModerationResponseImpl value, + $Res Function(_$CreateModerationResponseImpl) then) = + __$$CreateModerationResponseImplCopyWithImpl<$Res>; + @override + @useResult + $Res call({String id, String model, List results}); +} + +/// @nodoc +class __$$CreateModerationResponseImplCopyWithImpl<$Res> + extends _$CreateModerationResponseCopyWithImpl<$Res, + _$CreateModerationResponseImpl> + implements _$$CreateModerationResponseImplCopyWith<$Res> { + __$$CreateModerationResponseImplCopyWithImpl( + _$CreateModerationResponseImpl _value, + $Res Function(_$CreateModerationResponseImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? id = null, + Object? model = null, + Object? results = null, + }) { + return _then(_$CreateModerationResponseImpl( + id: null == id + ? _value.id + : id // ignore: cast_nullable_to_non_nullable + as String, + model: null == model + ? _value.model + : model // ignore: cast_nullable_to_non_nullable + as String, + results: null == results + ? _value._results + : results // ignore: cast_nullable_to_non_nullable + as List, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$CreateModerationResponseImpl extends _CreateModerationResponse { + const _$CreateModerationResponseImpl( + {required this.id, + required this.model, + required final List results}) + : _results = results, + super._(); + + factory _$CreateModerationResponseImpl.fromJson(Map json) => + _$$CreateModerationResponseImplFromJson(json); + + /// The unique identifier for the moderation request. + @override + final String id; + + /// The model used to generate the moderation results. 
+ @override + final String model; + + /// A list of moderation objects. + final List _results; + + /// A list of moderation objects. + @override + List get results { + if (_results is EqualUnmodifiableListView) return _results; + // ignore: implicit_dynamic_type + return EqualUnmodifiableListView(_results); + } + + @override + String toString() { + return 'CreateModerationResponse(id: $id, model: $model, results: $results)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$CreateModerationResponseImpl && + (identical(other.id, id) || other.id == id) && + (identical(other.model, model) || other.model == model) && + const DeepCollectionEquality().equals(other._results, _results)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, id, model, const DeepCollectionEquality().hash(_results)); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> + get copyWith => __$$CreateModerationResponseImplCopyWithImpl< + _$CreateModerationResponseImpl>(this, _$identity); + + @override + Map toJson() { + return _$$CreateModerationResponseImplToJson( + this, + ); + } +} + +abstract class _CreateModerationResponse extends CreateModerationResponse { + const factory _CreateModerationResponse( + {required final String id, + required final String model, + required final List results}) = + _$CreateModerationResponseImpl; + const _CreateModerationResponse._() : super._(); + + factory _CreateModerationResponse.fromJson(Map json) = + _$CreateModerationResponseImpl.fromJson; + + @override + + /// The unique identifier for the moderation request. + String get id; + @override + + /// The model used to generate the moderation results. + String get model; + @override + + /// A list of moderation objects. 
+ List get results; + @override + @JsonKey(ignore: true) + _$$CreateModerationResponseImplCopyWith<_$CreateModerationResponseImpl> + get copyWith => throw _privateConstructorUsedError; +} + +Moderation _$ModerationFromJson(Map json) { + return _Moderation.fromJson(json); +} + +/// @nodoc +mixin _$Moderation { + /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + bool get flagged => throw _privateConstructorUsedError; + + /// A list of the categories, and whether they are flagged or not. + ModerationCategories get categories => throw _privateConstructorUsedError; + + /// A list of the categories along with their scores as predicted by model. + @JsonKey(name: 'category_scores') + ModerationCategoriesScores get categoryScores => + throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModerationCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModerationCopyWith<$Res> { + factory $ModerationCopyWith( + Moderation value, $Res Function(Moderation) then) = + _$ModerationCopyWithImpl<$Res, Moderation>; + @useResult + $Res call( + {bool flagged, + ModerationCategories categories, + @JsonKey(name: 'category_scores') + ModerationCategoriesScores categoryScores}); + + $ModerationCategoriesCopyWith<$Res> get categories; + $ModerationCategoriesScoresCopyWith<$Res> get categoryScores; +} + +/// @nodoc +class _$ModerationCopyWithImpl<$Res, $Val extends Moderation> + implements $ModerationCopyWith<$Res> { + _$ModerationCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? flagged = null, + Object? categories = null, + Object? categoryScores = null, + }) { + return _then(_value.copyWith( + flagged: null == flagged + ? 
_value.flagged + : flagged // ignore: cast_nullable_to_non_nullable + as bool, + categories: null == categories + ? _value.categories + : categories // ignore: cast_nullable_to_non_nullable + as ModerationCategories, + categoryScores: null == categoryScores + ? _value.categoryScores + : categoryScores // ignore: cast_nullable_to_non_nullable + as ModerationCategoriesScores, + ) as $Val); + } + + @override + @pragma('vm:prefer-inline') + $ModerationCategoriesCopyWith<$Res> get categories { + return $ModerationCategoriesCopyWith<$Res>(_value.categories, (value) { + return _then(_value.copyWith(categories: value) as $Val); + }); + } + + @override + @pragma('vm:prefer-inline') + $ModerationCategoriesScoresCopyWith<$Res> get categoryScores { + return $ModerationCategoriesScoresCopyWith<$Res>(_value.categoryScores, + (value) { + return _then(_value.copyWith(categoryScores: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$ModerationImplCopyWith<$Res> + implements $ModerationCopyWith<$Res> { + factory _$$ModerationImplCopyWith( + _$ModerationImpl value, $Res Function(_$ModerationImpl) then) = + __$$ModerationImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {bool flagged, + ModerationCategories categories, + @JsonKey(name: 'category_scores') + ModerationCategoriesScores categoryScores}); + + @override + $ModerationCategoriesCopyWith<$Res> get categories; + @override + $ModerationCategoriesScoresCopyWith<$Res> get categoryScores; +} + +/// @nodoc +class __$$ModerationImplCopyWithImpl<$Res> + extends _$ModerationCopyWithImpl<$Res, _$ModerationImpl> + implements _$$ModerationImplCopyWith<$Res> { + __$$ModerationImplCopyWithImpl( + _$ModerationImpl _value, $Res Function(_$ModerationImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? flagged = null, + Object? categories = null, + Object? categoryScores = null, + }) { + return _then(_$ModerationImpl( + flagged: null == flagged + ? 
_value.flagged + : flagged // ignore: cast_nullable_to_non_nullable + as bool, + categories: null == categories + ? _value.categories + : categories // ignore: cast_nullable_to_non_nullable + as ModerationCategories, + categoryScores: null == categoryScores + ? _value.categoryScores + : categoryScores // ignore: cast_nullable_to_non_nullable + as ModerationCategoriesScores, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModerationImpl extends _Moderation { + const _$ModerationImpl( + {required this.flagged, + required this.categories, + @JsonKey(name: 'category_scores') required this.categoryScores}) + : super._(); + + factory _$ModerationImpl.fromJson(Map json) => + _$$ModerationImplFromJson(json); + + /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + @override + final bool flagged; + + /// A list of the categories, and whether they are flagged or not. + @override + final ModerationCategories categories; + + /// A list of the categories along with their scores as predicted by model. 
+ @override + @JsonKey(name: 'category_scores') + final ModerationCategoriesScores categoryScores; + + @override + String toString() { + return 'Moderation(flagged: $flagged, categories: $categories, categoryScores: $categoryScores)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModerationImpl && + (identical(other.flagged, flagged) || other.flagged == flagged) && + (identical(other.categories, categories) || + other.categories == categories) && + (identical(other.categoryScores, categoryScores) || + other.categoryScores == categoryScores)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => + Object.hash(runtimeType, flagged, categories, categoryScores); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => + __$$ModerationImplCopyWithImpl<_$ModerationImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModerationImplToJson( + this, + ); + } +} + +abstract class _Moderation extends Moderation { + const factory _Moderation( + {required final bool flagged, + required final ModerationCategories categories, + @JsonKey(name: 'category_scores') + required final ModerationCategoriesScores categoryScores}) = + _$ModerationImpl; + const _Moderation._() : super._(); + + factory _Moderation.fromJson(Map json) = + _$ModerationImpl.fromJson; + + @override + + /// Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies). + bool get flagged; + @override + + /// A list of the categories, and whether they are flagged or not. + ModerationCategories get categories; + @override + + /// A list of the categories along with their scores as predicted by model. 
+ @JsonKey(name: 'category_scores') + ModerationCategoriesScores get categoryScores; + @override + @JsonKey(ignore: true) + _$$ModerationImplCopyWith<_$ModerationImpl> get copyWith => + throw _privateConstructorUsedError; +} + +ModerationCategories _$ModerationCategoriesFromJson(Map json) { + return _ModerationCategories.fromJson(json); +} + +/// @nodoc +mixin _$ModerationCategories { + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. + bool get hate => throw _privateConstructorUsedError; + + /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @JsonKey(name: 'hate/threatening') + bool get hateThreatening => throw _privateConstructorUsedError; + + /// Content that expresses, incites, or promotes harassing language towards any target. + bool get harassment => throw _privateConstructorUsedError; + + /// Harassment content that also includes violence or serious harm towards any target. + @JsonKey(name: 'harassment/threatening') + bool get harassmentThreatening => throw _privateConstructorUsedError; + + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm') + bool get selfHarm => throw _privateConstructorUsedError; + + /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm/intent') + bool get selfHarmIntent => throw _privateConstructorUsedError; + + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. 
+ @JsonKey(name: 'self-harm/instructions') + bool get selfHarmInstructions => throw _privateConstructorUsedError; + + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + bool get sexual => throw _privateConstructorUsedError; + + /// Sexual content that includes an individual who is under 18 years old. + @JsonKey(name: 'sexual/minors') + bool get sexualMinors => throw _privateConstructorUsedError; + + /// Content that depicts death, violence, or physical injury. + bool get violence => throw _privateConstructorUsedError; + + /// Content that depicts death, violence, or physical injury in graphic detail. + @JsonKey(name: 'violence/graphic') + bool get violenceGraphic => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModerationCategoriesCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModerationCategoriesCopyWith<$Res> { + factory $ModerationCategoriesCopyWith(ModerationCategories value, + $Res Function(ModerationCategories) then) = + _$ModerationCategoriesCopyWithImpl<$Res, ModerationCategories>; + @useResult + $Res call( + {bool hate, + @JsonKey(name: 'hate/threatening') bool hateThreatening, + bool harassment, + @JsonKey(name: 'harassment/threatening') bool harassmentThreatening, + @JsonKey(name: 'self-harm') bool selfHarm, + @JsonKey(name: 'self-harm/intent') bool selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') bool selfHarmInstructions, + bool sexual, + @JsonKey(name: 'sexual/minors') bool sexualMinors, + bool violence, + @JsonKey(name: 'violence/graphic') bool violenceGraphic}); +} + +/// @nodoc +class _$ModerationCategoriesCopyWithImpl<$Res, + $Val extends ModerationCategories> + implements $ModerationCategoriesCopyWith<$Res> { + _$ModerationCategoriesCopyWithImpl(this._value, this._then); + + // ignore: unused_field + 
final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? hate = null, + Object? hateThreatening = null, + Object? harassment = null, + Object? harassmentThreatening = null, + Object? selfHarm = null, + Object? selfHarmIntent = null, + Object? selfHarmInstructions = null, + Object? sexual = null, + Object? sexualMinors = null, + Object? violence = null, + Object? violenceGraphic = null, + }) { + return _then(_value.copyWith( + hate: null == hate + ? _value.hate + : hate // ignore: cast_nullable_to_non_nullable + as bool, + hateThreatening: null == hateThreatening + ? _value.hateThreatening + : hateThreatening // ignore: cast_nullable_to_non_nullable + as bool, + harassment: null == harassment + ? _value.harassment + : harassment // ignore: cast_nullable_to_non_nullable + as bool, + harassmentThreatening: null == harassmentThreatening + ? _value.harassmentThreatening + : harassmentThreatening // ignore: cast_nullable_to_non_nullable + as bool, + selfHarm: null == selfHarm + ? _value.selfHarm + : selfHarm // ignore: cast_nullable_to_non_nullable + as bool, + selfHarmIntent: null == selfHarmIntent + ? _value.selfHarmIntent + : selfHarmIntent // ignore: cast_nullable_to_non_nullable + as bool, + selfHarmInstructions: null == selfHarmInstructions + ? _value.selfHarmInstructions + : selfHarmInstructions // ignore: cast_nullable_to_non_nullable + as bool, + sexual: null == sexual + ? _value.sexual + : sexual // ignore: cast_nullable_to_non_nullable + as bool, + sexualMinors: null == sexualMinors + ? _value.sexualMinors + : sexualMinors // ignore: cast_nullable_to_non_nullable + as bool, + violence: null == violence + ? _value.violence + : violence // ignore: cast_nullable_to_non_nullable + as bool, + violenceGraphic: null == violenceGraphic + ? 
_value.violenceGraphic + : violenceGraphic // ignore: cast_nullable_to_non_nullable + as bool, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModerationCategoriesImplCopyWith<$Res> + implements $ModerationCategoriesCopyWith<$Res> { + factory _$$ModerationCategoriesImplCopyWith(_$ModerationCategoriesImpl value, + $Res Function(_$ModerationCategoriesImpl) then) = + __$$ModerationCategoriesImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {bool hate, + @JsonKey(name: 'hate/threatening') bool hateThreatening, + bool harassment, + @JsonKey(name: 'harassment/threatening') bool harassmentThreatening, + @JsonKey(name: 'self-harm') bool selfHarm, + @JsonKey(name: 'self-harm/intent') bool selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') bool selfHarmInstructions, + bool sexual, + @JsonKey(name: 'sexual/minors') bool sexualMinors, + bool violence, + @JsonKey(name: 'violence/graphic') bool violenceGraphic}); +} + +/// @nodoc +class __$$ModerationCategoriesImplCopyWithImpl<$Res> + extends _$ModerationCategoriesCopyWithImpl<$Res, _$ModerationCategoriesImpl> + implements _$$ModerationCategoriesImplCopyWith<$Res> { + __$$ModerationCategoriesImplCopyWithImpl(_$ModerationCategoriesImpl _value, + $Res Function(_$ModerationCategoriesImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? hate = null, + Object? hateThreatening = null, + Object? harassment = null, + Object? harassmentThreatening = null, + Object? selfHarm = null, + Object? selfHarmIntent = null, + Object? selfHarmInstructions = null, + Object? sexual = null, + Object? sexualMinors = null, + Object? violence = null, + Object? violenceGraphic = null, + }) { + return _then(_$ModerationCategoriesImpl( + hate: null == hate + ? _value.hate + : hate // ignore: cast_nullable_to_non_nullable + as bool, + hateThreatening: null == hateThreatening + ? 
_value.hateThreatening + : hateThreatening // ignore: cast_nullable_to_non_nullable + as bool, + harassment: null == harassment + ? _value.harassment + : harassment // ignore: cast_nullable_to_non_nullable + as bool, + harassmentThreatening: null == harassmentThreatening + ? _value.harassmentThreatening + : harassmentThreatening // ignore: cast_nullable_to_non_nullable + as bool, + selfHarm: null == selfHarm + ? _value.selfHarm + : selfHarm // ignore: cast_nullable_to_non_nullable + as bool, + selfHarmIntent: null == selfHarmIntent + ? _value.selfHarmIntent + : selfHarmIntent // ignore: cast_nullable_to_non_nullable + as bool, + selfHarmInstructions: null == selfHarmInstructions + ? _value.selfHarmInstructions + : selfHarmInstructions // ignore: cast_nullable_to_non_nullable + as bool, + sexual: null == sexual + ? _value.sexual + : sexual // ignore: cast_nullable_to_non_nullable + as bool, + sexualMinors: null == sexualMinors + ? _value.sexualMinors + : sexualMinors // ignore: cast_nullable_to_non_nullable + as bool, + violence: null == violence + ? _value.violence + : violence // ignore: cast_nullable_to_non_nullable + as bool, + violenceGraphic: null == violenceGraphic + ? 
_value.violenceGraphic + : violenceGraphic // ignore: cast_nullable_to_non_nullable + as bool, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModerationCategoriesImpl extends _ModerationCategories { + const _$ModerationCategoriesImpl( + {required this.hate, + @JsonKey(name: 'hate/threatening') required this.hateThreatening, + required this.harassment, + @JsonKey(name: 'harassment/threatening') + required this.harassmentThreatening, + @JsonKey(name: 'self-harm') required this.selfHarm, + @JsonKey(name: 'self-harm/intent') required this.selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') + required this.selfHarmInstructions, + required this.sexual, + @JsonKey(name: 'sexual/minors') required this.sexualMinors, + required this.violence, + @JsonKey(name: 'violence/graphic') required this.violenceGraphic}) + : super._(); + + factory _$ModerationCategoriesImpl.fromJson(Map json) => + _$$ModerationCategoriesImplFromJson(json); + + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. + @override + final bool hate; + + /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @override + @JsonKey(name: 'hate/threatening') + final bool hateThreatening; + + /// Content that expresses, incites, or promotes harassing language towards any target. + @override + final bool harassment; + + /// Harassment content that also includes violence or serious harm towards any target. + @override + @JsonKey(name: 'harassment/threatening') + final bool harassmentThreatening; + + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. 
+ @override + @JsonKey(name: 'self-harm') + final bool selfHarm; + + /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + @override + @JsonKey(name: 'self-harm/intent') + final bool selfHarmIntent; + + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @override + @JsonKey(name: 'self-harm/instructions') + final bool selfHarmInstructions; + + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + @override + final bool sexual; + + /// Sexual content that includes an individual who is under 18 years old. + @override + @JsonKey(name: 'sexual/minors') + final bool sexualMinors; + + /// Content that depicts death, violence, or physical injury. + @override + final bool violence; + + /// Content that depicts death, violence, or physical injury in graphic detail. 
+ @override + @JsonKey(name: 'violence/graphic') + final bool violenceGraphic; + + @override + String toString() { + return 'ModerationCategories(hate: $hate, hateThreatening: $hateThreatening, harassment: $harassment, harassmentThreatening: $harassmentThreatening, selfHarm: $selfHarm, selfHarmIntent: $selfHarmIntent, selfHarmInstructions: $selfHarmInstructions, sexual: $sexual, sexualMinors: $sexualMinors, violence: $violence, violenceGraphic: $violenceGraphic)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModerationCategoriesImpl && + (identical(other.hate, hate) || other.hate == hate) && + (identical(other.hateThreatening, hateThreatening) || + other.hateThreatening == hateThreatening) && + (identical(other.harassment, harassment) || + other.harassment == harassment) && + (identical(other.harassmentThreatening, harassmentThreatening) || + other.harassmentThreatening == harassmentThreatening) && + (identical(other.selfHarm, selfHarm) || + other.selfHarm == selfHarm) && + (identical(other.selfHarmIntent, selfHarmIntent) || + other.selfHarmIntent == selfHarmIntent) && + (identical(other.selfHarmInstructions, selfHarmInstructions) || + other.selfHarmInstructions == selfHarmInstructions) && + (identical(other.sexual, sexual) || other.sexual == sexual) && + (identical(other.sexualMinors, sexualMinors) || + other.sexualMinors == sexualMinors) && + (identical(other.violence, violence) || + other.violence == violence) && + (identical(other.violenceGraphic, violenceGraphic) || + other.violenceGraphic == violenceGraphic)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + hate, + hateThreatening, + harassment, + harassmentThreatening, + selfHarm, + selfHarmIntent, + selfHarmInstructions, + sexual, + sexualMinors, + violence, + violenceGraphic); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + 
_$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> + get copyWith => + __$$ModerationCategoriesImplCopyWithImpl<_$ModerationCategoriesImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$ModerationCategoriesImplToJson( + this, + ); + } +} + +abstract class _ModerationCategories extends ModerationCategories { + const factory _ModerationCategories( + {required final bool hate, + @JsonKey(name: 'hate/threatening') required final bool hateThreatening, + required final bool harassment, + @JsonKey(name: 'harassment/threatening') + required final bool harassmentThreatening, + @JsonKey(name: 'self-harm') required final bool selfHarm, + @JsonKey(name: 'self-harm/intent') required final bool selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') + required final bool selfHarmInstructions, + required final bool sexual, + @JsonKey(name: 'sexual/minors') required final bool sexualMinors, + required final bool violence, + @JsonKey(name: 'violence/graphic') + required final bool violenceGraphic}) = _$ModerationCategoriesImpl; + const _ModerationCategories._() : super._(); + + factory _ModerationCategories.fromJson(Map json) = + _$ModerationCategoriesImpl.fromJson; + + @override + + /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. + bool get hate; + @override + + /// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + @JsonKey(name: 'hate/threatening') + bool get hateThreatening; + @override + + /// Content that expresses, incites, or promotes harassing language towards any target. + bool get harassment; + @override + + /// Harassment content that also includes violence or serious harm towards any target. 
+ @JsonKey(name: 'harassment/threatening') + bool get harassmentThreatening; + @override + + /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm') + bool get selfHarm; + @override + + /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + @JsonKey(name: 'self-harm/intent') + bool get selfHarmIntent; + @override + + /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + @JsonKey(name: 'self-harm/instructions') + bool get selfHarmInstructions; + @override + + /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + bool get sexual; + @override + + /// Sexual content that includes an individual who is under 18 years old. + @JsonKey(name: 'sexual/minors') + bool get sexualMinors; + @override + + /// Content that depicts death, violence, or physical injury. + bool get violence; + @override + + /// Content that depicts death, violence, or physical injury in graphic detail. + @JsonKey(name: 'violence/graphic') + bool get violenceGraphic; + @override + @JsonKey(ignore: true) + _$$ModerationCategoriesImplCopyWith<_$ModerationCategoriesImpl> + get copyWith => throw _privateConstructorUsedError; +} + +ModerationCategoriesScores _$ModerationCategoriesScoresFromJson( + Map json) { + return _ModerationCategoriesScores.fromJson(json); +} + +/// @nodoc +mixin _$ModerationCategoriesScores { + /// The score for the category 'hate'. + double get hate => throw _privateConstructorUsedError; + + /// The score for the category 'hate/threatening'. 
+ @JsonKey(name: 'hate/threatening') + double get hateThreatening => throw _privateConstructorUsedError; + + /// The score for the category 'harassment'. + double get harassment => throw _privateConstructorUsedError; + + /// The score for the category 'harassment/threatening'. + @JsonKey(name: 'harassment/threatening') + double get harassmentThreatening => throw _privateConstructorUsedError; + + /// The score for the category 'self-harm'. + @JsonKey(name: 'self-harm') + double get selfHarm => throw _privateConstructorUsedError; + + /// The score for the category 'self-harm/intent'. + @JsonKey(name: 'self-harm/intent') + double get selfHarmIntent => throw _privateConstructorUsedError; + + /// The score for the category 'self-harm/instructions'. + @JsonKey(name: 'self-harm/instructions') + double get selfHarmInstructions => throw _privateConstructorUsedError; + + /// The score for the category 'sexual'. + double get sexual => throw _privateConstructorUsedError; + + /// The score for the category 'sexual/minors'. + @JsonKey(name: 'sexual/minors') + double get sexualMinors => throw _privateConstructorUsedError; + + /// The score for the category 'violence'. + double get violence => throw _privateConstructorUsedError; + + /// The score for the category 'violence/graphic'. 
+ @JsonKey(name: 'violence/graphic') + double get violenceGraphic => throw _privateConstructorUsedError; + + Map toJson() => throw _privateConstructorUsedError; + @JsonKey(ignore: true) + $ModerationCategoriesScoresCopyWith + get copyWith => throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $ModerationCategoriesScoresCopyWith<$Res> { + factory $ModerationCategoriesScoresCopyWith(ModerationCategoriesScores value, + $Res Function(ModerationCategoriesScores) then) = + _$ModerationCategoriesScoresCopyWithImpl<$Res, + ModerationCategoriesScores>; + @useResult + $Res call( + {double hate, + @JsonKey(name: 'hate/threatening') double hateThreatening, + double harassment, + @JsonKey(name: 'harassment/threatening') double harassmentThreatening, + @JsonKey(name: 'self-harm') double selfHarm, + @JsonKey(name: 'self-harm/intent') double selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') double selfHarmInstructions, + double sexual, + @JsonKey(name: 'sexual/minors') double sexualMinors, + double violence, + @JsonKey(name: 'violence/graphic') double violenceGraphic}); +} + +/// @nodoc +class _$ModerationCategoriesScoresCopyWithImpl<$Res, + $Val extends ModerationCategoriesScores> + implements $ModerationCategoriesScoresCopyWith<$Res> { + _$ModerationCategoriesScoresCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? hate = null, + Object? hateThreatening = null, + Object? harassment = null, + Object? harassmentThreatening = null, + Object? selfHarm = null, + Object? selfHarmIntent = null, + Object? selfHarmInstructions = null, + Object? sexual = null, + Object? sexualMinors = null, + Object? violence = null, + Object? violenceGraphic = null, + }) { + return _then(_value.copyWith( + hate: null == hate + ? 
_value.hate + : hate // ignore: cast_nullable_to_non_nullable + as double, + hateThreatening: null == hateThreatening + ? _value.hateThreatening + : hateThreatening // ignore: cast_nullable_to_non_nullable + as double, + harassment: null == harassment + ? _value.harassment + : harassment // ignore: cast_nullable_to_non_nullable + as double, + harassmentThreatening: null == harassmentThreatening + ? _value.harassmentThreatening + : harassmentThreatening // ignore: cast_nullable_to_non_nullable + as double, + selfHarm: null == selfHarm + ? _value.selfHarm + : selfHarm // ignore: cast_nullable_to_non_nullable + as double, + selfHarmIntent: null == selfHarmIntent + ? _value.selfHarmIntent + : selfHarmIntent // ignore: cast_nullable_to_non_nullable + as double, + selfHarmInstructions: null == selfHarmInstructions + ? _value.selfHarmInstructions + : selfHarmInstructions // ignore: cast_nullable_to_non_nullable + as double, + sexual: null == sexual + ? _value.sexual + : sexual // ignore: cast_nullable_to_non_nullable + as double, + sexualMinors: null == sexualMinors + ? _value.sexualMinors + : sexualMinors // ignore: cast_nullable_to_non_nullable + as double, + violence: null == violence + ? _value.violence + : violence // ignore: cast_nullable_to_non_nullable + as double, + violenceGraphic: null == violenceGraphic + ? 
_value.violenceGraphic + : violenceGraphic // ignore: cast_nullable_to_non_nullable + as double, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$ModerationCategoriesScoresImplCopyWith<$Res> + implements $ModerationCategoriesScoresCopyWith<$Res> { + factory _$$ModerationCategoriesScoresImplCopyWith( + _$ModerationCategoriesScoresImpl value, + $Res Function(_$ModerationCategoriesScoresImpl) then) = + __$$ModerationCategoriesScoresImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {double hate, + @JsonKey(name: 'hate/threatening') double hateThreatening, + double harassment, + @JsonKey(name: 'harassment/threatening') double harassmentThreatening, + @JsonKey(name: 'self-harm') double selfHarm, + @JsonKey(name: 'self-harm/intent') double selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') double selfHarmInstructions, + double sexual, + @JsonKey(name: 'sexual/minors') double sexualMinors, + double violence, + @JsonKey(name: 'violence/graphic') double violenceGraphic}); +} + +/// @nodoc +class __$$ModerationCategoriesScoresImplCopyWithImpl<$Res> + extends _$ModerationCategoriesScoresCopyWithImpl<$Res, + _$ModerationCategoriesScoresImpl> + implements _$$ModerationCategoriesScoresImplCopyWith<$Res> { + __$$ModerationCategoriesScoresImplCopyWithImpl( + _$ModerationCategoriesScoresImpl _value, + $Res Function(_$ModerationCategoriesScoresImpl) _then) + : super(_value, _then); + + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? hate = null, + Object? hateThreatening = null, + Object? harassment = null, + Object? harassmentThreatening = null, + Object? selfHarm = null, + Object? selfHarmIntent = null, + Object? selfHarmInstructions = null, + Object? sexual = null, + Object? sexualMinors = null, + Object? violence = null, + Object? violenceGraphic = null, + }) { + return _then(_$ModerationCategoriesScoresImpl( + hate: null == hate + ? 
_value.hate + : hate // ignore: cast_nullable_to_non_nullable + as double, + hateThreatening: null == hateThreatening + ? _value.hateThreatening + : hateThreatening // ignore: cast_nullable_to_non_nullable + as double, + harassment: null == harassment + ? _value.harassment + : harassment // ignore: cast_nullable_to_non_nullable + as double, + harassmentThreatening: null == harassmentThreatening + ? _value.harassmentThreatening + : harassmentThreatening // ignore: cast_nullable_to_non_nullable + as double, + selfHarm: null == selfHarm + ? _value.selfHarm + : selfHarm // ignore: cast_nullable_to_non_nullable + as double, + selfHarmIntent: null == selfHarmIntent + ? _value.selfHarmIntent + : selfHarmIntent // ignore: cast_nullable_to_non_nullable + as double, + selfHarmInstructions: null == selfHarmInstructions + ? _value.selfHarmInstructions + : selfHarmInstructions // ignore: cast_nullable_to_non_nullable + as double, + sexual: null == sexual + ? _value.sexual + : sexual // ignore: cast_nullable_to_non_nullable + as double, + sexualMinors: null == sexualMinors + ? _value.sexualMinors + : sexualMinors // ignore: cast_nullable_to_non_nullable + as double, + violence: null == violence + ? _value.violence + : violence // ignore: cast_nullable_to_non_nullable + as double, + violenceGraphic: null == violenceGraphic + ? 
_value.violenceGraphic + : violenceGraphic // ignore: cast_nullable_to_non_nullable + as double, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$ModerationCategoriesScoresImpl extends _ModerationCategoriesScores { + const _$ModerationCategoriesScoresImpl( + {required this.hate, + @JsonKey(name: 'hate/threatening') required this.hateThreatening, + required this.harassment, + @JsonKey(name: 'harassment/threatening') + required this.harassmentThreatening, + @JsonKey(name: 'self-harm') required this.selfHarm, + @JsonKey(name: 'self-harm/intent') required this.selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') + required this.selfHarmInstructions, + required this.sexual, + @JsonKey(name: 'sexual/minors') required this.sexualMinors, + required this.violence, + @JsonKey(name: 'violence/graphic') required this.violenceGraphic}) + : super._(); + + factory _$ModerationCategoriesScoresImpl.fromJson( + Map json) => + _$$ModerationCategoriesScoresImplFromJson(json); + + /// The score for the category 'hate'. + @override + final double hate; + + /// The score for the category 'hate/threatening'. + @override + @JsonKey(name: 'hate/threatening') + final double hateThreatening; + + /// The score for the category 'harassment'. + @override + final double harassment; + + /// The score for the category 'harassment/threatening'. + @override + @JsonKey(name: 'harassment/threatening') + final double harassmentThreatening; + + /// The score for the category 'self-harm'. + @override + @JsonKey(name: 'self-harm') + final double selfHarm; + + /// The score for the category 'self-harm/intent'. + @override + @JsonKey(name: 'self-harm/intent') + final double selfHarmIntent; + + /// The score for the category 'self-harm/instructions'. + @override + @JsonKey(name: 'self-harm/instructions') + final double selfHarmInstructions; + + /// The score for the category 'sexual'. + @override + final double sexual; + + /// The score for the category 'sexual/minors'. 
+ @override + @JsonKey(name: 'sexual/minors') + final double sexualMinors; + + /// The score for the category 'violence'. + @override + final double violence; + + /// The score for the category 'violence/graphic'. + @override + @JsonKey(name: 'violence/graphic') + final double violenceGraphic; + + @override + String toString() { + return 'ModerationCategoriesScores(hate: $hate, hateThreatening: $hateThreatening, harassment: $harassment, harassmentThreatening: $harassmentThreatening, selfHarm: $selfHarm, selfHarmIntent: $selfHarmIntent, selfHarmInstructions: $selfHarmInstructions, sexual: $sexual, sexualMinors: $sexualMinors, violence: $violence, violenceGraphic: $violenceGraphic)'; + } + + @override + bool operator ==(dynamic other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$ModerationCategoriesScoresImpl && + (identical(other.hate, hate) || other.hate == hate) && + (identical(other.hateThreatening, hateThreatening) || + other.hateThreatening == hateThreatening) && + (identical(other.harassment, harassment) || + other.harassment == harassment) && + (identical(other.harassmentThreatening, harassmentThreatening) || + other.harassmentThreatening == harassmentThreatening) && + (identical(other.selfHarm, selfHarm) || + other.selfHarm == selfHarm) && + (identical(other.selfHarmIntent, selfHarmIntent) || + other.selfHarmIntent == selfHarmIntent) && + (identical(other.selfHarmInstructions, selfHarmInstructions) || + other.selfHarmInstructions == selfHarmInstructions) && + (identical(other.sexual, sexual) || other.sexual == sexual) && + (identical(other.sexualMinors, sexualMinors) || + other.sexualMinors == sexualMinors) && + (identical(other.violence, violence) || + other.violence == violence) && + (identical(other.violenceGraphic, violenceGraphic) || + other.violenceGraphic == violenceGraphic)); + } + + @JsonKey(ignore: true) + @override + int get hashCode => Object.hash( + runtimeType, + hate, + hateThreatening, + harassment, 
+ harassmentThreatening, + selfHarm, + selfHarmIntent, + selfHarmInstructions, + sexual, + sexualMinors, + violence, + violenceGraphic); + + @JsonKey(ignore: true) + @override + @pragma('vm:prefer-inline') + _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> + get copyWith => __$$ModerationCategoriesScoresImplCopyWithImpl< + _$ModerationCategoriesScoresImpl>(this, _$identity); + + @override + Map toJson() { + return _$$ModerationCategoriesScoresImplToJson( + this, + ); + } +} + +abstract class _ModerationCategoriesScores extends ModerationCategoriesScores { + const factory _ModerationCategoriesScores( + {required final double hate, + @JsonKey(name: 'hate/threatening') required final double hateThreatening, + required final double harassment, + @JsonKey(name: 'harassment/threatening') + required final double harassmentThreatening, + @JsonKey(name: 'self-harm') required final double selfHarm, + @JsonKey(name: 'self-harm/intent') required final double selfHarmIntent, + @JsonKey(name: 'self-harm/instructions') + required final double selfHarmInstructions, + required final double sexual, + @JsonKey(name: 'sexual/minors') required final double sexualMinors, + required final double violence, + @JsonKey(name: 'violence/graphic') + required final double + violenceGraphic}) = _$ModerationCategoriesScoresImpl; + const _ModerationCategoriesScores._() : super._(); + + factory _ModerationCategoriesScores.fromJson(Map json) = + _$ModerationCategoriesScoresImpl.fromJson; + + @override + + /// The score for the category 'hate'. + double get hate; + @override + + /// The score for the category 'hate/threatening'. + @JsonKey(name: 'hate/threatening') + double get hateThreatening; + @override + + /// The score for the category 'harassment'. + double get harassment; + @override + + /// The score for the category 'harassment/threatening'. 
+ @JsonKey(name: 'harassment/threatening') + double get harassmentThreatening; + @override + + /// The score for the category 'self-harm'. + @JsonKey(name: 'self-harm') + double get selfHarm; + @override + + /// The score for the category 'self-harm/intent'. + @JsonKey(name: 'self-harm/intent') + double get selfHarmIntent; + @override + + /// The score for the category 'self-harm/instructions'. + @JsonKey(name: 'self-harm/instructions') + double get selfHarmInstructions; + @override + + /// The score for the category 'sexual'. + double get sexual; + @override + + /// The score for the category 'sexual/minors'. + @JsonKey(name: 'sexual/minors') + double get sexualMinors; + @override + + /// The score for the category 'violence'. + double get violence; + @override + + /// The score for the category 'violence/graphic'. + @JsonKey(name: 'violence/graphic') + double get violenceGraphic; + @override + @JsonKey(ignore: true) + _$$ModerationCategoriesScoresImplCopyWith<_$ModerationCategoriesScoresImpl> + get copyWith => throw _privateConstructorUsedError; +} diff --git a/packages/openai_dart/lib/src/generated/schema/schema.g.dart b/packages/openai_dart/lib/src/generated/schema/schema.g.dart new file mode 100644 index 00000000..782ebdf6 --- /dev/null +++ b/packages/openai_dart/lib/src/generated/schema/schema.g.dart @@ -0,0 +1,1447 @@ +// GENERATED CODE - DO NOT MODIFY BY HAND + +// ignore_for_file: prefer_final_parameters, require_trailing_commas, non_constant_identifier_names + +part of 'schema.dart'; + +// ************************************************************************** +// JsonSerializableGenerator +// ************************************************************************** + +_$CreateCompletionRequestImpl _$$CreateCompletionRequestImplFromJson( + Map json) => + _$CreateCompletionRequestImpl( + model: const _CompletionModelConverter().fromJson(json['model']), + prompt: const _CompletionPromptConverter().fromJson(json['prompt']), + bestOf: json['best_of'] as 
int?, + echo: json['echo'] as bool? ?? false, + frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, + logitBias: (json['logit_bias'] as Map?)?.map( + (k, e) => MapEntry(k, e as int), + ), + logprobs: json['logprobs'] as int?, + maxTokens: json['max_tokens'] as int? ?? 16, + n: json['n'] as int? ?? 1, + presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, + stop: const _CompletionStopConverter().fromJson(json['stop']), + stream: json['stream'] as bool? ?? false, + suffix: json['suffix'] as String?, + temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 1.0, + user: json['user'] as String?, + ); + +Map _$$CreateCompletionRequestImplToJson( + _$CreateCompletionRequestImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'model', const _CompletionModelConverter().toJson(instance.model)); + writeNotNull( + 'prompt', const _CompletionPromptConverter().toJson(instance.prompt)); + writeNotNull('best_of', instance.bestOf); + writeNotNull('echo', instance.echo); + writeNotNull('frequency_penalty', instance.frequencyPenalty); + writeNotNull('logit_bias', instance.logitBias); + writeNotNull('logprobs', instance.logprobs); + writeNotNull('max_tokens', instance.maxTokens); + writeNotNull('n', instance.n); + writeNotNull('presence_penalty', instance.presencePenalty); + writeNotNull('stop', const _CompletionStopConverter().toJson(instance.stop)); + writeNotNull('stream', instance.stream); + writeNotNull('suffix', instance.suffix); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); + writeNotNull('user', instance.user); + return val; +} + +_$UnionCompletionModelStringImpl _$$UnionCompletionModelStringImplFromJson( + Map json) => + _$UnionCompletionModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map 
_$$UnionCompletionModelStringImplToJson( + _$UnionCompletionModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionModelEnumImpl _$$UnionCompletionModelEnumImplFromJson( + Map json) => + _$UnionCompletionModelEnumImpl( + $enumDecode(_$CompletionModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionModelEnumImplToJson( + _$UnionCompletionModelEnumImpl instance) => + { + 'value': _$CompletionModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$CompletionModelsEnumMap = { + CompletionModels.babbage002: 'babbage-002', + CompletionModels.davinci002: 'davinci-002', + CompletionModels.gpt35TurboInstruct: 'gpt-3.5-turbo-instruct', + CompletionModels.textDavinci003: 'text-davinci-003', + CompletionModels.textDavinci002: 'text-davinci-002', + CompletionModels.textDavinci001: 'text-davinci-001', + CompletionModels.codeDavinci002: 'code-davinci-002', + CompletionModels.textCurie001: 'text-curie-001', + CompletionModels.textBabbage001: 'text-babbage-001', + CompletionModels.textAda001: 'text-ada-001', +}; + +_$UnionCompletionPromptStringImpl _$$UnionCompletionPromptStringImplFromJson( + Map json) => + _$UnionCompletionPromptStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionPromptStringImplToJson( + _$UnionCompletionPromptStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionPromptArrayStringImpl + _$$UnionCompletionPromptArrayStringImplFromJson( + Map json) => + _$UnionCompletionPromptArrayStringImpl( + (json['value'] as List).map((e) => e as String).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionPromptArrayStringImplToJson( + _$UnionCompletionPromptArrayStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionPromptArrayIntegerImpl + 
_$$UnionCompletionPromptArrayIntegerImplFromJson( + Map json) => + _$UnionCompletionPromptArrayIntegerImpl( + (json['value'] as List).map((e) => e as int).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionPromptArrayIntegerImplToJson( + _$UnionCompletionPromptArrayIntegerImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionPromptArrayImpl _$$UnionCompletionPromptArrayImplFromJson( + Map json) => + _$UnionCompletionPromptArrayImpl( + (json['value'] as List) + .map((e) => (e as List).map((e) => e as int).toList()) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionPromptArrayImplToJson( + _$UnionCompletionPromptArrayImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionStopStringImpl _$$UnionCompletionStopStringImplFromJson( + Map json) => + _$UnionCompletionStopStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionStopStringImplToJson( + _$UnionCompletionStopStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionCompletionStopArrayStringImpl + _$$UnionCompletionStopArrayStringImplFromJson(Map json) => + _$UnionCompletionStopArrayStringImpl( + (json['value'] as List).map((e) => e as String).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionCompletionStopArrayStringImplToJson( + _$UnionCompletionStopArrayStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateCompletionResponseImpl _$$CreateCompletionResponseImplFromJson( + Map json) => + _$CreateCompletionResponseImpl( + id: json['id'] as String, + choices: (json['choices'] as List) + .map((e) => CompletionChoice.fromJson(e as Map)) + .toList(), + created: json['created'] as int, + model: json['model'] as String, + object: json['object'] as String, + usage: json['usage'] == null 
+ ? null + : CompletionUsage.fromJson(json['usage'] as Map), + ); + +Map _$$CreateCompletionResponseImplToJson( + _$CreateCompletionResponseImpl instance) { + final val = { + 'id': instance.id, + 'choices': instance.choices.map((e) => e.toJson()).toList(), + 'created': instance.created, + 'model': instance.model, + 'object': instance.object, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('usage', instance.usage?.toJson()); + return val; +} + +_$CompletionChoiceImpl _$$CompletionChoiceImplFromJson( + Map json) => + _$CompletionChoiceImpl( + finishReason: $enumDecodeNullable( + _$CompletionFinishReasonEnumMap, json['finish_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + index: json['index'] as int, + logprobs: json['logprobs'] == null + ? null + : CompletionLogprobs.fromJson( + json['logprobs'] as Map), + text: json['text'] as String, + ); + +Map _$$CompletionChoiceImplToJson( + _$CompletionChoiceImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'finish_reason', _$CompletionFinishReasonEnumMap[instance.finishReason]); + val['index'] = instance.index; + writeNotNull('logprobs', instance.logprobs?.toJson()); + val['text'] = instance.text; + return val; +} + +const _$CompletionFinishReasonEnumMap = { + CompletionFinishReason.stop: 'stop', + CompletionFinishReason.length: 'length', + CompletionFinishReason.contentFilter: 'content_filter', +}; + +_$CompletionLogprobsImpl _$$CompletionLogprobsImplFromJson( + Map json) => + _$CompletionLogprobsImpl( + textOffset: (json['text_offset'] as List?) + ?.map((e) => e as int) + .toList(), + tokenLogprobs: (json['token_logprobs'] as List?) + ?.map((e) => (e as num?)?.toDouble()) + .toList(), + tokens: + (json['tokens'] as List?)?.map((e) => e as String).toList(), + topLogprobs: (json['top_logprobs'] as List?) 
+ ?.map((e) => (e as Map?)?.map( + (k, e) => MapEntry(k, (e as num).toDouble()), + )) + .toList(), + ); + +Map _$$CompletionLogprobsImplToJson( + _$CompletionLogprobsImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('text_offset', instance.textOffset); + writeNotNull('token_logprobs', instance.tokenLogprobs); + writeNotNull('tokens', instance.tokens); + writeNotNull('top_logprobs', instance.topLogprobs); + return val; +} + +_$CreateChatCompletionRequestImpl _$$CreateChatCompletionRequestImplFromJson( + Map json) => + _$CreateChatCompletionRequestImpl( + model: const _ChatCompletionModelConverter().fromJson(json['model']), + messages: (json['messages'] as List) + .map((e) => ChatCompletionMessage.fromJson(e as Map)) + .toList(), + frequencyPenalty: (json['frequency_penalty'] as num?)?.toDouble() ?? 0.0, + functionCall: json['function_call'], + functions: (json['functions'] as List?) + ?.map((e) => + ChatCompletionFunctions.fromJson(e as Map)) + .toList(), + logitBias: (json['logit_bias'] as Map?)?.map( + (k, e) => MapEntry(k, e as int), + ), + maxTokens: json['max_tokens'] as int?, + n: json['n'] as int? ?? 1, + presencePenalty: (json['presence_penalty'] as num?)?.toDouble() ?? 0.0, + stop: const _ChatCompletionStopConverter().fromJson(json['stop']), + stream: json['stream'] as bool? ?? false, + temperature: (json['temperature'] as num?)?.toDouble() ?? 1.0, + topP: (json['top_p'] as num?)?.toDouble() ?? 
1.0, + user: json['user'] as String?, + ); + +Map _$$CreateChatCompletionRequestImplToJson( + _$CreateChatCompletionRequestImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'model', const _ChatCompletionModelConverter().toJson(instance.model)); + val['messages'] = instance.messages.map((e) => e.toJson()).toList(); + writeNotNull('frequency_penalty', instance.frequencyPenalty); + writeNotNull('function_call', instance.functionCall); + writeNotNull( + 'functions', instance.functions?.map((e) => e.toJson()).toList()); + writeNotNull('logit_bias', instance.logitBias); + writeNotNull('max_tokens', instance.maxTokens); + writeNotNull('n', instance.n); + writeNotNull('presence_penalty', instance.presencePenalty); + writeNotNull( + 'stop', + _$JsonConverterToJson( + instance.stop, const _ChatCompletionStopConverter().toJson)); + writeNotNull('stream', instance.stream); + writeNotNull('temperature', instance.temperature); + writeNotNull('top_p', instance.topP); + writeNotNull('user', instance.user); + return val; +} + +Json? _$JsonConverterToJson( + Value? value, + Json? Function(Value value) toJson, +) => + value == null ? 
null : toJson(value); + +_$UnionChatCompletionModelStringImpl + _$$UnionChatCompletionModelStringImplFromJson(Map json) => + _$UnionChatCompletionModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionChatCompletionModelStringImplToJson( + _$UnionChatCompletionModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionChatCompletionModelEnumImpl _$$UnionChatCompletionModelEnumImplFromJson( + Map json) => + _$UnionChatCompletionModelEnumImpl( + $enumDecode(_$ChatCompletionModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionChatCompletionModelEnumImplToJson( + _$UnionChatCompletionModelEnumImpl instance) => + { + 'value': _$ChatCompletionModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ChatCompletionModelsEnumMap = { + ChatCompletionModels.gpt4: 'gpt-4', + ChatCompletionModels.gpt40314: 'gpt-4-0314', + ChatCompletionModels.gpt40613: 'gpt-4-0613', + ChatCompletionModels.gpt432k: 'gpt-4-32k', + ChatCompletionModels.gpt432k0314: 'gpt-4-32k-0314', + ChatCompletionModels.gpt432k0613: 'gpt-4-32k-0613', + ChatCompletionModels.gpt35Turbo: 'gpt-3.5-turbo', + ChatCompletionModels.gpt35Turbo16k: 'gpt-3.5-turbo-16k', + ChatCompletionModels.gpt35Turbo0301: 'gpt-3.5-turbo-0301', + ChatCompletionModels.gpt35Turbo0613: 'gpt-3.5-turbo-0613', + ChatCompletionModels.gpt35Turbo16k0613: 'gpt-3.5-turbo-16k-0613', +}; + +_$UnionChatCompletionStopStringImpl + _$$UnionChatCompletionStopStringImplFromJson(Map json) => + _$UnionChatCompletionStopStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionChatCompletionStopStringImplToJson( + _$UnionChatCompletionStopStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionChatCompletionStopArrayStringImpl + _$$UnionChatCompletionStopArrayStringImplFromJson( + Map json) => + 
_$UnionChatCompletionStopArrayStringImpl( + (json['value'] as List).map((e) => e as String).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionChatCompletionStopArrayStringImplToJson( + _$UnionChatCompletionStopArrayStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$ChatCompletionMessageImpl _$$ChatCompletionMessageImplFromJson( + Map json) => + _$ChatCompletionMessageImpl( + role: $enumDecode(_$ChatCompletionMessageRoleEnumMap, json['role']), + content: json['content'] as String?, + functionCall: json['function_call'] == null + ? null + : ChatCompletionMessageFunctionCall.fromJson( + json['function_call'] as Map), + name: json['name'] as String?, + ); + +Map _$$ChatCompletionMessageImplToJson( + _$ChatCompletionMessageImpl instance) { + final val = { + 'role': _$ChatCompletionMessageRoleEnumMap[instance.role]!, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content); + writeNotNull('function_call', instance.functionCall?.toJson()); + writeNotNull('name', instance.name); + return val; +} + +const _$ChatCompletionMessageRoleEnumMap = { + ChatCompletionMessageRole.system: 'system', + ChatCompletionMessageRole.user: 'user', + ChatCompletionMessageRole.assistant: 'assistant', + ChatCompletionMessageRole.function: 'function', +}; + +_$ChatCompletionMessageFunctionCallImpl + _$$ChatCompletionMessageFunctionCallImplFromJson( + Map json) => + _$ChatCompletionMessageFunctionCallImpl( + name: json['name'] as String, + arguments: json['arguments'] as String, + ); + +Map _$$ChatCompletionMessageFunctionCallImplToJson( + _$ChatCompletionMessageFunctionCallImpl instance) => + { + 'name': instance.name, + 'arguments': instance.arguments, + }; + +_$ChatCompletionFunctionsImpl _$$ChatCompletionFunctionsImplFromJson( + Map json) => + _$ChatCompletionFunctionsImpl( + name: json['name'] as String, + description: 
json['description'] as String?, + parameters: json['parameters'] as Map, + ); + +Map _$$ChatCompletionFunctionsImplToJson( + _$ChatCompletionFunctionsImpl instance) { + final val = { + 'name': instance.name, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('description', instance.description); + val['parameters'] = instance.parameters; + return val; +} + +_$ChatCompletionFunctionCallOptionImpl + _$$ChatCompletionFunctionCallOptionImplFromJson( + Map json) => + _$ChatCompletionFunctionCallOptionImpl( + name: json['name'] as String, + ); + +Map _$$ChatCompletionFunctionCallOptionImplToJson( + _$ChatCompletionFunctionCallOptionImpl instance) => + { + 'name': instance.name, + }; + +_$CreateChatCompletionResponseImpl _$$CreateChatCompletionResponseImplFromJson( + Map json) => + _$CreateChatCompletionResponseImpl( + id: json['id'] as String, + choices: (json['choices'] as List) + .map((e) => + ChatCompletionResponseChoice.fromJson(e as Map)) + .toList(), + created: json['created'] as int, + model: json['model'] as String, + object: json['object'] as String, + usage: json['usage'] == null + ? 
null + : CompletionUsage.fromJson(json['usage'] as Map), + ); + +Map _$$CreateChatCompletionResponseImplToJson( + _$CreateChatCompletionResponseImpl instance) { + final val = { + 'id': instance.id, + 'choices': instance.choices.map((e) => e.toJson()).toList(), + 'created': instance.created, + 'model': instance.model, + 'object': instance.object, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('usage', instance.usage?.toJson()); + return val; +} + +_$ChatCompletionResponseChoiceImpl _$$ChatCompletionResponseChoiceImplFromJson( + Map json) => + _$ChatCompletionResponseChoiceImpl( + finishReason: $enumDecode( + _$ChatCompletionFinishReasonEnumMap, json['finish_reason']), + index: json['index'] as int, + message: ChatCompletionMessage.fromJson( + json['message'] as Map), + ); + +Map _$$ChatCompletionResponseChoiceImplToJson( + _$ChatCompletionResponseChoiceImpl instance) => + { + 'finish_reason': + _$ChatCompletionFinishReasonEnumMap[instance.finishReason]!, + 'index': instance.index, + 'message': instance.message.toJson(), + }; + +const _$ChatCompletionFinishReasonEnumMap = { + ChatCompletionFinishReason.stop: 'stop', + ChatCompletionFinishReason.length: 'length', + ChatCompletionFinishReason.functionCall: 'function_call', + ChatCompletionFinishReason.contentFilter: 'content_filter', +}; + +_$CreateChatCompletionStreamResponseImpl + _$$CreateChatCompletionStreamResponseImplFromJson( + Map json) => + _$CreateChatCompletionStreamResponseImpl( + id: json['id'] as String, + choices: (json['choices'] as List) + .map((e) => ChatCompletionStreamResponseChoice.fromJson( + e as Map)) + .toList(), + created: json['created'] as int, + model: json['model'] as String, + object: json['object'] as String, + ); + +Map _$$CreateChatCompletionStreamResponseImplToJson( + _$CreateChatCompletionStreamResponseImpl instance) => + { + 'id': instance.id, + 'choices': instance.choices.map((e) => e.toJson()).toList(), + 
'created': instance.created, + 'model': instance.model, + 'object': instance.object, + }; + +_$ChatCompletionStreamResponseChoiceImpl + _$$ChatCompletionStreamResponseChoiceImplFromJson( + Map json) => + _$ChatCompletionStreamResponseChoiceImpl( + delta: ChatCompletionStreamResponseDelta.fromJson( + json['delta'] as Map), + finishReason: $enumDecodeNullable( + _$ChatCompletionFinishReasonEnumMap, json['finish_reason'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + index: json['index'] as int, + ); + +Map _$$ChatCompletionStreamResponseChoiceImplToJson( + _$ChatCompletionStreamResponseChoiceImpl instance) { + final val = { + 'delta': instance.delta.toJson(), + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('finish_reason', + _$ChatCompletionFinishReasonEnumMap[instance.finishReason]); + val['index'] = instance.index; + return val; +} + +_$ChatCompletionStreamResponseDeltaImpl + _$$ChatCompletionStreamResponseDeltaImplFromJson( + Map json) => + _$ChatCompletionStreamResponseDeltaImpl( + content: json['content'] as String?, + functionCall: json['function_call'] == null + ? 
null + : ChatCompletionMessageFunctionCall.fromJson( + json['function_call'] as Map), + role: $enumDecodeNullable( + _$ChatCompletionMessageRoleEnumMap, json['role'], + unknownValue: JsonKey.nullForUndefinedEnumValue), + ); + +Map _$$ChatCompletionStreamResponseDeltaImplToJson( + _$ChatCompletionStreamResponseDeltaImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('content', instance.content); + writeNotNull('function_call', instance.functionCall?.toJson()); + writeNotNull('role', _$ChatCompletionMessageRoleEnumMap[instance.role]); + return val; +} + +_$CompletionUsageImpl _$$CompletionUsageImplFromJson( + Map json) => + _$CompletionUsageImpl( + completionTokens: json['completion_tokens'] as int?, + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, + ); + +Map _$$CompletionUsageImplToJson( + _$CompletionUsageImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('completion_tokens', instance.completionTokens); + val['prompt_tokens'] = instance.promptTokens; + val['total_tokens'] = instance.totalTokens; + return val; +} + +_$CreateEmbeddingRequestImpl _$$CreateEmbeddingRequestImplFromJson( + Map json) => + _$CreateEmbeddingRequestImpl( + model: const _EmbeddingModelConverter().fromJson(json['model']), + input: const _EmbeddingInputConverter().fromJson(json['input']), + encodingFormat: $enumDecodeNullable( + _$EmbeddingEncodingFormatEnumMap, json['encoding_format']) ?? 
+ EmbeddingEncodingFormat.float, + user: json['user'] as String?, + ); + +Map _$$CreateEmbeddingRequestImplToJson( + _$CreateEmbeddingRequestImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'model', const _EmbeddingModelConverter().toJson(instance.model)); + writeNotNull( + 'input', const _EmbeddingInputConverter().toJson(instance.input)); + val['encoding_format'] = + _$EmbeddingEncodingFormatEnumMap[instance.encodingFormat]!; + writeNotNull('user', instance.user); + return val; +} + +const _$EmbeddingEncodingFormatEnumMap = { + EmbeddingEncodingFormat.float: 'float', + EmbeddingEncodingFormat.base64: 'base64', +}; + +_$UnionEmbeddingModelStringImpl _$$UnionEmbeddingModelStringImplFromJson( + Map json) => + _$UnionEmbeddingModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingModelStringImplToJson( + _$UnionEmbeddingModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionEmbeddingModelEnumImpl _$$UnionEmbeddingModelEnumImplFromJson( + Map json) => + _$UnionEmbeddingModelEnumImpl( + $enumDecode(_$EmbeddingModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingModelEnumImplToJson( + _$UnionEmbeddingModelEnumImpl instance) => + { + 'value': _$EmbeddingModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$EmbeddingModelsEnumMap = { + EmbeddingModels.textEmbeddingAda002: 'text-embedding-ada-002', +}; + +_$UnionEmbeddingInputStringImpl _$$UnionEmbeddingInputStringImplFromJson( + Map json) => + _$UnionEmbeddingInputStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingInputStringImplToJson( + _$UnionEmbeddingInputStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + 
+_$UnionEmbeddingInputArrayStringImpl + _$$UnionEmbeddingInputArrayStringImplFromJson(Map json) => + _$UnionEmbeddingInputArrayStringImpl( + (json['value'] as List).map((e) => e as String).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingInputArrayStringImplToJson( + _$UnionEmbeddingInputArrayStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionEmbeddingInputArrayIntegerImpl + _$$UnionEmbeddingInputArrayIntegerImplFromJson(Map json) => + _$UnionEmbeddingInputArrayIntegerImpl( + (json['value'] as List).map((e) => e as int).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingInputArrayIntegerImplToJson( + _$UnionEmbeddingInputArrayIntegerImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionEmbeddingInputArrayImpl _$$UnionEmbeddingInputArrayImplFromJson( + Map json) => + _$UnionEmbeddingInputArrayImpl( + (json['value'] as List) + .map((e) => (e as List).map((e) => e as int).toList()) + .toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionEmbeddingInputArrayImplToJson( + _$UnionEmbeddingInputArrayImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateEmbeddingResponseImpl _$$CreateEmbeddingResponseImplFromJson( + Map json) => + _$CreateEmbeddingResponseImpl( + data: (json['data'] as List) + .map((e) => Embedding.fromJson(e as Map)) + .toList(), + model: json['model'] as String, + object: json['object'] as String, + usage: EmbeddingUsage.fromJson(json['usage'] as Map), + ); + +Map _$$CreateEmbeddingResponseImplToJson( + _$CreateEmbeddingResponseImpl instance) => + { + 'data': instance.data.map((e) => e.toJson()).toList(), + 'model': instance.model, + 'object': instance.object, + 'usage': instance.usage.toJson(), + }; + +_$EmbeddingImpl _$$EmbeddingImplFromJson(Map json) => + _$EmbeddingImpl( + index: json['index'] as int, + embedding: (json['embedding'] as 
List) + .map((e) => (e as num).toDouble()) + .toList(), + object: json['object'] as String, + ); + +Map _$$EmbeddingImplToJson(_$EmbeddingImpl instance) => + { + 'index': instance.index, + 'embedding': instance.embedding, + 'object': instance.object, + }; + +_$EmbeddingUsageImpl _$$EmbeddingUsageImplFromJson(Map json) => + _$EmbeddingUsageImpl( + promptTokens: json['prompt_tokens'] as int, + totalTokens: json['total_tokens'] as int, + ); + +Map _$$EmbeddingUsageImplToJson( + _$EmbeddingUsageImpl instance) => + { + 'prompt_tokens': instance.promptTokens, + 'total_tokens': instance.totalTokens, + }; + +_$CreateFineTuningJobRequestImpl _$$CreateFineTuningJobRequestImplFromJson( + Map json) => + _$CreateFineTuningJobRequestImpl( + model: const _FineTuningModelConverter().fromJson(json['model']), + trainingFile: json['training_file'] as String, + hyperparameters: json['hyperparameters'] == null + ? null + : FineTuningJobHyperparameters.fromJson( + json['hyperparameters'] as Map), + suffix: json['suffix'] as String?, + validationFile: json['validation_file'] as String?, + ); + +Map _$$CreateFineTuningJobRequestImplToJson( + _$CreateFineTuningJobRequestImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'model', const _FineTuningModelConverter().toJson(instance.model)); + val['training_file'] = instance.trainingFile; + writeNotNull('hyperparameters', instance.hyperparameters?.toJson()); + writeNotNull('suffix', instance.suffix); + writeNotNull('validation_file', instance.validationFile); + return val; +} + +_$UnionFineTuningModelStringImpl _$$UnionFineTuningModelStringImplFromJson( + Map json) => + _$UnionFineTuningModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionFineTuningModelStringImplToJson( + _$UnionFineTuningModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + 
+_$UnionFineTuningModelEnumImpl _$$UnionFineTuningModelEnumImplFromJson( + Map json) => + _$UnionFineTuningModelEnumImpl( + $enumDecode(_$FineTuningModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionFineTuningModelEnumImplToJson( + _$UnionFineTuningModelEnumImpl instance) => + { + 'value': _$FineTuningModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$FineTuningModelsEnumMap = { + FineTuningModels.babbage002: 'babbage-002', + FineTuningModels.davinci002: 'davinci-002', + FineTuningModels.gpt35Turbo: 'gpt-3.5-turbo', +}; + +_$FineTuningJobImpl _$$FineTuningJobImplFromJson(Map json) => + _$FineTuningJobImpl( + id: json['id'] as String, + createdAt: json['created_at'] as int, + error: json['error'] == null + ? null + : FineTuningJobError.fromJson(json['error'] as Map), + fineTunedModel: json['fine_tuned_model'] as String?, + finishedAt: json['finished_at'] as int?, + hyperparameters: FineTuningJobHyperparameters.fromJson( + json['hyperparameters'] as Map), + model: json['model'] as String, + object: json['object'] as String, + organizationId: json['organization_id'] as String, + resultFiles: (json['result_files'] as List) + .map((e) => e as String) + .toList(), + status: $enumDecode(_$FineTuningJobStatusEnumMap, json['status']), + trainedTokens: json['trained_tokens'] as int?, + trainingFile: json['training_file'] as String, + validationFile: json['validation_file'] as String?, + ); + +Map _$$FineTuningJobImplToJson(_$FineTuningJobImpl instance) { + final val = { + 'id': instance.id, + 'created_at': instance.createdAt, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('error', instance.error?.toJson()); + writeNotNull('fine_tuned_model', instance.fineTunedModel); + writeNotNull('finished_at', instance.finishedAt); + val['hyperparameters'] = instance.hyperparameters.toJson(); + val['model'] = instance.model; + val['object'] = 
instance.object; + val['organization_id'] = instance.organizationId; + val['result_files'] = instance.resultFiles; + val['status'] = _$FineTuningJobStatusEnumMap[instance.status]!; + writeNotNull('trained_tokens', instance.trainedTokens); + val['training_file'] = instance.trainingFile; + writeNotNull('validation_file', instance.validationFile); + return val; +} + +const _$FineTuningJobStatusEnumMap = { + FineTuningJobStatus.validatingFiles: 'validating_files', + FineTuningJobStatus.queued: 'queued', + FineTuningJobStatus.running: 'running', + FineTuningJobStatus.succeeded: 'succeeded', + FineTuningJobStatus.failed: 'failed', + FineTuningJobStatus.cancelled: 'cancelled', +}; + +_$FineTuningJobErrorImpl _$$FineTuningJobErrorImplFromJson( + Map json) => + _$FineTuningJobErrorImpl( + code: json['code'] as String, + message: json['message'] as String, + param: json['param'] as String?, + ); + +Map _$$FineTuningJobErrorImplToJson( + _$FineTuningJobErrorImpl instance) { + final val = { + 'code': instance.code, + 'message': instance.message, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('param', instance.param); + return val; +} + +_$FineTuningJobHyperparametersImpl _$$FineTuningJobHyperparametersImplFromJson( + Map json) => + _$FineTuningJobHyperparametersImpl( + nEpochs: const _FineTuningNEpochsConverter().fromJson(json['n_epochs']), + ); + +Map _$$FineTuningJobHyperparametersImplToJson( + _$FineTuningJobHyperparametersImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'n_epochs', const _FineTuningNEpochsConverter().toJson(instance.nEpochs)); + return val; +} + +_$UnionFineTuningNEpochsEnumImpl _$$UnionFineTuningNEpochsEnumImplFromJson( + Map json) => + _$UnionFineTuningNEpochsEnumImpl( + $enumDecode(_$FineTuningNEpochsOptionsEnumMap, json['value']), + $type: json['runtimeType'] as String?, 
+ ); + +Map _$$UnionFineTuningNEpochsEnumImplToJson( + _$UnionFineTuningNEpochsEnumImpl instance) => + { + 'value': _$FineTuningNEpochsOptionsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$FineTuningNEpochsOptionsEnumMap = { + FineTuningNEpochsOptions.auto: 'auto', +}; + +_$UnionFineTuningNEpochsIntegerImpl + _$$UnionFineTuningNEpochsIntegerImplFromJson(Map json) => + _$UnionFineTuningNEpochsIntegerImpl( + json['value'] as int, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionFineTuningNEpochsIntegerImplToJson( + _$UnionFineTuningNEpochsIntegerImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$ListPaginatedFineTuningJobsResponseImpl + _$$ListPaginatedFineTuningJobsResponseImplFromJson( + Map json) => + _$ListPaginatedFineTuningJobsResponseImpl( + data: (json['data'] as List) + .map((e) => FineTuningJob.fromJson(e as Map)) + .toList(), + hasMore: json['has_more'] as bool, + object: json['object'] as String, + ); + +Map _$$ListPaginatedFineTuningJobsResponseImplToJson( + _$ListPaginatedFineTuningJobsResponseImpl instance) => + { + 'data': instance.data.map((e) => e.toJson()).toList(), + 'has_more': instance.hasMore, + 'object': instance.object, + }; + +_$ListFineTuningJobEventsResponseImpl + _$$ListFineTuningJobEventsResponseImplFromJson(Map json) => + _$ListFineTuningJobEventsResponseImpl( + data: (json['data'] as List) + .map( + (e) => FineTuningJobEvent.fromJson(e as Map)) + .toList(), + object: json['object'] as String, + ); + +Map _$$ListFineTuningJobEventsResponseImplToJson( + _$ListFineTuningJobEventsResponseImpl instance) => + { + 'data': instance.data.map((e) => e.toJson()).toList(), + 'object': instance.object, + }; + +_$FineTuningJobEventImpl _$$FineTuningJobEventImplFromJson( + Map json) => + _$FineTuningJobEventImpl( + id: json['id'] as String, + createdAt: json['created_at'] as int, + level: $enumDecode(_$FineTuningJobEventLevelEnumMap, json['level']), + message: 
json['message'] as String, + object: json['object'] as String, + ); + +Map _$$FineTuningJobEventImplToJson( + _$FineTuningJobEventImpl instance) => + { + 'id': instance.id, + 'created_at': instance.createdAt, + 'level': _$FineTuningJobEventLevelEnumMap[instance.level]!, + 'message': instance.message, + 'object': instance.object, + }; + +const _$FineTuningJobEventLevelEnumMap = { + FineTuningJobEventLevel.info: 'info', + FineTuningJobEventLevel.warn: 'warn', + FineTuningJobEventLevel.error: 'error', +}; + +_$CreateImageRequestImpl _$$CreateImageRequestImplFromJson( + Map json) => + _$CreateImageRequestImpl( + prompt: json['prompt'] as String, + n: json['n'] as int? ?? 1, + responseFormat: $enumDecodeNullable( + _$ImageResponseFormatEnumMap, json['response_format'], + unknownValue: JsonKey.nullForUndefinedEnumValue) ?? + ImageResponseFormat.url, + size: $enumDecodeNullable(_$ImageSizeEnumMap, json['size'], + unknownValue: JsonKey.nullForUndefinedEnumValue) ?? + ImageSize.v1024x1024, + user: json['user'] as String?, + ); + +Map _$$CreateImageRequestImplToJson( + _$CreateImageRequestImpl instance) { + final val = { + 'prompt': instance.prompt, + }; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('n', instance.n); + writeNotNull( + 'response_format', _$ImageResponseFormatEnumMap[instance.responseFormat]); + writeNotNull('size', _$ImageSizeEnumMap[instance.size]); + writeNotNull('user', instance.user); + return val; +} + +const _$ImageResponseFormatEnumMap = { + ImageResponseFormat.url: 'url', + ImageResponseFormat.b64Json: 'b64_json', +}; + +const _$ImageSizeEnumMap = { + ImageSize.v256x256: '256x256', + ImageSize.v512x512: '512x512', + ImageSize.v1024x1024: '1024x1024', +}; + +_$ImagesResponseImpl _$$ImagesResponseImplFromJson(Map json) => + _$ImagesResponseImpl( + created: json['created'] as int, + data: (json['data'] as List) + .map((e) => Image.fromJson(e as Map)) + .toList(), + ); + +Map 
_$$ImagesResponseImplToJson( + _$ImagesResponseImpl instance) => + { + 'created': instance.created, + 'data': instance.data.map((e) => e.toJson()).toList(), + }; + +_$ImageImpl _$$ImageImplFromJson(Map json) => _$ImageImpl( + b64Json: json['b64_json'] as String?, + url: json['url'] as String?, + ); + +Map _$$ImageImplToJson(_$ImageImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull('b64_json', instance.b64Json); + writeNotNull('url', instance.url); + return val; +} + +_$ModelImpl _$$ModelImplFromJson(Map json) => _$ModelImpl( + id: json['id'] as String, + created: json['created'] as int, + object: json['object'] as String, + ownedBy: json['owned_by'] as String, + ); + +Map _$$ModelImplToJson(_$ModelImpl instance) => + { + 'id': instance.id, + 'created': instance.created, + 'object': instance.object, + 'owned_by': instance.ownedBy, + }; + +_$ListModelsResponseImpl _$$ListModelsResponseImplFromJson( + Map json) => + _$ListModelsResponseImpl( + object: json['object'] as String, + data: (json['data'] as List) + .map((e) => Model.fromJson(e as Map)) + .toList(), + ); + +Map _$$ListModelsResponseImplToJson( + _$ListModelsResponseImpl instance) => + { + 'object': instance.object, + 'data': instance.data.map((e) => e.toJson()).toList(), + }; + +_$DeleteModelResponseImpl _$$DeleteModelResponseImplFromJson( + Map json) => + _$DeleteModelResponseImpl( + id: json['id'] as String, + deleted: json['deleted'] as bool, + object: json['object'] as String, + ); + +Map _$$DeleteModelResponseImplToJson( + _$DeleteModelResponseImpl instance) => + { + 'id': instance.id, + 'deleted': instance.deleted, + 'object': instance.object, + }; + +_$CreateModerationRequestImpl _$$CreateModerationRequestImplFromJson( + Map json) => + _$CreateModerationRequestImpl( + model: json['model'] == null + ? 
const ModerationModel.string('text-moderation-latest') + : const _ModerationModelConverter().fromJson(json['model']), + input: const _ModerationInputConverter().fromJson(json['input']), + ); + +Map _$$CreateModerationRequestImplToJson( + _$CreateModerationRequestImpl instance) { + final val = {}; + + void writeNotNull(String key, dynamic value) { + if (value != null) { + val[key] = value; + } + } + + writeNotNull( + 'model', const _ModerationModelConverter().toJson(instance.model)); + writeNotNull( + 'input', const _ModerationInputConverter().toJson(instance.input)); + return val; +} + +_$UnionModerationModelStringImpl _$$UnionModerationModelStringImplFromJson( + Map json) => + _$UnionModerationModelStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionModerationModelStringImplToJson( + _$UnionModerationModelStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionModerationModelEnumImpl _$$UnionModerationModelEnumImplFromJson( + Map json) => + _$UnionModerationModelEnumImpl( + $enumDecode(_$ModerationModelsEnumMap, json['value']), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionModerationModelEnumImplToJson( + _$UnionModerationModelEnumImpl instance) => + { + 'value': _$ModerationModelsEnumMap[instance.value]!, + 'runtimeType': instance.$type, + }; + +const _$ModerationModelsEnumMap = { + ModerationModels.textModerationLatest: 'text-moderation-latest', + ModerationModels.textModerationStable: 'text-moderation-stable', +}; + +_$UnionModerationInputStringImpl _$$UnionModerationInputStringImplFromJson( + Map json) => + _$UnionModerationInputStringImpl( + json['value'] as String, + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionModerationInputStringImplToJson( + _$UnionModerationInputStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$UnionModerationInputArrayStringImpl + 
_$$UnionModerationInputArrayStringImplFromJson(Map json) => + _$UnionModerationInputArrayStringImpl( + (json['value'] as List).map((e) => e as String).toList(), + $type: json['runtimeType'] as String?, + ); + +Map _$$UnionModerationInputArrayStringImplToJson( + _$UnionModerationInputArrayStringImpl instance) => + { + 'value': instance.value, + 'runtimeType': instance.$type, + }; + +_$CreateModerationResponseImpl _$$CreateModerationResponseImplFromJson( + Map json) => + _$CreateModerationResponseImpl( + id: json['id'] as String, + model: json['model'] as String, + results: (json['results'] as List) + .map((e) => Moderation.fromJson(e as Map)) + .toList(), + ); + +Map _$$CreateModerationResponseImplToJson( + _$CreateModerationResponseImpl instance) => + { + 'id': instance.id, + 'model': instance.model, + 'results': instance.results.map((e) => e.toJson()).toList(), + }; + +_$ModerationImpl _$$ModerationImplFromJson(Map json) => + _$ModerationImpl( + flagged: json['flagged'] as bool, + categories: ModerationCategories.fromJson( + json['categories'] as Map), + categoryScores: ModerationCategoriesScores.fromJson( + json['category_scores'] as Map), + ); + +Map _$$ModerationImplToJson(_$ModerationImpl instance) => + { + 'flagged': instance.flagged, + 'categories': instance.categories.toJson(), + 'category_scores': instance.categoryScores.toJson(), + }; + +_$ModerationCategoriesImpl _$$ModerationCategoriesImplFromJson( + Map json) => + _$ModerationCategoriesImpl( + hate: json['hate'] as bool, + hateThreatening: json['hate/threatening'] as bool, + harassment: json['harassment'] as bool, + harassmentThreatening: json['harassment/threatening'] as bool, + selfHarm: json['self-harm'] as bool, + selfHarmIntent: json['self-harm/intent'] as bool, + selfHarmInstructions: json['self-harm/instructions'] as bool, + sexual: json['sexual'] as bool, + sexualMinors: json['sexual/minors'] as bool, + violence: json['violence'] as bool, + violenceGraphic: json['violence/graphic'] as bool, + 
); + +Map _$$ModerationCategoriesImplToJson( + _$ModerationCategoriesImpl instance) => + { + 'hate': instance.hate, + 'hate/threatening': instance.hateThreatening, + 'harassment': instance.harassment, + 'harassment/threatening': instance.harassmentThreatening, + 'self-harm': instance.selfHarm, + 'self-harm/intent': instance.selfHarmIntent, + 'self-harm/instructions': instance.selfHarmInstructions, + 'sexual': instance.sexual, + 'sexual/minors': instance.sexualMinors, + 'violence': instance.violence, + 'violence/graphic': instance.violenceGraphic, + }; + +_$ModerationCategoriesScoresImpl _$$ModerationCategoriesScoresImplFromJson( + Map json) => + _$ModerationCategoriesScoresImpl( + hate: (json['hate'] as num).toDouble(), + hateThreatening: (json['hate/threatening'] as num).toDouble(), + harassment: (json['harassment'] as num).toDouble(), + harassmentThreatening: (json['harassment/threatening'] as num).toDouble(), + selfHarm: (json['self-harm'] as num).toDouble(), + selfHarmIntent: (json['self-harm/intent'] as num).toDouble(), + selfHarmInstructions: (json['self-harm/instructions'] as num).toDouble(), + sexual: (json['sexual'] as num).toDouble(), + sexualMinors: (json['sexual/minors'] as num).toDouble(), + violence: (json['violence'] as num).toDouble(), + violenceGraphic: (json['violence/graphic'] as num).toDouble(), + ); + +Map _$$ModerationCategoriesScoresImplToJson( + _$ModerationCategoriesScoresImpl instance) => + { + 'hate': instance.hate, + 'hate/threatening': instance.hateThreatening, + 'harassment': instance.harassment, + 'harassment/threatening': instance.harassmentThreatening, + 'self-harm': instance.selfHarm, + 'self-harm/intent': instance.selfHarmIntent, + 'self-harm/instructions': instance.selfHarmInstructions, + 'sexual': instance.sexual, + 'sexual/minors': instance.sexualMinors, + 'violence': instance.violence, + 'violence/graphic': instance.violenceGraphic, + }; diff --git a/packages/openai_dart/lib/src/http_client/http_client.dart 
b/packages/openai_dart/lib/src/http_client/http_client.dart new file mode 100644 index 00000000..99555ca4 --- /dev/null +++ b/packages/openai_dart/lib/src/http_client/http_client.dart @@ -0,0 +1,4 @@ +export 'http_client_stub.dart' + if (dart.library.io) 'http_client_io.dart' + if (dart.library.js) 'http_client_html.dart' + if (dart.library.html) 'http_client_html.dart'; diff --git a/packages/openai_dart/lib/src/http_client/http_client_html.dart b/packages/openai_dart/lib/src/http_client/http_client_html.dart new file mode 100644 index 00000000..e94a7ead --- /dev/null +++ b/packages/openai_dart/lib/src/http_client/http_client_html.dart @@ -0,0 +1,8 @@ +import 'package:fetch_client/fetch_client.dart' as fetch; +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; + +/// Creates a FetchClient with a retry policy. +http.Client createDefaultHttpClient() { + return RetryClient(fetch.FetchClient(mode: fetch.RequestMode.cors)); +} diff --git a/packages/openai_dart/lib/src/http_client/http_client_io.dart b/packages/openai_dart/lib/src/http_client/http_client_io.dart new file mode 100644 index 00000000..a715c59a --- /dev/null +++ b/packages/openai_dart/lib/src/http_client/http_client_io.dart @@ -0,0 +1,7 @@ +import 'package:http/http.dart' as http; +import 'package:http/retry.dart'; + +/// Creates an IOClient with a retry policy. +http.Client createDefaultHttpClient() { + return RetryClient(http.Client()); +} diff --git a/packages/openai_dart/lib/src/http_client/http_client_stub.dart b/packages/openai_dart/lib/src/http_client/http_client_stub.dart new file mode 100644 index 00000000..7054a1ac --- /dev/null +++ b/packages/openai_dart/lib/src/http_client/http_client_stub.dart @@ -0,0 +1,6 @@ +import 'package:http/http.dart' as http; + +/// Creates a default HTTP client for the current platform. 
+http.Client createDefaultHttpClient() => throw UnsupportedError( + 'Cannot create a client without dart:html or dart:io.', + ); diff --git a/packages/openai_dart/oas/main.dart b/packages/openai_dart/oas/main.dart new file mode 100644 index 00000000..39a129f6 --- /dev/null +++ b/packages/openai_dart/oas/main.dart @@ -0,0 +1,21 @@ +import 'dart:io'; + +import 'package:openapi_spec/openapi_spec.dart'; + +/// Generates OpenAI API client Dart code from the OpenAPI spec. +/// Official spec: https://github.com/openai/openai-openapi/blob/master/openapi.yaml +void main() async { + final spec = OpenApi.fromFile(source: 'oas/openapi_curated.yaml'); + await spec.generate( + package: 'OpenAI', + destination: 'lib/src/generated/', + replace: true, + clientOptions: const ClientGeneratorOptions( + enabled: true, + ), + ); + await Process.run( + 'dart', + ['run', 'build_runner', 'build', 'lib', '--delete-conflicting-outputs'], + ); +} diff --git a/packages/openai_dart/oas/openapi_curated.yaml b/packages/openai_dart/oas/openapi_curated.yaml new file mode 100644 index 00000000..178cf963 --- /dev/null +++ b/packages/openai_dart/oas/openapi_curated.yaml @@ -0,0 +1,2619 @@ +#file: noinspection YAMLUnusedAnchor +openapi: 3.0.0 + +info: + title: OpenAI API + description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. + version: "2.0.0" + termsOfService: https://openai.com/policies/terms-of-use + contact: + name: OpenAI Support + url: https://help.openai.com/ + license: + name: MIT + url: https://github.com/openai/openai-openapi/blob/master/LICENSE + +servers: + - url: https://api.openai.com/v1 + +tags: + - name: Chat + description: Given a list of messages comprising a conversation, the model will return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. 
+ - name: Embeddings + description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + - name: Fine-tuning + description: Manage fine-tuning jobs to tailor a model to your specific training data. + - name: Images + description: Given a prompt and/or an input image, the model will generate a new image. + - name: Models + description: List and describe the various models available in the API. + - name: Moderations + description: Given a input text, outputs if the model classifies it as violating OpenAI's content policy. + +paths: + /chat/completions: + post: + operationId: createChatCompletion + tags: + - Chat + summary: Creates a model response for the given chat conversation. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionResponse" + x-oaiMeta: + name: Create chat completion + group: chat + returns: | + Returns a [chat completion](https://platform.openai.com/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](https://platform.openai.com/docs/api-reference/chat/streaming) objects if the request is streamed. + path: create + examples: + - title: No Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ] + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "You are a helpful assistant." }], + model: "VAR_model_id", + }); + + console.log(completion.choices[0]); + } + + main(); + response: &chat_completion_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ], + "stream": true + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + for chunk in completion: + print(chunk.choices[0].delta) + + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + main(); + response: &chat_completion_chunk_example | + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":null}]} + + .... 
+ + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":" today"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"?"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]} + - title: Function calling + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "What is the weather like in Boston?" + } + ], + "functions": [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + ], + "function_call": "auto" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + functions = [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + ] + messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=messages, + functions=functions, + function_call="auto", # auto is default, but we'll be explicit + ) + + print(completion) + + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const functions = [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-3.5-turbo", + messages: messages, + functions: functions, + function_call: "auto", // auto is default, but we'll be explicit + }); + + console.log(response); + } + + main(); + response: &chat_completion_function_example | + { + "choices": [ + { + "finish_reason": "function_call", + "index": 0, + "message": { + "content": null, + "function_call": { + "arguments": "{\n \"location\": \"Boston, MA\"\n}", + "name": "get_current_weather" + }, + "role": "assistant" + } + } + ], + "created": 1694028367, + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": { + "completion_tokens": 18, + "prompt_tokens": 82, + "total_tokens": 100 + } + } + /completions: + post: + operationId: createCompletion + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionResponse" + x-oaiMeta: + name: Create completion + returns: | + Returns a [completion](https://platform.openai.com/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed. + legacy: true + examples: + - title: No streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Completion.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + max_tokens: 7, + temperature: 0, + }); + + console.log(completion); + } + main(); + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_model_id", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "stream": true + }' + python: | + import os 
+ import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + for chunk in openai.Completion.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0, + stream=True + ): + print(chunk['choices'][0]['text']) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + stream: true, + }); + + for await (const chunk of stream) { + console.log(chunk.choices[0].text) + } + } + main(); + response: | + { + "id": "cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe", + "object": "text_completion", + "created": 1690759702, + "choices": [ + { + "text": "This", + "index": 0, + "logprobs": null, + "finish_reason": null + } + ], + "model": "gpt-3.5-turbo-instruct" + } + /embeddings: + post: + operationId: createEmbedding + tags: + - Embeddings + summary: Creates an embedding vector representing the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingResponse" + x-oaiMeta: + name: Create embeddings + returns: A list of [embedding](https://platform.openai.com/docs/api-reference/embeddings/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/embeddings \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input": "The food was delicious and the waiter...", + "model": "text-embedding-ada-002" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Embedding.create( + model="text-embedding-ada-002", + input="The food was delicious and the waiter..." 
+ ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const embedding = await openai.embeddings.create({ + model: "text-embedding-ada-002", + input: "The quick brown fox jumped over the lazy dog", + }); + + console.log(embedding); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + /fine_tuning/jobs: + post: + operationId: createFineTuningJob + tags: + - Fine-tuning + summary: | + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning). + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuningJobRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Create fine-tuning job + returns: A [fine-tuning.job](https://platform.openai.com/docs/api-reference/fine-tuning/object) object. 
+ examples: + - title: No hyperparameters + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + } + - title: Hyperparameters + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-3.5-turbo", + "hyperparameters": { + "n_epochs": 2 + } + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo", hyperparameters={"n_epochs":2}) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + model: "gpt-3.5-turbo", + hyperparameters: { n_epochs: 2 } + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + 
"result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters":{"n_epochs":2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", validation_file="file-abc123", model="gpt-3.5-turbo") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + } + get: + operationId: listPaginatedFineTuningJobs + tags: + - Fine-tuning + summary: | + List your organization's fine-tuning jobs. + parameters: + - name: after + in: query + description: Identifier for the last job from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of fine-tuning jobs to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" + x-oaiMeta: + name: List fine-tuning jobs + returns: A list of paginated [fine-tuning job](https://platform.openai.com/docs/api-reference/fine-tuning/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.jobs.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-TjX0lMfOniCZX64t9PUQT5hn", + "created_at": 1689813489, + "level": "warn", + "message": "Fine tuning process stopping due to job cancellation", + "data": null, + "type": "message" + }, + { ... }, + { ... } + ], "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + operationId: retrieveFineTuningJob + tags: + - Fine-tuning + summary: | + Get info about a fine-tuning job. + + [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning). + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Retrieve fine-tuning job + returns: The [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tunes/object) object with the given ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.retrieve("ftjob-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: &fine_tuning_example | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + }, + "trained_tokens": 5768 + } + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + operationId: listFineTuningEvents + tags: + - Fine-tuning + summary: | + Get status updates for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get events for. + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobEventsResponse" + x-oaiMeta: + name: List fine-tuning events + returns: A list of fine-tuning event objects. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \
+                -H "Authorization: Bearer $OPENAI_API_KEY"
+            python: |
+              import os
+              import openai
+              openai.api_key = os.getenv("OPENAI_API_KEY")
+              openai.FineTuningJob.list_events(id="ftjob-abc123", limit=2)
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const list = await openai.fineTuning.jobs.listEvents("ftjob-abc123", { limit: 2 });
+
+                for await (const fineTune of list) {
+                  console.log(fineTune);
+                }
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "object": "fine_tuning.job.event",
+                  "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm",
+                  "created_at": 1692407401,
+                  "level": "info",
+                  "message": "Fine tuning job successfully completed",
+                  "data": null,
+                  "type": "message"
+                },
+                {
+                  "object": "fine_tuning.job.event",
+                  "id": "ft-event-tyiGuB72evQncpH87xe505Sv",
+                  "created_at": 1692407400,
+                  "level": "info",
+                  "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel",
+                  "data": null,
+                  "type": "message"
+                }
+              ],
+              "has_more": true
+            }
+  /fine_tuning/jobs/{fine_tuning_job_id}/cancel:
+    post:
+      operationId: cancelFineTuningJob
+      tags:
+        - Fine-tuning
+      summary: |
+        Immediately cancel a fine-tune job.
+      parameters:
+        - in: path
+          name: fine_tuning_job_id
+          required: true
+          schema:
+            type: string
+            example: ft-AF1WoRqd3aJAHsqc9NY7iL8F
+          description: |
+            The ID of the fine-tuning job to cancel.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/FineTuningJob"
+      x-oaiMeta:
+        name: Cancel fine-tuning
+        returns: The cancelled [fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning/object) object.
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.cancel("ftjob-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); + + console.log(fineTune); + } + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1689376978, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "hyperparameters": { + "n_epochs": "auto" + }, + "status": "cancelled", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + /images/generations: + post: + operationId: createImage + tags: + - Images + summary: Creates an image given a prompt. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateImageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image + returns: Returns a list of [image](https://platform.openai.com/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "prompt": "A cute baby sea otter", + "n": 2, + "size": "1024x1024" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create( + prompt="A cute baby sea otter", + n=2, + size="1024x1024" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.generate({ prompt: "A cute baby sea otter" }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /models: + get: + operationId: listModels + tags: + - Models + summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListModelsResponse" + x-oaiMeta: + name: List models + returns: A list of [model](https://platform.openai.com/docs/api-reference/models/object) objects. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/models \
+                -H "Authorization: Bearer $OPENAI_API_KEY"
+            python: |
+              import os
+              import openai
+              openai.api_key = os.getenv("OPENAI_API_KEY")
+              openai.Model.list()
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const list = await openai.models.list();
+
+                for await (const model of list) {
+                  console.log(model);
+                }
+              }
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "model-id-0",
+                  "object": "model",
+                  "created": 1686935002,
+                  "owned_by": "organization-owner"
+                },
+                {
+                  "id": "model-id-1",
+                  "object": "model",
+                  "created": 1686935002,
+                  "owned_by": "organization-owner"
+                },
+                {
+                  "id": "model-id-2",
+                  "object": "model",
+                  "created": 1686935002,
+                  "owned_by": "openai"
+                }
+              ]
+            }
+  /models/{model}:
+    get:
+      operationId: retrieveModel
+      tags:
+        - Models
+      summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+      parameters:
+        - in: path
+          name: model
+          required: true
+          schema:
+            type: string
+            # ideally this will be an actual ID, so this will always work from browser
+            example: gpt-3.5-turbo
+          description: The ID of the model to use for this request
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Model"
+      x-oaiMeta:
+        name: Retrieve model
+        returns: The [model](https://platform.openai.com/docs/api-reference/models/object) object matching the specified ID.
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models/VAR_model_id \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.retrieve("VAR_model_id") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.retrieve("gpt-3.5-turbo"); + + console.log(model); + } + + main(); + response: &retrieve_model_response | + { + "id": "VAR_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + delete: + operationId: deleteModel + tags: + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteModelResponse" + x-oaiMeta: + name: Delete fine-tune model + returns: Deletion status. + examples: + request: + curl: | + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + + console.log(model); + } + main(); + response: | + { + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "object": "model", + "deleted": true + } + /moderations: + post: + operationId: createModeration + tags: + - Moderations + summary: Classifies if text violates OpenAI's Content Policy. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationResponse" + x-oaiMeta: + name: Create moderation + returns: A [moderation](https://platform.openai.com/docs/api-reference/moderations/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Moderation.create( + input="I want to kill them.", + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const moderation = await openai.moderations.create({ input: "I want to kill them." }); + + console.log(moderation); + } + main(); + response: &moderation_example | + { + "id": "modr-XXXXX", + "model": "text-moderation-005", + "results": [ + { + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": false, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true, + }, + "category_scores": { + "sexual": 1.2282071e-06, + "hate": 0.010696256, + "harassment": 0.29842457, + "self-harm": 1.5236925e-08, + "sexual/minors": 5.7246268e-08, + "hate/threatening": 0.0060676364, + "violence/graphic": 4.435014e-06, + "self-harm/intent": 8.098441e-10, + "self-harm/instructions": 2.8498655e-11, + "harassment/threatening": 0.63055265, + "violence": 0.99011886, + } + } + ] + } + + +components: + + securitySchemes: + ApiKeyAuth: + type: http + scheme: 'bearer' + + schemas: + CreateCompletionRequest: + type: object + description: 
Request object for the Create completion endpoint. + properties: + model: + title: CompletionModel + description: &model_description | + ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + title: CompletionModels + description: | + Available completion models. Mind that the list may not be exhaustive nor up-to-date. + enum: + [ + "babbage-002", + "davinci-002", + "gpt-3.5-turbo-instruct", + "text-davinci-003", + "text-davinci-002", + "text-davinci-001", + "code-davinci-002", + "text-curie-001", + "text-babbage-001", + "text-ada-001", + ] + x-oaiTypeLabel: string + prompt: + title: CompletionPrompt + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. 
+ + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. 
+ logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details) + stop: + title: CompletionStop + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. 
The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + suffix: + description: The suffix that comes after a completion of inserted text. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + $ref: "#/components/schemas/CompletionChoice" + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + object: + type: string + description: The object type, which is always "text_completion" + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-3.5-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + CompletionChoice: + type: object + description: A choice the model generated for the input prompt. + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + $ref: "#/components/schemas/CompletionFinishReason" + index: + type: integer + description: The index of the choice in the list of generated choices. + logprobs: + $ref: "#/components/schemas/CompletionLogprobs" + nullable: true + text: + type: string + description: The text of the completion. 
+ CompletionFinishReason: + type: string + nullable: true + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: [ "stop", "length", "content_filter" ] + CompletionLogprobs: + type: object + description: | + The probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + properties: + text_offset: + type: array + description: The offset of the token from the beginning of the prompt. + items: + type: integer + token_logprobs: + type: array + description: The log probabilities of tokens in the completion. + items: + type: number + nullable: true + tokens: + type: array + description: The tokens generated by the model converted back to text. + items: + type: string + top_logprobs: + type: array + description: The log probabilities of the `logprobs` most likely tokens. + items: + type: object + nullable: true + additionalProperties: + type: number + CreateChatCompletionRequest: + type: object + description: Request object for the Create chat completion endpoint. + properties: + model: + title: ChatCompletionModel + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. 
+ example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + title: ChatCompletionModels + enum: + [ + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionMessage" + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + function_call: + description: > + Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via [ChatCompletionFunctionCallOption] forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + type: object + # TODO add support for oneOf + # title: ChatCompletionFunctionCall + # oneOf: + # - type: string + # enum: [ none, auto ] + # - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + functions: + description: A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + max_tokens: + description: | + The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + default: inf + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + stop: + title: ChatCompletionStop + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
+ type: boolean + nullable: true + default: false + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + user: *end_user_param_configuration + required: + - model + - messages + ChatCompletionMessage: + type: object + description: A message in a chat conversation. + properties: + role: + $ref: "#/components/schemas/ChatCompletionMessageRole" + content: + type: string + nullable: true + description: The contents of the message. `content` is required for all messages, and may be null for assistant messages with function calls. + function_call: + $ref: "#/components/schemas/ChatCompletionMessageFunctionCall" + name: + type: string + description: The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. + required: + - role + - content + ChatCompletionMessageRole: + type: string + enum: [ "system", "user", "assistant", "function" ] + description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + ChatCompletionMessageFunctionCall: + type: object + description: The name and arguments of a function that should be called, as generated by the model. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. 
+ required: + - arguments + - name + ChatCompletionFunctions: + type: object + description: A function that the model may call. + properties: + name: + type: string + description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + parameters: + $ref: "#/components/schemas/ChatCompletionFunctionParameters" + required: + - name + - parameters + ChatCompletionFunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." + additionalProperties: true + ChatCompletionFunctionCallOption: + type: object + description: Forces the model to call the specified function. + properties: + name: + type: string + description: The name of the function to call. + required: + - name + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + $ref: "#/components/schemas/ChatCompletionResponseChoice" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. 
+ object: + type: string + description: The object type, which is always `chat.completion`. + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example + ChatCompletionResponseChoice: + type: object + description: A choice the model generated for the input prompt. + required: + - finish_reason + - index + - message + properties: + finish_reason: + $ref: "#/components/schemas/ChatCompletionFinishReason" + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionMessage" + ChatCompletionFinishReason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + or `function_call` if the model called a function. + enum: [ "stop", "length", "function_call", "content_filter" ] + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + $ref: "#/components/schemas/ChatCompletionStreamResponseChoice" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. 
+ object: + type: string + description: The object type, which is always `chat.completion.chunk`. + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example + ChatCompletionStreamResponseChoice: + type: object + description: A choice the model generated for the input prompt. + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + finish_reason: + $ref: "#/components/schemas/ChatCompletionFinishReason" + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + $ref: "#/components/schemas/ChatCompletionMessageFunctionCall" + role: + $ref: "#/components/schemas/ChatCompletionMessageRole" + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + nullable: true + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + CreateEmbeddingRequest: + type: object + description: Request object for the Create embedding endpoint. 
+ additionalProperties: false + properties: + model: + title: EmbeddingModel + description: *model_description + example: "text-embedding-ada-002" + anyOf: + - type: string + - type: string + title: EmbeddingModels + enum: [ "text-embedding-ada-002" ] + input: + title: EmbeddingInput + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + encoding_format: + title: EmbeddingEncodingFormat + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" + type: string + enum: [ "float", "base64" ] + user: *end_user_param_configuration + required: + - model + - input + CreateEmbeddingResponse: + type: object + description: Represents an embedding vector returned by embedding endpoint. + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". 
+ usage: + $ref: "#/components/schemas/EmbeddingUsage" + required: + - object + - model + - data + - usage + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: | + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + EmbeddingUsage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + CreateFineTuningJobRequest: + type: object + description: Request object for the Create fine-tuning job endpoint. + properties: + model: + title: FineTuningModel + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + title: FineTuningModels + enum: [ "babbage-002", "davinci-002", "gpt-3.5-turbo" ] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. 
+ + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + type: string + example: "file-abc123" + hyperparameters: + $ref: "#/components/schemas/FineTuningJobHyperparameters" + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" + required: + - model + - training_file + FineTuningJob: + type: object + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. + properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + $ref: "#/components/schemas/FineTuningJobError" + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. 
+ finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: + $ref: "#/components/schemas/FineTuningJobHyperparameters" + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + $ref: "#/components/schemas/FineTuningJobStatus" + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). 
+ required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example + FineTuningJobStatus: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: [ "validating_files", "queued", "running", "succeeded", "failed", "cancelled" ] + FineTuningJobError: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true + required: + - code + - message + - param + FineTuningJobHyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + title: FineTuningNEpochs + description: | + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + title: FineTuningNEpochsOptions + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + required: + - n_epochs + ListPaginatedFineTuningJobsResponse: + type: object + description: Represents a list of fine-tuning jobs. + properties: + data: + type: array + description: The list of fine-tuning jobs. 
+ items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + description: Whether there are more fine-tuning jobs to retrieve. + object: + type: string + description: The object type, which is always "list". + required: + - object + - data + - has_more + ListFineTuningJobEventsResponse: + type: object + description: Represents a list of fine-tuning job events. + properties: + data: + type: array + description: The list of fine-tuning job events. + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + description: The object type, which is always "list". + required: + - object + - data + FineTuningJobEvent: + type: object + description: Fine-tuning job event object. + properties: + id: + type: string + description: The event identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the event was created. + level: + type: string + description: The log level of the event. + enum: ["info", "warn", "error"] + message: + type: string + description: The message of the event. + object: + type: string + description: The object type, which is always "fine_tuning.job.event". + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } + CreateImageRequest: + type: object + description: Request object for the Create image endpoint. + properties: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter" + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. 
+ response_format: &images_response_format + title: ImageResponseFormat + type: string + enum: ["url", "b64_json"] + default: "url" + example: "url" + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + size: &images_size + title: ImageSize + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + user: *end_user_param_configuration + required: + - prompt + ImagesResponse: + type: object + description: Represents a generated image returned by the images endpoint. + properties: + created: + type: integer + description: The Unix timestamp (in seconds) when the image was created. + data: + type: array + description: The list of images generated by the model. + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + x-oaiMeta: + name: The image object + example: | + { + "url": "..." + } + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + owned_by: + type: string + description: The organization that owns the model. 
+ required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response + ListModelsResponse: + type: object + description: Represents a list of models returned by the List models endpoint. + properties: + object: + type: string + description: The object type, which is always "list". + data: + type: array + description: The list of models. + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + DeleteModelResponse: + type: object + description: Represents a deleted response returned by the Delete model endpoint. + properties: + id: + type: string + description: The model identifier. + deleted: + type: boolean + description: Whether the model was deleted. + object: + type: string + description: The object type, which is always "model". + required: + - id + - object + - deleted + CreateModerationRequest: + type: object + description: Request object for the Create moderation endpoint. + properties: + model: + title: ModerationModel + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + title: ModerationModels + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + input: + title: ModerationInput + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." 
+      required:
+        - input
+    CreateModerationResponse:
+      type: object
+      description: Represents policy compliance report by OpenAI's content moderation model against a given input.
+      properties:
+        id:
+          type: string
+          description: The unique identifier for the moderation request.
+        model:
+          type: string
+          description: The model used to generate the moderation results.
+        results:
+          type: array
+          description: A list of moderation objects.
+          items:
+            $ref: "#/components/schemas/Moderation"
+      required:
+        - id
+        - model
+        - results
+      x-oaiMeta:
+        name: The moderation object
+        example: *moderation_example
+    Moderation:
+      type: object
+      description: Represents policy compliance report by OpenAI's content moderation model against a given input.
+      properties:
+        flagged:
+          type: boolean
+          description: Whether the content violates [OpenAI's usage policies](https://platform.openai.com/policies/usage-policies).
+        categories:
+          $ref: "#/components/schemas/ModerationCategories"
+        category_scores:
+          $ref: "#/components/schemas/ModerationCategoriesScores"
+      required:
+        - flagged
+        - categories
+        - category_scores
+    ModerationCategories:
+      type: object
+      description: A list of the categories, and whether they are flagged or not.
+      properties:
+        hate:
+          type: boolean
+          description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.
+        hate/threatening:
+          type: boolean
+          description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
+        harassment:
+          type: boolean
+          description: Content that expresses, incites, or promotes harassing language towards any target.
+ harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + ModerationCategoriesScores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. 
+ self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + +security: + - ApiKeyAuth: [ ] diff --git a/packages/openai_dart/oas/openapi_official.yaml b/packages/openai_dart/oas/openapi_official.yaml new file mode 100644 index 00000000..12b5059d --- /dev/null +++ b/packages/openai_dart/oas/openapi_official.yaml @@ -0,0 +1,4617 @@ +openapi: 3.0.0 +info: + title: OpenAI API + description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details. + version: "2.0.0" + termsOfService: https://openai.com/policies/terms-of-use + contact: + name: OpenAI Support + url: https://help.openai.com/ + license: + name: MIT + url: https://github.com/openai/openai-openapi/blob/master/LICENSE +servers: + - url: https://api.openai.com/v1 +tags: + - name: Audio + description: Learn how to turn audio into text. + - name: Chat + description: Given a list of messages comprising a conversation, the model will return a response. + - name: Completions + description: Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. 
+  - name: Embeddings
+    description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+  - name: Fine-tuning
+    description: Manage fine-tuning jobs to tailor a model to your specific training data.
+  - name: Files
+    description: Files are used to upload documents that can be used with features like fine-tuning.
+  - name: Images
+    description: Given a prompt and/or an input image, the model will generate a new image.
+  - name: Models
+    description: List and describe the various models available in the API.
+  - name: Moderations
+    description: Given an input text, outputs if the model classifies it as violating OpenAI's content policy.
+  - name: Fine-tunes
+    description: Manage legacy fine-tuning jobs to tailor a model to your specific training data.
+  - name: Edits
+    description: Given a prompt and an instruction, the model will return an edited version of the prompt.
+
+paths:
+  # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file,
+  # under the appropriate group
+  /chat/completions:
+    post:
+      operationId: createChatCompletion
+      tags:
+        - Chat
+      summary: Creates a model response for the given chat conversation.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateChatCompletionRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/CreateChatCompletionResponse"
+
+      x-oaiMeta:
+        name: Create chat completion
+        group: chat
+        returns: |
+          Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed.
+ path: create + examples: + - title: No Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ] + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "You are a helpful assistant." }], + model: "VAR_model_id", + }); + + console.log(completion.choices[0]); + } + + main(); + response: &chat_completion_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0613", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ], + "stream": true + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + for chunk in completion: + print(chunk.choices[0].delta) + + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + main(); + response: &chat_completion_chunk_example | + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"!"},"finish_reason":null}]} + + .... 
+ + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":" today"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{"content":"?"},"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0613","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]} + - title: Function calling + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "What is the weather like in Boston?" + } + ], + "functions": [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + ], + "function_call": "auto" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + + functions = [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + ] + messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] + completion = openai.ChatCompletion.create( + model="VAR_model_id", + messages=messages, + functions=functions, + function_call="auto", # auto is default, but we'll be explicit + ) + + print(completion) + + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const functions = [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-3.5-turbo", + messages: messages, + functions: functions, + function_call: "auto", // auto is default, but we'll be explicit + }); + + console.log(response); + } + + main(); + response: &chat_completion_function_example | + { + "choices": [ + { + "finish_reason": "function_call", + "index": 0, + "message": { + "content": null, + "function_call": { + "arguments": "{\n \"location\": \"Boston, MA\"\n}", + "name": "get_current_weather" + }, + "role": "assistant" + } + } + ], + "created": 1694028367, + "model": "gpt-3.5-turbo-0613", + "object": "chat.completion", + "usage": { + "completion_tokens": 18, + "prompt_tokens": 82, + "total_tokens": 100 + } + } + /completions: + post: + operationId: createCompletion + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. 
+ requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionResponse" + x-oaiMeta: + name: Create completion + returns: | + Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed. + legacy: true + examples: + - title: No streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Completion.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + max_tokens: 7, + temperature: 0, + }); + + console.log(completion); + } + main(); + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_model_id", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "stream": true + }' + python: | + import os + import openai + 
openai.api_key = os.getenv("OPENAI_API_KEY") + for chunk in openai.Completion.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0, + stream=True + ): + print(chunk['choices'][0]['text']) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + stream: true, + }); + + for await (const chunk of stream) { + console.log(chunk.choices[0].text) + } + } + main(); + response: | + { + "id": "cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe", + "object": "text_completion", + "created": 1690759702, + "choices": [ + { + "text": "This", + "index": 0, + "logprobs": null, + "finish_reason": null + } + ], + "model": "gpt-3.5-turbo-instruct" + } + /edits: + post: + operationId: createEdit + deprecated: true + tags: + - Edits + summary: Creates a new edit for the provided input, instruction, and parameters. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEditRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEditResponse" + x-oaiMeta: + name: Create edit + returns: | + Returns an [edit](/docs/api-reference/edits/object) object. 
+ group: edits + examples: + request: + curl: | + curl https://api.openai.com/v1/edits \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "input": "What day of the wek is it?", + "instruction": "Fix the spelling mistakes" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Edit.create( + model="VAR_model_id", + input="What day of the wek is it?", + instruction="Fix the spelling mistakes" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const edit = await openai.edits.create({ + model: "VAR_model_id", + input: "What day of the wek is it?", + instruction: "Fix the spelling mistakes.", + }); + + console.log(edit); + } + + main(); + response: &edit_example | + { + "object": "edit", + "created": 1589478378, + "choices": [ + { + "text": "What day of the week is it?", + "index": 0, + } + ], + "usage": { + "prompt_tokens": 25, + "completion_tokens": 32, + "total_tokens": 57 + } + } + + /images/generations: + post: + operationId: createImage + tags: + - Images + summary: Creates an image given a prompt. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateImageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "prompt": "A cute baby sea otter", + "n": 2, + "size": "1024x1024" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create( + prompt="A cute baby sea otter", + n=2, + size="1024x1024" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.generate({ prompt: "A cute baby sea otter" }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /images/edits: + post: + operationId: createImageEdit + tags: + - Images + summary: Creates an edited or extended image given an original image and a prompt. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageEditRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image edit + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/edits \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F mask="@mask.png" \ + -F prompt="A cute baby sea otter wearing a beret" \ + -F n=2 \ + -F size="1024x1024" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create_edit( + image=open("otter.png", "rb"), + mask=open("mask.png", "rb"), + prompt="A cute baby sea otter wearing a beret", + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.edit({ + image: fs.createReadStream("otter.png"), + mask: fs.createReadStream("mask.png"), + prompt: "A cute baby sea otter wearing a beret", + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /images/variations: + post: + operationId: createImageVariation + tags: + - Images + summary: Creates a variation of a given image. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageVariationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image variation + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/variations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F n=2 \ + -F size="1024x1024" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Image.create_variation( + image=open("otter.png", "rb"), + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.createVariation({ + image: fs.createReadStream("otter.png"), + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /embeddings: + post: + operationId: createEmbedding + tags: + - Embeddings + summary: Creates an embedding vector representing the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingResponse" + x-oaiMeta: + name: Create embeddings + returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/embeddings \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -d '{
+                  "input": "The food was delicious and the waiter...",
+                  "model": "text-embedding-ada-002",
+                  "encoding_format": "float"
+                }'
+            python: |
+              import os
+              import openai
+              openai.api_key = os.getenv("OPENAI_API_KEY")
+              openai.Embedding.create(
+                model="text-embedding-ada-002",
+                input="The food was delicious and the waiter...",
+                encoding_format="float"
+              )
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const embedding = await openai.embeddings.create({
+                  model: "text-embedding-ada-002",
+                  input: "The quick brown fox jumped over the lazy dog",
+                  encoding_format: "float",
+                });
+
+                console.log(embedding);
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "object": "embedding",
+                  "embedding": [
+                    0.0023064255,
+                    -0.009327292,
+                    .... (1536 floats total for ada-002)
+                    -0.0028842222,
+                  ],
+                  "index": 0
+                }
+              ],
+              "model": "text-embedding-ada-002",
+              "usage": {
+                "prompt_tokens": 8,
+                "total_tokens": 8
+              }
+            }
+
+  /audio/transcriptions:
+    post:
+      operationId: createTranscription
+      tags:
+        - Audio
+      summary: Transcribes audio into the input language.
+      requestBody:
+        required: true
+        content:
+          multipart/form-data:
+            schema:
+              $ref: "#/components/schemas/CreateTranscriptionRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/CreateTranscriptionResponse"
+      x-oaiMeta:
+        name: Create transcription
+        returns: The transcribed text.
+ examples: + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + audio_file = open("audio.mp3", "rb") + transcript = openai.Audio.transcribe("whisper-1", audio_file) + node: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + main(); + response: | + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateTranslationResponse" + x-oaiMeta: + name: Create translation + returns: The translated text. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/audio/translations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/german.m4a" \ + -F model="whisper-1" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + audio_file = open("german.m4a", "rb") + transcript = openai.Audio.translate("whisper-1", audio_file) + node: | + const { Configuration, OpenAIApi } = require("openai"); + const configuration = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, + }); + const openai = new OpenAIApi(configuration); + const resp = await openai.createTranslation( + fs.createReadStream("audio.mp3"), + "whisper-1" + ); + response: | + { + "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" + } + + /files: + get: + operationId: listFiles + tags: + - Files + summary: Returns a list of files that belong to the user's organization. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFilesResponse" + x-oaiMeta: + name: List files + returns: A list of [file](/docs/api-reference/files/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.files.list(); + + for await (const file of list) { + console.log(file); + } + } + + main(); + response: | + { + "data": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 175, + "created_at": 1613677385, + "filename": "train.jsonl", + "purpose": "search" + }, + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "puppy.jsonl", + "purpose": "search" + } + ], + "object": "list" + } + post: + operationId: createFile + tags: + - Files + summary: | + Upload a file that can be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please [contact us](https://help.openai.com/) if you need to increase the storage limit. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Upload file + returns: The uploaded [file](/docs/api-reference/files/object) object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F purpose="fine-tune" \ + -F file="@mydata.jsonl" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.create( + file=open("mydata.jsonl", "rb"), + purpose='fine-tune' + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.create({ + file: fs.createReadStream("mydata.jsonl"), + purpose: "fine-tune", + }); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + "status": "uploaded" | "processed" | "pending" | "error" + } + + /files/{file_id}: + delete: + operationId: deleteFile + tags: + - Files + summary: Delete a file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteFileResponse" + x-oaiMeta: + name: Delete file + returns: Deletion status. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.delete("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.del("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "deleted": true + } + get: + operationId: retrieveFile + tags: + - Files + summary: Returns information about a specific file. 
+ parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Retrieve file + returns: The [file](/docs/api-reference/files/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.File.retrieve("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.retrieve("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779657, + "filename": "mydata.jsonl", + "purpose": "fine-tune" + } + + /files/{file_id}/content: + get: + operationId: downloadFile + tags: + - Files + summary: Returns the contents of the specified file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + returns: The file content. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123/content \ + -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + content = openai.File.download("file-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.retrieveContent("file-abc123"); + + console.log(file); + } + + main(); + + /fine_tuning/jobs: + post: + operationId: createFineTuningJob + tags: + - Fine-tuning + summary: | + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuningJobRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Create fine-tuning job + returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
+ examples: + - title: No hyperparameters + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + } + - title: Hyperparameters + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-3.5-turbo", + "hyperparameters": { + "n_epochs": 2 + } + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", model="gpt-3.5-turbo", hyperparameters={"n_epochs":2}) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + model: "gpt-3.5-turbo", + hyperparameters: { n_epochs: 2 } + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + 
"result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters":{"n_epochs":2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.create(training_file="file-abc123", validation_file="file-abc123", model="gpt-3.5-turbo") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + } + get: + operationId: listPaginatedFineTuningJobs + tags: + - Fine-tuning + summary: | + List your organization's fine-tuning jobs + parameters: + - name: after + in: query + description: Identifier for the last job from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of fine-tuning jobs to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" + x-oaiMeta: + name: List fine-tuning jobs + returns: A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.jobs.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-TjX0lMfOniCZX64t9PUQT5hn", + "created_at": 1689813489, + "level": "warn", + "message": "Fine tuning process stopping due to job cancellation", + "data": null, + "type": "message" + }, + { ... }, + { ... } + ], "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + operationId: retrieveFineTuningJob + tags: + - Fine-tuning + summary: | + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Retrieve fine-tuning job + returns: The [fine-tuning](/docs/api-reference/fine-tunes/object) object with the given ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.retrieve("ftjob-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: &fine_tuning_example | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + }, + "trained_tokens": 5768 + } + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + operationId: listFineTuningEvents + tags: + - Fine-tuning + summary: | + Get status updates for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get events for. + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobEventsResponse" + x-oaiMeta: + name: List fine-tuning events + returns: A list of fine-tuning event objects. 
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \
+                -H "Authorization: Bearer $OPENAI_API_KEY"
+            python: |
+              import os
+              import openai
+              openai.api_key = os.getenv("OPENAI_API_KEY")
+              openai.FineTuningJob.list_events(id="ftjob-abc123", limit=2)
+            node.js: |-
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const list = await openai.fineTuning.jobs.listEvents("ftjob-abc123", { limit: 2 });
+
+                for await (const fineTune of list) {
+                  console.log(fineTune);
+                }
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "object": "fine_tuning.job.event",
+                  "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm",
+                  "created_at": 1692407401,
+                  "level": "info",
+                  "message": "Fine tuning job successfully completed",
+                  "data": null,
+                  "type": "message"
+                },
+                {
+                  "object": "fine_tuning.job.event",
+                  "id": "ft-event-tyiGuB72evQncpH87xe505Sv",
+                  "created_at": 1692407400,
+                  "level": "info",
+                  "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel",
+                  "data": null,
+                  "type": "message"
+                }
+              ],
+              "has_more": true
+            }
+
+  /fine_tuning/jobs/{fine_tuning_job_id}/cancel:
+    post:
+      operationId: cancelFineTuningJob
+      tags:
+        - Fine-tuning
+      summary: |
+        Immediately cancel a fine-tune job.
+      parameters:
+        - in: path
+          name: fine_tuning_job_id
+          required: true
+          schema:
+            type: string
+          example: ft-AF1WoRqd3aJAHsqc9NY7iL8F
+          description: |
+            The ID of the fine-tuning job to cancel.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/FineTuningJob"
+      x-oaiMeta:
+        name: Cancel fine-tuning
+        returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object.
+ examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTuningJob.cancel("ftjob-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); + + console.log(fineTune); + } + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0613", + "created_at": 1689376978, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "hyperparameters": { + "n_epochs": "auto" + }, + "status": "cancelled", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + + /fine-tunes: + post: + operationId: createFineTune + deprecated: true + tags: + - Fine-tunes + summary: | + Creates a job that fine-tunes a specified model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuneRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTune" + x-oaiMeta: + name: Create fine-tune + returns: A [fine-tune](/docs/api-reference/fine-tunes/object) object. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine-tunes \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123" + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.create(training_file="file-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTunes.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807352, + "events": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + } + ], + "fine_tuned_model": null, + "hyperparams": { + "batch_size": 4, + "learning_rate_multiplier": 0.1, + "n_epochs": 4, + "prompt_loss_weight": 0.1, + }, + "organization_id": "org-123", + "result_files": [], + "status": "pending", + "validation_files": [], + "training_files": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807352, + } + get: + operationId: listFineTunes + deprecated: true + tags: + - Fine-tunes + summary: | + List your organization's fine-tuning jobs + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTunesResponse" + x-oaiMeta: + name: List fine-tunes + returns: A list of [fine-tune](/docs/api-reference/fine-tunes/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine-tunes \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTunes.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807352, + "fine_tuned_model": null, + "hyperparams": { ... }, + "organization_id": "org-123", + "result_files": [], + "status": "pending", + "validation_files": [], + "training_files": [ { ... } ], + "updated_at": 1614807352, + }, + { ... }, + { ... } + ] + } + + /fine-tunes/{fine_tune_id}: + get: + operationId: retrieveFineTune + deprecated: true + tags: + - Fine-tunes + summary: | + Gets info about the fine-tune job. + + [Learn more about fine-tuning](/docs/guides/legacy-fine-tuning) + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTune" + x-oaiMeta: + name: Retrieve fine-tune + returns: The [fine-tune](/docs/api-reference/fine-tunes/object) object with the given ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.retrieve(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTunes.retrieve("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + + console.log(fineTune); + } + + main(); + response: &fine_tune_example | + { + "id": "ft-AF1WoRqd3aJAHsqc9NY7iL8F", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807352, + "events": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + }, + { + "object": "fine-tune-event", + "created_at": 1614807356, + "level": "info", + "message": "Job started." + }, + { + "object": "fine-tune-event", + "created_at": 1614807861, + "level": "info", + "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Uploaded result files: file-abc123." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Job succeeded." 
+ } + ], + "fine_tuned_model": "curie:ft-acmeco-2021-03-03-21-44-20", + "hyperparams": { + "batch_size": 4, + "learning_rate_multiplier": 0.1, + "n_epochs": 4, + "prompt_loss_weight": 0.1, + }, + "organization_id": "org-123", + "result_files": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 81509, + "created_at": 1614807863, + "filename": "compiled_results.csv", + "purpose": "fine-tune-results" + } + ], + "status": "succeeded", + "validation_files": [], + "training_files": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807865, + } + + /fine-tunes/{fine_tune_id}/cancel: + post: + operationId: cancelFineTune + deprecated: true + tags: + - Fine-tunes + summary: | + Immediately cancel a fine-tune job. + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job to cancel + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTune" + x-oaiMeta: + name: Cancel fine-tune + returns: The cancelled [fine-tune](/docs/api-reference/fine-tunes/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.cancel(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTunes.cancel("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + + console.log(fineTune); + } + main(); + response: | + { + "id": "ft-xhrpBbvVUzYGo8oUO1FY4nI7", + "object": "fine-tune", + "model": "curie", + "created_at": 1614807770, + "events": [ { ... 
} ], + "fine_tuned_model": null, + "hyperparams": { ... }, + "organization_id": "org-123", + "result_files": [], + "status": "cancelled", + "validation_files": [], + "training_files": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 1547276, + "created_at": 1610062281, + "filename": "my-data-train.jsonl", + "purpose": "fine-tune-train" + } + ], + "updated_at": 1614807789, + } + + /fine-tunes/{fine_tune_id}/events: + get: + operationId: listFineTuneEvents + deprecated: true + tags: + - Fine-tunes + summary: | + Get fine-grained status updates for a fine-tune job. + parameters: + - in: path + name: fine_tune_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tune job to get events for. + - in: query + name: stream + required: false + schema: + type: boolean + default: false + description: | + Whether to stream events for the fine-tune job. If set to true, + events will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available. The stream will terminate with a + `data: [DONE]` message when the job is finished (succeeded, cancelled, + or failed). + + If set to false, only events generated so far will be returned. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuneEventsResponse" + x-oaiMeta: + name: List fine-tune events + returns: A list of fine-tune event objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine-tunes/ft-AF1WoRqd3aJAHsqc9NY7iL8F/events \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.FineTune.list_events(id="ft-AF1WoRqd3aJAHsqc9NY7iL8F") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTunes.listEvents("ft-AF1WoRqd3aJAHsqc9NY7iL8F"); + + console.log(fineTune); + } + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "fine-tune-event", + "created_at": 1614807352, + "level": "info", + "message": "Job enqueued. Waiting for jobs ahead to complete. Queue number: 0." + }, + { + "object": "fine-tune-event", + "created_at": 1614807356, + "level": "info", + "message": "Job started." + }, + { + "object": "fine-tune-event", + "created_at": 1614807861, + "level": "info", + "message": "Uploaded snapshot: curie:ft-acmeco-2021-03-03-21-44-20." + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Uploaded result files: file-abc123" + }, + { + "object": "fine-tune-event", + "created_at": 1614807864, + "level": "info", + "message": "Job succeeded." + } + ] + } + + /models: + get: + operationId: listModels + tags: + - Models + summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListModelsResponse" + x-oaiMeta: + name: List models + returns: A list of [model](/docs/api-reference/models/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.models.list(); + + for await (const model of list) { + console.log(model); + } + } + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, + { + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, + ], + "object": "list" + } + + /models/{model}: + get: + operationId: retrieveModel + tags: + - Models + summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: gpt-3.5-turbo + description: The ID of the model to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Model" + x-oaiMeta: + name: Retrieve model + returns: The [model](/docs/api-reference/models/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models/VAR_model_id \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.retrieve("VAR_model_id") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.retrieve("gpt-3.5-turbo"); + + console.log(model); + } + + main(); + response: &retrieve_model_response | + { + "id": "VAR_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + delete: + operationId: deleteModel + tags: + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteModelResponse" + x-oaiMeta: + name: Delete fine-tune model + returns: Deletion status. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Model.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + + console.log(model); + } + main(); + response: | + { + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "object": "model", + "deleted": true + } + + /moderations: + post: + operationId: createModeration + tags: + - Moderations + summary: Classifies if text violates OpenAI's Content Policy + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationResponse" + x-oaiMeta: + name: Create moderation + returns: A [moderation](/docs/api-reference/moderations/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' + python: | + import os + import openai + openai.api_key = os.getenv("OPENAI_API_KEY") + openai.Moderation.create( + input="I want to kill them.", + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const moderation = await openai.moderations.create({ input: "I want to kill them." 
}); + + console.log(moderation); + } + main(); + response: &moderation_example | + { + "id": "modr-XXXXX", + "model": "text-moderation-005", + "results": [ + { + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": false, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true, + }, + "category_scores": { + "sexual": 1.2282071e-06, + "hate": 0.010696256, + "harassment": 0.29842457, + "self-harm": 1.5236925e-08, + "sexual/minors": 5.7246268e-08, + "hate/threatening": 0.0060676364, + "violence/graphic": 4.435014e-06, + "self-harm/intent": 8.098441e-10, + "self-harm/instructions": 2.8498655e-11, + "harassment/threatening": 0.63055265, + "violence": 0.99011886, + } + } + ] + } + +components: + + securitySchemes: + ApiKeyAuth: + type: http + scheme: 'bearer' + + schemas: + Error: + type: object + properties: + code: + type: string + nullable: true + message: + type: string + nullable: false + param: + type: string + nullable: true + type: + type: string + nullable: false + required: + - type + - message + - param + - code + + ErrorResponse: + type: object + properties: + error: + $ref: "#/components/schemas/Error" + required: + - error + + ListModelsResponse: + type: object + properties: + object: + type: string + data: + type: array + items: + $ref: "#/components/schemas/Model" + required: + - object + - data + + DeleteModelResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + required: + - id + - object + - deleted + + CreateCompletionRequest: + type: object + properties: + model: + description: &model_description | + ID of the model to use. 
You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. + anyOf: + - type: string + - type: string + enum: + [ + "babbage-002", + "davinci-002", + "gpt-3.5-turbo-instruct", + "text-davinci-003", + "text-davinci-002", + "text-davinci-001", + "code-davinci-002", + "text-curie-001", + "text-babbage-001", + "text-ada-001", + ] + x-oaiTypeLabel: string + prompt: + description: &completions_prompt_description | + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + + Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + default: "<|endoftext|>" + nullable: true + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + best_of: + type: integer + default: 1 + minimum: 0 + maximum: 20 + nullable: true + description: &completions_best_of_description | + Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. + + When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
+ echo: + type: boolean + default: false + nullable: true + description: &completions_echo_description > + Echo back the prompt in addition to the completion + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_frequency_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + logit_bias: &completions_logit_bias + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: &completions_logit_bias_description | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + + As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. + logprobs: &completions_logprobs_configuration + type: integer + minimum: 0 + maximum: 5 + default: null + nullable: true + description: &completions_logprobs_description | + Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. + + The maximum value for `logprobs` is 5. + max_tokens: + type: integer + minimum: 0 + default: 16 + example: 16 + nullable: true + description: &completions_max_tokens_description | + The maximum number of [tokens](/tokenizer) to generate in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: &completions_completions_description | + How many completions to generate for each prompt. + + **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: &completions_presence_penalty_description | + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. 
If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + suffix: + description: The suffix that comes after a completion of inserted text. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. 
+ choices: + type: array + description: The list of completion choices the model generated for the input prompt. + items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: ["stop", "length", "content_filter"] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: integer + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + object: + type: string + description: The object type, which is always "text_completion" + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-3.5-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + + ChatCompletionRequestMessage: + type: object + properties: + content: + type: string + nullable: true + description: The contents of the message. 
`content` is required for all messages, and may be null for assistant messages with function calls. + function_call: + type: object + description: The name and arguments of a function that should be called, as generated by the model. + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - arguments + - name + name: + type: string + description: The name of the author of this message. `name` is required if role is `function`, and it should be the name of the function whose response is in the `content`. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. + role: + type: string + enum: ["system", "user", "assistant", "function"] + description: The role of the messages author. One of `system`, `user`, `assistant`, or `function`. + required: + - content + - role + + ChatCompletionFunctionParameters: + type: object + description: "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nTo describe a function that accepts no parameters, provide the value `{\"type\": \"object\", \"properties\": {}}`." + additionalProperties: true + + ChatCompletionFunctions: + type: object + properties: + description: + type: string + description: A description of what the function does, used by the model to choose when and how to call the function. + name: + type: string + description: The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. + parameters: + $ref: "#/components/schemas/ChatCompletionFunctionParameters" + required: + - name + - parameters + + ChatCompletionFunctionCallOption: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + function_call: + type: object + description: The name and arguments of a function that should be called, as generated by the model. + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + role: + type: string + enum: ["system", "user", "assistant", "function"] + description: The role of the author of this message. + required: + - role + - content + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + type: object + description: The name and arguments of a function that should be called, as generated by the model. + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + role: + type: string + enum: ["system", "user", "assistant", "function"] + description: The role of the author of this message. + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + function_call: + description: > + Controls how the model calls functions. "none" means the model will not call a function and instead generates a message. "auto" means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. + oneOf: + - type: string + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + functions: + description: A list of functions the model may generate JSON inputs for. 
+ type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) to generate in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + default: inf + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + user: *end_user_param_configuration + required: + - model + - messages + + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + or `function_call` if the model called a function. + enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. 
+ model: + type: string + description: The model used for the chat completion. + object: + type: string + description: The object type, which is always `chat.completion`. + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example + + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + properties: + finish_reason: + type: string + description: &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + object: + type: string + description: The object type, which is always `chat.completion`. 
+ usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example + + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + required: + - object + - data + - has_more + + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: ["stop", "length", "function_call", "content_filter"] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example + + CreateEditRequest: + type: object + properties: + instruction: + description: The instruction that tells the model how to edit the prompt. 
+ type: string + example: "Fix the spelling mistakes." + model: + description: ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint. + example: "text-davinci-edit-001" + anyOf: + - type: string + - type: string + enum: ["text-davinci-edit-001", "code-davinci-edit-001"] + x-oaiTypeLabel: string + input: + description: The input text to use as a starting point for the edit. + type: string + default: "" + nullable: true + example: "What day of the wek is it?" + n: + type: integer + minimum: 1 + maximum: 20 + default: 1 + example: 1 + nullable: true + description: How many edits to generate for the input and instruction. + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + required: + - model + - instruction + + CreateEditResponse: + type: object + title: Edit + deprecated: true + properties: + choices: + type: array + description: A list of edit choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - text + - index + - finish_reason + properties: + finish_reason: + type: string + description: *completion_finish_reason_description + enum: ["stop", "length"] + index: + type: integer + description: The index of the choice in the list of choices. + text: + type: string + description: The edited result. + object: + type: string + description: The object type, which is always `edit`. + created: + type: integer + description: The Unix timestamp (in seconds) of when the edit was created. 
+ usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - object + - created + - choices + - usage + x-oaiMeta: + name: The edit object + example: *edit_example + + CreateImageRequest: + type: object + properties: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter" + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + response_format: &images_response_format + type: string + enum: ["url", "b64_json"] + default: "url" + example: "url" + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. + size: &images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + x-oaiMeta: + name: The image object + example: | + { + "url": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. 
+ type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + n: *images_n + size: *images_size + response_format: *images_response_format + user: *end_user_param_configuration + required: + - prompt + - image + + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + n: *images_n + response_format: *images_response_format + size: *images_size + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
+ nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + required: + - input + + CreateModerationResponse: + type: object + description: Represents policy compliance report by OpenAI's content moderation model against a given input. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether the content violates [OpenAI's usage policies](/policies/usage-policies). + categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. 
+ self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. 
+ sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example + + ListFilesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: + type: string + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The file object (not file name) to be uploaded. + + If the `purpose` is set to "fine-tune", the file will be used for fine-tuning. + type: string + format: binary + purpose: + description: | + The intended purpose of the uploaded file. + + Use "fine-tune" for [fine-tuning](/docs/api-reference/fine-tuning). This allows us to validate the format of the uploaded file is correct for fine-tuning. + type: string + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: + type: string + object: + type: string + deleted: + type: boolean + required: + - id + - object + - deleted + + CreateFineTuningJobRequest: + type: object + properties: + model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). 
+ example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. 
+ type: string + nullable: true + example: "file-abc123" + required: + - model + - training_file + + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + required: + - object + - data + + CreateFineTuneRequest: + type: object + properties: + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/upload) for how to upload a file. + + Your dataset must be formatted as a JSONL file, where each training + example is a JSON object with the keys "prompt" and "completion". + Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. + type: string + example: "file-abc123" + batch_size: + description: | + The batch size to use for training. The batch size is the number of + training examples used to train a single forward and backward pass. + + By default, the batch size will be dynamically configured to be + ~0.2% of the number of examples in the training set, capped at 256 - + in general, we've found that larger batch sizes tend to work better + for larger datasets. + default: null + type: integer + nullable: true + classification_betas: + description: | + If this is provided, we calculate F-beta scores at the specified + beta values. The F-beta score is a generalization of F-1 score. + This is only used for binary classification. + + With a beta of 1 (i.e. the F-1 score), precision and recall are + given the same weight. A larger beta score puts more weight on + recall and less on precision. A smaller beta score puts more weight + on precision and less on recall. + type: array + items: + type: number + example: [0.6, 1, 1.5, 2] + default: null + nullable: true + classification_n_classes: + description: | + The number of classes in a classification task. 
+ + This parameter is required for multiclass classification. + type: integer + default: null + nullable: true + classification_positive_class: + description: | + The positive class in binary classification. + + This parameter is needed to generate precision, recall, and F1 + metrics when doing binary classification. + type: string + default: null + nullable: true + compute_classification_metrics: + description: | + If set, we calculate classification-specific metrics such as accuracy + and F-1 score using the validation set at the end of every epoch. + These metrics can be viewed in the [results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + + In order to compute classification metrics, you must provide a + `validation_file`. Additionally, you must + specify `classification_n_classes` for multiclass classification or + `classification_positive_class` for binary classification. + type: boolean + default: false + nullable: true + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + learning_rate_multiplier: + description: | + The learning rate multiplier to use for training. + The fine-tuning learning rate is the original learning rate used for + pretraining multiplied by this value. + + By default, the learning rate multiplier is the 0.05, 0.1, or 0.2 + depending on final `batch_size` (larger learning rates tend to + perform better with larger batch sizes). We recommend experimenting + with values in the range 0.02 to 0.2 to see what produces the best + results. + default: null + type: number + nullable: true + model: + description: | + The name of the base model to fine-tune. 
You can select one of "ada", + "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21 and before 2023-08-22. + To learn more about these models, see the + [Models](/docs/models) documentation. + default: "curie" + example: "curie" + nullable: true + anyOf: + - type: string + - type: string + enum: ["ada", "babbage", "curie", "davinci"] + x-oaiTypeLabel: string + prompt_loss_weight: + description: | + The weight to use for loss on the prompt tokens. This controls how + much the model tries to learn to generate the prompt (as compared + to the completion which always has a weight of 1.0), and can add + a stabilizing effect to training when completions are short. + + If prompts are extremely long (relative to completions), it may make + sense to reduce this weight so as to avoid over-prioritizing + learning the prompt. + default: 0.01 + type: number + nullable: true + suffix: + description: | + A string of up to 40 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. + + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the [fine-tuning results file](/docs/guides/legacy-fine-tuning/analyzing-your-fine-tuned-model). + Your train and validation data should be mutually exclusive. + + Your dataset must be formatted as a JSONL file, where each validation + example is a JSON object with the keys "prompt" and "completion". + Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/creating-training-data) for more details. 
+ type: string + nullable: true + example: "file-abc123" + required: + - training_file + + ListFineTunesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTune" + object: + type: string + required: + - object + - data + + ListFineTuneEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuneEvent" + object: + type: string + required: + - object + - data + + CreateEmbeddingRequest: + type: object + additionalProperties: false + properties: + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`) and cannot be an empty string. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: string + default: "" + example: "This is a test." + - type: array + minItems: 1 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + minItems: 1 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + model: + description: *model_description + example: "text-embedding-ada-002" + anyOf: + - type: string + - type: string + enum: ["text-embedding-ada-002"] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." 
+ example: "float" + default: "float" + type: string + enum: ["float", "base64"] + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "embedding". + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage + + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. 
+ type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponse: + type: object + properties: + text: + type: string + required: + - text + + CreateTranslationRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. + type: string + response_format: + description: | + The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + type: string + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. 
+ type: number + default: 0 + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranslationResponse: + type: object + properties: + text: + type: string + required: + - text + + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + owned_by: + type: string + description: The organization that owns the model. + required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response + + OpenAIFile: + title: OpenAIFile + description: | + The `File` object represents a document that has been uploaded to OpenAI. + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always "file". + purpose: + type: string + description: The intended purpose of the file. Currently, only "fine-tune" is supported. + status: + type: string + description: The current status of the file, which can be either `uploaded`, `processed`, `pending`, `error`, `deleting` or `deleted`. + status_details: + type: string + nullable: true + description: | + Additional details about the status of the file. If the file is in the `error` state, this will include a message describing the error. 
+      required:
+        - id
+        - object
+        - bytes
+        - created_at
+        - filename
+        - purpose
+        - status
+      x-oaiMeta:
+        name: The file object
+        example: |
+          {
+            "id": "file-abc123",
+            "object": "file",
+            "bytes": 120000,
+            "created_at": 1677610602,
+            "filename": "my_file.jsonl",
+            "purpose": "fine-tune",
+            "status": "uploaded",
+            "status_details": null
+          }
+    Embedding:
+      type: object
+      description: |
+        Represents an embedding vector returned by embedding endpoint.
+      properties:
+        index:
+          type: integer
+          description: The index of the embedding in the list of embeddings.
+        embedding:
+          type: array
+          description: |
+            The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings).
+          items:
+            type: number
+        object:
+          type: string
+          description: The object type, which is always "embedding".
+      required:
+        - index
+        - object
+        - embedding
+      x-oaiMeta:
+        name: The embedding object
+        example: |
+          {
+            "object": "embedding",
+            "embedding": [
+              0.0023064255,
+              -0.009327292,
+              .... (1536 floats total for ada-002)
+              -0.0028842222,
+            ],
+            "index": 0
+          }
+
+    FineTuningJob:
+      type: object
+      title: FineTuningJob
+      description: |
+        The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.
+      properties:
+        id:
+          type: string
+          description: The object identifier, which can be referenced in the API endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the fine-tuning job was created.
+        error:
+          type: object
+          nullable: true
+          description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure.
+          properties:
+            code:
+              type: string
+              description: A machine-readable error code.
+            message:
+              type: string
+              description: A human-readable error message.
+            param:
+              type: string
+              description: The parameter that was invalid, usually `training_file` or `validation_file`. 
This field will be null if the failure was not parameter-specific. + nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning job. 
The value will be null if the fine-tuning job is still running.
+        training_file:
+          type: string
+          description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents).
+        validation_file:
+          type: string
+          nullable: true
+          description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents).
+      required:
+        - created_at
+        - error
+        - finished_at
+        - fine_tuned_model
+        - hyperparameters
+        - id
+        - model
+        - object
+        - organization_id
+        - result_files
+        - status
+        - trained_tokens
+        - training_file
+        - validation_file
+      x-oaiMeta:
+        name: The fine-tuning job object
+        example: *fine_tuning_example
+
+    FineTuningJobEvent:
+      type: object
+      description: Fine-tuning job event object
+      properties:
+        id:
+          type: string
+        created_at:
+          type: integer
+        level:
+          type: string
+          enum: ["info", "warn", "error"]
+        message:
+          type: string
+        object:
+          type: string
+      required:
+        - id
+        - object
+        - created_at
+        - level
+        - message
+      x-oaiMeta:
+        name: The fine-tuning job event object
+        example: |
+          {
+            "object": "event",
+            "id": "ftevent-abc123",
+            "created_at": 1677610602,
+            "level": "info",
+            "message": "Created fine-tuning job"
+          }
+
+    FineTune:
+      type: object
+      deprecated: true
+      description: |
+        The `FineTune` object represents a legacy fine-tune job that has been created through the API.
+      properties:
+        id:
+          type: string
+          description: The object identifier, which can be referenced in the API endpoints.
+        created_at:
+          type: integer
+          description: The Unix timestamp (in seconds) for when the fine-tuning job was created.
+        events:
+          type: array
+          description: The list of events that have been observed in the lifecycle of the FineTune job.
+          items:
+            $ref: "#/components/schemas/FineTuneEvent"
+        fine_tuned_model:
+          type: string
+          nullable: true
+          description: The name of the fine-tuned model that is being created.
+ hyperparams: + type: object + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/legacy-fine-tuning/hyperparameters) for more details. + properties: + batch_size: + type: integer + description: | + The batch size to use for training. The batch size is the number of + training examples used to train a single forward and backward pass. + classification_n_classes: + type: integer + description: | + The number of classes to use for computing classification metrics. + classification_positive_class: + type: string + description: | + The positive class to use for computing classification metrics. + compute_classification_metrics: + type: boolean + description: | + The classification metrics to compute using the validation dataset at the end of every epoch. + learning_rate_multiplier: + type: number + description: | + The learning rate multiplier to use for training. + n_epochs: + type: integer + description: | + The number of epochs to train the model for. An epoch refers to one + full cycle through the training dataset. + prompt_loss_weight: + type: number + description: | + The weight to use for loss on the prompt tokens. + required: + - batch_size + - learning_rate_multiplier + - n_epochs + - prompt_loss_weight + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine-tune". + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results files for the fine-tuning job. + items: + $ref: "#/components/schemas/OpenAIFile" + status: + type: string + description: The current status of the fine-tuning job, which can be either `created`, `running`, `succeeded`, `failed`, or `cancelled`. + training_files: + type: array + description: The list of files used for training. 
+ items: + $ref: "#/components/schemas/OpenAIFile" + updated_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was last updated. + validation_files: + type: array + description: The list of files used for validation. + items: + $ref: "#/components/schemas/OpenAIFile" + required: + - created_at + - fine_tuned_model + - hyperparams + - id + - model + - object + - organization_id + - result_files + - status + - training_files + - updated_at + - validation_files + x-oaiMeta: + name: The fine-tune object + example: *fine_tune_example + + FineTuneEvent: + type: object + deprecated: true + description: Fine-tune event object + properties: + created_at: + type: integer + level: + type: string + message: + type: string + object: + type: string + required: + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tune event object + example: | + { + "object": "event", + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tune job" + } + + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + +security: + - ApiKeyAuth: [] + +x-oaiMeta: + groups: + # > General Notes + # The `groups` section is used to generate the API reference pages and navigation, in the same + # order listed below. Additionally, each `group` can have a list of `sections`, each of which + # will become a navigation subroute and subsection under the group. 
Each section has: + # - `type`: Currently, either an `endpoint` or `object`, depending on how the section needs to + # be rendered + # - `key`: The reference key that can be used to lookup the section definition + # - `path`: The path (url) of the section, which is used to generate the navigation link. + # + # > The `object` sections maps to a schema component and the following fields are read for rendering + # - `x-oaiMeta.name`: The name of the object, which will become the section title + # - `x-oaiMeta.example`: The example object, which will be used to generate the example sample (always JSON) + # - `description`: The description of the object, which will be used to generate the section description + # + # > The `endpoint` section maps to an operation path and the following fields are read for rendering: + # - `x-oaiMeta.name`: The name of the endpoint, which will become the section title + # - `x-oaiMeta.examples`: The endpoint examples, which can be an object (meaning a single variation, most + # endpoints, or an array of objects, meaning multiple variations, e.g. the + # chat completion and completion endpoints, with streamed and non-streamed examples. + # - `x-oaiMeta.returns`: text describing what the endpoint returns. + # - `summary`: The summary of the endpoint, which will be used to generate the section description + - id: audio + title: Audio + description: | + Learn how to turn audio into text. + + Related guide: [Speech to text](/docs/guides/speech-to-text) + sections: + - type: endpoint + key: createTranscription + path: createTranscription + - type: endpoint + key: createTranslation + path: createTranslation + - id: chat + title: Chat + description: | + Given a list of messages comprising a conversation, the model will return a response. 
+ + Related guide: [Chat completions](/docs/guides/gpt) + sections: + - type: object + key: CreateChatCompletionResponse + path: object + - type: object + key: CreateChatCompletionStreamResponse + path: streaming + - type: endpoint + key: createChatCompletion + path: create + - id: completions + title: Completions + legacy: true + description: | + Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position. We recommend most users use our Chat completions API. [Learn more](/docs/deprecations/2023-07-06-gpt-and-embeddings) + + Related guide: [Legacy Completions](/docs/guides/gpt/completions-api) + sections: + - type: object + key: CreateCompletionResponse + path: object + - type: endpoint + key: createCompletion + path: create + - id: embeddings + title: Embeddings + description: | + Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. + + Related guide: [Embeddings](/docs/guides/embeddings) + sections: + - type: object + key: Embedding + path: object + - type: endpoint + key: createEmbedding + path: create + - id: fine-tuning + title: Fine-tuning + description: | + Manage fine-tuning jobs to tailor a model to your specific training data. + + Related guide: [Fine-tune models](/docs/guides/fine-tuning) + sections: + - type: object + key: FineTuningJob + path: object + - type: endpoint + key: createFineTuningJob + path: create + - type: endpoint + key: listPaginatedFineTuningJobs + path: list + - type: endpoint + key: retrieveFineTuningJob + path: retrieve + - type: endpoint + key: cancelFineTuningJob + path: cancel + - type: object + key: FineTuningJobEvent + path: event-object + - type: endpoint + key: listFineTuningEvents + path: list-events + - id: files + title: Files + description: | + Files are used to upload documents that can be used with features like [fine-tuning](/docs/api-reference/fine-tuning). 
+      sections:
+        - type: object
+          key: OpenAIFile
+          path: object
+        - type: endpoint
+          key: listFiles
+          path: list
+        - type: endpoint
+          key: createFile
+          path: create
+        - type: endpoint
+          key: deleteFile
+          path: delete
+        - type: endpoint
+          key: retrieveFile
+          path: retrieve
+        - type: endpoint
+          key: downloadFile
+          path: retrieve-contents
+    - id: images
+      title: Images
+      description: |
+        Given a prompt and/or an input image, the model will generate a new image.
+
+        Related guide: [Image generation](/docs/guides/images)
+      sections:
+        - type: object
+          key: Image
+          path: object
+        - type: endpoint
+          key: createImage
+          path: create
+        - type: endpoint
+          key: createImageEdit
+          path: createEdit
+        - type: endpoint
+          key: createImageVariation
+          path: createVariation
+    - id: models
+      title: Models
+      description: |
+        List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them.
+      sections:
+        - type: object
+          key: Model
+          path: object
+        - type: endpoint
+          key: listModels
+          path: list
+        - type: endpoint
+          key: retrieveModel
+          path: retrieve
+        - type: endpoint
+          key: deleteModel
+          path: delete
+    - id: moderations
+      title: Moderations
+      description: |
+        Given an input text, outputs if the model classifies it as violating OpenAI's content policy.
+
+        Related guide: [Moderations](/docs/guides/moderation)
+      sections:
+        - type: object
+          key: CreateModerationResponse
+          path: object
+        - type: endpoint
+          key: createModeration
+          path: create
+    - id: fine-tunes
+      title: Fine-tunes
+      deprecated: true
+      description: |
+        Manage legacy fine-tuning jobs to tailor a model to your specific training data.
+
+        We recommend transitioning to the updated [fine-tuning API](/docs/guides/fine-tuning)
+      sections:
+        - type: object
+          key: FineTune
+          path: object
+        - type: endpoint
+          key: createFineTune
+          path: create
+        - type: endpoint
+          key: listFineTunes
+          path: list
+        - type: endpoint
+          key: retrieveFineTune
+          path: retrieve
+        - type: endpoint
+          key: cancelFineTune
+          path: cancel
+        - type: object
+          key: FineTuneEvent
+          path: event-object
+        - type: endpoint
+          key: listFineTuneEvents
+          path: list-events
+    - id: edits
+      title: Edits
+      deprecated: true
+      description: |
+        Given a prompt and an instruction, the model will return an edited version of the prompt.
+      sections:
+        - type: object
+          key: CreateEditResponse
+          path: object
+        - type: endpoint
+          key: createEdit
+          path: create
diff --git a/packages/openai_dart/pubspec.yaml b/packages/openai_dart/pubspec.yaml
new file mode 100644
index 00000000..6dfad279
--- /dev/null
+++ b/packages/openai_dart/pubspec.yaml
@@ -0,0 +1,31 @@
+name: openai_dart
+description: Dart Client for the OpenAI API (completions, chat, embeddings, etc.).
+version: 0.0.1-dev.1 +repository: https://github.com/davidmigloz/langchain_dart/tree/main/packages/openai_dart +issue_tracker: https://github.com/davidmigloz/langchain_dart/issues +homepage: https://github.com/davidmigloz/langchain_dart +documentation: https://langchaindart.com + +topics: + - ai + - nlp + - llms + - gpt + +environment: + sdk: ">=3.0.0 <4.0.0" + +dependencies: + fetch_client: ^1.0.2 + freezed_annotation: ^2.4.1 + http: ^1.1.0 + json_annotation: ^4.8.1 + +dev_dependencies: + build_runner: ^2.4.6 + freezed: ^2.4.5 + json_serializable: ^6.7.1 + openapi_spec: ^0.7.1 +# openapi_spec: +# path: ../../../openapi_spec + test: ^1.24.3 diff --git a/packages/openai_dart/test/openai_client_chat_test.dart b/packages/openai_dart/test/openai_client_chat_test.dart new file mode 100644 index 00000000..0ea8b169 --- /dev/null +++ b/packages/openai_dart/test/openai_client_chat_test.dart @@ -0,0 +1,284 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:convert'; +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('OpenAI Chat Completions API tests', () { + late OpenAIClient client; + + setUp(() { + client = OpenAIClient( + apiKey: Platform.environment['OPENAI_API_KEY']!, + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call chat completion API', () async { + final models = [ + ChatCompletionModels.gpt35Turbo, + ChatCompletionModels.gpt4, + ]; + + for (final model in models) { + final request = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration(model), + messages: [ + const ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + const ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'Hello!', + ), + ], + temperature: 0, + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(1)); + final choice = res.choices.first; 
+ expect(choice.index, 0); + expect(choice.finishReason, ChatCompletionFinishReason.stop); + final message = choice.message; + expect(message.role, ChatCompletionMessageRole.assistant); + expect(message.content, isNotEmpty); + expect(message.functionCall, isNull); + expect(message.name, isNull); + expect(res.id, isNotEmpty); + expect(res.created, greaterThan(0)); + expect(res.model, startsWith('gpt-')); + expect(res.object, 'chat.completion'); + expect(res.usage?.promptTokens, greaterThan(0)); + expect(res.usage?.completionTokens, greaterThan(0)); + expect(res.usage?.totalTokens, greaterThan(0)); + } + }); + + test('Test call chat completion API with stop sequence', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'List the numbers from 1 to 9', + ), + ], + maxTokens: 10, + temperature: 0, + stop: ChatCompletionStop.string('4'), + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(1)); + final message = res.choices.first.message; + expect(message.content?.trim(), contains('123')); + expect(message.content?.trim(), isNot(contains('456789'))); + expect( + res.choices.first.finishReason, + ChatCompletionFinishReason.stop, + ); + }); + + test('Test call chat completions API with max tokens', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'Tell me a joke', + ), + 
], + maxTokens: 2, + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, isNotEmpty); + expect( + res.choices.first.finishReason, + ChatCompletionFinishReason.length, + ); + }); + + test('Test call chat completions API with other parameters', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'Tell me a joke', + ), + ], + maxTokens: 2, + presencePenalty: 0.6, + frequencyPenalty: 0.6, + logitBias: {'50256': -100}, + n: 2, + temperature: 0, + topP: 0.2, + user: 'user_123', + ); + final res = await client.createChatCompletion(request: request); + expect(res.choices, hasLength(2)); + }); + + test('Test call chat completions streaming API', () async { + const request = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: + 'You are a helpful assistant that replies only with numbers ' + 'in order without any spaces or commas', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'List the numbers from 1 to 9', + ), + ], + ); + final stream = client.createChatCompletionStream(request: request); + String text = ''; + ChatCompletionStreamResponseChoice? lastChoice; + await for (final res in stream) { + expect(res.id, isNotEmpty); + expect(res.created, greaterThan(0)); + expect(res.model, startsWith('gpt-3.5-turbo')); + expect(res.object, 'chat.completion.chunk'); + expect(res.choices, hasLength(1)); + final choice = res.choices.first; + expect(choice.index, 0); + text += res.choices.first.delta.content?.trim() ?? 
''; + lastChoice = choice; + } + expect(lastChoice?.finishReason, ChatCompletionFinishReason.stop); + expect(text, contains('123456789')); + }); + + test('Test call chat completions API functions', () async { + const function = ChatCompletionFunctions( + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + 'type': 'object', + 'properties': { + 'location': { + 'type': 'string', + 'description': 'The city and state, e.g. San Francisco, CA', + }, + 'unit': { + 'type': 'string', + 'description': 'The unit of temperature to return', + 'enum': ['celsius', 'fahrenheit'], + }, + }, + 'required': ['location'], + }, + ); + + const request1 = CreateChatCompletionRequest( + model: ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'What’s the weather like in Boston right now?', + ), + ], + functions: [function], + ); + final res1 = await client.createChatCompletion(request: request1); + expect(res1.choices, hasLength(1)); + + final choice1 = res1.choices.first; + expect(choice1.finishReason, ChatCompletionFinishReason.functionCall); + + final aiMessage1 = choice1.message; + expect(aiMessage1.role, ChatCompletionMessageRole.assistant); + expect(aiMessage1.content, isNull); + expect(aiMessage1.name, isNull); + expect(aiMessage1.functionCall, isNotNull); + + final functionCall = aiMessage1.functionCall!; + expect(functionCall.name, function.name); + expect(functionCall.arguments, isNotEmpty); + final arguments = json.decode( + functionCall.arguments, + ) as Map; + expect(arguments.containsKey('location'), isTrue); + expect(arguments['location'], contains('Boston')); + + final functionResult = { + 'temperature': '22', + 'unit': 'celsius', + 'description': 'Sunny', + }; + + final request2 = 
CreateChatCompletionRequest( + model: const ChatCompletionModel.enumeration( + ChatCompletionModels.gpt35Turbo, + ), + messages: [ + const ChatCompletionMessage( + role: ChatCompletionMessageRole.system, + content: 'You are a helpful assistant.', + ), + const ChatCompletionMessage( + role: ChatCompletionMessageRole.user, + content: 'What’s the weather like in Boston right now?', + ), + ChatCompletionMessage( + role: ChatCompletionMessageRole.function, + name: function.name, + content: json.encode(functionResult), + ), + ], + functions: [function], + ); + final res2 = await client.createChatCompletion(request: request2); + expect(res2.choices, hasLength(1)); + + final choice2 = res2.choices.first; + expect(choice2.finishReason, ChatCompletionFinishReason.stop); + + final aiMessage2 = choice2.message; + expect(aiMessage2.role, ChatCompletionMessageRole.assistant); + expect(aiMessage2.content, contains('22')); + expect(aiMessage2.functionCall, isNull); + expect(aiMessage2.name, isNull); + }); + }); +} diff --git a/packages/openai_dart/test/openai_client_completions_test.dart b/packages/openai_dart/test/openai_client_completions_test.dart new file mode 100644 index 00000000..8fe5088d --- /dev/null +++ b/packages/openai_dart/test/openai_client_completions_test.dart @@ -0,0 +1,158 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('OpenAI Completions API tests', () { + late OpenAIClient client; + + setUp(() { + client = OpenAIClient( + apiKey: Platform.environment['OPENAI_API_KEY']!, + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call completions API', () async { + final models = [ + CompletionModels.textDavinci003, + CompletionModels.gpt35TurboInstruct, + ]; + + for (final model in models) { + final request = CreateCompletionRequest( + model: CompletionModel.enumeration(model), + prompt: const CompletionPrompt.string( + 
'List the numbers from 1 to 9 in order without any spaces or commas.', + ), + maxTokens: 10, + temperature: 0, + ); + final res = await client.createCompletion(request: request); + expect(res.choices, hasLength(1)); + expect(res.choices.first.text.trim(), contains('123456789')); + expect( + res.choices.first.finishReason, + CompletionFinishReason.stop, + ); + expect(res.id, isNotEmpty); + expect( + res.model.replaceAll(RegExp('[-.]'), '').toLowerCase(), + model.name.toLowerCase(), + ); + expect(res.object, 'text_completion'); + expect(res.usage?.promptTokens, greaterThan(0)); + expect(res.usage?.completionTokens, greaterThan(0)); + expect(res.usage?.totalTokens, greaterThan(0)); + } + }); + + test('Test call completions API with stop sequence', () async { + const request = CreateCompletionRequest( + model: CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string( + 'List the numbers from 1 to 9 in order without any spaces or commas.', + ), + maxTokens: 10, + temperature: 0, + stop: CompletionStop.string('4'), + ); + final res = await client.createCompletion(request: request); + expect(res.choices, hasLength(1)); + expect(res.choices.first.text.trim(), contains('123')); + expect(res.choices.first.text.trim(), isNot(contains('456789'))); + expect( + res.choices.first.finishReason, + CompletionFinishReason.stop, + ); + }); + + test('Test call completions API with max tokens', () async { + const request = CreateCompletionRequest( + model: CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string('Tell me a joke'), + maxTokens: 2, + ); + final res = await client.createCompletion(request: request); + expect(res.choices, isNotEmpty); + expect( + res.choices.first.finishReason, + CompletionFinishReason.length, + ); + }); + + test('Test call completions API with echo', () async { + const request = CreateCompletionRequest( + model: 
CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string('Say this is a test'), + echo: true, + maxTokens: 20, + ); + final res = await client.createCompletion(request: request); + expect(res.choices.first.text.trim(), startsWith('Say this is a test')); + }); + + test('Test call completions API with suffix', () async { + const request = CreateCompletionRequest( + model: CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string( + 'List the numbers from 1 to 9 in order without any spaces or commas.', + ), + suffix: '789', + ); + final res = await client.createCompletion(request: request); + final completion = res.choices.first.text.trim(); + expect(completion, contains('123456')); + expect(completion, isNot(contains('789'))); + }); + + test('Test call completions API with other parameters', () async { + const request = CreateCompletionRequest( + model: CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string('Say this is a test'), + bestOf: 5, + frequencyPenalty: 0.5, + logitBias: {'50256': -100}, + logprobs: 2, + maxTokens: 20, + n: 2, + presencePenalty: 0.5, + temperature: 0, + topP: 0.2, + user: 'user_123', + ); + final res = await client.createCompletion(request: request); + expect(res.choices, hasLength(2)); + expect(res.choices.first.logprobs, isNotNull); + expect(res.choices.first.logprobs?.tokens, isNotEmpty); + expect(res.choices.first.logprobs?.tokenLogprobs, isNotEmpty); + expect(res.choices.first.logprobs?.textOffset, isNotEmpty); + expect(res.choices.first.logprobs?.topLogprobs, isNotEmpty); + }); + + test('Test call completions streaming API', () async { + const request = CreateCompletionRequest( + model: CompletionModel.enumeration(CompletionModels.gpt35TurboInstruct), + prompt: CompletionPrompt.string( + 'List the numbers from 1 to 9 in order without any spaces or commas.', + ), + maxTokens: 10, + temperature: 0, + ); + 
final stream = client.createCompletionStream(request: request); + String text = ''; + await for (final res in stream) { + expect(res.choices, isNotEmpty); + text += res.choices.first.text.trim(); + } + expect(text, contains('123456789')); + }); + }); +} diff --git a/packages/openai_dart/test/openai_client_embeddings_test.dart b/packages/openai_dart/test/openai_client_embeddings_test.dart new file mode 100644 index 00000000..080ab483 --- /dev/null +++ b/packages/openai_dart/test/openai_client_embeddings_test.dart @@ -0,0 +1,41 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('OpenAI Embeddings API tests', () { + late OpenAIClient client; + + setUp(() { + client = OpenAIClient( + apiKey: Platform.environment['OPENAI_API_KEY']!, + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call embeddings API', () async { + const request = CreateEmbeddingRequest( + model: EmbeddingModel.enumeration(EmbeddingModels.textEmbeddingAda002), + input: EmbeddingInput.string( + 'The food was delicious and the waiter...', + ), + ); + final res = await client.createEmbedding(request: request); + expect(res.data, hasLength(1)); + expect(res.data.first.index, 0); + expect(res.data.first.embedding, hasLength(1536)); + expect(res.data.first.object, 'embedding'); + expect(res.model, startsWith('text-embedding-ada-002')); + expect(res.object, 'list'); + expect(res.usage.promptTokens, greaterThan(0)); + expect(res.usage.totalTokens, greaterThan(0)); + }); + }); +} diff --git a/packages/openai_dart/test/openai_client_fine_tuning_test.dart b/packages/openai_dart/test/openai_client_fine_tuning_test.dart new file mode 100644 index 00000000..dd8bd831 --- /dev/null +++ b/packages/openai_dart/test/openai_client_fine_tuning_test.dart @@ -0,0 +1,73 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; 
import 'package:test/test.dart';

void main() {
  group('OpenAI Fine-tuning API tests', () {
    late OpenAIClient client;

    // A fresh client per test; the API key is read from the environment.
    setUp(() {
      client = OpenAIClient(
        apiKey: Platform.environment['OPENAI_API_KEY']!,
      );
    });

    // Release the underlying HTTP resources after every test.
    tearDown(() {
      client.endSession();
    });

    // Skipped by default: creating a job needs a previously uploaded
    // training file id and incurs real fine-tuning costs.
    test('Test create fine-tuning job', skip: true, () async {
      const request = CreateFineTuningJobRequest(
        model: FineTuningModel.enumeration(FineTuningModels.gpt35Turbo),
        trainingFile: 'file-abc123',
        validationFile: 'file-abc123',
        hyperparameters: FineTuningJobHyperparameters(
          nEpochs: FineTuningNEpochs.enumeration(FineTuningNEpochsOptions.auto),
        ),
      );
      final job = await client.createFineTuningJob(request: request);
      expect(job.id, isNotEmpty);
      expect(job.createdAt, greaterThan(0));
      expect(job.error, isNull);
      expect(job.object, 'fine_tuning.job');
      expect(job.fineTunedModel, isNotNull);
    });

    test('Test list fine-tuning jobs', () async {
      final jobs = await client.listPaginatedFineTuningJobs();
      // NOTE(review): assumes the test account has no fine-tuning jobs —
      // confirm this holds for the CI account or the assertion will flake.
      expect(jobs.data, isEmpty);
      expect(jobs.hasMore, false);
      expect(jobs.object, 'list');
    });

    // Skipped by default: requires an existing job id.
    test('Test retrieve fine-tuning job', skip: true, () async {
      final job = await client.retrieveFineTuningJob(
        fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
      );
      expect(job.id, isNotEmpty);
      expect(job.createdAt, greaterThan(0));
      expect(job.error, isNull);
      expect(job.object, 'fine_tuning.job');
      expect(job.fineTunedModel, isNotNull);
    });

    // Skipped by default: requires a job that is still cancellable.
    test('Test cancel fine-tuning jobs', skip: true, () async {
      final job = await client.cancelFineTuningJob(
        fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
      );
      expect(job.status, FineTuningJobStatus.cancelled);
    });

    // Skipped by default: requires an existing job id.
    test('Test list fine-tuning events', skip: true, () async {
      final events = await client.listFineTuningEvents(
        fineTuningJobId: 'ft-AF1WoRqd3aJAHsqc9NY7iL8F',
      );
      expect(events.data, isEmpty);
      expect(events.object, 'list');
    });
  });
}
// ---- packages/openai_dart/test/openai_client_images_test.dart ----
@TestOn('vm')
library; // Uses dart:io

import 'dart:io';

import 'package:openai_dart/openai_dart.dart';
import 'package:test/test.dart';

void main() {
  group('OpenAI Images API tests', () {
    late OpenAIClient client;

    // A fresh client per test; the API key is read from the environment.
    setUp(() {
      client = OpenAIClient(
        apiKey: Platform.environment['OPENAI_API_KEY']!,
      );
    });

    // Release the underlying HTTP resources after every test.
    tearDown(() {
      client.endSession();
    });

    test('Test call create image API', () async {
      const request = CreateImageRequest(
        prompt: 'A cute baby sea otter',
      );
      final response = await client.createImage(request: request);
      expect(response.created, greaterThan(0));
      expect(response.data, hasLength(1));
      // Default response format is a URL, so no base64 payload is present.
      final image = response.data.first;
      expect(image.b64Json, isNull);
      expect(image.url, startsWith('http'));
    });

    test('Test call create image API returned in base64', () async {
      const request = CreateImageRequest(
        prompt: 'A cute baby sea otter',
        n: 2,
        responseFormat: ImageResponseFormat.b64Json,
        size: ImageSize.v256x256,
      );
      final response = await client.createImage(request: request);
      expect(response.created, greaterThan(0));
      expect(response.data, hasLength(2));
      // With b64Json the payload is inlined and the URL is omitted.
      final firstImage = response.data.first;
      expect(firstImage.b64Json, isNotEmpty);
      expect(firstImage.url, isNull);
    });
  });
}
// ---- packages/openai_dart/test/openai_client_models_test.dart ----
@TestOn('vm')
library; // Uses dart:io

import 'dart:io';

import 'package:openai_dart/openai_dart.dart';
import 'package:test/test.dart';

void main() {
  group('OpenAI Models API tests', () {
    late OpenAIClient client;

    setUp(() {
      client = OpenAIClient(
        apiKey:
Platform.environment['OPENAI_API_KEY']!, + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call list models API', () async { + final res = await client.listModels(); + expect(res.object, 'list'); + expect(res.data, isNotEmpty); + final model = res.data.first; + expect(model.id, isNotEmpty); + expect(model.object, 'model'); + expect(model.created, greaterThan(0)); + expect(model.ownedBy, isNotEmpty); + }); + + test('Test call retrieve model API', () async { + final res = await client.retrieveModel(model: 'gpt-4'); + expect(res.id, 'gpt-4'); + expect(res.object, 'model'); + expect(res.created, greaterThan(0)); + expect(res.ownedBy, 'openai'); + }); + + test('Test call delete fine-tune model API', skip: true, () async { + final res = await client.deleteModel( + model: 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR', + ); + expect(res.id, 'ft:gpt-3.5-turbo-0613:langchain::7qTVM5AR'); + expect(res.deleted, true); + expect(res.object, 'model'); + }); + }); +} diff --git a/packages/openai_dart/test/openai_client_moderations_test.dart b/packages/openai_dart/test/openai_client_moderations_test.dart new file mode 100644 index 00000000..70355e6c --- /dev/null +++ b/packages/openai_dart/test/openai_client_moderations_test.dart @@ -0,0 +1,42 @@ +@TestOn('vm') +library; // Uses dart:io + +import 'dart:io'; + +import 'package:openai_dart/openai_dart.dart'; +import 'package:test/test.dart'; + +void main() { + group('OpenAI Moderations API tests', () { + late OpenAIClient client; + + setUp(() { + client = OpenAIClient( + apiKey: Platform.environment['OPENAI_API_KEY']!, + ); + }); + + tearDown(() { + client.endSession(); + }); + + test('Test call moderations API', () async { + const request = CreateModerationRequest( + model: ModerationModel.enumeration( + ModerationModels.textModerationLatest, + ), + input: ModerationInput.string('I want to kill them.'), + ); + final res = await client.createModeration(request: request); + expect(res.id, isNotEmpty); + 
expect(res.model, startsWith('text-moderation-')); + expect(res.results, hasLength(1)); + final result = res.results.first; + expect(result.flagged, isTrue); + expect(result.categories.violence, isTrue); + expect(result.categories.sexual, isFalse); + expect(result.categoryScores.violence, greaterThan(0.8)); + expect(result.categoryScores.sexual, lessThan(0.2)); + }); + }); +}