feat: Add GPT-4o-mini to model catalog (#497)
davidmigloz committed Jul 20, 2024
1 parent cfa974a commit faa23ae
Showing 11 changed files with 701 additions and 241 deletions.
16 changes: 8 additions & 8 deletions packages/langchain_openai/lib/src/chat_models/types.dart
@@ -30,23 +30,23 @@ class ChatOpenAIOptions extends ChatModelOptions {
///
/// Available models:
/// - `gpt-4`
/// - `gpt-4-0314`
/// - `gpt-4-0613`
/// - `gpt-4-32k`
/// - `gpt-4-32k-0314`
/// - `gpt-4-32k-0613`
/// - `gpt-4-turbo-preview`
/// - `gpt-4-1106-preview`
/// - `gpt-4-0125-preview`
/// - `gpt-4-0314`
/// - `gpt-4-0613`
/// - `gpt-4-1106-preview`
/// - `gpt-4-turbo`
/// - `gpt-4-turbo-2024-04-09`
/// - `gpt-4-turbo-preview`
/// - `gpt-4-vision-preview`
/// - `gpt-4o`
/// - `gpt-4o-2024-05-13`
/// - `gpt-4o-mini`
/// - `gpt-4o-mini-2024-07-18`
/// - `gpt-3.5-turbo`
/// - `gpt-3.5-turbo-16k`
/// - `gpt-3.5-turbo-0301`
/// - `gpt-3.5-turbo-0613`
/// - `gpt-3.5-turbo-1106`
/// - `gpt-3.5-turbo-16k-0613`
///
/// Mind that the list may be outdated.
/// See https://platform.openai.com/docs/models for the latest list.
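A minimal usage sketch of selecting the newly listed model through `ChatOpenAIOptions` in langchain_openai; the API key, prompt, and `close()` call are illustrative assumptions, not code from this commit:

import 'package:langchain/langchain.dart';
import 'package:langchain_openai/langchain_openai.dart';

Future<void> main() async {
  // Select the newly documented model via the default options.
  final chatModel = ChatOpenAI(
    apiKey: 'OPENAI_API_KEY', // placeholder; load from the environment in real code
    defaultOptions: const ChatOpenAIOptions(
      model: 'gpt-4o-mini',
      temperature: 0,
    ),
  );

  final result = await chatModel.invoke(
    PromptValue.string('Say hello in one short sentence.'),
  );
  print(result.output.content);

  chatModel.close();
}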
@@ -135,7 +135,7 @@ sealed class ChatCompletionUserMessageContent
with _$ChatCompletionUserMessageContent {
const ChatCompletionUserMessageContent._();

/// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model.
/// An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.
const factory ChatCompletionUserMessageContent.parts(
List<ChatCompletionMessageContentPart> value,
) = ChatCompletionMessageContentParts;
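A rough sketch of the multi-modal content parts the updated doc comment describes, assuming the generated factories `ChatCompletionMessageContentPart.text`/`.image` and `ChatCompletionMessageImageUrl`; the prompt and URL are placeholders:

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY'); // placeholder key

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4o),
      messages: [
        ChatCompletionMessage.user(
          // Multiple image_url parts can be added alongside the text part.
          content: ChatCompletionUserMessageContent.parts([
            ChatCompletionMessageContentPart.text(
              text: 'What is in this image?',
            ),
            ChatCompletionMessageContentPart.image(
              imageUrl: ChatCompletionMessageImageUrl(
                url: 'https://example.com/image.png', // placeholder URL
              ),
            ),
          ]),
        ),
      ],
    ),
  );
  print(res.choices.first.message.content);

  client.endSession();
}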
@@ -163,6 +163,10 @@ enum AssistantModels {
gpt4o,
@JsonValue('gpt-4o-2024-05-13')
gpt4o20240513,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
gpt4oMini20240718,
@JsonValue('gpt-3.5-turbo')
gpt35Turbo,
@JsonValue('gpt-3.5-turbo-16k')
@@ -73,6 +73,7 @@ class CreateChatCompletionRequest with _$CreateChatCompletionRequest {
/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
/// - If set to 'default', the request will be processed using the default service tier with a lower
/// uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
///
/// When this parameter is set, the response body will include the `service_tier` utilized.
@JsonKey(
@@ -302,6 +303,10 @@ enum ChatCompletionModels {
gpt4o,
@JsonValue('gpt-4o-2024-05-13')
gpt4o20240513,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
gpt4oMini20240718,
@JsonValue('gpt-3.5-turbo')
gpt35Turbo,
@JsonValue('gpt-3.5-turbo-16k')
@@ -423,6 +428,7 @@ class ChatCompletionResponseFormat with _$ChatCompletionResponseFormat {
/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
/// - If set to 'default', the request will be processed using the default service tier with a lower
/// uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
///
/// When this parameter is set, the response body will include the `service_tier` utilized.
enum CreateChatCompletionRequestServiceTier {
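A minimal sketch of a chat completion request using the new `ChatCompletionModels.gpt4oMini` value; the key and prompt are placeholders, and `serviceTier` is left unset, which per the doc above behaves like 'auto':

import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY'); // placeholder key

  final res = await client.createChatCompletion(
    request: CreateChatCompletionRequest(
      // New enum value added by this commit; the string form
      // ChatCompletionModel.modelId('gpt-4o-mini') works as well.
      model: ChatCompletionModel.model(ChatCompletionModels.gpt4oMini),
      messages: [
        ChatCompletionMessage.system(content: 'You are a helpful assistant.'),
        ChatCompletionMessage.user(
          content: ChatCompletionUserMessageContent.string('Hello!'),
        ),
      ],
    ),
  );
  print(res.choices.first.message.content);

  client.endSession();
}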
@@ -203,6 +203,10 @@ enum RunModels {
gpt4o,
@JsonValue('gpt-4o-2024-05-13')
gpt4o20240513,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
gpt4oMini20240718,
@JsonValue('gpt-3.5-turbo')
gpt35Turbo,
@JsonValue('gpt-3.5-turbo-16k')
@@ -202,6 +202,10 @@ enum ThreadAndRunModels {
gpt4o,
@JsonValue('gpt-4o-2024-05-13')
gpt4o20240513,
@JsonValue('gpt-4o-mini')
gpt4oMini,
@JsonValue('gpt-4o-mini-2024-07-18')
gpt4oMini20240718,
@JsonValue('gpt-3.5-turbo')
gpt35Turbo,
@JsonValue('gpt-3.5-turbo-16k')
@@ -15,8 +15,10 @@ class FineTuningJobHyperparameters with _$FineTuningJobHyperparameters {

/// Factory constructor for FineTuningJobHyperparameters
const factory FineTuningJobHyperparameters({
/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
///
/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
/// manually, we support any number between 1 and 50 epochs.
@_FineTuningNEpochsConverter()
@JsonKey(name: 'n_epochs')
required FineTuningNEpochs nEpochs,
@@ -56,8 +58,10 @@ enum FineTuningNEpochsOptions {
// CLASS: FineTuningNEpochs
// ==========================================

/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
///
/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
/// manually, we support any number between 1 and 50 epochs.
@freezed
sealed class FineTuningNEpochs with _$FineTuningNEpochs {
const FineTuningNEpochs._();
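To make the reworded `n_epochs` doc concrete, a small sketch of the two wire-level shapes it allows, using plain maps rather than the generated classes:

import 'dart:convert';

void main() {
  // `n_epochs` is a union: the literal string "auto" or an integer from 1 to 50,
  // nested under the fine-tuning job's hyperparameters.
  final autoEpochs = {
    'hyperparameters': {'n_epochs': 'auto'},
  };
  final manualEpochs = {
    'hyperparameters': {'n_epochs': 3}, // any value from 1 to 50
  };

  print(jsonEncode(autoEpochs)); // {"hyperparameters":{"n_epochs":"auto"}}
  print(jsonEncode(manualEpochs)); // {"hyperparameters":{"n_epochs":3}}
}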
21 changes: 15 additions & 6 deletions packages/openai_dart/lib/src/generated/schema/schema.freezed.dart
@@ -3463,6 +3463,7 @@ mixin _$CreateChatCompletionRequest {
/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
/// - If set to 'default', the request will be processed using the default service tier with a lower
/// uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
///
/// When this parameter is set, the response body will include the `service_tier` utilized.
@JsonKey(
@@ -4151,6 +4152,7 @@ class _$CreateChatCompletionRequestImpl extends _CreateChatCompletionRequest {
/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
/// - If set to 'default', the request will be processed using the default service tier with a lower
/// uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
///
/// When this parameter is set, the response body will include the `service_tier` utilized.
@override
@@ -4477,6 +4479,7 @@ abstract class _CreateChatCompletionRequest
/// - If set to 'auto', the system will utilize scale tier credits until they are exhausted.
/// - If set to 'default', the request will be processed using the default service tier with a lower
/// uptime SLA and no latency guarantee.
/// - When not set, the default behavior is 'auto'.
///
/// When this parameter is set, the response body will include the `service_tier` utilized.
@JsonKey(
@@ -15766,8 +15769,10 @@ FineTuningJobHyperparameters _$FineTuningJobHyperparametersFromJson(

/// @nodoc
mixin _$FineTuningJobHyperparameters {
/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
///
/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
/// manually, we support any number between 1 and 50 epochs.
@_FineTuningNEpochsConverter()
@JsonKey(name: 'n_epochs')
FineTuningNEpochs get nEpochs => throw _privateConstructorUsedError;
@@ -15882,8 +15887,10 @@ class _$FineTuningJobHyperparametersImpl extends _FineTuningJobHyperparameters {
Map<String, dynamic> json) =>
_$$FineTuningJobHyperparametersImplFromJson(json);

/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
///
/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
/// manually, we support any number between 1 and 50 epochs.
@override
@_FineTuningNEpochsConverter()
@JsonKey(name: 'n_epochs')
@@ -15936,8 +15943,10 @@ abstract class _FineTuningJobHyperparameters

@override

/// The number of epochs to train the model for. An epoch refers to one
/// full cycle through the training dataset.
/// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
///
/// "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
/// manually, we support any number between 1 and 50 epochs.
@_FineTuningNEpochsConverter()
@JsonKey(name: 'n_epochs')
FineTuningNEpochs get nEpochs;
8 changes: 8 additions & 0 deletions packages/openai_dart/lib/src/generated/schema/schema.g.dart

Some generated files are not rendered by default.

29 changes: 20 additions & 9 deletions packages/openai_dart/oas/openapi_curated.yaml
@@ -1820,6 +1820,8 @@ components:
"gpt-4-vision-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
@@ -1915,6 +1917,7 @@ components:
- If set to 'auto', the system will utilize scale tier credits until they are exhausted.
- If set to 'default', the request will be processed using the default service tier with a lower
uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier` utilized.
type: string
@@ -2071,7 +2074,7 @@ components:
- type: string
description: The text contents of the message.
- type: array
description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model.
description: An array of content parts with a defined type, each can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4o` model.
items:
$ref: "#/components/schemas/ChatCompletionMessageContentPart"
minItems: 1
@@ -2918,8 +2921,10 @@ components:
n_epochs:
title: FineTuningNEpochs
description: |
The number of epochs to train the model for. An epoch refers to one
full cycle through the training dataset.
The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
"auto" decides the optimal number of epochs based on the size of the dataset. If setting the number
manually, we support any number between 1 and 50 epochs.
oneOf:
- type: string
title: FineTuningNEpochsOptions
@@ -3523,6 +3528,8 @@ components:
"gpt-4-vision-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
@@ -4085,6 +4092,8 @@ components:
"gpt-4-vision-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
@@ -4327,6 +4336,8 @@ components:
"gpt-4-vision-preview",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
@@ -5872,7 +5883,7 @@ components:
propertyName: type
AutoChunkingStrategyRequestParam:
type: object
description: |
description: |
Auto Chunking Strategy, the default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800`
and `chunk_overlap_tokens` of `400`.
additionalProperties: false
@@ -5906,7 +5917,7 @@ components:
type: integer
minimum: 100
maximum: 4096
description: |
description: |
The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the
maximum value is `4096`.
chunk_overlap_tokens:
@@ -5922,13 +5933,13 @@
type: object
description: The chunking strategy used to chunk the file(s).
oneOf:
- $ref: "#/components/schemas/StaticChunkingStrategyResponseParam"
- $ref: "#/components/schemas/OtherChunkingStrategyResponseParam"
- $ref: "#/components/schemas/StaticChunkingStrategyResponseParam"
- $ref: "#/components/schemas/OtherChunkingStrategyResponseParam"
discriminator:
propertyName: type
propertyName: type
OtherChunkingStrategyResponseParam:
type: object
description: |
description: |
Other Chunking Strategy. This is returned when the chunking strategy is unknown. Typically, this is because
the file was indexed before the `chunking_strategy` concept was introduced in the API.
additionalProperties: false
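For the chunking-strategy schemas touched above, a short sketch of the request payloads they describe, again using plain maps; the nesting of the static options under a `static` key follows the upstream OpenAI spec and is not shown in this excerpt:

import 'dart:convert';

void main() {
  // Default strategy: the API picks max_chunk_size_tokens 800 and chunk_overlap_tokens 400.
  final autoStrategy = {'type': 'auto'};

  // Static strategy: explicit chunk size (100-4096) and overlap.
  final staticStrategy = {
    'type': 'static',
    'static': {
      'max_chunk_size_tokens': 800,
      'chunk_overlap_tokens': 400,
    },
  };

  print(jsonEncode(autoStrategy));
  print(jsonEncode(staticStrategy));
}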