feat(openai_dart): Support new completion API functionality (#199)
- Support `seed`

> The new seed parameter enables reproducible outputs by making the model return consistent completions most of the time. This beta feature is useful for use cases such as replaying requests for debugging, writing more comprehensive unit tests, and generally having a higher degree of control over the model behavior. We at OpenAI have been using this feature internally for our own unit tests and have found it invaluable. We’re excited to see how developers will use it.
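
A minimal usage sketch (not part of the commit; it assumes the package's `OpenAIClient` construction, the `CompletionModel.modelId` / `CompletionPrompt.string` union constructors from the package README, and illustrative model, prompt, and seed values):

```dart
import 'package:openai_dart/openai_dart.dart';

Future<void> main() async {
  final client = OpenAIClient(apiKey: 'OPENAI_API_KEY'); // your key here

  // Repeated requests with the same `seed` and otherwise identical
  // parameters should return the same completion most of the time
  // (best effort only; determinism is not guaranteed).
  final res = await client.createCompletion(
    request: CreateCompletionRequest(
      model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
      prompt: CompletionPrompt.string('Say this is a test'),
      maxTokens: 5,
      temperature: 0,
      seed: 9999, // illustrative seed value
    ),
  );
  print(res.choices.first.text);
  client.endSession();
}
```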
davidmigloz committed Nov 7, 2023
1 parent 01820d6 commit f12f6f5
Showing 6 changed files with 177 additions and 20 deletions.
```diff
@@ -42,7 +42,7 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
 
     /// Modify the likelihood of specified tokens appearing in the completion.
     ///
-    /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+    /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     ///
     /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
     @JsonKey(name: 'logit_bias', includeIfNull: false)
@@ -72,6 +72,11 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
     @Default(0.0)
     double? presencePenalty,
 
+    /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
+    ///
+    /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
+    @JsonKey(includeIfNull: false) int? seed,
+
     /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
     @_CompletionStopConverter()
     @JsonKey(includeIfNull: false)
@@ -113,6 +118,7 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
     'max_tokens',
     'n',
     'presence_penalty',
+    'seed',
     'stop',
     'stream',
     'suffix',
@@ -137,6 +143,8 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
   static const presencePenaltyDefaultValue = 0.0;
   static const presencePenaltyMinValue = -2.0;
   static const presencePenaltyMaxValue = 2.0;
+  static const seedMinValue = -9223372036854775808;
+  static const seedMaxValue = 9223372036854775807;
   static const temperatureDefaultValue = 1.0;
   static const temperatureMinValue = 0.0;
   static const temperatureMaxValue = 2.0;
@@ -181,6 +189,12 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
     if (presencePenalty != null && presencePenalty! > presencePenaltyMaxValue) {
       return "The value of 'presencePenalty' cannot be > $presencePenaltyMaxValue";
     }
+    if (seed != null && seed! < seedMinValue) {
+      return "The value of 'seed' cannot be < $seedMinValue";
+    }
+    if (seed != null && seed! > seedMaxValue) {
+      return "The value of 'seed' cannot be > $seedMaxValue";
+    }
     if (temperature != null && temperature! < temperatureMinValue) {
       return "The value of 'temperature' cannot be < $temperatureMinValue";
     }
@@ -209,6 +223,7 @@ class CreateCompletionRequest with _$CreateCompletionRequest {
       'max_tokens': maxTokens,
       'n': n,
       'presence_penalty': presencePenalty,
+      'seed': seed,
       'stop': stop,
       'stream': stream,
       'suffix': suffix,
```
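
The new `seedMinValue`/`seedMaxValue` bounds span the full 64-bit signed integer range and are enforced by the class's validation method, which the excerpt above elides. A hedged sketch, assuming that method is the package's usual `validateSchema()`, returning `null` on success and an error string otherwise:

```dart
import 'package:openai_dart/openai_dart.dart';

void checkSeedBounds() {
  final request = CreateCompletionRequest(
    model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
    prompt: CompletionPrompt.string('Say this is a test'),
    seed: 42, // any Dart int lies within [seedMinValue, seedMaxValue]
  );
  // Hypothetical call: validateSchema() is assumed from the elided code;
  // it returns null when all properties pass their range checks.
  final String? error = request.validateSchema();
  assert(error == null);
}
```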
```diff
@@ -27,8 +27,14 @@ class CreateCompletionResponse with _$CreateCompletionResponse {
     /// The model used for completion.
     required String model,
 
+    /// This fingerprint represents the backend configuration that the model runs with.
+    ///
+    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
+    @JsonKey(name: 'system_fingerprint', includeIfNull: false)
+    String? systemFingerprint,
+
     /// The object type, which is always "text_completion"
-    required String object,
+    required CreateCompletionResponseObject object,
 
     /// Usage statistics for the completion request.
     @JsonKey(includeIfNull: false) CompletionUsage? usage,
@@ -44,6 +50,7 @@ class CreateCompletionResponse with _$CreateCompletionResponse {
     'choices',
     'created',
     'model',
+    'system_fingerprint',
     'object',
     'usage'
   ];
@@ -60,8 +67,19 @@ class CreateCompletionResponse with _$CreateCompletionResponse {
       'choices': choices,
       'created': created,
       'model': model,
+      'system_fingerprint': systemFingerprint,
       'object': object,
       'usage': usage,
     };
   }
 }
+
+// ==========================================
+// ENUM: CreateCompletionResponseObject
+// ==========================================
+
+/// The object type, which is always "text_completion"
+enum CreateCompletionResponseObject {
+  @JsonValue('text_completion')
+  textCompletion,
+}
```
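
Taken together, `seed` and the new `systemFingerprint` field support a reproducibility check like the following (a sketch under the same assumptions as the first example; the helper name is hypothetical):

```dart
import 'package:openai_dart/openai_dart.dart';

/// Hypothetical helper: issues the same seeded request twice and reports
/// whether both completions were served by the same backend configuration.
Future<bool> sameBackendConfig(OpenAIClient client) async {
  final request = CreateCompletionRequest(
    model: CompletionModel.modelId('gpt-3.5-turbo-instruct'),
    prompt: CompletionPrompt.string('Say this is a test'),
    maxTokens: 5,
    temperature: 0,
    seed: 42,
  );
  final first = await client.createCompletion(request: request);
  final second = await client.createCompletion(request: request);
  // A changed fingerprint signals a backend change that can alter output
  // even when `seed` and all other parameters are identical.
  return first.systemFingerprint == second.systemFingerprint;
}
```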
