60 changes: 60 additions & 0 deletions openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt
@@ -500,14 +500,74 @@ private constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(AssistantResponseFormatOption.ofResponseFormatText(responseFormatText))
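For reference, a minimal sketch of calling the new overload's converter directly, assuming the generated `ResponseFormatText` builder supplies its constant `type` discriminator on its own (only the `ofResponseFormatText` converter is visible in this diff):

```kotlin
import com.openai.models.AssistantResponseFormatOption
import com.openai.models.ResponseFormatText

// Hedged sketch: the overload above delegates to this converter and yields the
// wire form { "type": "text" }. The no-arg build() assumes the generated
// builder defaults the constant `type` field, which this diff does not show.
val textOption: AssistantResponseFormatOption =
    AssistantResponseFormatOption.ofResponseFormatText(
        ResponseFormatText.builder().build()
    )
```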

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(responseFormatJsonObject)
)

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(responseFormatJsonSchema)
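Before the next hunk, a sketch of the `json_schema` variant the KDoc describes, i.e. the wire form `{ "type": "json_schema", "json_schema": {...} }`. The nested `JsonSchema` builder, its `name(...)`/`schema(...)` setters, and `putAdditionalProperty(...)` are assumptions about the SDK's generated models, not part of this diff:

```kotlin
import com.openai.core.JsonValue
import com.openai.models.AssistantResponseFormatOption
import com.openai.models.ResponseFormatJsonSchema

// Hedged sketch: enabling Structured Outputs. The schema contents are a
// minimal illustrative placeholder; the nested builder names are assumptions.
val structuredOption = AssistantResponseFormatOption.ofResponseFormatJsonSchema(
    ResponseFormatJsonSchema.builder()
        .jsonSchema(
            ResponseFormatJsonSchema.JsonSchema.builder()
                .name("assistant_reply") // illustrative schema name
                .schema(
                    ResponseFormatJsonSchema.JsonSchema.Schema.builder()
                        .putAdditionalProperty("type", JsonValue.from("object"))
                        .build()
                )
                .build()
        )
        .build()
)
```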
@@ -592,18 +592,81 @@ constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
* GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which ensures the model will match your supplied JSON schema. Learn more in
* the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
* the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce
* JSON yourself via a system or user message. Without this, the model may generate an
* unending stream of whitespace until the generation reaches the token limit, resulting
* in a long-running and seemingly "stuck" request. Also note that the message content
* may be partially cut off if `finish_reason="length"`, which indicates the generation
* exceeded `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
)

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
* GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which ensures the model will match your supplied JSON schema. Learn more in
* the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
* the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce
* JSON yourself via a system or user message. Without this, the model may generate an
* unending stream of whitespace until the generation reaches the token limit, resulting
* in a long-running and seemingly "stuck" request. Also note that the message content
* may be partially cut off if `finish_reason="length"`, which indicates the generation
* exceeded `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(
responseFormatJsonObject
)
)
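A sketch of the `json_object` path for comparison; everything here except the `ofResponseFormatJsonObject` converter (visible above) assumes the generated builder defaults its constant `type` field:

```kotlin
import com.openai.models.AssistantResponseFormatOption
import com.openai.models.ResponseFormatJsonObject

// Hedged sketch: JSON mode, wire form { "type": "json_object" }. All three
// overloads funnel into the same AssistantResponseFormatOption union type.
val jsonModeOption = AssistantResponseFormatOption.ofResponseFormatJsonObject(
    ResponseFormatJsonObject.builder().build()
)
```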

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
* GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
* Outputs which ensures the model will match your supplied JSON schema. Learn more in
* the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
* the model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce
* JSON yourself via a system or user message. Without this, the model may generate an
* unending stream of whitespace until the generation reaches the token limit, resulting
* in a long-running and seemingly "stuck" request. Also note that the message content
* may be partially cut off if `finish_reason="length"`, which indicates the generation
* exceeded `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(
@@ -976,14 +1039,74 @@ constructor(
body.responseFormat(behavior)
}

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
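The KDoc's JSON-mode warning is easy to miss, so a sketch of the pairing it requires. The params class name (`BetaAssistantCreateParams`) and its `model(...)`/`instructions(...)` setters are assumptions about the enclosing file, which this excerpt does not name; only the `responseFormat(ResponseFormatJsonObject)` overload comes from this diff:

```kotlin
import com.openai.models.BetaAssistantCreateParams // assumed class name
import com.openai.models.ResponseFormatJsonObject

// Hedged sketch: when requesting { "type": "json_object" }, the prompt itself
// must also ask for JSON, otherwise the model may stream whitespace until it
// hits the token limit. Builder and setter names here are assumptions.
val params = BetaAssistantCreateParams.builder()
    .model("gpt-4o")
    .instructions("You are a helpful assistant. Always reply with a single valid JSON object.")
    .responseFormat(ResponseFormatJsonObject.builder().build())
    .build()
```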

/**
* Specifies the format that the model must output. Compatible with
* [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
* Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
* Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
* which ensures the model will match your supplied JSON schema. Learn more in the
* [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
*
* Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
* model generates is valid JSON.
*
* **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
* yourself via a system or user message. Without this, the model may generate an unending
* stream of whitespace until the generation reaches the token limit, resulting in a
* long-running and seemingly "stuck" request. Also note that the message content may be
* partially cut off if `finish_reason="length"`, which indicates the generation exceeded
* `max_tokens` or the conversation exceeded the max context length.
*/
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
@@ -1805,6 +1928,10 @@ constructor(
)
)

/**
* The chunking strategy used to chunk the file(s). If not set, will use the
* `auto` strategy. Only applicable if `file_ids` is non-empty.
*/
fun chunkingStrategy(
staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
) =
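Finally, a sketch of overriding the default `auto` strategy with the static variant the new KDoc mentions. The nested `StaticFileChunkingStrategy` builder and the `static_(...)` setter (escaping the reserved word) are assumptions about the generated models; only the `chunkingStrategy(StaticFileChunkingStrategyParam)` overload appears in this diff:

```kotlin
import com.openai.models.StaticFileChunkingStrategy
import com.openai.models.StaticFileChunkingStrategyParam

// Hedged sketch: per the KDoc above, this only applies when file_ids is
// non-empty. Token counts are illustrative; builder names are assumptions.
val chunking = StaticFileChunkingStrategyParam.builder()
    .static_(
        StaticFileChunkingStrategy.builder()
            .maxChunkSizeTokens(800L)  // max tokens per chunk
            .chunkOverlapTokens(400L)  // overlap between adjacent chunks
            .build()
    )
    .build()
```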