diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 90eeef65..6b467676 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.8.0"
+ ".": "4.9.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 38f260dd..b4550c39 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 135
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ca24bc4d8125b5153514ce643c4e3220f25971b7d67ca384d56d493c72c0d977.yml
-openapi_spec_hash: c6f048c7b3d29f4de48fde0e845ba33f
-config_hash: b876221dfb213df9f0a999e75d38a65e
+configured_endpoints: 136
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8a79e6fd407e6c9afec60971f03076b65f711ccd6ea16457933b0e24fb1f6d.yml
+openapi_spec_hash: 38c0a73f4e08843732c5f8002a809104
+config_hash: 2c350086d87a4b4532077363087840e7
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4203879a..09229e41 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 4.9.0 (2025-12-04)
+
+Full Changelog: [v4.8.0...v4.9.0](https://github.com/openai/openai-java/compare/v4.8.0...v4.9.0)
+
+### Features
+
+* **api:** gpt-5.1-codex-max and responses/compact ([651c44f](https://github.com/openai/openai-java/commit/651c44f570ba07784d715a382d94b255fd3afa60))
+
+
+### Bug Fixes
+
+* **api:** align types of input items / output items for typescript ([9202c69](https://github.com/openai/openai-java/commit/9202c695d939def7c9598e9ee75999b8ebd87e32))
+* **client:** cancel okhttp call when future cancelled ([c665e21](https://github.com/openai/openai-java/commit/c665e21c83123931baed5b21b9bbaa96a4d77495))
+
+
+### Documentation
+
+* remove `$` for better copy-pasteability ([66f7a4b](https://github.com/openai/openai-java/commit/66f7a4b3d2b88fc3e80c1552d0a0df86cd45c1ff))
+
## 4.8.0 (2025-11-13)
Full Changelog: [v4.7.2...v4.8.0](https://github.com/openai/openai-java/compare/v4.7.2...v4.8.0)
diff --git a/README.md b/README.md
index e8a28b7d..30f01cdc 100644
--- a/README.md
+++ b/README.md
@@ -2,8 +2,8 @@
-[](https://central.sonatype.com/artifact/com.openai/openai-java/4.8.0)
-[](https://javadoc.io/doc/com.openai/openai-java/4.8.0)
+[](https://central.sonatype.com/artifact/com.openai/openai-java/4.9.0)
+[](https://javadoc.io/doc/com.openai/openai-java/4.9.0)
@@ -11,7 +11,7 @@ The OpenAI Java SDK provides convenient access to the [OpenAI REST API](https://
-The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). Javadocs are available on [javadoc.io](https://javadoc.io/doc/com.openai/openai-java/4.8.0).
+The REST API documentation can be found on [platform.openai.com](https://platform.openai.com/docs). Javadocs are available on [javadoc.io](https://javadoc.io/doc/com.openai/openai-java/4.9.0).
@@ -24,7 +24,7 @@ The REST API documentation can be found on [platform.openai.com](https://platfor
### Gradle
```kotlin
-implementation("com.openai:openai-java:4.8.0")
+implementation("com.openai:openai-java:4.9.0")
```
### Maven
@@ -33,7 +33,7 @@ implementation("com.openai:openai-java:4.8.0")
<groupId>com.openai</groupId>
<artifactId>openai-java</artifactId>
- <version>4.8.0</version>
+ <version>4.9.0</version>
```
@@ -1310,13 +1310,13 @@ The SDK uses the standard [OkHttp logging interceptor](https://github.com/square
Enable logging by setting the `OPENAI_LOG` environment variable to `info`:
```sh
-$ export OPENAI_LOG=info
+export OPENAI_LOG=info
```
Or to `debug` for more verbose logging:
```sh
-$ export OPENAI_LOG=debug
+export OPENAI_LOG=debug
```
## ProGuard and R8
@@ -1342,7 +1342,7 @@ If you're using Spring Boot, then you can use the SDK's [Spring Boot starter](ht
#### Gradle
```kotlin
-implementation("com.openai:openai-java-spring-boot-starter:4.8.0")
+implementation("com.openai:openai-java-spring-boot-starter:4.9.0")
```
#### Maven
@@ -1351,7 +1351,7 @@ implementation("com.openai:openai-java-spring-boot-starter:4.8.0")
<groupId>com.openai</groupId>
<artifactId>openai-java-spring-boot-starter</artifactId>
- <version>4.8.0</version>
+ <version>4.9.0</version>
```
diff --git a/build.gradle.kts b/build.gradle.kts
index 6ec56e42..6347a8c0 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -8,7 +8,7 @@ repositories {
allprojects {
group = "com.openai"
- version = "4.8.0" // x-release-please-version
+ version = "4.9.0" // x-release-please-version
}
subprojects {
diff --git a/gradle.properties b/gradle.properties
index 6680f9ce..5d1dabd2 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -2,6 +2,9 @@ org.gradle.caching=true
org.gradle.configuration-cache=true
org.gradle.parallel=true
org.gradle.daemon=false
+kotlin.daemon.enabled=false
+kotlin.compiler.execution.strategy=in-process
+kotlin.incremental=false
# These options improve our compilation and test performance. They are inherited by the Kotlin daemon.
org.gradle.jvmargs=\
-Xms2g \
diff --git a/openai-java-client-okhttp/build.gradle.kts b/openai-java-client-okhttp/build.gradle.kts
index 4f14e269..272eaba2 100644
--- a/openai-java-client-okhttp/build.gradle.kts
+++ b/openai-java-client-okhttp/build.gradle.kts
@@ -11,4 +11,5 @@ dependencies {
testImplementation(kotlin("test"))
testImplementation("org.assertj:assertj-core:3.25.3")
+ testImplementation("com.github.tomakehurst:wiremock-jre8:2.35.2")
}
diff --git a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OkHttpClient.kt b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OkHttpClient.kt
index d99c47cb..f036cbd2 100644
--- a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OkHttpClient.kt
+++ b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OkHttpClient.kt
@@ -13,6 +13,7 @@ import java.io.IOException
import java.io.InputStream
import java.net.Proxy
import java.time.Duration
+import java.util.concurrent.CancellationException
import java.util.concurrent.CompletableFuture
import javax.net.ssl.HostnameVerifier
import javax.net.ssl.SSLSocketFactory
@@ -29,8 +30,8 @@ import okhttp3.Response
import okhttp3.logging.HttpLoggingInterceptor
import okio.BufferedSink
-class OkHttpClient private constructor(private val okHttpClient: okhttp3.OkHttpClient) :
- HttpClient {
+class OkHttpClient
+private constructor(@JvmSynthetic internal val okHttpClient: okhttp3.OkHttpClient) : HttpClient {
override fun execute(request: HttpRequest, requestOptions: RequestOptions): HttpResponse {
val call = newCall(request, requestOptions)
@@ -50,20 +51,25 @@ class OkHttpClient private constructor(private val okHttpClient: okhttp3.OkHttpC
): CompletableFuture<HttpResponse> {
val future = CompletableFuture<HttpResponse>()
- request.body?.run { future.whenComplete { _, _ -> close() } }
-
- newCall(request, requestOptions)
- .enqueue(
- object : Callback {
- override fun onResponse(call: Call, response: Response) {
- future.complete(response.toResponse())
- }
+ val call = newCall(request, requestOptions)
+ call.enqueue(
+ object : Callback {
+ override fun onResponse(call: Call, response: Response) {
+ future.complete(response.toResponse())
+ }
- override fun onFailure(call: Call, e: IOException) {
- future.completeExceptionally(OpenAIIoException("Request failed", e))
- }
+ override fun onFailure(call: Call, e: IOException) {
+ future.completeExceptionally(OpenAIIoException("Request failed", e))
}
- )
+ }
+ )
+
+ future.whenComplete { _, e ->
+ if (e is CancellationException) {
+ call.cancel()
+ }
+ request.body?.close()
+ }
return future
}
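With this change, cancellation now flows from the returned `CompletableFuture` to the underlying `okhttp3.Call`, and the request body is closed on completion regardless of outcome. A minimal caller-side sketch, reusing only the builder and `executeAsync` calls exercised by the new test below; the base URL is a placeholder:

```kotlin
import com.openai.client.okhttp.OkHttpClient
import com.openai.core.http.HttpMethod
import com.openai.core.http.HttpRequest

fun main() {
    // Same construction as in OkHttpClientTest.
    val httpClient = OkHttpClient.builder().build()

    // Enqueue an async request; "http://localhost:8080" stands in for a real endpoint.
    val responseFuture =
        httpClient.executeAsync(
            HttpRequest.builder()
                .method(HttpMethod.POST)
                .baseUrl("http://localhost:8080")
                .addPathSegment("something")
                .build()
        )

    // Previously, cancelling the future left the okhttp3.Call running in the background;
    // the whenComplete hook added above now cancels the call as well.
    responseFuture.cancel(false)
}
```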
diff --git a/openai-java-client-okhttp/src/test/kotlin/com/openai/client/okhttp/OkHttpClientTest.kt b/openai-java-client-okhttp/src/test/kotlin/com/openai/client/okhttp/OkHttpClientTest.kt
new file mode 100644
index 00000000..b202c83c
--- /dev/null
+++ b/openai-java-client-okhttp/src/test/kotlin/com/openai/client/okhttp/OkHttpClientTest.kt
@@ -0,0 +1,44 @@
+package com.openai.client.okhttp
+
+import com.github.tomakehurst.wiremock.client.WireMock.*
+import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo
+import com.github.tomakehurst.wiremock.junit5.WireMockTest
+import com.openai.core.http.HttpMethod
+import com.openai.core.http.HttpRequest
+import org.assertj.core.api.Assertions.assertThat
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import org.junit.jupiter.api.parallel.ResourceLock
+
+@WireMockTest
+@ResourceLock("https://github.com/wiremock/wiremock/issues/169")
+internal class OkHttpClientTest {
+
+ private lateinit var baseUrl: String
+ private lateinit var httpClient: OkHttpClient
+
+ @BeforeEach
+ fun beforeEach(wmRuntimeInfo: WireMockRuntimeInfo) {
+ baseUrl = wmRuntimeInfo.httpBaseUrl
+ httpClient = OkHttpClient.builder().build()
+ }
+
+ @Test
+ fun executeAsync_whenFutureCancelled_cancelsUnderlyingCall() {
+ stubFor(post(urlPathEqualTo("/something")).willReturn(ok()))
+ val responseFuture =
+ httpClient.executeAsync(
+ HttpRequest.builder()
+ .method(HttpMethod.POST)
+ .baseUrl(baseUrl)
+ .addPathSegment("something")
+ .build()
+ )
+ val call = httpClient.okHttpClient.dispatcher.runningCalls().single()
+
+ responseFuture.cancel(false)
+
+ // Should have cancelled the underlying call
+ assertThat(call.isCanceled()).isTrue()
+ }
+}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/AllModels.kt b/openai-java-core/src/main/kotlin/com/openai/models/AllModels.kt
index 8862a6c6..891771e2 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/AllModels.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/AllModels.kt
@@ -258,6 +258,8 @@ private constructor(
@JvmField val GPT_5_PRO_2025_10_06 = of("gpt-5-pro-2025-10-06")
+ @JvmField val GPT_5_1_CODEX_MAX = of("gpt-5.1-codex-max")
+
@JvmStatic fun of(value: String) = ResponsesOnlyModel(JsonField.of(value))
}
@@ -276,6 +278,7 @@ private constructor(
GPT_5_CODEX,
GPT_5_PRO,
GPT_5_PRO_2025_10_06,
+ GPT_5_1_CODEX_MAX,
}
/**
@@ -301,6 +304,7 @@ private constructor(
GPT_5_CODEX,
GPT_5_PRO,
GPT_5_PRO_2025_10_06,
+ GPT_5_1_CODEX_MAX,
/**
* An enum member indicating that [ResponsesOnlyModel] was instantiated with an unknown
* value.
@@ -330,6 +334,7 @@ private constructor(
GPT_5_CODEX -> Value.GPT_5_CODEX
GPT_5_PRO -> Value.GPT_5_PRO
GPT_5_PRO_2025_10_06 -> Value.GPT_5_PRO_2025_10_06
+ GPT_5_1_CODEX_MAX -> Value.GPT_5_1_CODEX_MAX
else -> Value._UNKNOWN
}
@@ -357,6 +362,7 @@ private constructor(
GPT_5_CODEX -> Known.GPT_5_CODEX
GPT_5_PRO -> Known.GPT_5_PRO
GPT_5_PRO_2025_10_06 -> Known.GPT_5_PRO_2025_10_06
+ GPT_5_1_CODEX_MAX -> Known.GPT_5_1_CODEX_MAX
else -> throw OpenAIInvalidDataException("Unknown ResponsesOnlyModel: $value")
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Reasoning.kt b/openai-java-core/src/main/kotlin/com/openai/models/Reasoning.kt
index bd2d4558..b102ab22 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Reasoning.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Reasoning.kt
@@ -45,14 +45,15 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported
- * values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort
+ * can result in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for
* all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -144,14 +145,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun effort(effort: ReasoningEffort?) = effort(JsonField.ofNullable(effort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ReasoningEffort.kt b/openai-java-core/src/main/kotlin/com/openai/models/ReasoningEffort.kt
index 00fce409..5519cce6 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ReasoningEffort.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ReasoningEffort.kt
@@ -10,13 +10,14 @@ import com.openai.errors.OpenAIInvalidDataException
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values
- * are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can result in
- * faster responses and fewer tokens used on reasoning in a response.
+ * are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result
+ * in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values
* for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all
* reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
class ReasoningEffort @JsonCreator private constructor(private val value: JsonField<String>) :
Enum {
@@ -42,6 +43,8 @@ class ReasoningEffort @JsonCreator private constructor(private val value: JsonFi
@JvmField val HIGH = of("high")
+ @JvmField val XHIGH = of("xhigh")
+
@JvmStatic fun of(value: String) = ReasoningEffort(JsonField.of(value))
}
@@ -52,6 +55,7 @@ class ReasoningEffort @JsonCreator private constructor(private val value: JsonFi
LOW,
MEDIUM,
HIGH,
+ XHIGH,
}
/**
@@ -69,6 +73,7 @@ class ReasoningEffort @JsonCreator private constructor(private val value: JsonFi
LOW,
MEDIUM,
HIGH,
+ XHIGH,
/**
* An enum member indicating that [ReasoningEffort] was instantiated with an unknown value.
*/
@@ -89,6 +94,7 @@ class ReasoningEffort @JsonCreator private constructor(private val value: JsonFi
LOW -> Value.LOW
MEDIUM -> Value.MEDIUM
HIGH -> Value.HIGH
+ XHIGH -> Value.XHIGH
else -> Value._UNKNOWN
}
@@ -107,6 +113,7 @@ class ReasoningEffort @JsonCreator private constructor(private val value: JsonFi
LOW -> Known.LOW
MEDIUM -> Known.MEDIUM
HIGH -> Known.HIGH
+ XHIGH -> Known.XHIGH
else -> throw OpenAIInvalidDataException("Unknown ReasoningEffort: $value")
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ResponsesModel.kt b/openai-java-core/src/main/kotlin/com/openai/models/ResponsesModel.kt
index bbf0eee4..e64aa924 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ResponsesModel.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ResponsesModel.kt
@@ -257,6 +257,8 @@ private constructor(
@JvmField val GPT_5_PRO_2025_10_06 = of("gpt-5-pro-2025-10-06")
+ @JvmField val GPT_5_1_CODEX_MAX = of("gpt-5.1-codex-max")
+
@JvmStatic fun of(value: String) = ResponsesOnlyModel(JsonField.of(value))
}
@@ -275,6 +277,7 @@ private constructor(
GPT_5_CODEX,
GPT_5_PRO,
GPT_5_PRO_2025_10_06,
+ GPT_5_1_CODEX_MAX,
}
/**
@@ -300,6 +303,7 @@ private constructor(
GPT_5_CODEX,
GPT_5_PRO,
GPT_5_PRO_2025_10_06,
+ GPT_5_1_CODEX_MAX,
/**
* An enum member indicating that [ResponsesOnlyModel] was instantiated with an unknown
* value.
@@ -329,6 +333,7 @@ private constructor(
GPT_5_CODEX -> Value.GPT_5_CODEX
GPT_5_PRO -> Value.GPT_5_PRO
GPT_5_PRO_2025_10_06 -> Value.GPT_5_PRO_2025_10_06
+ GPT_5_1_CODEX_MAX -> Value.GPT_5_1_CODEX_MAX
else -> Value._UNKNOWN
}
@@ -356,6 +361,7 @@ private constructor(
GPT_5_CODEX -> Known.GPT_5_CODEX
GPT_5_PRO -> Known.GPT_5_PRO
GPT_5_PRO_2025_10_06 -> Known.GPT_5_PRO_2025_10_06
+ GPT_5_1_CODEX_MAX -> Known.GPT_5_1_CODEX_MAX
else -> throw OpenAIInvalidDataException("Unknown ResponsesOnlyModel: $value")
}
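Taken together, the additions above can be combined when building a request: `gpt-5.1-codex-max` is exposed as a responses-only model constant and `xhigh` as a new reasoning effort. A hedged sketch using only constants and builder methods visible in this diff (the nesting of `ResponsesOnlyModel` inside `ResponsesModel` is assumed from the file layout; the surrounding request construction is elided):

```kotlin
import com.openai.models.Reasoning
import com.openai.models.ReasoningEffort
import com.openai.models.ResponsesModel

// `xhigh` is documented above as only supported for gpt-5.1-codex-max.
val reasoning: Reasoning =
    Reasoning.builder()
        .effort(ReasoningEffort.XHIGH)
        .build()

// New responses-only model constant added in this release (nesting assumed).
val codexMax = ResponsesModel.ResponsesOnlyModel.GPT_5_1_CODEX_MAX
```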
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantCreateParams.kt
index 9403087e..3c5939db 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantCreateParams.kt
@@ -98,14 +98,15 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported
- * values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort
+ * can result in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for
* all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -403,14 +404,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) = apply {
body.reasoningEffort(reasoningEffort)
@@ -871,14 +874,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1208,15 +1213,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantUpdateParams.kt
index f5088a30..9fca0bf2 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantUpdateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/beta/assistants/AssistantUpdateParams.kt
@@ -90,14 +90,15 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported
- * values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort
+ * can result in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for
* all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -397,14 +398,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) = apply {
body.reasoningEffort(reasoningEffort)
@@ -865,14 +868,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1195,15 +1200,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/beta/threads/runs/RunCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/beta/threads/runs/RunCreateParams.kt
index f36832b1..71e92717 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/beta/threads/runs/RunCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/beta/threads/runs/RunCreateParams.kt
@@ -167,14 +167,15 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported
- * values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort
+ * can result in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for
* all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -692,14 +693,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) = apply {
body.reasoningEffort(reasoningEffort)
@@ -1290,14 +1293,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1838,15 +1843,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/chat/completions/ChatCompletionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/chat/completions/ChatCompletionCreateParams.kt
index 8d4586e2..c42eff41 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/chat/completions/ChatCompletionCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/chat/completions/ChatCompletionCreateParams.kt
@@ -278,14 +278,15 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported
- * values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort
+ * can result in faster responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for
* all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1397,14 +1398,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) = apply {
body.reasoningEffort(reasoningEffort)
@@ -2434,14 +2437,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -3729,15 +3734,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateParams.kt
index b2df2416..49dfcf01 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateParams.kt
@@ -55,6 +55,14 @@ private constructor(
*/
fun fileIds(): Optional<List<String>> = body.fileIds()
+ /**
+ * Optional memory limit for the container. Defaults to "1g".
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun memoryLimit(): Optional<MemoryLimit> = body.memoryLimit()
+
/**
* Returns the raw JSON value of [name].
*
@@ -76,6 +84,13 @@ private constructor(
*/
fun _fileIds(): JsonField<List<String>> = body._fileIds()
+ /**
+ * Returns the raw JSON value of [memoryLimit].
+ *
+ * Unlike [memoryLimit], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ fun _memoryLimit(): JsonField<MemoryLimit> = body._memoryLimit()
+
fun _additionalBodyProperties(): Map<String, JsonValue> = body._additionalProperties()
/** Additional headers to send with the request. */
@@ -121,6 +136,7 @@ private constructor(
* - [name]
* - [expiresAfter]
* - [fileIds]
+ * - [memoryLimit]
*/
fun body(body: Body) = apply { this.body = body.toBuilder() }
@@ -168,6 +184,20 @@ private constructor(
*/
fun addFileId(fileId: String) = apply { body.addFileId(fileId) }
+ /** Optional memory limit for the container. Defaults to "1g". */
+ fun memoryLimit(memoryLimit: MemoryLimit) = apply { body.memoryLimit(memoryLimit) }
+
+ /**
+ * Sets [Builder.memoryLimit] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.memoryLimit] with a well-typed [MemoryLimit] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun memoryLimit(memoryLimit: JsonField<MemoryLimit>) = apply {
+ body.memoryLimit(memoryLimit)
+ }
+
fun additionalBodyProperties(additionalBodyProperties: Map<String, JsonValue>) = apply {
body.additionalProperties(additionalBodyProperties)
}
@@ -317,6 +347,7 @@ private constructor(
private val name: JsonField<String>,
private val expiresAfter: JsonField<ExpiresAfter>,
private val fileIds: JsonField<List<String>>,
+ private val memoryLimit: JsonField<MemoryLimit>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -329,7 +360,10 @@ private constructor(
@JsonProperty("file_ids")
@ExcludeMissing
fileIds: JsonField<List<String>> = JsonMissing.of(),
- ) : this(name, expiresAfter, fileIds, mutableMapOf())
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ memoryLimit: JsonField<MemoryLimit> = JsonMissing.of(),
+ ) : this(name, expiresAfter, fileIds, memoryLimit, mutableMapOf())
/**
* Name of the container to create.
@@ -355,6 +389,14 @@ private constructor(
*/
fun fileIds(): Optional<List<String>> = fileIds.getOptional("file_ids")
+ /**
+ * Optional memory limit for the container. Defaults to "1g".
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun memoryLimit(): Optional<MemoryLimit> = memoryLimit.getOptional("memory_limit")
+
/**
* Returns the raw JSON value of [name].
*
@@ -379,6 +421,15 @@ private constructor(
*/
@JsonProperty("file_ids") @ExcludeMissing fun _fileIds(): JsonField> = fileIds
+ /**
+ * Returns the raw JSON value of [memoryLimit].
+ *
+ * Unlike [memoryLimit], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ fun _memoryLimit(): JsonField<MemoryLimit> = memoryLimit
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -410,6 +461,7 @@ private constructor(
private var name: JsonField<String>? = null
private var expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of()
private var fileIds: JsonField<MutableList<String>>? = null
+ private var memoryLimit: JsonField<MemoryLimit> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -417,6 +469,7 @@ private constructor(
name = body.name
expiresAfter = body.expiresAfter
fileIds = body.fileIds.map { it.toMutableList() }
+ memoryLimit = body.memoryLimit
additionalProperties = body.additionalProperties.toMutableMap()
}
@@ -472,6 +525,20 @@ private constructor(
}
}
+ /** Optional memory limit for the container. Defaults to "1g". */
+ fun memoryLimit(memoryLimit: MemoryLimit) = memoryLimit(JsonField.of(memoryLimit))
+
+ /**
+ * Sets [Builder.memoryLimit] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.memoryLimit] with a well-typed [MemoryLimit] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun memoryLimit(memoryLimit: JsonField<MemoryLimit>) = apply {
+ this.memoryLimit = memoryLimit
+ }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -508,6 +575,7 @@ private constructor(
checkRequired("name", name),
expiresAfter,
(fileIds ?: JsonMissing.of()).map { it.toImmutable() },
+ memoryLimit,
additionalProperties.toMutableMap(),
)
}
@@ -522,6 +590,7 @@ private constructor(
name()
expiresAfter().ifPresent { it.validate() }
fileIds()
+ memoryLimit().ifPresent { it.validate() }
validated = true
}
@@ -543,7 +612,8 @@ private constructor(
internal fun validity(): Int =
(if (name.asKnown().isPresent) 1 else 0) +
(expiresAfter.asKnown().getOrNull()?.validity() ?: 0) +
- (fileIds.asKnown().getOrNull()?.size ?: 0)
+ (fileIds.asKnown().getOrNull()?.size ?: 0) +
+ (memoryLimit.asKnown().getOrNull()?.validity() ?: 0)
override fun equals(other: Any?): Boolean {
if (this === other) {
@@ -554,17 +624,18 @@ private constructor(
name == other.name &&
expiresAfter == other.expiresAfter &&
fileIds == other.fileIds &&
+ memoryLimit == other.memoryLimit &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(name, expiresAfter, fileIds, additionalProperties)
+ Objects.hash(name, expiresAfter, fileIds, memoryLimit, additionalProperties)
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "Body{name=$name, expiresAfter=$expiresAfter, fileIds=$fileIds, additionalProperties=$additionalProperties}"
+ "Body{name=$name, expiresAfter=$expiresAfter, fileIds=$fileIds, memoryLimit=$memoryLimit, additionalProperties=$additionalProperties}"
}
/** Container expiration time in seconds relative to the 'anchor' time. */
@@ -889,6 +960,147 @@ private constructor(
"ExpiresAfter{anchor=$anchor, minutes=$minutes, additionalProperties=$additionalProperties}"
}
+ /** Optional memory limit for the container. Defaults to "1g". */
+ class MemoryLimit @JsonCreator private constructor(private val value: JsonField<String>) :
+ Enum {
+
+ /**
+ * Returns this class instance's raw value.
+ *
+ * This is usually only useful if this instance was deserialized from data that doesn't
+ * match any known member, and you want to know that value. For example, if the SDK is on an
+ * older version than the API, then the API may respond with new members that the SDK is
+ * unaware of.
+ */
+ @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
+
+ companion object {
+
+ @JvmField val _1G = of("1g")
+
+ @JvmField val _4G = of("4g")
+
+ @JvmField val _16G = of("16g")
+
+ @JvmField val _64G = of("64g")
+
+ @JvmStatic fun of(value: String) = MemoryLimit(JsonField.of(value))
+ }
+
+ /** An enum containing [MemoryLimit]'s known values. */
+ enum class Known {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ }
+
+ /**
+ * An enum containing [MemoryLimit]'s known values, as well as an [_UNKNOWN] member.
+ *
+ * An instance of [MemoryLimit] can contain an unknown value in a couple of cases:
+ * - It was deserialized from data that doesn't match any known member. For example, if the
+ * SDK is on an older version than the API, then the API may respond with new members that
+ * the SDK is unaware of.
+ * - It was constructed with an arbitrary value using the [of] method.
+ */
+ enum class Value {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ /**
+ * An enum member indicating that [MemoryLimit] was instantiated with an unknown value.
+ */
+ _UNKNOWN,
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
+ * if the class was instantiated with an unknown value.
+ *
+ * Use the [known] method instead if you're certain the value is always known or if you want
+ * to throw for the unknown case.
+ */
+ fun value(): Value =
+ when (this) {
+ _1G -> Value._1G
+ _4G -> Value._4G
+ _16G -> Value._16G
+ _64G -> Value._64G
+ else -> Value._UNKNOWN
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value.
+ *
+ * Use the [value] method instead if you're uncertain the value is always known and don't
+ * want to throw for the unknown case.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value is a not a known
+ * member.
+ */
+ fun known(): Known =
+ when (this) {
+ _1G -> Known._1G
+ _4G -> Known._4G
+ _16G -> Known._16G
+ _64G -> Known._64G
+ else -> throw OpenAIInvalidDataException("Unknown MemoryLimit: $value")
+ }
+
+ /**
+ * Returns this class instance's primitive wire representation.
+ *
+ * This differs from the [toString] method because that method is primarily for debugging
+ * and generally doesn't throw.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value does not have the
+ * expected primitive type.
+ */
+ fun asString(): String =
+ _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
+
+ private var validated: Boolean = false
+
+ fun validate(): MemoryLimit = apply {
+ if (validated) {
+ return@apply
+ }
+
+ known()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is MemoryLimit && value == other.value
+ }
+
+ override fun hashCode() = value.hashCode()
+
+ override fun toString() = value.toString()
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
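The new `memory_limit` request property is settable through the params builder. A minimal sketch based on the builder methods shown above (`name(...)` is the required field enforced by `checkRequired`; the container name here is a placeholder):

```kotlin
import com.openai.models.containers.ContainerCreateParams

// Request a container with the new optional memory limit; the server defaults to "1g".
val params: ContainerCreateParams =
    ContainerCreateParams.builder()
        .name("my-container") // placeholder name
        .memoryLimit(ContainerCreateParams.MemoryLimit._4G)
        .build()
```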
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateResponse.kt
index e7991d9c..803ee160 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerCreateResponse.kt
@@ -27,6 +27,8 @@ private constructor(
private val object_: JsonField<String>,
private val status: JsonField<String>,
private val expiresAfter: JsonField<ExpiresAfter>,
+ private val lastActiveAt: JsonField<Long>,
+ private val memoryLimit: JsonField<MemoryLimit>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -40,7 +42,23 @@ private constructor(
@JsonProperty("expires_after")
@ExcludeMissing
expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of(),
- ) : this(id, createdAt, name, object_, status, expiresAfter, mutableMapOf())
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ lastActiveAt: JsonField<Long> = JsonMissing.of(),
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ memoryLimit: JsonField<MemoryLimit> = JsonMissing.of(),
+ ) : this(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ mutableMapOf(),
+ )
/**
* Unique identifier for the container.
@@ -92,6 +110,22 @@ private constructor(
*/
fun expiresAfter(): Optional<ExpiresAfter> = expiresAfter.getOptional("expires_after")
+ /**
+ * Unix timestamp (in seconds) when the container was last active.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun lastActiveAt(): Optional<Long> = lastActiveAt.getOptional("last_active_at")
+
+ /**
+ * The memory limit configured for the container.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun memoryLimit(): Optional<MemoryLimit> = memoryLimit.getOptional("memory_limit")
+
/**
* Returns the raw JSON value of [id].
*
@@ -136,6 +170,24 @@ private constructor(
@ExcludeMissing
fun _expiresAfter(): JsonField<ExpiresAfter> = expiresAfter
+ /**
+ * Returns the raw JSON value of [lastActiveAt].
+ *
+ * Unlike [lastActiveAt], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ fun _lastActiveAt(): JsonField<Long> = lastActiveAt
+
+ /**
+ * Returns the raw JSON value of [memoryLimit].
+ *
+ * Unlike [memoryLimit], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ fun _memoryLimit(): JsonField<MemoryLimit> = memoryLimit
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -174,6 +226,8 @@ private constructor(
private var object_: JsonField<String>? = null
private var status: JsonField<String>? = null
private var expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of()
+ private var lastActiveAt: JsonField<Long> = JsonMissing.of()
+ private var memoryLimit: JsonField<MemoryLimit> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -184,6 +238,8 @@ private constructor(
object_ = containerCreateResponse.object_
status = containerCreateResponse.status
expiresAfter = containerCreateResponse.expiresAfter
+ lastActiveAt = containerCreateResponse.lastActiveAt
+ memoryLimit = containerCreateResponse.memoryLimit
additionalProperties = containerCreateResponse.additionalProperties.toMutableMap()
}
@@ -260,6 +316,32 @@ private constructor(
this.expiresAfter = expiresAfter
}
+ /** Unix timestamp (in seconds) when the container was last active. */
+ fun lastActiveAt(lastActiveAt: Long) = lastActiveAt(JsonField.of(lastActiveAt))
+
+ /**
+ * Sets [Builder.lastActiveAt] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.lastActiveAt] with a well-typed [Long] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun lastActiveAt(lastActiveAt: JsonField<Long>) = apply { this.lastActiveAt = lastActiveAt }
+
+ /** The memory limit configured for the container. */
+ fun memoryLimit(memoryLimit: MemoryLimit) = memoryLimit(JsonField.of(memoryLimit))
+
+ /**
+ * Sets [Builder.memoryLimit] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.memoryLimit] with a well-typed [MemoryLimit] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun memoryLimit(memoryLimit: JsonField<MemoryLimit>) = apply {
+ this.memoryLimit = memoryLimit
+ }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -303,6 +385,8 @@ private constructor(
checkRequired("object_", object_),
checkRequired("status", status),
expiresAfter,
+ lastActiveAt,
+ memoryLimit,
additionalProperties.toMutableMap(),
)
}
@@ -320,6 +404,8 @@ private constructor(
object_()
status()
expiresAfter().ifPresent { it.validate() }
+ lastActiveAt()
+ memoryLimit().ifPresent { it.validate() }
validated = true
}
@@ -343,7 +429,9 @@ private constructor(
(if (name.asKnown().isPresent) 1 else 0) +
(if (object_.asKnown().isPresent) 1 else 0) +
(if (status.asKnown().isPresent) 1 else 0) +
- (expiresAfter.asKnown().getOrNull()?.validity() ?: 0)
+ (expiresAfter.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (lastActiveAt.asKnown().isPresent) 1 else 0) +
+ (memoryLimit.asKnown().getOrNull()?.validity() ?: 0)
/**
* The container will expire after this time period. The anchor is the reference point for the
@@ -652,6 +740,147 @@ private constructor(
"ExpiresAfter{anchor=$anchor, minutes=$minutes, additionalProperties=$additionalProperties}"
}
+ /** The memory limit configured for the container. */
+ class MemoryLimit @JsonCreator private constructor(private val value: JsonField<String>) :
+ Enum {
+
+ /**
+ * Returns this class instance's raw value.
+ *
+ * This is usually only useful if this instance was deserialized from data that doesn't
+ * match any known member, and you want to know that value. For example, if the SDK is on an
+ * older version than the API, then the API may respond with new members that the SDK is
+ * unaware of.
+ */
+ @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
+
+ companion object {
+
+ @JvmField val _1G = of("1g")
+
+ @JvmField val _4G = of("4g")
+
+ @JvmField val _16G = of("16g")
+
+ @JvmField val _64G = of("64g")
+
+ @JvmStatic fun of(value: String) = MemoryLimit(JsonField.of(value))
+ }
+
+ /** An enum containing [MemoryLimit]'s known values. */
+ enum class Known {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ }
+
+ /**
+ * An enum containing [MemoryLimit]'s known values, as well as an [_UNKNOWN] member.
+ *
+ * An instance of [MemoryLimit] can contain an unknown value in a couple of cases:
+ * - It was deserialized from data that doesn't match any known member. For example, if the
+ * SDK is on an older version than the API, then the API may respond with new members that
+ * the SDK is unaware of.
+ * - It was constructed with an arbitrary value using the [of] method.
+ */
+ enum class Value {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ /**
+ * An enum member indicating that [MemoryLimit] was instantiated with an unknown value.
+ */
+ _UNKNOWN,
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
+ * if the class was instantiated with an unknown value.
+ *
+ * Use the [known] method instead if you're certain the value is always known or if you want
+ * to throw for the unknown case.
+ */
+ fun value(): Value =
+ when (this) {
+ _1G -> Value._1G
+ _4G -> Value._4G
+ _16G -> Value._16G
+ _64G -> Value._64G
+ else -> Value._UNKNOWN
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value.
+ *
+ * Use the [value] method instead if you're uncertain the value is always known and don't
+ * want to throw for the unknown case.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value is a not a known
+ * member.
+ */
+ fun known(): Known =
+ when (this) {
+ _1G -> Known._1G
+ _4G -> Known._4G
+ _16G -> Known._16G
+ _64G -> Known._64G
+ else -> throw OpenAIInvalidDataException("Unknown MemoryLimit: $value")
+ }
+
+ /**
+ * Returns this class instance's primitive wire representation.
+ *
+ * This differs from the [toString] method because that method is primarily for debugging
+ * and generally doesn't throw.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value does not have the
+ * expected primitive type.
+ */
+ fun asString(): String =
+ _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
+
+ private var validated: Boolean = false
+
+ fun validate(): MemoryLimit = apply {
+ if (validated) {
+ return@apply
+ }
+
+ known()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is MemoryLimit && value == other.value
+ }
+
+ override fun hashCode() = value.hashCode()
+
+ override fun toString() = value.toString()
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
@@ -664,15 +893,27 @@ private constructor(
object_ == other.object_ &&
status == other.status &&
expiresAfter == other.expiresAfter &&
+ lastActiveAt == other.lastActiveAt &&
+ memoryLimit == other.memoryLimit &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(id, createdAt, name, object_, status, expiresAfter, additionalProperties)
+ Objects.hash(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ additionalProperties,
+ )
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "ContainerCreateResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, additionalProperties=$additionalProperties}"
+ "ContainerCreateResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, lastActiveAt=$lastActiveAt, memoryLimit=$memoryLimit, additionalProperties=$additionalProperties}"
}
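
The `last_active_at` and `memory_limit` fields added above surface as `Optional` accessors on `ContainerCreateResponse`. A minimal sketch of reading them, assuming a client configured from the environment and a hypothetical container name (the `containers().create(...)` accessor follows the SDK's usual service layout rather than anything shown in this excerpt):

```java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.containers.ContainerCreateParams;
import com.openai.models.containers.ContainerCreateResponse;

public final class ContainerFieldsExample {
    public static void main(String[] args) {
        // Reads configuration such as OPENAI_API_KEY from the environment.
        OpenAIClient client = OpenAIOkHttpClient.fromEnv();

        ContainerCreateResponse container = client.containers()
                .create(ContainerCreateParams.builder()
                        .name("my-container") // hypothetical name
                        .build());

        // Both new fields are optional, so they surface as Optionals.
        container.lastActiveAt()
                .ifPresent(ts -> System.out.println("Last active (unix seconds): " + ts));
        container.memoryLimit()
                .ifPresent(limit -> System.out.println("Memory limit: " + limit.asString()));
    }
}
```
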
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerListResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerListResponse.kt
index d344a1c6..d8f8b9bf 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerListResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerListResponse.kt
@@ -27,6 +27,8 @@ private constructor(
private val object_: JsonField<String>,
private val status: JsonField<String>,
private val expiresAfter: JsonField<ExpiresAfter>,
+ private val lastActiveAt: JsonField<Long>,
+ private val memoryLimit: JsonField<MemoryLimit>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -40,7 +42,23 @@ private constructor(
@JsonProperty("expires_after")
@ExcludeMissing
expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of(),
- ) : this(id, createdAt, name, object_, status, expiresAfter, mutableMapOf())
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ lastActiveAt: JsonField<Long> = JsonMissing.of(),
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ memoryLimit: JsonField<MemoryLimit> = JsonMissing.of(),
+ ) : this(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ mutableMapOf(),
+ )
/**
* Unique identifier for the container.
@@ -92,6 +110,22 @@ private constructor(
*/
fun expiresAfter(): Optional<ExpiresAfter> = expiresAfter.getOptional("expires_after")
+ /**
+ * Unix timestamp (in seconds) when the container was last active.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun lastActiveAt(): Optional<Long> = lastActiveAt.getOptional("last_active_at")
+
+ /**
+ * The memory limit configured for the container.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun memoryLimit(): Optional<MemoryLimit> = memoryLimit.getOptional("memory_limit")
+
/**
* Returns the raw JSON value of [id].
*
@@ -136,6 +170,24 @@ private constructor(
@ExcludeMissing
fun _expiresAfter(): JsonField<ExpiresAfter> = expiresAfter
+ /**
+ * Returns the raw JSON value of [lastActiveAt].
+ *
+ * Unlike [lastActiveAt], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ fun _lastActiveAt(): JsonField<Long> = lastActiveAt
+
+ /**
+ * Returns the raw JSON value of [memoryLimit].
+ *
+ * Unlike [memoryLimit], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ fun _memoryLimit(): JsonField<MemoryLimit> = memoryLimit
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -174,6 +226,8 @@ private constructor(
private var object_: JsonField<String>? = null
private var status: JsonField<String>? = null
private var expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of()
+ private var lastActiveAt: JsonField<Long> = JsonMissing.of()
+ private var memoryLimit: JsonField<MemoryLimit> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -184,6 +238,8 @@ private constructor(
object_ = containerListResponse.object_
status = containerListResponse.status
expiresAfter = containerListResponse.expiresAfter
+ lastActiveAt = containerListResponse.lastActiveAt
+ memoryLimit = containerListResponse.memoryLimit
additionalProperties = containerListResponse.additionalProperties.toMutableMap()
}
@@ -260,6 +316,32 @@ private constructor(
this.expiresAfter = expiresAfter
}
+ /** Unix timestamp (in seconds) when the container was last active. */
+ fun lastActiveAt(lastActiveAt: Long) = lastActiveAt(JsonField.of(lastActiveAt))
+
+ /**
+ * Sets [Builder.lastActiveAt] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.lastActiveAt] with a well-typed [Long] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun lastActiveAt(lastActiveAt: JsonField<Long>) = apply { this.lastActiveAt = lastActiveAt }
+
+ /** The memory limit configured for the container. */
+ fun memoryLimit(memoryLimit: MemoryLimit) = memoryLimit(JsonField.of(memoryLimit))
+
+ /**
+ * Sets [Builder.memoryLimit] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.memoryLimit] with a well-typed [MemoryLimit] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun memoryLimit(memoryLimit: JsonField<MemoryLimit>) = apply {
+ this.memoryLimit = memoryLimit
+ }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -303,6 +385,8 @@ private constructor(
checkRequired("object_", object_),
checkRequired("status", status),
expiresAfter,
+ lastActiveAt,
+ memoryLimit,
additionalProperties.toMutableMap(),
)
}
@@ -320,6 +404,8 @@ private constructor(
object_()
status()
expiresAfter().ifPresent { it.validate() }
+ lastActiveAt()
+ memoryLimit().ifPresent { it.validate() }
validated = true
}
@@ -343,7 +429,9 @@ private constructor(
(if (name.asKnown().isPresent) 1 else 0) +
(if (object_.asKnown().isPresent) 1 else 0) +
(if (status.asKnown().isPresent) 1 else 0) +
- (expiresAfter.asKnown().getOrNull()?.validity() ?: 0)
+ (expiresAfter.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (lastActiveAt.asKnown().isPresent) 1 else 0) +
+ (memoryLimit.asKnown().getOrNull()?.validity() ?: 0)
/**
* The container will expire after this time period. The anchor is the reference point for the
@@ -652,6 +740,147 @@ private constructor(
"ExpiresAfter{anchor=$anchor, minutes=$minutes, additionalProperties=$additionalProperties}"
}
+ /** The memory limit configured for the container. */
+ class MemoryLimit @JsonCreator private constructor(private val value: JsonField<String>) :
+ Enum {
+
+ /**
+ * Returns this class instance's raw value.
+ *
+ * This is usually only useful if this instance was deserialized from data that doesn't
+ * match any known member, and you want to know that value. For example, if the SDK is on an
+ * older version than the API, then the API may respond with new members that the SDK is
+ * unaware of.
+ */
+ @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
+
+ companion object {
+
+ @JvmField val _1G = of("1g")
+
+ @JvmField val _4G = of("4g")
+
+ @JvmField val _16G = of("16g")
+
+ @JvmField val _64G = of("64g")
+
+ @JvmStatic fun of(value: String) = MemoryLimit(JsonField.of(value))
+ }
+
+ /** An enum containing [MemoryLimit]'s known values. */
+ enum class Known {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ }
+
+ /**
+ * An enum containing [MemoryLimit]'s known values, as well as an [_UNKNOWN] member.
+ *
+ * An instance of [MemoryLimit] can contain an unknown value in a couple of cases:
+ * - It was deserialized from data that doesn't match any known member. For example, if the
+ * SDK is on an older version than the API, then the API may respond with new members that
+ * the SDK is unaware of.
+ * - It was constructed with an arbitrary value using the [of] method.
+ */
+ enum class Value {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ /**
+ * An enum member indicating that [MemoryLimit] was instantiated with an unknown value.
+ */
+ _UNKNOWN,
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
+ * if the class was instantiated with an unknown value.
+ *
+ * Use the [known] method instead if you're certain the value is always known or if you want
+ * to throw for the unknown case.
+ */
+ fun value(): Value =
+ when (this) {
+ _1G -> Value._1G
+ _4G -> Value._4G
+ _16G -> Value._16G
+ _64G -> Value._64G
+ else -> Value._UNKNOWN
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value.
+ *
+ * Use the [value] method instead if you're uncertain the value is always known and don't
+ * want to throw for the unknown case.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value is not a known
+ * member.
+ */
+ fun known(): Known =
+ when (this) {
+ _1G -> Known._1G
+ _4G -> Known._4G
+ _16G -> Known._16G
+ _64G -> Known._64G
+ else -> throw OpenAIInvalidDataException("Unknown MemoryLimit: $value")
+ }
+
+ /**
+ * Returns this class instance's primitive wire representation.
+ *
+ * This differs from the [toString] method because that method is primarily for debugging
+ * and generally doesn't throw.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value does not have the
+ * expected primitive type.
+ */
+ fun asString(): String =
+ _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
+
+ private var validated: Boolean = false
+
+ fun validate(): MemoryLimit = apply {
+ if (validated) {
+ return@apply
+ }
+
+ known()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is MemoryLimit && value == other.value
+ }
+
+ override fun hashCode() = value.hashCode()
+
+ override fun toString() = value.toString()
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
@@ -664,15 +893,27 @@ private constructor(
object_ == other.object_ &&
status == other.status &&
expiresAfter == other.expiresAfter &&
+ lastActiveAt == other.lastActiveAt &&
+ memoryLimit == other.memoryLimit &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(id, createdAt, name, object_, status, expiresAfter, additionalProperties)
+ Objects.hash(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ additionalProperties,
+ )
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "ContainerListResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, additionalProperties=$additionalProperties}"
+ "ContainerListResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, lastActiveAt=$lastActiveAt, memoryLimit=$memoryLimit, additionalProperties=$additionalProperties}"
}
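
`MemoryLimit` follows the SDK's string-enum pattern: a `Known` enum for the documented tiers and a `Value` enum with `_UNKNOWN` for anything newer than this SDK version. A sketch of branching on `value()` so new tiers degrade gracefully instead of throwing, using only the accessors defined above:

```java
import com.openai.models.containers.ContainerListResponse.MemoryLimit;

final class MemoryLimitHandling {
    static String describe(MemoryLimit limit) {
        switch (limit.value()) {
            case _1G:
            case _4G:
                return "small container: " + limit.asString();
            case _16G:
            case _64G:
                return "large container: " + limit.asString();
            case _UNKNOWN:
            default:
                // The server sent a tier this SDK version doesn't know about yet.
                return "unrecognized tier: " + limit.asString();
        }
    }
}
```
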
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerRetrieveResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerRetrieveResponse.kt
index 383d268e..59ab1ad1 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerRetrieveResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/containers/ContainerRetrieveResponse.kt
@@ -27,6 +27,8 @@ private constructor(
private val object_: JsonField<String>,
private val status: JsonField<String>,
private val expiresAfter: JsonField<ExpiresAfter>,
+ private val lastActiveAt: JsonField<Long>,
+ private val memoryLimit: JsonField<MemoryLimit>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -40,7 +42,23 @@ private constructor(
@JsonProperty("expires_after")
@ExcludeMissing
expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of(),
- ) : this(id, createdAt, name, object_, status, expiresAfter, mutableMapOf())
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ lastActiveAt: JsonField<Long> = JsonMissing.of(),
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ memoryLimit: JsonField<MemoryLimit> = JsonMissing.of(),
+ ) : this(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ mutableMapOf(),
+ )
/**
* Unique identifier for the container.
@@ -92,6 +110,22 @@ private constructor(
*/
fun expiresAfter(): Optional<ExpiresAfter> = expiresAfter.getOptional("expires_after")
+ /**
+ * Unix timestamp (in seconds) when the container was last active.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun lastActiveAt(): Optional<Long> = lastActiveAt.getOptional("last_active_at")
+
+ /**
+ * The memory limit configured for the container.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun memoryLimit(): Optional<MemoryLimit> = memoryLimit.getOptional("memory_limit")
+
/**
* Returns the raw JSON value of [id].
*
@@ -136,6 +170,24 @@ private constructor(
@ExcludeMissing
fun _expiresAfter(): JsonField<ExpiresAfter> = expiresAfter
+ /**
+ * Returns the raw JSON value of [lastActiveAt].
+ *
+ * Unlike [lastActiveAt], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("last_active_at")
+ @ExcludeMissing
+ fun _lastActiveAt(): JsonField<Long> = lastActiveAt
+
+ /**
+ * Returns the raw JSON value of [memoryLimit].
+ *
+ * Unlike [memoryLimit], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("memory_limit")
+ @ExcludeMissing
+ fun _memoryLimit(): JsonField<MemoryLimit> = memoryLimit
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -174,6 +226,8 @@ private constructor(
private var object_: JsonField<String>? = null
private var status: JsonField<String>? = null
private var expiresAfter: JsonField<ExpiresAfter> = JsonMissing.of()
+ private var lastActiveAt: JsonField<Long> = JsonMissing.of()
+ private var memoryLimit: JsonField<MemoryLimit> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -184,6 +238,8 @@ private constructor(
object_ = containerRetrieveResponse.object_
status = containerRetrieveResponse.status
expiresAfter = containerRetrieveResponse.expiresAfter
+ lastActiveAt = containerRetrieveResponse.lastActiveAt
+ memoryLimit = containerRetrieveResponse.memoryLimit
additionalProperties = containerRetrieveResponse.additionalProperties.toMutableMap()
}
@@ -260,6 +316,32 @@ private constructor(
this.expiresAfter = expiresAfter
}
+ /** Unix timestamp (in seconds) when the container was last active. */
+ fun lastActiveAt(lastActiveAt: Long) = lastActiveAt(JsonField.of(lastActiveAt))
+
+ /**
+ * Sets [Builder.lastActiveAt] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.lastActiveAt] with a well-typed [Long] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun lastActiveAt(lastActiveAt: JsonField<Long>) = apply { this.lastActiveAt = lastActiveAt }
+
+ /** The memory limit configured for the container. */
+ fun memoryLimit(memoryLimit: MemoryLimit) = memoryLimit(JsonField.of(memoryLimit))
+
+ /**
+ * Sets [Builder.memoryLimit] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.memoryLimit] with a well-typed [MemoryLimit] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun memoryLimit(memoryLimit: JsonField<MemoryLimit>) = apply {
+ this.memoryLimit = memoryLimit
+ }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -303,6 +385,8 @@ private constructor(
checkRequired("object_", object_),
checkRequired("status", status),
expiresAfter,
+ lastActiveAt,
+ memoryLimit,
additionalProperties.toMutableMap(),
)
}
@@ -320,6 +404,8 @@ private constructor(
object_()
status()
expiresAfter().ifPresent { it.validate() }
+ lastActiveAt()
+ memoryLimit().ifPresent { it.validate() }
validated = true
}
@@ -343,7 +429,9 @@ private constructor(
(if (name.asKnown().isPresent) 1 else 0) +
(if (object_.asKnown().isPresent) 1 else 0) +
(if (status.asKnown().isPresent) 1 else 0) +
- (expiresAfter.asKnown().getOrNull()?.validity() ?: 0)
+ (expiresAfter.asKnown().getOrNull()?.validity() ?: 0) +
+ (if (lastActiveAt.asKnown().isPresent) 1 else 0) +
+ (memoryLimit.asKnown().getOrNull()?.validity() ?: 0)
/**
* The container will expire after this time period. The anchor is the reference point for the
@@ -652,6 +740,147 @@ private constructor(
"ExpiresAfter{anchor=$anchor, minutes=$minutes, additionalProperties=$additionalProperties}"
}
+ /** The memory limit configured for the container. */
+ class MemoryLimit @JsonCreator private constructor(private val value: JsonField<String>) :
+ Enum {
+
+ /**
+ * Returns this class instance's raw value.
+ *
+ * This is usually only useful if this instance was deserialized from data that doesn't
+ * match any known member, and you want to know that value. For example, if the SDK is on an
+ * older version than the API, then the API may respond with new members that the SDK is
+ * unaware of.
+ */
+ @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
+
+ companion object {
+
+ @JvmField val _1G = of("1g")
+
+ @JvmField val _4G = of("4g")
+
+ @JvmField val _16G = of("16g")
+
+ @JvmField val _64G = of("64g")
+
+ @JvmStatic fun of(value: String) = MemoryLimit(JsonField.of(value))
+ }
+
+ /** An enum containing [MemoryLimit]'s known values. */
+ enum class Known {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ }
+
+ /**
+ * An enum containing [MemoryLimit]'s known values, as well as an [_UNKNOWN] member.
+ *
+ * An instance of [MemoryLimit] can contain an unknown value in a couple of cases:
+ * - It was deserialized from data that doesn't match any known member. For example, if the
+ * SDK is on an older version than the API, then the API may respond with new members that
+ * the SDK is unaware of.
+ * - It was constructed with an arbitrary value using the [of] method.
+ */
+ enum class Value {
+ _1G,
+ _4G,
+ _16G,
+ _64G,
+ /**
+ * An enum member indicating that [MemoryLimit] was instantiated with an unknown value.
+ */
+ _UNKNOWN,
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
+ * if the class was instantiated with an unknown value.
+ *
+ * Use the [known] method instead if you're certain the value is always known or if you want
+ * to throw for the unknown case.
+ */
+ fun value(): Value =
+ when (this) {
+ _1G -> Value._1G
+ _4G -> Value._4G
+ _16G -> Value._16G
+ _64G -> Value._64G
+ else -> Value._UNKNOWN
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value.
+ *
+ * Use the [value] method instead if you're uncertain the value is always known and don't
+ * want to throw for the unknown case.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value is not a known
+ * member.
+ */
+ fun known(): Known =
+ when (this) {
+ _1G -> Known._1G
+ _4G -> Known._4G
+ _16G -> Known._16G
+ _64G -> Known._64G
+ else -> throw OpenAIInvalidDataException("Unknown MemoryLimit: $value")
+ }
+
+ /**
+ * Returns this class instance's primitive wire representation.
+ *
+ * This differs from the [toString] method because that method is primarily for debugging
+ * and generally doesn't throw.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value does not have the
+ * expected primitive type.
+ */
+ fun asString(): String =
+ _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
+
+ private var validated: Boolean = false
+
+ fun validate(): MemoryLimit = apply {
+ if (validated) {
+ return@apply
+ }
+
+ known()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is MemoryLimit && value == other.value
+ }
+
+ override fun hashCode() = value.hashCode()
+
+ override fun toString() = value.toString()
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
@@ -664,15 +893,27 @@ private constructor(
object_ == other.object_ &&
status == other.status &&
expiresAfter == other.expiresAfter &&
+ lastActiveAt == other.lastActiveAt &&
+ memoryLimit == other.memoryLimit &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(id, createdAt, name, object_, status, expiresAfter, additionalProperties)
+ Objects.hash(
+ id,
+ createdAt,
+ name,
+ object_,
+ status,
+ expiresAfter,
+ lastActiveAt,
+ memoryLimit,
+ additionalProperties,
+ )
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "ContainerRetrieveResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, additionalProperties=$additionalProperties}"
+ "ContainerRetrieveResponse{id=$id, createdAt=$createdAt, name=$name, object_=$object_, status=$status, expiresAfter=$expiresAfter, lastActiveAt=$lastActiveAt, memoryLimit=$memoryLimit, additionalProperties=$additionalProperties}"
}
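
Because `of(...)` accepts arbitrary strings, values the SDK doesn't know yet still round-trip. A short sketch of how `isValid()`, `value()`, and `validate()` behave for a hypothetical "128g" tier that is not one of the documented constants:

```java
import com.openai.models.containers.ContainerRetrieveResponse.MemoryLimit;

MemoryLimit documented = MemoryLimit._16G;
MemoryLimit custom = MemoryLimit.of("128g"); // hypothetical, not a documented tier

documented.isValid();  // true
custom.isValid();      // false
custom.value();        // Value._UNKNOWN
custom.asString();     // "128g" (the raw wire value is preserved)
// custom.validate();  // would throw OpenAIInvalidDataException
```
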
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/conversations/ConversationCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/conversations/ConversationCreateParams.kt
index 6cba6d50..b886b09c 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/conversations/ConversationCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/conversations/ConversationCreateParams.kt
@@ -18,6 +18,7 @@ import com.openai.core.toImmutable
import com.openai.errors.OpenAIInvalidDataException
import com.openai.models.responses.EasyInputMessage
import com.openai.models.responses.ResponseCodeInterpreterToolCall
+import com.openai.models.responses.ResponseCompactionItemParam
import com.openai.models.responses.ResponseComputerToolCall
import com.openai.models.responses.ResponseCustomToolCall
import com.openai.models.responses.ResponseCustomToolCallOutput
@@ -196,6 +197,21 @@ private constructor(
/** Alias for calling [addItem] with `ResponseInputItem.ofReasoning(reasoning)`. */
fun addItem(reasoning: ResponseReasoningItem) = apply { body.addItem(reasoning) }
+ /** Alias for calling [addItem] with `ResponseInputItem.ofCompaction(compaction)`. */
+ fun addItem(compaction: ResponseCompactionItemParam) = apply { body.addItem(compaction) }
+
+ /**
+ * Alias for calling [addItem] with the following:
+ * ```java
+ * ResponseCompactionItemParam.builder()
+ * .encryptedContent(encryptedContent)
+ * .build()
+ * ```
+ */
+ fun addCompactionItem(encryptedContent: String) = apply {
+ body.addCompactionItem(encryptedContent)
+ }
+
/**
* Alias for calling [addItem] with
* `ResponseInputItem.ofImageGenerationCall(imageGenerationCall)`.
@@ -644,6 +660,23 @@ private constructor(
fun addItem(reasoning: ResponseReasoningItem) =
addItem(ResponseInputItem.ofReasoning(reasoning))
+ /** Alias for calling [addItem] with `ResponseInputItem.ofCompaction(compaction)`. */
+ fun addItem(compaction: ResponseCompactionItemParam) =
+ addItem(ResponseInputItem.ofCompaction(compaction))
+
+ /**
+ * Alias for calling [addItem] with the following:
+ * ```java
+ * ResponseCompactionItemParam.builder()
+ * .encryptedContent(encryptedContent)
+ * .build()
+ * ```
+ */
+ fun addCompactionItem(encryptedContent: String) =
+ addItem(
+ ResponseCompactionItemParam.builder().encryptedContent(encryptedContent).build()
+ )
+
/**
* Alias for calling [addItem] with
* `ResponseInputItem.ofImageGenerationCall(imageGenerationCall)`.
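
A sketch of the new compaction-item alias on `ConversationCreateParams`; the encrypted content string is a placeholder (in practice it would come from a prior `responses/compact` call), and the `conversations().create(...)` accessor is assumed from the SDK's usual service layout rather than shown in this excerpt:

```java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.conversations.Conversation;
import com.openai.models.conversations.ConversationCreateParams;

OpenAIClient client = OpenAIOkHttpClient.fromEnv();

ConversationCreateParams params = ConversationCreateParams.builder()
        // Equivalent to addItem(ResponseCompactionItemParam.builder()
        //     .encryptedContent(...).build())
        .addCompactionItem("<encrypted-content>") // placeholder
        .build();

Conversation conversation = client.conversations().create(params);
```
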
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/conversations/items/ItemCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/conversations/items/ItemCreateParams.kt
index 3ee3db14..f629d720 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/conversations/items/ItemCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/conversations/items/ItemCreateParams.kt
@@ -19,6 +19,7 @@ import com.openai.core.toImmutable
import com.openai.errors.OpenAIInvalidDataException
import com.openai.models.responses.EasyInputMessage
import com.openai.models.responses.ResponseCodeInterpreterToolCall
+import com.openai.models.responses.ResponseCompactionItemParam
import com.openai.models.responses.ResponseComputerToolCall
import com.openai.models.responses.ResponseCustomToolCall
import com.openai.models.responses.ResponseCustomToolCallOutput
@@ -218,6 +219,21 @@ private constructor(
/** Alias for calling [addItem] with `ResponseInputItem.ofReasoning(reasoning)`. */
fun addItem(reasoning: ResponseReasoningItem) = apply { body.addItem(reasoning) }
+ /** Alias for calling [addItem] with `ResponseInputItem.ofCompaction(compaction)`. */
+ fun addItem(compaction: ResponseCompactionItemParam) = apply { body.addItem(compaction) }
+
+ /**
+ * Alias for calling [addItem] with the following:
+ * ```java
+ * ResponseCompactionItemParam.builder()
+ * .encryptedContent(encryptedContent)
+ * .build()
+ * ```
+ */
+ fun addCompactionItem(encryptedContent: String) = apply {
+ body.addCompactionItem(encryptedContent)
+ }
+
/**
* Alias for calling [addItem] with
* `ResponseInputItem.ofImageGenerationCall(imageGenerationCall)`.
@@ -639,6 +655,23 @@ private constructor(
fun addItem(reasoning: ResponseReasoningItem) =
addItem(ResponseInputItem.ofReasoning(reasoning))
+ /** Alias for calling [addItem] with `ResponseInputItem.ofCompaction(compaction)`. */
+ fun addItem(compaction: ResponseCompactionItemParam) =
+ addItem(ResponseInputItem.ofCompaction(compaction))
+
+ /**
+ * Alias for calling [addItem] with the following:
+ * ```java
+ * ResponseCompactionItemParam.builder()
+ * .encryptedContent(encryptedContent)
+ * .build()
+ * ```
+ */
+ fun addCompactionItem(encryptedContent: String) =
+ addItem(
+ ResponseCompactionItemParam.builder().encryptedContent(encryptedContent).build()
+ )
+
/**
* Alias for calling [addItem] with
* `ResponseInputItem.ofImageGenerationCall(imageGenerationCall)`.
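
The same alias exists on `ItemCreateParams` for appending a compaction item to an existing conversation. A sketch with a placeholder conversation id, assuming the usual `conversationId(...)` path-parameter setter and `conversations().items().create(...)` accessor:

```java
import com.openai.models.conversations.items.ItemCreateParams;

ItemCreateParams params = ItemCreateParams.builder()
        .conversationId("conv_123") // placeholder id
        .addCompactionItem("<encrypted-content>") // placeholder
        .build();

client.conversations().items().create(params);
```
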
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/CreateEvalCompletionsRunDataSource.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/CreateEvalCompletionsRunDataSource.kt
index 64c30b68..6e50e10a 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/CreateEvalCompletionsRunDataSource.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/CreateEvalCompletionsRunDataSource.kt
@@ -4474,14 +4474,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -4662,15 +4664,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
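
Per the updated documentation, `xhigh` joins the supported reasoning-effort values (currently only for `gpt-5.1-codex-max`). A sketch of passing it through the nested `SamplingParams` builder; `ReasoningEffort.of("xhigh")` is used here because this excerpt doesn't show whether a dedicated `XHIGH` constant ships elsewhere in the release:

```java
import com.openai.models.ReasoningEffort;
import com.openai.models.evals.runs.CreateEvalCompletionsRunDataSource;

CreateEvalCompletionsRunDataSource.SamplingParams samplingParams =
        CreateEvalCompletionsRunDataSource.SamplingParams.builder()
                // xhigh is currently only supported for gpt-5.1-codex-max.
                .reasoningEffort(ReasoningEffort.of("xhigh"))
                .build();
```
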
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCancelResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCancelResponse.kt
index 9f9dc382..43eeef52 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCancelResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCancelResponse.kt
@@ -2735,9 +2735,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -2745,6 +2745,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -3057,9 +3058,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and
- * fewer tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`,
+ * `high`, and `xhigh`. Reducing reasoning effort can result in faster
+ * responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`,
* and `high`. Tool calls are supported for all reasoning values in
@@ -3068,6 +3069,7 @@ private constructor(
* do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
@@ -6060,8 +6062,8 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
* reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -6069,6 +6071,7 @@ private constructor(
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g.
* if the server responded with an unexpected value).
@@ -6255,9 +6258,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -6265,6 +6268,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateParams.kt
index 862eddee..4f48b185 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateParams.kt
@@ -2497,9 +2497,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -2507,6 +2507,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -2817,9 +2818,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and
- * fewer tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`,
+ * `high`, and `xhigh`. Reducing reasoning effort can result in faster
+ * responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`,
* and `high`. Tool calls are supported for all reasoning values in
@@ -2828,6 +2829,7 @@ private constructor(
* do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
@@ -5945,8 +5947,8 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
* reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -5954,6 +5956,7 @@ private constructor(
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g.
* if the server responded with an unexpected value).
@@ -6140,9 +6143,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -6150,6 +6153,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateResponse.kt
index bed308e3..31809894 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunCreateResponse.kt
@@ -2735,9 +2735,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -2745,6 +2745,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -3057,9 +3058,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and
- * fewer tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`,
+ * `high`, and `xhigh`. Reducing reasoning effort can result in faster
+ * responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`,
* and `high`. Tool calls are supported for all reasoning values in
@@ -3068,6 +3069,7 @@ private constructor(
* do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
@@ -6060,8 +6062,8 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
* reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -6069,6 +6071,7 @@ private constructor(
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g.
* if the server responded with an unexpected value).
@@ -6255,9 +6258,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -6265,6 +6268,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunListResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunListResponse.kt
index 0241c34c..6383fc57 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunListResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunListResponse.kt
@@ -2735,9 +2735,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -2745,6 +2745,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -3057,9 +3058,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and
- * fewer tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`,
+ * `high`, and `xhigh`. Reducing reasoning effort can result in faster
+ * responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`,
* and `high`. Tool calls are supported for all reasoning values in
@@ -3068,6 +3069,7 @@ private constructor(
* do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
@@ -6060,8 +6062,8 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
* reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -6069,6 +6071,7 @@ private constructor(
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g.
* if the server responded with an unexpected value).
@@ -6255,9 +6258,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -6265,6 +6268,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunRetrieveResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunRetrieveResponse.kt
index 74e92339..9957806c 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunRetrieveResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/evals/runs/RunRetrieveResponse.kt
@@ -2735,9 +2735,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -2745,6 +2745,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -3057,9 +3058,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and
- * fewer tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`,
+ * `high`, and `xhigh`. Reducing reasoning effort can result in faster
+ * responses and fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`,
* and `high`. Tool calls are supported for all reasoning values in
@@ -3068,6 +3069,7 @@ private constructor(
* do not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
@@ -6060,8 +6062,8 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
* reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
@@ -6069,6 +6071,7 @@ private constructor(
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g.
* if the server responded with an unexpected value).
@@ -6255,9 +6258,9 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning).
- * Currently supported values are `none`, `minimal`, `low`, `medium`, and
- * `high`. Reducing reasoning effort can result in faster responses and fewer
- * tokens used on reasoning in a response.
+ * Currently supported values are `none`, `minimal`, `low`, `medium`, `high`,
+ * and `xhigh`. Reducing reasoning effort can result in faster responses and
+ * fewer tokens used on reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The
* supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and
* `high`. Tool calls are supported for all reasoning values in gpt-5.1.
@@ -6265,6 +6268,7 @@ private constructor(
* not support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
* effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/graders/gradermodels/ScoreModelGrader.kt b/openai-java-core/src/main/kotlin/com/openai/models/graders/gradermodels/ScoreModelGrader.kt
index 34b14865..c62e4d9e 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/graders/gradermodels/ScoreModelGrader.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/graders/gradermodels/ScoreModelGrader.kt
@@ -1786,14 +1786,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning
* values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported
* for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support
* `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -1939,15 +1941,16 @@ private constructor(
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
- * reasoning effort can result in faster responses and fewer tokens used on reasoning in
- * a response.
+ * supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
* - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
* reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls
* are supported for all reasoning values in gpt-5.1.
* - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
* support `none`.
* - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
*/
fun reasoningEffort(reasoningEffort: ReasoningEffort?) =
reasoningEffort(JsonField.ofNullable(reasoningEffort))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/InputAudioBufferDtmfEventReceivedEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/InputAudioBufferDtmfEventReceivedEvent.kt
new file mode 100644
index 00000000..01f67ebf
--- /dev/null
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/InputAudioBufferDtmfEventReceivedEvent.kt
@@ -0,0 +1,263 @@
+// File generated from our OpenAPI spec by Stainless.
+
+package com.openai.models.realtime
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter
+import com.fasterxml.jackson.annotation.JsonAnySetter
+import com.fasterxml.jackson.annotation.JsonCreator
+import com.fasterxml.jackson.annotation.JsonProperty
+import com.openai.core.ExcludeMissing
+import com.openai.core.JsonField
+import com.openai.core.JsonMissing
+import com.openai.core.JsonValue
+import com.openai.core.checkRequired
+import com.openai.errors.OpenAIInvalidDataException
+import java.util.Collections
+import java.util.Objects
+
+/**
+ * **SIP Only:** Returned when a DTMF event is received. A DTMF event is a message that represents
+ * a telephone keypad press (0–9, *, #, A–D). The `event` property is the key that the user
+ * pressed. The `received_at` is the UTC Unix timestamp at which the server received the event.
+ */
+class InputAudioBufferDtmfEventReceivedEvent
+@JsonCreator(mode = JsonCreator.Mode.DISABLED)
+private constructor(
+ private val event: JsonField<String>,
+ private val receivedAt: JsonField<Long>,
+ private val type: JsonValue,
+ private val additionalProperties: MutableMap<String, JsonValue>,
+) {
+
+ @JsonCreator
+ private constructor(
+ @JsonProperty("event") @ExcludeMissing event: JsonField = JsonMissing.of(),
+ @JsonProperty("received_at") @ExcludeMissing receivedAt: JsonField = JsonMissing.of(),
+ @JsonProperty("type") @ExcludeMissing type: JsonValue = JsonMissing.of(),
+ ) : this(event, receivedAt, type, mutableMapOf())
+
+ /**
+ * The telephone keypad that was pressed by the user.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun event(): String = event.getRequired("event")
+
+ /**
+ * UTC Unix timestamp when the DTMF event was received by the server.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun receivedAt(): Long = receivedAt.getRequired("received_at")
+
+ /**
+ * The event type, must be `input_audio_buffer.dtmf_event_received`.
+ *
+ * Expected to always return the following:
+ * ```java
+ * JsonValue.from("input_audio_buffer.dtmf_event_received")
+ * ```
+ *
+ * However, this method can be useful for debugging and logging (e.g. if the server responded
+ * with an unexpected value).
+ */
+ @JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
+
+ /**
+ * Returns the raw JSON value of [event].
+ *
+ * Unlike [event], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("event") @ExcludeMissing fun _event(): JsonField = event
+
+ /**
+ * Returns the raw JSON value of [receivedAt].
+ *
+ * Unlike [receivedAt], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("received_at") @ExcludeMissing fun _receivedAt(): JsonField = receivedAt
+
+ @JsonAnySetter
+ private fun putAdditionalProperty(key: String, value: JsonValue) {
+ additionalProperties.put(key, value)
+ }
+
+ @JsonAnyGetter
+ @ExcludeMissing
+ fun _additionalProperties(): Map<String, JsonValue> =
+ Collections.unmodifiableMap(additionalProperties)
+
+ fun toBuilder() = Builder().from(this)
+
+ companion object {
+
+ /**
+ * Returns a mutable builder for constructing an instance of
+ * [InputAudioBufferDtmfEventReceivedEvent].
+ *
+ * The following fields are required:
+ * ```java
+ * .event()
+ * .receivedAt()
+ * ```
+ */
+ @JvmStatic fun builder() = Builder()
+ }
+
+ /** A builder for [InputAudioBufferDtmfEventReceivedEvent]. */
+ class Builder internal constructor() {
+
+ private var event: JsonField<String>? = null
+ private var receivedAt: JsonField<Long>? = null
+ private var type: JsonValue = JsonValue.from("input_audio_buffer.dtmf_event_received")
+ private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
+
+ @JvmSynthetic
+ internal fun from(
+ inputAudioBufferDtmfEventReceivedEvent: InputAudioBufferDtmfEventReceivedEvent
+ ) = apply {
+ event = inputAudioBufferDtmfEventReceivedEvent.event
+ receivedAt = inputAudioBufferDtmfEventReceivedEvent.receivedAt
+ type = inputAudioBufferDtmfEventReceivedEvent.type
+ additionalProperties =
+ inputAudioBufferDtmfEventReceivedEvent.additionalProperties.toMutableMap()
+ }
+
+ /** The telephone keypad that was pressed by the user. */
+ fun event(event: String) = event(JsonField.of(event))
+
+ /**
+ * Sets [Builder.event] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.event] with a well-typed [String] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun event(event: JsonField<String>) = apply { this.event = event }
+
+ /** UTC Unix timestamp when the DTMF event was received by the server. */
+ fun receivedAt(receivedAt: Long) = receivedAt(JsonField.of(receivedAt))
+
+ /**
+ * Sets [Builder.receivedAt] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.receivedAt] with a well-typed [Long] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun receivedAt(receivedAt: JsonField<Long>) = apply { this.receivedAt = receivedAt }
+
+ /**
+ * Sets the field to an arbitrary JSON value.
+ *
+ * It is usually unnecessary to call this method because the field defaults to the
+ * following:
+ * ```java
+ * JsonValue.from("input_audio_buffer.dtmf_event_received")
+ * ```
+ *
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun type(type: JsonValue) = apply { this.type = type }
+
+ fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
+ this.additionalProperties.clear()
+ putAllAdditionalProperties(additionalProperties)
+ }
+
+ fun putAdditionalProperty(key: String, value: JsonValue) = apply {
+ additionalProperties.put(key, value)
+ }
+
+ fun putAllAdditionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
+ this.additionalProperties.putAll(additionalProperties)
+ }
+
+ fun removeAdditionalProperty(key: String) = apply { additionalProperties.remove(key) }
+
+ fun removeAllAdditionalProperties(keys: Set<String>) = apply {
+ keys.forEach(::removeAdditionalProperty)
+ }
+
+ /**
+ * Returns an immutable instance of [InputAudioBufferDtmfEventReceivedEvent].
+ *
+ * Further updates to this [Builder] will not mutate the returned instance.
+ *
+ * The following fields are required:
+ * ```java
+ * .event()
+ * .receivedAt()
+ * ```
+ *
+ * @throws IllegalStateException if any required field is unset.
+ */
+ fun build(): InputAudioBufferDtmfEventReceivedEvent =
+ InputAudioBufferDtmfEventReceivedEvent(
+ checkRequired("event", event),
+ checkRequired("receivedAt", receivedAt),
+ type,
+ additionalProperties.toMutableMap(),
+ )
+ }
+
+ private var validated: Boolean = false
+
+ fun validate(): InputAudioBufferDtmfEventReceivedEvent = apply {
+ if (validated) {
+ return@apply
+ }
+
+ event()
+ receivedAt()
+ _type().let {
+ if (it != JsonValue.from("input_audio_buffer.dtmf_event_received")) {
+ throw OpenAIInvalidDataException("'type' is invalid, received $it")
+ }
+ }
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic
+ internal fun validity(): Int =
+ (if (event.asKnown().isPresent) 1 else 0) +
+ (if (receivedAt.asKnown().isPresent) 1 else 0) +
+ type.let {
+ if (it == JsonValue.from("input_audio_buffer.dtmf_event_received")) 1 else 0
+ }
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is InputAudioBufferDtmfEventReceivedEvent &&
+ event == other.event &&
+ receivedAt == other.receivedAt &&
+ type == other.type &&
+ additionalProperties == other.additionalProperties
+ }
+
+ private val hashCode: Int by lazy {
+ Objects.hash(event, receivedAt, type, additionalProperties)
+ }
+
+ override fun hashCode(): Int = hashCode
+
+ override fun toString() =
+ "InputAudioBufferDtmfEventReceivedEvent{event=$event, receivedAt=$receivedAt, type=$type, additionalProperties=$additionalProperties}"
+}
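Since the class above is brand new, a quick sketch of the consumer-facing surface it generates, using hypothetical values (a caller pressing the `5` key):

```java
import com.openai.models.realtime.InputAudioBufferDtmfEventReceivedEvent;

// Hypothetical values: the caller pressed "5"; received_at is seconds since the Unix epoch.
InputAudioBufferDtmfEventReceivedEvent dtmf =
        InputAudioBufferDtmfEventReceivedEvent.builder()
                .event("5")
                .receivedAt(1733300000L)
                .build();

String key = dtmf.event();            // "5"
long receivedAt = dtmf.receivedAt();  // 1733300000
```

In practice instances are deserialized from server events rather than built by hand; the builder is shown only to make the field shapes concrete.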
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/OutputAudioBufferClearEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/OutputAudioBufferClearEvent.kt
index 4a21ca93..f07856af 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/OutputAudioBufferClearEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/OutputAudioBufferClearEvent.kt
@@ -16,9 +16,9 @@ import java.util.Objects
import java.util.Optional
/**
- * **WebRTC Only:** Emit to cut off the current audio response. This will trigger the server to stop
- * generating audio and emit a `output_audio_buffer.cleared` event. This event should be preceded by
- * a `response.cancel` client event to stop the generation of the current response.
+ * **WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the server to
+ * stop generating audio and emit an `output_audio_buffer.cleared` event. This event should be
+ * preceded by a `response.cancel` client event to stop the generation of the current response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
class OutputAudioBufferClearEvent
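The reworded docstring above describes a two-step cut-off: cancel the in-progress response, then clear the buffered audio. Below is a rough sketch of preparing that pair of client events with the SDK's union type; the `ResponseCancelEvent` no-argument build and the exact factory names are assumptions, and actually sending the events over a realtime connection is not shown.

```java
import com.openai.models.realtime.OutputAudioBufferClearEvent;
import com.openai.models.realtime.RealtimeClientEvent;
import com.openai.models.realtime.ResponseCancelEvent;

// 1. Ask the server to stop generating the current response.
RealtimeClientEvent cancel =
        RealtimeClientEvent.ofResponseCancel(ResponseCancelEvent.builder().build());

// 2. Then cut off audio that is already buffered on the server.
RealtimeClientEvent clear =
        RealtimeClientEvent.ofOutputAudioBufferClear(OutputAudioBufferClearEvent.builder().build());

// Send `cancel` first, then `clear`, over the realtime connection (transport code omitted).
```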
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeAudioInputTurnDetection.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeAudioInputTurnDetection.kt
index b99f1923..198924a6 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeAudioInputTurnDetection.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeAudioInputTurnDetection.kt
@@ -308,7 +308,12 @@ private constructor(
@JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
/**
- * Whether or not to automatically generate a response when a VAD stop event occurs.
+ * Whether or not to automatically generate a response when a VAD stop event occurs. If
+ * `interrupt_response` is set to `false` this may fail to create a response if the model is
+ * already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -334,8 +339,12 @@ private constructor(
fun idleTimeoutMs(): Optional<Long> = idleTimeoutMs.getOptional("idle_timeout_ms")
/**
- * Whether or not to automatically interrupt any ongoing response with output to the default
- * conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * If `true` then the response will be cancelled, otherwise it will continue until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -486,7 +495,14 @@ private constructor(
*/
fun type(type: JsonValue) = apply { this.type = type }
- /** Whether or not to automatically generate a response when a VAD stop event occurs. */
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event occurs. If
+ * `interrupt_response` is set to `false` this may fail to create a response if the
+ * model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
+ */
fun createResponse(createResponse: Boolean) =
createResponse(JsonField.of(createResponse))
@@ -540,8 +556,13 @@ private constructor(
}
/**
- * Whether or not to automatically interrupt any ongoing response with output to the
- * default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with output
+ * to the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs. If `true` then the response will be cancelled, otherwise it will continue
+ * until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*/
fun interruptResponse(interruptResponse: Boolean) =
interruptResponse(JsonField.of(interruptResponse))
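To make the `create_response`/`interrupt_response` interplay documented above concrete, here is a hedged sketch of a turn-detection config in which the model never responds automatically but VAD events are still emitted. The top-level `RealtimeAudioInputTurnDetection.builder()` entry point is an assumption; depending on how this class is generated, the same setters may instead live on a nested server-VAD variant.

```java
import com.openai.models.realtime.RealtimeAudioInputTurnDetection;

// Assumed builder entry point; createResponse/interruptResponse mirror the JSON fields described above.
RealtimeAudioInputTurnDetection turnDetection =
        RealtimeAudioInputTurnDetection.builder()
                .createResponse(false)    // do not auto-create a response on VAD stop
                .interruptResponse(false) // let any in-flight response finish on VAD start
                .build();
```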
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeClientEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeClientEvent.kt
index 5a0300e3..897150aa 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeClientEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeClientEvent.kt
@@ -107,8 +107,8 @@ private constructor(
Optional.ofNullable(inputAudioBufferClear)
/**
- * **WebRTC Only:** Emit to cut off the current audio response. This will trigger the server to
- * stop generating audio and emit a `output_audio_buffer.cleared` event. This event should be
+ * **WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the server
+ * to stop generating audio and emit an `output_audio_buffer.cleared` event. This event should be
* preceded by a `response.cancel` client event to stop the generation of the current response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@@ -266,8 +266,8 @@ private constructor(
inputAudioBufferClear.getOrThrow("inputAudioBufferClear")
/**
- * **WebRTC Only:** Emit to cut off the current audio response. This will trigger the server to
- * stop generating audio and emit a `output_audio_buffer.cleared` event. This event should be
+ * **WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the server
+ * to stop generating audio and emit an `output_audio_buffer.cleared` event. This event should be
* preceded by a `response.cancel` client event to stop the generation of the current response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@@ -631,10 +631,10 @@ private constructor(
RealtimeClientEvent(inputAudioBufferClear = inputAudioBufferClear)
/**
- * **WebRTC Only:** Emit to cut off the current audio response. This will trigger the server
- * to stop generating audio and emit a `output_audio_buffer.cleared` event. This event
- * should be preceded by a `response.cancel` client event to stop the generation of the
- * current response.
+ * **WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the
+ * server to stop generating audio and emit an `output_audio_buffer.cleared` event. This
+ * event should be preceded by a `response.cancel` client event to stop the generation of
+ * the current response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@JvmStatic
@@ -785,10 +785,10 @@ private constructor(
fun visitInputAudioBufferClear(inputAudioBufferClear: InputAudioBufferClearEvent): T
/**
- * **WebRTC Only:** Emit to cut off the current audio response. This will trigger the server
- * to stop generating audio and emit a `output_audio_buffer.cleared` event. This event
- * should be preceded by a `response.cancel` client event to stop the generation of the
- * current response.
+ * **WebRTC/SIP Only:** Emit to cut off the current audio response. This will trigger the
+ * server to stop generating audio and emit an `output_audio_buffer.cleared` event. This
+ * event should be preceded by a `response.cancel` client event to stop the generation of
+ * the current response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun visitOutputAudioBufferClear(outputAudioBufferClear: OutputAudioBufferClearEvent): T
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeServerEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeServerEvent.kt
index 517884ca..9f5c943d 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeServerEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeServerEvent.kt
@@ -49,6 +49,7 @@ private constructor(
private val error: RealtimeErrorEvent? = null,
private val inputAudioBufferCleared: InputAudioBufferClearedEvent? = null,
private val inputAudioBufferCommitted: InputAudioBufferCommittedEvent? = null,
+ private val inputAudioBufferDtmfEventReceived: InputAudioBufferDtmfEventReceivedEvent? = null,
private val inputAudioBufferSpeechStarted: InputAudioBufferSpeechStartedEvent? = null,
private val inputAudioBufferSpeechStopped: InputAudioBufferSpeechStoppedEvent? = null,
private val rateLimitsUpdated: RateLimitsUpdatedEvent? = null,
@@ -187,6 +188,15 @@ private constructor(
fun inputAudioBufferCommitted(): Optional<InputAudioBufferCommittedEvent> =
Optional.ofNullable(inputAudioBufferCommitted)
+ /**
+ * **SIP Only:** Returned when a DTMF event is received. A DTMF event is a message that
+ * represents a telephone keypad press (0–9, *, #, A–D). The `event` property is the key that
+ * the user pressed. The `received_at` is the UTC Unix timestamp at which the server received
+ * the event.
+ */
+ fun inputAudioBufferDtmfEventReceived(): Optional<InputAudioBufferDtmfEventReceivedEvent> =
+ Optional.ofNullable(inputAudioBufferDtmfEventReceived)
+
/**
* Sent by the server when in `server_vad` mode to indicate that speech has been detected in the
* audio buffer. This can happen any time audio is added to the buffer (unless speech is already
@@ -318,8 +328,8 @@ private constructor(
fun sessionUpdated(): Optional<SessionUpdatedEvent> = Optional.ofNullable(sessionUpdated)
/**
- * **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event is
- * emitted after an audio content part has been added (`response.content_part.added`) to the
+ * **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event
+ * is emitted after an audio content part has been added (`response.content_part.added`) to the
* response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@@ -327,7 +337,7 @@ private constructor(
Optional.ofNullable(outputAudioBufferStarted)
/**
- * **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the
* server, and no more audio is forthcoming. This event is emitted after the full response data
* has been sent to the client (`response.done`).
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
@@ -336,10 +346,10 @@ private constructor(
Optional.ofNullable(outputAudioBufferStopped)
/**
- * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens either in VAD
- * mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the client
- * has emitted the `output_audio_buffer.clear` event to manually cut off the current audio
- * response.
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. This happens either in
+ * VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the
+ * client has emitted the `output_audio_buffer.clear` event to manually cut off the current
+ * audio response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun outputAudioBufferCleared(): Optional<OutputAudioBufferCleared> =
@@ -450,6 +460,8 @@ private constructor(
fun isInputAudioBufferCommitted(): Boolean = inputAudioBufferCommitted != null
+ fun isInputAudioBufferDtmfEventReceived(): Boolean = inputAudioBufferDtmfEventReceived != null
+
fun isInputAudioBufferSpeechStarted(): Boolean = inputAudioBufferSpeechStarted != null
fun isInputAudioBufferSpeechStopped(): Boolean = inputAudioBufferSpeechStopped != null
@@ -624,6 +636,15 @@ private constructor(
fun asInputAudioBufferCommitted(): InputAudioBufferCommittedEvent =
inputAudioBufferCommitted.getOrThrow("inputAudioBufferCommitted")
+ /**
+ * **SIP Only:** Returned when a DTMF event is received. A DTMF event is a message that
+ * represents a telephone keypad press (0–9, *, #, A–D). The `event` property is the key that
+ * the user pressed. The `received_at` is the UTC Unix timestamp at which the server received
+ * the event.
+ */
+ fun asInputAudioBufferDtmfEventReceived(): InputAudioBufferDtmfEventReceivedEvent =
+ inputAudioBufferDtmfEventReceived.getOrThrow("inputAudioBufferDtmfEventReceived")
+
/**
* Sent by the server when in `server_vad` mode to indicate that speech has been detected in the
* audio buffer. This can happen any time audio is added to the buffer (unless speech is already
@@ -755,8 +776,8 @@ private constructor(
fun asSessionUpdated(): SessionUpdatedEvent = sessionUpdated.getOrThrow("sessionUpdated")
/**
- * **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event is
- * emitted after an audio content part has been added (`response.content_part.added`) to the
+ * **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event
+ * is emitted after an audio content part has been added (`response.content_part.added`) to the
* response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@@ -764,7 +785,7 @@ private constructor(
outputAudioBufferStarted.getOrThrow("outputAudioBufferStarted")
/**
- * **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the
* server, and no more audio is forthcoming. This event is emitted after the full response data
* has been sent to the client (`response.done`).
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
@@ -773,10 +794,10 @@ private constructor(
outputAudioBufferStopped.getOrThrow("outputAudioBufferStopped")
/**
- * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens either in VAD
- * mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the client
- * has emitted the `output_audio_buffer.clear` event to manually cut off the current audio
- * response.
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. This happens either in
+ * VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the
+ * client has emitted the `output_audio_buffer.clear` event to manually cut off the current
+ * audio response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun asOutputAudioBufferCleared(): OutputAudioBufferCleared =
@@ -895,6 +916,8 @@ private constructor(
visitor.visitInputAudioBufferCleared(inputAudioBufferCleared)
inputAudioBufferCommitted != null ->
visitor.visitInputAudioBufferCommitted(inputAudioBufferCommitted)
+ inputAudioBufferDtmfEventReceived != null ->
+ visitor.visitInputAudioBufferDtmfEventReceived(inputAudioBufferDtmfEventReceived)
inputAudioBufferSpeechStarted != null ->
visitor.visitInputAudioBufferSpeechStarted(inputAudioBufferSpeechStarted)
inputAudioBufferSpeechStopped != null ->
@@ -1037,6 +1060,12 @@ private constructor(
inputAudioBufferCommitted.validate()
}
+ override fun visitInputAudioBufferDtmfEventReceived(
+ inputAudioBufferDtmfEventReceived: InputAudioBufferDtmfEventReceivedEvent
+ ) {
+ inputAudioBufferDtmfEventReceived.validate()
+ }
+
override fun visitInputAudioBufferSpeechStarted(
inputAudioBufferSpeechStarted: InputAudioBufferSpeechStartedEvent
) {
@@ -1294,6 +1323,10 @@ private constructor(
inputAudioBufferCommitted: InputAudioBufferCommittedEvent
) = inputAudioBufferCommitted.validity()
+ override fun visitInputAudioBufferDtmfEventReceived(
+ inputAudioBufferDtmfEventReceived: InputAudioBufferDtmfEventReceivedEvent
+ ) = inputAudioBufferDtmfEventReceived.validity()
+
override fun visitInputAudioBufferSpeechStarted(
inputAudioBufferSpeechStarted: InputAudioBufferSpeechStartedEvent
) = inputAudioBufferSpeechStarted.validity()
@@ -1448,6 +1481,7 @@ private constructor(
error == other.error &&
inputAudioBufferCleared == other.inputAudioBufferCleared &&
inputAudioBufferCommitted == other.inputAudioBufferCommitted &&
+ inputAudioBufferDtmfEventReceived == other.inputAudioBufferDtmfEventReceived &&
inputAudioBufferSpeechStarted == other.inputAudioBufferSpeechStarted &&
inputAudioBufferSpeechStopped == other.inputAudioBufferSpeechStopped &&
rateLimitsUpdated == other.rateLimitsUpdated &&
@@ -1498,6 +1532,7 @@ private constructor(
error,
inputAudioBufferCleared,
inputAudioBufferCommitted,
+ inputAudioBufferDtmfEventReceived,
inputAudioBufferSpeechStarted,
inputAudioBufferSpeechStopped,
rateLimitsUpdated,
@@ -1557,6 +1592,8 @@ private constructor(
"RealtimeServerEvent{inputAudioBufferCleared=$inputAudioBufferCleared}"
inputAudioBufferCommitted != null ->
"RealtimeServerEvent{inputAudioBufferCommitted=$inputAudioBufferCommitted}"
+ inputAudioBufferDtmfEventReceived != null ->
+ "RealtimeServerEvent{inputAudioBufferDtmfEventReceived=$inputAudioBufferDtmfEventReceived}"
inputAudioBufferSpeechStarted != null ->
"RealtimeServerEvent{inputAudioBufferSpeechStarted=$inputAudioBufferSpeechStarted}"
inputAudioBufferSpeechStopped != null ->
@@ -1750,6 +1787,20 @@ private constructor(
fun ofInputAudioBufferCommitted(inputAudioBufferCommitted: InputAudioBufferCommittedEvent) =
RealtimeServerEvent(inputAudioBufferCommitted = inputAudioBufferCommitted)
+ /**
+ * **SIP Only:** Returned when a DTMF event is received. A DTMF event is a message that
+ * represents a telephone keypad press (0–9, *, #, A–D). The `event` property is the key
+ * that the user pressed. The `received_at` is the UTC Unix timestamp at which the server
+ * received the event.
+ */
+ @JvmStatic
+ fun ofInputAudioBufferDtmfEventReceived(
+ inputAudioBufferDtmfEventReceived: InputAudioBufferDtmfEventReceivedEvent
+ ) =
+ RealtimeServerEvent(
+ inputAudioBufferDtmfEventReceived = inputAudioBufferDtmfEventReceived
+ )
+
/**
* Sent by the server when in `server_vad` mode to indicate that speech has been detected in
* the audio buffer. This can happen any time audio is added to the buffer (unless speech is
@@ -1925,9 +1976,9 @@ private constructor(
RealtimeServerEvent(sessionUpdated = sessionUpdated)
/**
- * **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event
- * is emitted after an audio content part has been added (`response.content_part.added`) to
- * the response.
+ * **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This
+ * event is emitted after an audio content part has been added
+ * (`response.content_part.added`) to the response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@JvmStatic
@@ -1935,9 +1986,9 @@ private constructor(
RealtimeServerEvent(outputAudioBufferStarted = outputAudioBufferStarted)
/**
- * **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the
- * server, and no more audio is forthcoming. This event is emitted after the full response
- * data has been sent to the client (`response.done`).
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on
+ * the server, and no more audio is forthcoming. This event is emitted after the full
+ * response data has been sent to the client (`response.done`).
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@JvmStatic
@@ -1945,10 +1996,10 @@ private constructor(
RealtimeServerEvent(outputAudioBufferStopped = outputAudioBufferStopped)
/**
- * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens either in
- * VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the
- * client has emitted the `output_audio_buffer.clear` event to manually cut off the current
- * audio response.
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. This happens either
+ * in VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when
+ * the client has emitted the `output_audio_buffer.clear` event to manually cut off the
+ * current audio response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@JvmStatic
@@ -2164,6 +2215,16 @@ private constructor(
inputAudioBufferCommitted: InputAudioBufferCommittedEvent
): T
+ /**
+ * **SIP Only:** Returned when a DTMF event is received. A DTMF event is a message that
+ * represents a telephone keypad press (0–9, *, #, A–D). The `event` property is the key
+ * that the user pressed. The `received_at` is the UTC Unix timestamp at which the server
+ * received the event.
+ */
+ fun visitInputAudioBufferDtmfEventReceived(
+ inputAudioBufferDtmfEventReceived: InputAudioBufferDtmfEventReceivedEvent
+ ): T
+
/**
* Sent by the server when in `server_vad` mode to indicate that speech has been detected in
* the audio buffer. This can happen any time audio is added to the buffer (unless speech is
@@ -2297,26 +2358,26 @@ private constructor(
fun visitSessionUpdated(sessionUpdated: SessionUpdatedEvent): T
/**
- * **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event
- * is emitted after an audio content part has been added (`response.content_part.added`) to
- * the response.
+ * **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This
+ * event is emitted after an audio content part has been added
+ * (`response.content_part.added`) to the response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun visitOutputAudioBufferStarted(outputAudioBufferStarted: OutputAudioBufferStarted): T
/**
- * **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the
- * server, and no more audio is forthcoming. This event is emitted after the full response
- * data has been sent to the client (`response.done`).
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on
+ * the server, and no more audio is forthcoming. This event is emitted after the full
+ * response data has been sent to the client (`response.done`).
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun visitOutputAudioBufferStopped(outputAudioBufferStopped: OutputAudioBufferStopped): T
/**
- * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens either in
- * VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the
- * client has emitted the `output_audio_buffer.clear` event to manually cut off the current
- * audio response.
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. This happens either
+ * in VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when
+ * the client has emitted the `output_audio_buffer.clear` event to manually cut off the
+ * current audio response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
fun visitOutputAudioBufferCleared(outputAudioBufferCleared: OutputAudioBufferCleared): T
@@ -2499,6 +2560,18 @@ private constructor(
?.let { RealtimeServerEvent(inputAudioBufferCommitted = it, _json = json) }
?: RealtimeServerEvent(_json = json)
}
+ "input_audio_buffer.dtmf_event_received" -> {
+ return tryDeserialize(
+ node,
+ jacksonTypeRef<InputAudioBufferDtmfEventReceivedEvent>(),
+ )
+ ?.let {
+ RealtimeServerEvent(
+ inputAudioBufferDtmfEventReceived = it,
+ _json = json,
+ )
+ } ?: RealtimeServerEvent(_json = json)
+ }
"input_audio_buffer.speech_started" -> {
return tryDeserialize(
node,
@@ -2744,6 +2817,8 @@ private constructor(
generator.writeObject(value.inputAudioBufferCleared)
value.inputAudioBufferCommitted != null ->
generator.writeObject(value.inputAudioBufferCommitted)
+ value.inputAudioBufferDtmfEventReceived != null ->
+ generator.writeObject(value.inputAudioBufferDtmfEventReceived)
value.inputAudioBufferSpeechStarted != null ->
generator.writeObject(value.inputAudioBufferSpeechStarted)
value.inputAudioBufferSpeechStopped != null ->
@@ -3164,8 +3239,8 @@ private constructor(
}
/**
- * **WebRTC Only:** Emitted when the server begins streaming audio to the client. This event is
- * emitted after an audio content part has been added (`response.content_part.added`) to the
+ * **WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event
+ * is emitted after an audio content part has been added (`response.content_part.added`) to the
* response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
@@ -3413,7 +3488,7 @@ private constructor(
}
/**
- * **WebRTC Only:** Emitted when the output audio buffer has been completely drained on the
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the
* server, and no more audio is forthcoming. This event is emitted after the full response data
* has been sent to the client (`response.done`).
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
@@ -3662,10 +3737,10 @@ private constructor(
}
/**
- * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens either in VAD
- * mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the client
- * has emitted the `output_audio_buffer.clear` event to manually cut off the current audio
- * response.
+ * **WebRTC/SIP Only:** Emitted when the output audio buffer is cleared. This happens either in
+ * VAD mode when the user has interrupted (`input_audio_buffer.speech_started`), or when the
+ * client has emitted the `output_audio_buffer.clear` event to manually cut off the current
+ * audio response.
* [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
*/
class OutputAudioBufferCleared
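Tying the union plumbing above together: a small sketch of reacting to the new DTMF server event as it arrives. The listener/connection wiring that produces `serverEvent` is assumed and not shown; only the accessor added in this diff is used.

```java
import com.openai.models.realtime.RealtimeServerEvent;

// Invoked for each decoded server event (connection wiring not shown).
void onServerEvent(RealtimeServerEvent serverEvent) {
    serverEvent.inputAudioBufferDtmfEventReceived()
            .ifPresent(dtmf ->
                    System.out.println("DTMF " + dtmf.event() + " received at " + dtmf.receivedAt()));
}
```

Exhaustive handling is also possible via `accept(...)` with the new `visitInputAudioBufferDtmfEventReceived` visitor method.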
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
index 877d611e..88bc0c56 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSession.kt
@@ -2990,7 +2990,12 @@ private constructor(
@JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
/**
- * Whether or not to automatically generate a response when a VAD stop event occurs.
+ * Whether or not to automatically generate a response when a VAD stop event occurs. If
+ * `interrupt_response` is set to `false` this may fail to create a response if the
+ * model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if
* the server responded with an unexpected value).
@@ -3016,8 +3021,13 @@ private constructor(
fun idleTimeoutMs(): Optional<Long> = idleTimeoutMs.getOptional("idle_timeout_ms")
/**
- * Whether or not to automatically interrupt any ongoing response with output to the
- * default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with output
+ * to the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs. If `true` then the response will be cancelled, otherwise it will continue
+ * until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if
* the server responded with an unexpected value).
@@ -3173,6 +3183,11 @@ private constructor(
/**
* Whether or not to automatically generate a response when a VAD stop event occurs.
+ * If `interrupt_response` is set to `false` this may fail to create a response if
+ * the model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model
+ * will never respond automatically but VAD events will still be emitted.
*/
fun createResponse(createResponse: Boolean) =
createResponse(JsonField.of(createResponse))
@@ -3228,9 +3243,13 @@ private constructor(
}
/**
- * Whether or not to automatically interrupt any ongoing response with output to the
- * default conversation (i.e. `conversation` of `auto`) when a VAD start event
- * occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with
+ * output to the default conversation (i.e. `conversation` of `auto`) when a VAD
+ * start event occurs. If `true` then the response will be cancelled, otherwise it
+ * will continue until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model
+ * will never respond automatically but VAD events will still be emitted.
*/
fun interruptResponse(interruptResponse: Boolean) =
interruptResponse(JsonField.of(interruptResponse))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
index 8ce46c57..b6b530d8 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeSessionCreateRequest.kt
@@ -224,12 +224,16 @@ private constructor(
* When the number of tokens in a conversation exceeds the model's input token limit, the
* conversation be truncated, meaning messages (starting from the oldest) will not be included
* in the model's context. A 32k context model with 4,096 max output tokens can only include
- * 28,224 tokens in the context before truncation occurs. Clients can configure truncation
- * behavior to truncate with a lower max token limit, which is an effective way to control token
- * usage and cost. Truncation will reduce the number of cached tokens on the next turn (busting
- * the cache), since messages are dropped from the beginning of the context. However, clients
- * can also configure truncation to retain messages up to a fraction of the maximum context
- * size, which will reduce the need for future truncations and thus improve the cache rate.
+ * 28,224 tokens in the context before truncation occurs.
+ *
+ * Clients can configure truncation behavior to truncate with a lower max token limit, which is
+ * an effective way to control token usage and cost.
+ *
+ * Truncation will reduce the number of cached tokens on the next turn (busting the cache),
+ * since messages are dropped from the beginning of the context. However, clients can also
+ * configure truncation to retain messages up to a fraction of the maximum context size, which
+ * will reduce the need for future truncations and thus improve the cache rate.
+ *
* Truncation can be disabled entirely, which means the server will never truncate but would
* instead return an error if the conversation exceeds the model's input token limit.
*
@@ -678,15 +682,18 @@ private constructor(
* When the number of tokens in a conversation exceeds the model's input token limit, the
* conversation be truncated, meaning messages (starting from the oldest) will not be
* included in the model's context. A 32k context model with 4,096 max output tokens can
- * only include 28,224 tokens in the context before truncation occurs. Clients can configure
- * truncation behavior to truncate with a lower max token limit, which is an effective way
- * to control token usage and cost. Truncation will reduce the number of cached tokens on
- * the next turn (busting the cache), since messages are dropped from the beginning of the
- * context. However, clients can also configure truncation to retain messages up to a
- * fraction of the maximum context size, which will reduce the need for future truncations
- * and thus improve the cache rate. Truncation can be disabled entirely, which means the
- * server will never truncate but would instead return an error if the conversation exceeds
- * the model's input token limit.
+ * only include 28,224 tokens in the context before truncation occurs.
+ *
+ * Clients can configure truncation behavior to truncate with a lower max token limit, which
+ * is an effective way to control token usage and cost.
+ *
+ * Truncation will reduce the number of cached tokens on the next turn (busting the cache),
+ * since messages are dropped from the beginning of the context. However, clients can also
+ * configure truncation to retain messages up to a fraction of the maximum context size,
+ * which will reduce the need for future truncations and thus improve the cache rate.
+ *
+ * Truncation can be disabled entirely, which means the server will never truncate but would
+ * instead return an error if the conversation exceeds the model's input token limit.
*/
fun truncation(truncation: RealtimeTruncation) = truncation(JsonField.of(truncation))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTranscriptionSessionAudioInputTurnDetection.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTranscriptionSessionAudioInputTurnDetection.kt
index 218e0007..8969e7ea 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTranscriptionSessionAudioInputTurnDetection.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTranscriptionSessionAudioInputTurnDetection.kt
@@ -331,7 +331,12 @@ private constructor(
@JsonProperty("type") @ExcludeMissing fun _type(): JsonValue = type
/**
- * Whether or not to automatically generate a response when a VAD stop event occurs.
+ * Whether or not to automatically generate a response when a VAD stop event occurs. If
+ * `interrupt_response` is set to `false` this may fail to create a response if the model is
+ * already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -357,8 +362,12 @@ private constructor(
fun idleTimeoutMs(): Optional<Long> = idleTimeoutMs.getOptional("idle_timeout_ms")
/**
- * Whether or not to automatically interrupt any ongoing response with output to the default
- * conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * If `true` then the response will be cancelled, otherwise it will continue until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
* server responded with an unexpected value).
@@ -509,7 +518,14 @@ private constructor(
*/
fun type(type: JsonValue) = apply { this.type = type }
- /** Whether or not to automatically generate a response when a VAD stop event occurs. */
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event occurs. If
+ * `interrupt_response` is set to `false` this may fail to create a response if the
+ * model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
+ */
fun createResponse(createResponse: Boolean) =
createResponse(JsonField.of(createResponse))
@@ -563,8 +579,13 @@ private constructor(
}
/**
- * Whether or not to automatically interrupt any ongoing response with output to the
- * default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with output
+ * to the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs. If `true` then the response will be cancelled, otherwise it will continue
+ * until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the model will
+ * never respond automatically but VAD events will still be emitted.
*/
fun interruptResponse(interruptResponse: Boolean) =
interruptResponse(JsonField.of(interruptResponse))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTruncation.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTruncation.kt
index 42acf274..fb192b3e 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTruncation.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/RealtimeTruncation.kt
@@ -25,14 +25,18 @@ import java.util.Optional
* When the number of tokens in a conversation exceeds the model's input token limit, the
* conversation be truncated, meaning messages (starting from the oldest) will not be included in
* the model's context. A 32k context model with 4,096 max output tokens can only include 28,224
- * tokens in the context before truncation occurs. Clients can configure truncation behavior to
- * truncate with a lower max token limit, which is an effective way to control token usage and cost.
+ * tokens in the context before truncation occurs.
+ *
+ * Clients can configure truncation behavior to truncate with a lower max token limit, which is an
+ * effective way to control token usage and cost.
+ *
* Truncation will reduce the number of cached tokens on the next turn (busting the cache), since
* messages are dropped from the beginning of the context. However, clients can also configure
* truncation to retain messages up to a fraction of the maximum context size, which will reduce the
- * need for future truncations and thus improve the cache rate. Truncation can be disabled entirely,
- * which means the server will never truncate but would instead return an error if the conversation
- * exceeds the model's input token limit.
+ * need for future truncations and thus improve the cache rate.
+ *
+ * Truncation can be disabled entirely, which means the server will never truncate but would instead
+ * return an error if the conversation exceeds the model's input token limit.
*/
@JsonDeserialize(using = RealtimeTruncation.Deserializer::class)
@JsonSerialize(using = RealtimeTruncation.Serializer::class)
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
index 01e4d2c4..181b2f86 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/realtime/clientsecrets/RealtimeSessionCreateResponse.kt
@@ -240,12 +240,16 @@ private constructor(
* When the number of tokens in a conversation exceeds the model's input token limit, the
* conversation be truncated, meaning messages (starting from the oldest) will not be included
* in the model's context. A 32k context model with 4,096 max output tokens can only include
- * 28,224 tokens in the context before truncation occurs. Clients can configure truncation
- * behavior to truncate with a lower max token limit, which is an effective way to control token
- * usage and cost. Truncation will reduce the number of cached tokens on the next turn (busting
- * the cache), since messages are dropped from the beginning of the context. However, clients
- * can also configure truncation to retain messages up to a fraction of the maximum context
- * size, which will reduce the need for future truncations and thus improve the cache rate.
+ * 28,224 tokens in the context before truncation occurs.
+ *
+ * Clients can configure truncation behavior to truncate with a lower max token limit, which is
+ * an effective way to control token usage and cost.
+ *
+ * Truncation will reduce the number of cached tokens on the next turn (busting the cache),
+ * since messages are dropped from the beginning of the context. However, clients can also
+ * configure truncation to retain messages up to a fraction of the maximum context size, which
+ * will reduce the need for future truncations and thus improve the cache rate.
+ *
* Truncation can be disabled entirely, which means the server will never truncate but would
* instead return an error if the conversation exceeds the model's input token limit.
*
@@ -691,15 +695,18 @@ private constructor(
* When the number of tokens in a conversation exceeds the model's input token limit, the
* conversation be truncated, meaning messages (starting from the oldest) will not be
* included in the model's context. A 32k context model with 4,096 max output tokens can
- * only include 28,224 tokens in the context before truncation occurs. Clients can configure
- * truncation behavior to truncate with a lower max token limit, which is an effective way
- * to control token usage and cost. Truncation will reduce the number of cached tokens on
- * the next turn (busting the cache), since messages are dropped from the beginning of the
- * context. However, clients can also configure truncation to retain messages up to a
- * fraction of the maximum context size, which will reduce the need for future truncations
- * and thus improve the cache rate. Truncation can be disabled entirely, which means the
- * server will never truncate but would instead return an error if the conversation exceeds
- * the model's input token limit.
+ * only include 28,224 tokens in the context before truncation occurs.
+ *
+ * Clients can configure truncation behavior to truncate with a lower max token limit, which
+ * is an effective way to control token usage and cost.
+ *
+ * Truncation will reduce the number of cached tokens on the next turn (busting the cache),
+ * since messages are dropped from the beginning of the context. However, clients can also
+ * configure truncation to retain messages up to a fraction of the maximum context size,
+ * which will reduce the need for future truncations and thus improve the cache rate.
+ *
+ * Truncation can be disabled entirely, which means the server will never truncate but would
+ * instead return an error if the conversation exceeds the model's input token limit.
*/
fun truncation(truncation: RealtimeTruncation) = truncation(JsonField.of(truncation))
@@ -1781,7 +1788,11 @@ private constructor(
/**
* Whether or not to automatically generate a response when a VAD stop event
- * occurs.
+ * occurs. If `interrupt_response` is set to `false` this may fail to create a
+ * response if the model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the
+ * model will never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -1810,9 +1821,13 @@ private constructor(
idleTimeoutMs.getOptional("idle_timeout_ms")
/**
- * Whether or not to automatically interrupt any ongoing response with output to
- * the default conversation (i.e. `conversation` of `auto`) when a VAD start
- * event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response with
+ * output to the default conversation (i.e. `conversation` of `auto`) when a VAD
+ * start event occurs. If `true` then the response will be cancelled, otherwise
+ * it will continue until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`, the
+ * model will never respond automatically but VAD events will still be emitted.
*
* @throws OpenAIInvalidDataException if the JSON field has an unexpected type
* (e.g. if the server responded with an unexpected value).
@@ -1972,7 +1987,12 @@ private constructor(
/**
* Whether or not to automatically generate a response when a VAD stop event
- * occurs.
+ * occurs. If `interrupt_response` is set to `false` this may fail to create
+ * a response if the model is already responding.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`,
+ * the model will never respond automatically but VAD events will still be
+ * emitted.
*/
fun createResponse(createResponse: Boolean) =
createResponse(JsonField.of(createResponse))
@@ -2033,9 +2053,14 @@ private constructor(
}
/**
- * Whether or not to automatically interrupt any ongoing response with
- * output to the default conversation (i.e. `conversation` of `auto`) when a
- * VAD start event occurs.
+ * Whether or not to automatically interrupt (cancel) any ongoing response
+ * with output to the default conversation (i.e. `conversation` of `auto`)
+ * when a VAD start event occurs. If `true` then the response will be
+ * cancelled, otherwise it will continue until complete.
+ *
+ * If both `create_response` and `interrupt_response` are set to `false`,
+ * the model will never respond automatically but VAD events will still be
+ * emitted.
*/
fun interruptResponse(interruptResponse: Boolean) =
interruptResponse(JsonField.of(interruptResponse))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/CompactedResponse.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/CompactedResponse.kt
new file mode 100644
index 00000000..fae2bd3b
--- /dev/null
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/CompactedResponse.kt
@@ -0,0 +1,450 @@
+// File generated from our OpenAPI spec by Stainless.
+
+package com.openai.models.responses
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter
+import com.fasterxml.jackson.annotation.JsonAnySetter
+import com.fasterxml.jackson.annotation.JsonCreator
+import com.fasterxml.jackson.annotation.JsonProperty
+import com.openai.core.ExcludeMissing
+import com.openai.core.JsonField
+import com.openai.core.JsonMissing
+import com.openai.core.JsonValue
+import com.openai.core.checkKnown
+import com.openai.core.checkRequired
+import com.openai.core.toImmutable
+import com.openai.errors.OpenAIInvalidDataException
+import java.util.Collections
+import java.util.Objects
+import kotlin.jvm.optionals.getOrNull
+
+class CompactedResponse
+@JsonCreator(mode = JsonCreator.Mode.DISABLED)
+private constructor(
+ private val id: JsonField<String>,
+ private val createdAt: JsonField<Long>,
+ private val object_: JsonValue,
+ private val output: JsonField>,
+ private val usage: JsonField<ResponseUsage>,
+ private val additionalProperties: MutableMap<String, JsonValue>,
+) {
+
+ @JsonCreator
+ private constructor(
+ @JsonProperty("id") @ExcludeMissing id: JsonField = JsonMissing.of(),
+ @JsonProperty("created_at") @ExcludeMissing createdAt: JsonField = JsonMissing.of(),
+ @JsonProperty("object") @ExcludeMissing object_: JsonValue = JsonMissing.of(),
+ @JsonProperty("output")
+ @ExcludeMissing
+ output: JsonField> = JsonMissing.of(),
+ @JsonProperty("usage") @ExcludeMissing usage: JsonField = JsonMissing.of(),
+ ) : this(id, createdAt, object_, output, usage, mutableMapOf())
+
+ /**
+ * The unique identifier for the compacted response.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun id(): String = id.getRequired("id")
+
+ /**
+ * Unix timestamp (in seconds) when the compacted conversation was created.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun createdAt(): Long = createdAt.getRequired("created_at")
+
+ /**
+ * The object type. Always `response.compaction`.
+ *
+ * Expected to always return the following:
+ * ```java
+ * JsonValue.from("response.compaction")
+ * ```
+ *
+ * However, this method can be useful for debugging and logging (e.g. if the server responded
+ * with an unexpected value).
+ */
+ @JsonProperty("object") @ExcludeMissing fun _object_(): JsonValue = object_
+
+ /**
+ * The compacted list of output items. This is a list of all user messages, followed by a single
+ * compaction item.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun output(): List<ResponseOutputItem> = output.getRequired("output")
+
+ /**
+ * Token accounting for the compaction pass, including cached, reasoning, and total tokens.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun usage(): ResponseUsage = usage.getRequired("usage")
+
+ /**
+ * Returns the raw JSON value of [id].
+ *
+ * Unlike [id], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("id") @ExcludeMissing fun _id(): JsonField = id
+
+ /**
+ * Returns the raw JSON value of [createdAt].
+ *
+ * Unlike [createdAt], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("created_at") @ExcludeMissing fun _createdAt(): JsonField = createdAt
+
+ /**
+ * Returns the raw JSON value of [output].
+ *
+ * Unlike [output], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("output")
+ @ExcludeMissing
+ fun _output(): JsonField<List<ResponseOutputItem>> = output
+
+ /**
+ * Returns the raw JSON value of [usage].
+ *
+ * Unlike [usage], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("usage") @ExcludeMissing fun _usage(): JsonField = usage
+
+ @JsonAnySetter
+ private fun putAdditionalProperty(key: String, value: JsonValue) {
+ additionalProperties.put(key, value)
+ }
+
+ @JsonAnyGetter
+ @ExcludeMissing
+ fun _additionalProperties(): Map<String, JsonValue> =
+ Collections.unmodifiableMap(additionalProperties)
+
+ fun toBuilder() = Builder().from(this)
+
+ companion object {
+
+ /**
+ * Returns a mutable builder for constructing an instance of [CompactedResponse].
+ *
+ * The following fields are required:
+ * ```java
+ * .id()
+ * .createdAt()
+ * .output()
+ * .usage()
+ * ```
+ */
+ @JvmStatic fun builder() = Builder()
+ }
+
+ /** A builder for [CompactedResponse]. */
+ class Builder internal constructor() {
+
+ private var id: JsonField<String>? = null
+ private var createdAt: JsonField<Long>? = null
+ private var object_: JsonValue = JsonValue.from("response.compaction")
+ private var output: JsonField<MutableList<ResponseOutputItem>>? = null
+ private var usage: JsonField<ResponseUsage>? = null
+ private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
+
+ @JvmSynthetic
+ internal fun from(compactedResponse: CompactedResponse) = apply {
+ id = compactedResponse.id
+ createdAt = compactedResponse.createdAt
+ object_ = compactedResponse.object_
+ output = compactedResponse.output.map { it.toMutableList() }
+ usage = compactedResponse.usage
+ additionalProperties = compactedResponse.additionalProperties.toMutableMap()
+ }
+
+ /** The unique identifier for the compacted response. */
+ fun id(id: String) = id(JsonField.of(id))
+
+ /**
+ * Sets [Builder.id] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.id] with a well-typed [String] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun id(id: JsonField<String>) = apply { this.id = id }
+
+ /** Unix timestamp (in seconds) when the compacted conversation was created. */
+ fun createdAt(createdAt: Long) = createdAt(JsonField.of(createdAt))
+
+ /**
+ * Sets [Builder.createdAt] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.createdAt] with a well-typed [Long] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun createdAt(createdAt: JsonField<Long>) = apply { this.createdAt = createdAt }
+
+ /**
+ * Sets the field to an arbitrary JSON value.
+ *
+ * It is usually unnecessary to call this method because the field defaults to the
+ * following:
+ * ```java
+ * JsonValue.from("response.compaction")
+ * ```
+ *
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun object_(object_: JsonValue) = apply { this.object_ = object_ }
+
+ /**
+ * The compacted list of output items. This is a list of all user messages, followed by a
+ * single compaction item.
+ */
+ fun output(output: List<ResponseOutputItem>) = output(JsonField.of(output))
+
+ /**
+ * Sets [Builder.output] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.output] with a well-typed `List<ResponseOutputItem>`
+ * value instead. This method is primarily for setting the field to an undocumented or not
+ * yet supported value.
+ */
+ fun output(output: JsonField<List<ResponseOutputItem>>) = apply {
+ this.output = output.map { it.toMutableList() }
+ }
+
+ /**
+ * Adds a single [ResponseOutputItem] to [Builder.output].
+ *
+ * @throws IllegalStateException if the field was previously set to a non-list.
+ */
+ fun addOutput(output: ResponseOutputItem) = apply {
+ this.output =
+ (this.output ?: JsonField.of(mutableListOf())).also {
+ checkKnown("output", it).add(output)
+ }
+ }
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofMessage(message)`. */
+ fun addOutput(message: ResponseOutputMessage) =
+ addOutput(ResponseOutputItem.ofMessage(message))
+
+ /**
+ * Alias for calling [addOutput] with `ResponseOutputItem.ofFileSearchCall(fileSearchCall)`.
+ */
+ fun addOutput(fileSearchCall: ResponseFileSearchToolCall) =
+ addOutput(ResponseOutputItem.ofFileSearchCall(fileSearchCall))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofFunctionCall(functionCall)`. */
+ fun addOutput(functionCall: ResponseFunctionToolCall) =
+ addOutput(ResponseOutputItem.ofFunctionCall(functionCall))
+
+ /**
+ * Alias for calling [addOutput] with `ResponseOutputItem.ofWebSearchCall(webSearchCall)`.
+ */
+ fun addOutput(webSearchCall: ResponseFunctionWebSearch) =
+ addOutput(ResponseOutputItem.ofWebSearchCall(webSearchCall))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofComputerCall(computerCall)`. */
+ fun addOutput(computerCall: ResponseComputerToolCall) =
+ addOutput(ResponseOutputItem.ofComputerCall(computerCall))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofReasoning(reasoning)`. */
+ fun addOutput(reasoning: ResponseReasoningItem) =
+ addOutput(ResponseOutputItem.ofReasoning(reasoning))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofCompaction(compaction)`. */
+ fun addOutput(compaction: ResponseCompactionItem) =
+ addOutput(ResponseOutputItem.ofCompaction(compaction))
+
+ /**
+ * Alias for calling [addOutput] with
+ * `ResponseOutputItem.ofImageGenerationCall(imageGenerationCall)`.
+ */
+ fun addOutput(imageGenerationCall: ResponseOutputItem.ImageGenerationCall) =
+ addOutput(ResponseOutputItem.ofImageGenerationCall(imageGenerationCall))
+
+ /**
+ * Alias for calling [addOutput] with
+ * `ResponseOutputItem.ofCodeInterpreterCall(codeInterpreterCall)`.
+ */
+ fun addOutput(codeInterpreterCall: ResponseCodeInterpreterToolCall) =
+ addOutput(ResponseOutputItem.ofCodeInterpreterCall(codeInterpreterCall))
+
+ /**
+ * Alias for calling [addOutput] with `ResponseOutputItem.ofLocalShellCall(localShellCall)`.
+ */
+ fun addOutput(localShellCall: ResponseOutputItem.LocalShellCall) =
+ addOutput(ResponseOutputItem.ofLocalShellCall(localShellCall))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofShellCall(shellCall)`. */
+ fun addOutput(shellCall: ResponseFunctionShellToolCall) =
+ addOutput(ResponseOutputItem.ofShellCall(shellCall))
+
+ /**
+ * Alias for calling [addOutput] with
+ * `ResponseOutputItem.ofShellCallOutput(shellCallOutput)`.
+ */
+ fun addOutput(shellCallOutput: ResponseFunctionShellToolCallOutput) =
+ addOutput(ResponseOutputItem.ofShellCallOutput(shellCallOutput))
+
+ /**
+ * Alias for calling [addOutput] with `ResponseOutputItem.ofApplyPatchCall(applyPatchCall)`.
+ */
+ fun addOutput(applyPatchCall: ResponseApplyPatchToolCall) =
+ addOutput(ResponseOutputItem.ofApplyPatchCall(applyPatchCall))
+
+ /**
+ * Alias for calling [addOutput] with
+ * `ResponseOutputItem.ofApplyPatchCallOutput(applyPatchCallOutput)`.
+ */
+ fun addOutput(applyPatchCallOutput: ResponseApplyPatchToolCallOutput) =
+ addOutput(ResponseOutputItem.ofApplyPatchCallOutput(applyPatchCallOutput))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofMcpCall(mcpCall)`. */
+ fun addOutput(mcpCall: ResponseOutputItem.McpCall) =
+ addOutput(ResponseOutputItem.ofMcpCall(mcpCall))
+
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofMcpListTools(mcpListTools)`. */
+ fun addOutput(mcpListTools: ResponseOutputItem.McpListTools) =
+ addOutput(ResponseOutputItem.ofMcpListTools(mcpListTools))
+
+ /**
+ * Alias for calling [addOutput] with
+ * `ResponseOutputItem.ofMcpApprovalRequest(mcpApprovalRequest)`.
+ */
+ fun addOutput(mcpApprovalRequest: ResponseOutputItem.McpApprovalRequest) =
+ addOutput(ResponseOutputItem.ofMcpApprovalRequest(mcpApprovalRequest))
+
+ /**
+ * Alias for calling [addOutput] with `ResponseOutputItem.ofCustomToolCall(customToolCall)`.
+ */
+ fun addOutput(customToolCall: ResponseCustomToolCall) =
+ addOutput(ResponseOutputItem.ofCustomToolCall(customToolCall))
+
+ /**
+ * Token accounting for the compaction pass, including cached, reasoning, and total tokens.
+ */
+ fun usage(usage: ResponseUsage) = usage(JsonField.of(usage))
+
+ /**
+ * Sets [Builder.usage] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.usage] with a well-typed [ResponseUsage] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun usage(usage: JsonField<ResponseUsage>) = apply { this.usage = usage }
+
+ fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
+ this.additionalProperties.clear()
+ putAllAdditionalProperties(additionalProperties)
+ }
+
+ fun putAdditionalProperty(key: String, value: JsonValue) = apply {
+ additionalProperties.put(key, value)
+ }
+
+ fun putAllAdditionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
+ this.additionalProperties.putAll(additionalProperties)
+ }
+
+ fun removeAdditionalProperty(key: String) = apply { additionalProperties.remove(key) }
+
+ fun removeAllAdditionalProperties(keys: Set<String>) = apply {
+ keys.forEach(::removeAdditionalProperty)
+ }
+
+ /**
+ * Returns an immutable instance of [CompactedResponse].
+ *
+ * Further updates to this [Builder] will not mutate the returned instance.
+ *
+ * The following fields are required:
+ * ```java
+ * .id()
+ * .createdAt()
+ * .output()
+ * .usage()
+ * ```
+ *
+ * @throws IllegalStateException if any required field is unset.
+ */
+ fun build(): CompactedResponse =
+ CompactedResponse(
+ checkRequired("id", id),
+ checkRequired("createdAt", createdAt),
+ object_,
+ checkRequired("output", output).map { it.toImmutable() },
+ checkRequired("usage", usage),
+ additionalProperties.toMutableMap(),
+ )
+ }
+
+ private var validated: Boolean = false
+
+ fun validate(): CompactedResponse = apply {
+ if (validated) {
+ return@apply
+ }
+
+ id()
+ createdAt()
+ _object_().let {
+ if (it != JsonValue.from("response.compaction")) {
+ throw OpenAIInvalidDataException("'object_' is invalid, received $it")
+ }
+ }
+ output().forEach { it.validate() }
+ usage().validate()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic
+ internal fun validity(): Int =
+ (if (id.asKnown().isPresent) 1 else 0) +
+ (if (createdAt.asKnown().isPresent) 1 else 0) +
+ object_.let { if (it == JsonValue.from("response.compaction")) 1 else 0 } +
+ (output.asKnown().getOrNull()?.sumOf { it.validity().toInt() } ?: 0) +
+ (usage.asKnown().getOrNull()?.validity() ?: 0)
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is CompactedResponse &&
+ id == other.id &&
+ createdAt == other.createdAt &&
+ object_ == other.object_ &&
+ output == other.output &&
+ usage == other.usage &&
+ additionalProperties == other.additionalProperties
+ }
+
+ private val hashCode: Int by lazy {
+ Objects.hash(id, createdAt, object_, output, usage, additionalProperties)
+ }
+
+ override fun hashCode(): Int = hashCode
+
+ override fun toString() =
+ "CompactedResponse{id=$id, createdAt=$createdAt, object_=$object_, output=$output, usage=$usage, additionalProperties=$additionalProperties}"
+}
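
As a rough usage sketch for the new `CompactedResponse` model (normally the SDK deserializes it from the `responses/compact` endpoint rather than having callers build it by hand), the builder requires `id`, `createdAt`, `output`, and `usage`. The `usage` and `compaction` values below are assumed to already exist; the identifier and timestamp are illustrative.

```java
// Minimal sketch, assuming an existing ResponseUsage `usage` and ResponseCompactionItem `compaction`.
CompactedResponse compacted = CompactedResponse.builder()
    .id("resp_compact_123")   // illustrative identifier
    .createdAt(1733300000L)   // Unix timestamp in seconds
    .addOutput(compaction)    // compacted output: user messages followed by a compaction item
    .usage(usage)             // token accounting for the compaction pass
    .build();
```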
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/Response.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/Response.kt
index b4d19f09..aa63a0fa 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/responses/Response.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/Response.kt
@@ -1089,6 +1089,10 @@ private constructor(
fun addOutput(reasoning: ResponseReasoningItem) =
addOutput(ResponseOutputItem.ofReasoning(reasoning))
+ /** Alias for calling [addOutput] with `ResponseOutputItem.ofCompaction(compaction)`. */
+ fun addOutput(compaction: ResponseCompactionItem) =
+ addOutput(ResponseOutputItem.ofCompaction(compaction))
+
/**
* Alias for calling [addOutput] with
* `ResponseOutputItem.ofImageGenerationCall(imageGenerationCall)`.
@@ -2578,7 +2582,7 @@ private constructor(
/** Forces the model to call the apply_patch tool when executing a tool call. */
fun applyPatch(): Optional<ToolChoiceApplyPatch> = Optional.ofNullable(applyPatch)
- /** Forces the model to call the function shell tool when a tool call is required. */
+ /** Forces the model to call the shell tool when a tool call is required. */
fun shell(): Optional<ToolChoiceShell> = Optional.ofNullable(shell)
fun isOptions(): Boolean = options != null
@@ -2630,7 +2634,7 @@ private constructor(
/** Forces the model to call the apply_patch tool when executing a tool call. */
fun asApplyPatch(): ToolChoiceApplyPatch = applyPatch.getOrThrow("applyPatch")
- /** Forces the model to call the function shell tool when a tool call is required. */
+ /** Forces the model to call the shell tool when a tool call is required. */
fun asShell(): ToolChoiceShell = shell.getOrThrow("shell")
fun _json(): Optional<JsonValue> = Optional.ofNullable(_json)
@@ -2804,7 +2808,7 @@ private constructor(
@JvmStatic
fun ofApplyPatch(applyPatch: ToolChoiceApplyPatch) = ToolChoice(applyPatch = applyPatch)
- /** Forces the model to call the function shell tool when a tool call is required. */
+ /** Forces the model to call the shell tool when a tool call is required. */
@JvmStatic fun ofShell(shell: ToolChoiceShell) = ToolChoice(shell = shell)
}
@@ -2848,7 +2852,7 @@ private constructor(
/** Forces the model to call the apply_patch tool when executing a tool call. */
fun visitApplyPatch(applyPatch: ToolChoiceApplyPatch): T
- /** Forces the model to call the function shell tool when a tool call is required. */
+ /** Forces the model to call the shell tool when a tool call is required. */
fun visitShell(shell: ToolChoiceShell): T
/**
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCall.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCall.kt
index bf3e8167..b406fb42 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCall.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCall.kt
@@ -34,10 +34,10 @@ class ResponseApplyPatchToolCall
private constructor(
private val id: JsonField<String>,
private val callId: JsonField<String>,
+ private val operation: JsonField<Operation>,
private val status: JsonField<Status>,
private val type: JsonValue,
private val createdBy: JsonField<String>,
- private val operation: JsonField<Operation>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -45,13 +45,13 @@ private constructor(
private constructor(
@JsonProperty("id") @ExcludeMissing id: JsonField<String> = JsonMissing.of(),
@JsonProperty("call_id") @ExcludeMissing callId: JsonField<String> = JsonMissing.of(),
- @JsonProperty("status") @ExcludeMissing status: JsonField<Status> = JsonMissing.of(),
- @JsonProperty("type") @ExcludeMissing type: JsonValue = JsonMissing.of(),
- @JsonProperty("created_by") @ExcludeMissing createdBy: JsonField<String> = JsonMissing.of(),
@JsonProperty("operation")
@ExcludeMissing
operation: JsonField<Operation> = JsonMissing.of(),
- ) : this(id, callId, status, type, createdBy, operation, mutableMapOf())
+ @JsonProperty("status") @ExcludeMissing status: JsonField<Status> = JsonMissing.of(),
+ @JsonProperty("type") @ExcludeMissing type: JsonValue = JsonMissing.of(),
+ @JsonProperty("created_by") @ExcludeMissing createdBy: JsonField<String> = JsonMissing.of(),
+ ) : this(id, callId, operation, status, type, createdBy, mutableMapOf())
/**
* The unique ID of the apply patch tool call. Populated when this item is returned via API.
@@ -69,6 +69,14 @@ private constructor(
*/
fun callId(): String = callId.getRequired("call_id")
+ /**
+ * One of the create_file, delete_file, or update_file operations applied via apply_patch.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type or is
+ * unexpectedly missing or null (e.g. if the server responded with an unexpected value).
+ */
+ fun operation(): Operation = operation.getRequired("operation")
+
/**
* The status of the apply patch tool call. One of `in_progress` or `completed`.
*
@@ -98,14 +106,6 @@ private constructor(
*/
fun createdBy(): Optional<String> = createdBy.getOptional("created_by")
- /**
- * One of the create_file, delete_file, or update_file operations applied via apply_patch.
- *
- * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
- * server responded with an unexpected value).
- */
- fun operation(): Optional<Operation> = operation.getOptional("operation")
-
/**
* Returns the raw JSON value of [id].
*
@@ -120,6 +120,13 @@ private constructor(
*/
@JsonProperty("call_id") @ExcludeMissing fun _callId(): JsonField = callId
+ /**
+ * Returns the raw JSON value of [operation].
+ *
+ * Unlike [operation], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("operation") @ExcludeMissing fun _operation(): JsonField = operation
+
/**
* Returns the raw JSON value of [status].
*
@@ -134,13 +141,6 @@ private constructor(
*/
@JsonProperty("created_by") @ExcludeMissing fun _createdBy(): JsonField = createdBy
- /**
- * Returns the raw JSON value of [operation].
- *
- * Unlike [operation], this method doesn't throw if the JSON field has an unexpected type.
- */
- @JsonProperty("operation") @ExcludeMissing fun _operation(): JsonField = operation
-
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -162,6 +162,7 @@ private constructor(
* ```java
* .id()
* .callId()
+ * .operation()
* .status()
* ```
*/
@@ -173,20 +174,20 @@ private constructor(
private var id: JsonField<String>? = null
private var callId: JsonField<String>? = null
+ private var operation: JsonField<Operation>? = null
private var status: JsonField<Status>? = null
private var type: JsonValue = JsonValue.from("apply_patch_call")
private var createdBy: JsonField<String> = JsonMissing.of()
- private var operation: JsonField<Operation> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
internal fun from(responseApplyPatchToolCall: ResponseApplyPatchToolCall) = apply {
id = responseApplyPatchToolCall.id
callId = responseApplyPatchToolCall.callId
+ operation = responseApplyPatchToolCall.operation
status = responseApplyPatchToolCall.status
type = responseApplyPatchToolCall.type
createdBy = responseApplyPatchToolCall.createdBy
- operation = responseApplyPatchToolCall.operation
additionalProperties = responseApplyPatchToolCall.additionalProperties.toMutableMap()
}
@@ -214,43 +215,6 @@ private constructor(
*/
fun callId(callId: JsonField<String>) = apply { this.callId = callId }
- /** The status of the apply patch tool call. One of `in_progress` or `completed`. */
- fun status(status: Status) = status(JsonField.of(status))
-
- /**
- * Sets [Builder.status] to an arbitrary JSON value.
- *
- * You should usually call [Builder.status] with a well-typed [Status] value instead. This
- * method is primarily for setting the field to an undocumented or not yet supported value.
- */
- fun status(status: JsonField<Status>) = apply { this.status = status }
-
- /**
- * Sets the field to an arbitrary JSON value.
- *
- * It is usually unnecessary to call this method because the field defaults to the
- * following:
- * ```java
- * JsonValue.from("apply_patch_call")
- * ```
- *
- * This method is primarily for setting the field to an undocumented or not yet supported
- * value.
- */
- fun type(type: JsonValue) = apply { this.type = type }
-
- /** The ID of the entity that created this tool call. */
- fun createdBy(createdBy: String) = createdBy(JsonField.of(createdBy))
-
- /**
- * Sets [Builder.createdBy] to an arbitrary JSON value.
- *
- * You should usually call [Builder.createdBy] with a well-typed [String] value instead.
- * This method is primarily for setting the field to an undocumented or not yet supported
- * value.
- */
- fun createdBy(createdBy: JsonField<String>) = apply { this.createdBy = createdBy }
-
/**
* One of the create_file, delete_file, or update_file operations applied via apply_patch.
*/
@@ -288,6 +252,43 @@ private constructor(
fun operation(updateFile: Operation.UpdateFile) =
operation(Operation.ofUpdateFile(updateFile))
+ /** The status of the apply patch tool call. One of `in_progress` or `completed`. */
+ fun status(status: Status) = status(JsonField.of(status))
+
+ /**
+ * Sets [Builder.status] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.status] with a well-typed [Status] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun status(status: JsonField<Status>) = apply { this.status = status }
+
+ /**
+ * Sets the field to an arbitrary JSON value.
+ *
+ * It is usually unnecessary to call this method because the field defaults to the
+ * following:
+ * ```java
+ * JsonValue.from("apply_patch_call")
+ * ```
+ *
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun type(type: JsonValue) = apply { this.type = type }
+
+ /** The ID of the entity that created this tool call. */
+ fun createdBy(createdBy: String) = createdBy(JsonField.of(createdBy))
+
+ /**
+ * Sets [Builder.createdBy] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.createdBy] with a well-typed [String] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun createdBy(createdBy: JsonField<String>) = apply { this.createdBy = createdBy }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -316,6 +317,7 @@ private constructor(
* ```java
* .id()
* .callId()
+ * .operation()
* .status()
* ```
*
@@ -325,10 +327,10 @@ private constructor(
ResponseApplyPatchToolCall(
checkRequired("id", id),
checkRequired("callId", callId),
+ checkRequired("operation", operation),
checkRequired("status", status),
type,
createdBy,
- operation,
additionalProperties.toMutableMap(),
)
}
@@ -342,6 +344,7 @@ private constructor(
id()
callId()
+ operation().validate()
status().validate()
_type().let {
if (it != JsonValue.from("apply_patch_call")) {
@@ -349,7 +352,6 @@ private constructor(
}
}
createdBy()
- operation().ifPresent { it.validate() }
validated = true
}
@@ -370,136 +372,10 @@ private constructor(
internal fun validity(): Int =
(if (id.asKnown().isPresent) 1 else 0) +
(if (callId.asKnown().isPresent) 1 else 0) +
+ (operation.asKnown().getOrNull()?.validity() ?: 0) +
(status.asKnown().getOrNull()?.validity() ?: 0) +
type.let { if (it == JsonValue.from("apply_patch_call")) 1 else 0 } +
- (if (createdBy.asKnown().isPresent) 1 else 0) +
- (operation.asKnown().getOrNull()?.validity() ?: 0)
-
- /** The status of the apply patch tool call. One of `in_progress` or `completed`. */
- class Status @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
-
- /**
- * Returns this class instance's raw value.
- *
- * This is usually only useful if this instance was deserialized from data that doesn't
- * match any known member, and you want to know that value. For example, if the SDK is on an
- * older version than the API, then the API may respond with new members that the SDK is
- * unaware of.
- */
- @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
-
- companion object {
-
- @JvmField val IN_PROGRESS = of("in_progress")
-
- @JvmField val COMPLETED = of("completed")
-
- @JvmStatic fun of(value: String) = Status(JsonField.of(value))
- }
-
- /** An enum containing [Status]'s known values. */
- enum class Known {
- IN_PROGRESS,
- COMPLETED,
- }
-
- /**
- * An enum containing [Status]'s known values, as well as an [_UNKNOWN] member.
- *
- * An instance of [Status] can contain an unknown value in a couple of cases:
- * - It was deserialized from data that doesn't match any known member. For example, if the
- * SDK is on an older version than the API, then the API may respond with new members that
- * the SDK is unaware of.
- * - It was constructed with an arbitrary value using the [of] method.
- */
- enum class Value {
- IN_PROGRESS,
- COMPLETED,
- /** An enum member indicating that [Status] was instantiated with an unknown value. */
- _UNKNOWN,
- }
-
- /**
- * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
- * if the class was instantiated with an unknown value.
- *
- * Use the [known] method instead if you're certain the value is always known or if you want
- * to throw for the unknown case.
- */
- fun value(): Value =
- when (this) {
- IN_PROGRESS -> Value.IN_PROGRESS
- COMPLETED -> Value.COMPLETED
- else -> Value._UNKNOWN
- }
-
- /**
- * Returns an enum member corresponding to this class instance's value.
- *
- * Use the [value] method instead if you're uncertain the value is always known and don't
- * want to throw for the unknown case.
- *
- * @throws OpenAIInvalidDataException if this class instance's value is a not a known
- * member.
- */
- fun known(): Known =
- when (this) {
- IN_PROGRESS -> Known.IN_PROGRESS
- COMPLETED -> Known.COMPLETED
- else -> throw OpenAIInvalidDataException("Unknown Status: $value")
- }
-
- /**
- * Returns this class instance's primitive wire representation.
- *
- * This differs from the [toString] method because that method is primarily for debugging
- * and generally doesn't throw.
- *
- * @throws OpenAIInvalidDataException if this class instance's value does not have the
- * expected primitive type.
- */
- fun asString(): String =
- _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
-
- private var validated: Boolean = false
-
- fun validate(): Status = apply {
- if (validated) {
- return@apply
- }
-
- known()
- validated = true
- }
-
- fun isValid(): Boolean =
- try {
- validate()
- true
- } catch (e: OpenAIInvalidDataException) {
- false
- }
-
- /**
- * Returns a score indicating how many valid values are contained in this object
- * recursively.
- *
- * Used for best match union deserialization.
- */
- @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
-
- override fun equals(other: Any?): Boolean {
- if (this === other) {
- return true
- }
-
- return other is Status && value == other.value
- }
-
- override fun hashCode() = value.hashCode()
-
- override fun toString() = value.toString()
- }
+ (if (createdBy.asKnown().isPresent) 1 else 0)
/** One of the create_file, delete_file, or update_file operations applied via apply_patch. */
@JsonDeserialize(using = Operation.Deserializer::class)
@@ -1403,6 +1279,132 @@ private constructor(
}
}
+ /** The status of the apply patch tool call. One of `in_progress` or `completed`. */
+ class Status @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
+
+ /**
+ * Returns this class instance's raw value.
+ *
+ * This is usually only useful if this instance was deserialized from data that doesn't
+ * match any known member, and you want to know that value. For example, if the SDK is on an
+ * older version than the API, then the API may respond with new members that the SDK is
+ * unaware of.
+ */
+ @com.fasterxml.jackson.annotation.JsonValue fun _value(): JsonField<String> = value
+
+ companion object {
+
+ @JvmField val IN_PROGRESS = of("in_progress")
+
+ @JvmField val COMPLETED = of("completed")
+
+ @JvmStatic fun of(value: String) = Status(JsonField.of(value))
+ }
+
+ /** An enum containing [Status]'s known values. */
+ enum class Known {
+ IN_PROGRESS,
+ COMPLETED,
+ }
+
+ /**
+ * An enum containing [Status]'s known values, as well as an [_UNKNOWN] member.
+ *
+ * An instance of [Status] can contain an unknown value in a couple of cases:
+ * - It was deserialized from data that doesn't match any known member. For example, if the
+ * SDK is on an older version than the API, then the API may respond with new members that
+ * the SDK is unaware of.
+ * - It was constructed with an arbitrary value using the [of] method.
+ */
+ enum class Value {
+ IN_PROGRESS,
+ COMPLETED,
+ /** An enum member indicating that [Status] was instantiated with an unknown value. */
+ _UNKNOWN,
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value, or [Value._UNKNOWN]
+ * if the class was instantiated with an unknown value.
+ *
+ * Use the [known] method instead if you're certain the value is always known or if you want
+ * to throw for the unknown case.
+ */
+ fun value(): Value =
+ when (this) {
+ IN_PROGRESS -> Value.IN_PROGRESS
+ COMPLETED -> Value.COMPLETED
+ else -> Value._UNKNOWN
+ }
+
+ /**
+ * Returns an enum member corresponding to this class instance's value.
+ *
+ * Use the [value] method instead if you're uncertain the value is always known and don't
+ * want to throw for the unknown case.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value is a not a known
+ * member.
+ */
+ fun known(): Known =
+ when (this) {
+ IN_PROGRESS -> Known.IN_PROGRESS
+ COMPLETED -> Known.COMPLETED
+ else -> throw OpenAIInvalidDataException("Unknown Status: $value")
+ }
+
+ /**
+ * Returns this class instance's primitive wire representation.
+ *
+ * This differs from the [toString] method because that method is primarily for debugging
+ * and generally doesn't throw.
+ *
+ * @throws OpenAIInvalidDataException if this class instance's value does not have the
+ * expected primitive type.
+ */
+ fun asString(): String =
+ _value().asString().orElseThrow { OpenAIInvalidDataException("Value is not a String") }
+
+ private var validated: Boolean = false
+
+ fun validate(): Status = apply {
+ if (validated) {
+ return@apply
+ }
+
+ known()
+ validated = true
+ }
+
+ fun isValid(): Boolean =
+ try {
+ validate()
+ true
+ } catch (e: OpenAIInvalidDataException) {
+ false
+ }
+
+ /**
+ * Returns a score indicating how many valid values are contained in this object
+ * recursively.
+ *
+ * Used for best match union deserialization.
+ */
+ @JvmSynthetic internal fun validity(): Int = if (value() == Value._UNKNOWN) 0 else 1
+
+ override fun equals(other: Any?): Boolean {
+ if (this === other) {
+ return true
+ }
+
+ return other is Status && value == other.value
+ }
+
+ override fun hashCode() = value.hashCode()
+
+ override fun toString() = value.toString()
+ }
+
override fun equals(other: Any?): Boolean {
if (this === other) {
return true
@@ -1411,19 +1413,19 @@ private constructor(
return other is ResponseApplyPatchToolCall &&
id == other.id &&
callId == other.callId &&
+ operation == other.operation &&
status == other.status &&
type == other.type &&
createdBy == other.createdBy &&
- operation == other.operation &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(id, callId, status, type, createdBy, operation, additionalProperties)
+ Objects.hash(id, callId, operation, status, type, createdBy, additionalProperties)
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "ResponseApplyPatchToolCall{id=$id, callId=$callId, status=$status, type=$type, createdBy=$createdBy, operation=$operation, additionalProperties=$additionalProperties}"
+ "ResponseApplyPatchToolCall{id=$id, callId=$callId, operation=$operation, status=$status, type=$type, createdBy=$createdBy, additionalProperties=$additionalProperties}"
}
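
Beyond the field reordering, the change above makes `operation` a required field of `ResponseApplyPatchToolCall`, so callers no longer unwrap an `Optional`. A minimal sketch of reading a deserialized tool call (the `call` variable is assumed):

```java
// Before this change: Optional<ResponseApplyPatchToolCall.Operation> op = call.operation();
// After: the operation is required and returned directly.
ResponseApplyPatchToolCall.Operation operation = call.operation();
ResponseApplyPatchToolCall.Status status = call.status();
```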
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCallOutput.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCallOutput.kt
index 0956f691..86d82ee4 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCallOutput.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseApplyPatchToolCallOutput.kt
@@ -24,10 +24,10 @@ class ResponseApplyPatchToolCallOutput
private constructor(
private val id: JsonField<String>,
private val callId: JsonField<String>,
- private val output: JsonField<String>,
private val status: JsonField<Status>,
private val type: JsonValue,
private val createdBy: JsonField<String>,
+ private val output: JsonField<String>,
private val additionalProperties: MutableMap<String, JsonValue>,
) {
@@ -35,11 +35,11 @@ private constructor(
private constructor(
@JsonProperty("id") @ExcludeMissing id: JsonField<String> = JsonMissing.of(),
@JsonProperty("call_id") @ExcludeMissing callId: JsonField<String> = JsonMissing.of(),
- @JsonProperty("output") @ExcludeMissing output: JsonField<String> = JsonMissing.of(),
@JsonProperty("status") @ExcludeMissing status: JsonField<Status> = JsonMissing.of(),
@JsonProperty("type") @ExcludeMissing type: JsonValue = JsonMissing.of(),
@JsonProperty("created_by") @ExcludeMissing createdBy: JsonField<String> = JsonMissing.of(),
- ) : this(id, callId, output, status, type, createdBy, mutableMapOf())
+ @JsonProperty("output") @ExcludeMissing output: JsonField<String> = JsonMissing.of(),
+ ) : this(id, callId, status, type, createdBy, output, mutableMapOf())
/**
* The unique ID of the apply patch tool call output. Populated when this item is returned via
@@ -58,14 +58,6 @@ private constructor(
*/
fun callId(): String = callId.getRequired("call_id")
- /**
- * Optional textual output returned by the apply patch tool.
- *
- * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
- * server responded with an unexpected value).
- */
- fun output(): Optional<String> = output.getOptional("output")
-
/**
* The status of the apply patch tool call output. One of `completed` or `failed`.
*
@@ -95,6 +87,14 @@ private constructor(
*/
fun createdBy(): Optional<String> = createdBy.getOptional("created_by")
+ /**
+ * Optional textual output returned by the apply patch tool.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun output(): Optional<String> = output.getOptional("output")
+
/**
* Returns the raw JSON value of [id].
*
@@ -109,13 +109,6 @@ private constructor(
*/
@JsonProperty("call_id") @ExcludeMissing fun _callId(): JsonField = callId
- /**
- * Returns the raw JSON value of [output].
- *
- * Unlike [output], this method doesn't throw if the JSON field has an unexpected type.
- */
- @JsonProperty("output") @ExcludeMissing fun _output(): JsonField = output
-
/**
* Returns the raw JSON value of [status].
*
@@ -130,6 +123,13 @@ private constructor(
*/
@JsonProperty("created_by") @ExcludeMissing fun _createdBy(): JsonField = createdBy
+ /**
+ * Returns the raw JSON value of [output].
+ *
+ * Unlike [output], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ @JsonProperty("output") @ExcludeMissing fun _output(): JsonField = output
+
@JsonAnySetter
private fun putAdditionalProperty(key: String, value: JsonValue) {
additionalProperties.put(key, value)
@@ -152,7 +152,6 @@ private constructor(
* ```java
* .id()
* .callId()
- * .output()
* .status()
* ```
*/
@@ -164,10 +163,10 @@ private constructor(
private var id: JsonField<String>? = null
private var callId: JsonField<String>? = null
- private var output: JsonField<String>? = null
private var status: JsonField<Status>? = null
private var type: JsonValue = JsonValue.from("apply_patch_call_output")
private var createdBy: JsonField<String> = JsonMissing.of()
+ private var output: JsonField<String> = JsonMissing.of()
private var additionalProperties: MutableMap<String, JsonValue> = mutableMapOf()
@JvmSynthetic
@@ -175,10 +174,10 @@ private constructor(
apply {
id = responseApplyPatchToolCallOutput.id
callId = responseApplyPatchToolCallOutput.callId
- output = responseApplyPatchToolCallOutput.output
status = responseApplyPatchToolCallOutput.status
type = responseApplyPatchToolCallOutput.type
createdBy = responseApplyPatchToolCallOutput.createdBy
+ output = responseApplyPatchToolCallOutput.output
additionalProperties =
responseApplyPatchToolCallOutput.additionalProperties.toMutableMap()
}
@@ -208,20 +207,6 @@ private constructor(
*/
fun callId(callId: JsonField<String>) = apply { this.callId = callId }
- /** Optional textual output returned by the apply patch tool. */
- fun output(output: String?) = output(JsonField.ofNullable(output))
-
- /** Alias for calling [Builder.output] with `output.orElse(null)`. */
- fun output(output: Optional<String>) = output(output.getOrNull())
-
- /**
- * Sets [Builder.output] to an arbitrary JSON value.
- *
- * You should usually call [Builder.output] with a well-typed [String] value instead. This
- * method is primarily for setting the field to an undocumented or not yet supported value.
- */
- fun output(output: JsonField<String>) = apply { this.output = output }
-
/** The status of the apply patch tool call output. One of `completed` or `failed`. */
fun status(status: Status) = status(JsonField.of(status))
@@ -259,6 +244,20 @@ private constructor(
*/
fun createdBy(createdBy: JsonField<String>) = apply { this.createdBy = createdBy }
+ /** Optional textual output returned by the apply patch tool. */
+ fun output(output: String?) = output(JsonField.ofNullable(output))
+
+ /** Alias for calling [Builder.output] with `output.orElse(null)`. */
+ fun output(output: Optional<String>) = output(output.getOrNull())
+
+ /**
+ * Sets [Builder.output] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.output] with a well-typed [String] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun output(output: JsonField<String>) = apply { this.output = output }
+
fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
this.additionalProperties.clear()
putAllAdditionalProperties(additionalProperties)
@@ -287,7 +286,6 @@ private constructor(
* ```java
* .id()
* .callId()
- * .output()
* .status()
* ```
*
@@ -297,10 +295,10 @@ private constructor(
ResponseApplyPatchToolCallOutput(
checkRequired("id", id),
checkRequired("callId", callId),
- checkRequired("output", output),
checkRequired("status", status),
type,
createdBy,
+ output,
additionalProperties.toMutableMap(),
)
}
@@ -314,7 +312,6 @@ private constructor(
id()
callId()
- output()
status().validate()
_type().let {
if (it != JsonValue.from("apply_patch_call_output")) {
@@ -322,6 +319,7 @@ private constructor(
}
}
createdBy()
+ output()
validated = true
}
@@ -342,10 +340,10 @@ private constructor(
internal fun validity(): Int =
(if (id.asKnown().isPresent) 1 else 0) +
(if (callId.asKnown().isPresent) 1 else 0) +
- (if (output.asKnown().isPresent) 1 else 0) +
(status.asKnown().getOrNull()?.validity() ?: 0) +
type.let { if (it == JsonValue.from("apply_patch_call_output")) 1 else 0 } +
- (if (createdBy.asKnown().isPresent) 1 else 0)
+ (if (createdBy.asKnown().isPresent) 1 else 0) +
+ (if (output.asKnown().isPresent) 1 else 0)
/** The status of the apply patch tool call output. One of `completed` or `failed`. */
class Status @JsonCreator private constructor(private val value: JsonField<String>) : Enum {
@@ -481,19 +479,19 @@ private constructor(
return other is ResponseApplyPatchToolCallOutput &&
id == other.id &&
callId == other.callId &&
- output == other.output &&
status == other.status &&
type == other.type &&
createdBy == other.createdBy &&
+ output == other.output &&
additionalProperties == other.additionalProperties
}
private val hashCode: Int by lazy {
- Objects.hash(id, callId, output, status, type, createdBy, additionalProperties)
+ Objects.hash(id, callId, status, type, createdBy, output, additionalProperties)
}
override fun hashCode(): Int = hashCode
override fun toString() =
- "ResponseApplyPatchToolCallOutput{id=$id, callId=$callId, output=$output, status=$status, type=$type, createdBy=$createdBy, additionalProperties=$additionalProperties}"
+ "ResponseApplyPatchToolCallOutput{id=$id, callId=$callId, status=$status, type=$type, createdBy=$createdBy, output=$output, additionalProperties=$additionalProperties}"
}
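
Conversely, the change above removes `output` from the required builder fields of `ResponseApplyPatchToolCallOutput`, so a minimal build now needs only `id`, `callId`, and `status`. A hedged sketch follows; the `COMPLETED` constant is assumed from the documented `completed`/`failed` values, and the IDs are illustrative.

```java
// Minimal sketch: `output` is no longer required and may be omitted or set to null.
ResponseApplyPatchToolCallOutput callOutput = ResponseApplyPatchToolCallOutput.builder()
    .id("apco_123")     // illustrative identifier
    .callId("call_123") // illustrative call ID
    .status(ResponseApplyPatchToolCallOutput.Status.COMPLETED)
    .build();
```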
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseCompactParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseCompactParams.kt
new file mode 100644
index 00000000..f042314c
--- /dev/null
+++ b/openai-java-core/src/main/kotlin/com/openai/models/responses/ResponseCompactParams.kt
@@ -0,0 +1,1526 @@
+// File generated from our OpenAPI spec by Stainless.
+
+package com.openai.models.responses
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter
+import com.fasterxml.jackson.annotation.JsonAnySetter
+import com.fasterxml.jackson.annotation.JsonCreator
+import com.fasterxml.jackson.annotation.JsonProperty
+import com.fasterxml.jackson.core.JsonGenerator
+import com.fasterxml.jackson.core.ObjectCodec
+import com.fasterxml.jackson.databind.JsonNode
+import com.fasterxml.jackson.databind.SerializerProvider
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import com.fasterxml.jackson.databind.annotation.JsonSerialize
+import com.fasterxml.jackson.module.kotlin.jacksonTypeRef
+import com.openai.core.BaseDeserializer
+import com.openai.core.BaseSerializer
+import com.openai.core.Enum
+import com.openai.core.ExcludeMissing
+import com.openai.core.JsonField
+import com.openai.core.JsonMissing
+import com.openai.core.JsonValue
+import com.openai.core.Params
+import com.openai.core.allMaxBy
+import com.openai.core.getOrThrow
+import com.openai.core.http.Headers
+import com.openai.core.http.QueryParams
+import com.openai.core.toImmutable
+import com.openai.errors.OpenAIInvalidDataException
+import java.util.Collections
+import java.util.Objects
+import java.util.Optional
+import kotlin.jvm.optionals.getOrNull
+
+/** Compact conversation */
+class ResponseCompactParams
+private constructor(
+ private val body: Body,
+ private val additionalHeaders: Headers,
+ private val additionalQueryParams: QueryParams,
+) : Params {
+
+ /**
+ * Text, image, or file inputs to the model, used to generate a response
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun input(): Optional<Input> = body.input()
+
+ /**
+ * A system (or developer) message inserted into the model's context. When used along with
+ * `previous_response_id`, the instructions from a previous response will not be carried over to
+ * the next response. This makes it simple to swap out system (or developer) messages in new
+ * responses.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun instructions(): Optional<String> = body.instructions()
+
+ /**
+ * Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range of
+ * models with different capabilities, performance characteristics, and price points. Refer to
+ * the [model guide](https://platform.openai.com/docs/models) to browse and compare available
+ * models.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun model(): Optional<Model> = body.model()
+
+ /**
+ * The unique ID of the previous response to the model. Use this to create multi-turn
+ * conversations. Learn more about
+ * [conversation state](https://platform.openai.com/docs/guides/conversation-state). Cannot be
+ * used in conjunction with `conversation`.
+ *
+ * @throws OpenAIInvalidDataException if the JSON field has an unexpected type (e.g. if the
+ * server responded with an unexpected value).
+ */
+ fun previousResponseId(): Optional<String> = body.previousResponseId()
+
+ /**
+ * Returns the raw JSON value of [input].
+ *
+ * Unlike [input], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ fun _input(): JsonField<Input> = body._input()
+
+ /**
+ * Returns the raw JSON value of [instructions].
+ *
+ * Unlike [instructions], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ fun _instructions(): JsonField<String> = body._instructions()
+
+ /**
+ * Returns the raw JSON value of [model].
+ *
+ * Unlike [model], this method doesn't throw if the JSON field has an unexpected type.
+ */
+ fun _model(): JsonField<Model> = body._model()
+
+ /**
+ * Returns the raw JSON value of [previousResponseId].
+ *
+ * Unlike [previousResponseId], this method doesn't throw if the JSON field has an unexpected
+ * type.
+ */
+ fun _previousResponseId(): JsonField<String> = body._previousResponseId()
+
+ fun _additionalBodyProperties(): Map<String, JsonValue> = body._additionalProperties()
+
+ /** Additional headers to send with the request. */
+ fun _additionalHeaders(): Headers = additionalHeaders
+
+ /** Additional query param to send with the request. */
+ fun _additionalQueryParams(): QueryParams = additionalQueryParams
+
+ fun toBuilder() = Builder().from(this)
+
+ companion object {
+
+ @JvmStatic fun none(): ResponseCompactParams = builder().build()
+
+ /** Returns a mutable builder for constructing an instance of [ResponseCompactParams]. */
+ @JvmStatic fun builder() = Builder()
+ }
+
+ /** A builder for [ResponseCompactParams]. */
+ class Builder internal constructor() {
+
+ private var body: Body.Builder = Body.builder()
+ private var additionalHeaders: Headers.Builder = Headers.builder()
+ private var additionalQueryParams: QueryParams.Builder = QueryParams.builder()
+
+ @JvmSynthetic
+ internal fun from(responseCompactParams: ResponseCompactParams) = apply {
+ body = responseCompactParams.body.toBuilder()
+ additionalHeaders = responseCompactParams.additionalHeaders.toBuilder()
+ additionalQueryParams = responseCompactParams.additionalQueryParams.toBuilder()
+ }
+
+ /**
+ * Sets the entire request body.
+ *
+ * This is generally only useful if you are already constructing the body separately.
+ * Otherwise, it's more convenient to use the top-level setters instead:
+ * - [input]
+ * - [instructions]
+ * - [model]
+ * - [previousResponseId]
+ */
+ fun body(body: Body) = apply { this.body = body.toBuilder() }
+
+ /** Text, image, or file inputs to the model, used to generate a response */
+ fun input(input: Input?) = apply { body.input(input) }
+
+ /** Alias for calling [Builder.input] with `input.orElse(null)`. */
+ fun input(input: Optional<Input>) = input(input.getOrNull())
+
+ /**
+ * Sets [Builder.input] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.input] with a well-typed [Input] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun input(input: JsonField<Input>) = apply { body.input(input) }
+
+ /** Alias for calling [input] with `Input.ofString(string)`. */
+ fun input(string: String) = apply { body.input(string) }
+
+ /** Alias for calling [input] with `Input.ofResponseInputItems(responseInputItems)`. */
+ fun inputOfResponseItems(responseInputItems: List<ResponseInputItem>) = apply {
+ body.inputOfResponseInputItems(responseInputItems)
+ }
+
+ /**
+ * A system (or developer) message inserted into the model's context. When used along with
+ * `previous_response_id`, the instructions from a previous response will not be carried
+ * over to the next response. This makes it simple to swap out system (or developer)
+ * messages in new responses.
+ */
+ fun instructions(instructions: String?) = apply { body.instructions(instructions) }
+
+ /** Alias for calling [Builder.instructions] with `instructions.orElse(null)`. */
+ fun instructions(instructions: Optional<String>) = instructions(instructions.getOrNull())
+
+ /**
+ * Sets [Builder.instructions] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.instructions] with a well-typed [String] value instead.
+ * This method is primarily for setting the field to an undocumented or not yet supported
+ * value.
+ */
+ fun instructions(instructions: JsonField<String>) = apply {
+ body.instructions(instructions)
+ }
+
+ /**
+ * Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wide range
+ * of models with different capabilities, performance characteristics, and price points.
+ * Refer to the [model guide](https://platform.openai.com/docs/models) to browse and compare
+ * available models.
+ */
+ fun model(model: Model?) = apply { body.model(model) }
+
+ /** Alias for calling [Builder.model] with `model.orElse(null)`. */
+ fun model(model: Optional<Model>) = model(model.getOrNull())
+
+ /**
+ * Sets [Builder.model] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.model] with a well-typed [Model] value instead. This
+ * method is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(model: JsonField<Model>) = apply { body.model(model) }
+
+ /**
+ * Sets [model] to an arbitrary [String].
+ *
+ * You should usually call [model] with a well-typed [Model] constant instead. This method
+ * is primarily for setting the field to an undocumented or not yet supported value.
+ */
+ fun model(value: String) = apply { body.model(value) }
+
+ /**
+ * The unique ID of the previous response to the model. Use this to create multi-turn
+ * conversations. Learn more about
+ * [conversation state](https://platform.openai.com/docs/guides/conversation-state). Cannot
+ * be used in conjunction with `conversation`.
+ */
+ fun previousResponseId(previousResponseId: String?) = apply {
+ body.previousResponseId(previousResponseId)
+ }
+
+ /**
+ * Alias for calling [Builder.previousResponseId] with `previousResponseId.orElse(null)`.
+ */
+ fun previousResponseId(previousResponseId: Optional<String>) =
+ previousResponseId(previousResponseId.getOrNull())
+
+ /**
+ * Sets [Builder.previousResponseId] to an arbitrary JSON value.
+ *
+ * You should usually call [Builder.previousResponseId] with a well-typed [String] value
+ * instead. This method is primarily for setting the field to an undocumented or not yet
+ * supported value.
+ */
+ fun previousResponseId(previousResponseId: JsonField<String>) = apply {
+ body.previousResponseId(previousResponseId)
+ }
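+
+ // Putting the top-level setters above together, a hedged usage sketch (not part of the
+ // generated file): compact the conversation behind an earlier response. The model ID and
+ // previous response ID below are illustrative.
+ //
+ // ResponseCompactParams params = ResponseCompactParams.builder()
+ //     .model("gpt-5.1-codex-max")
+ //     .previousResponseId("resp_abc123")
+ //     .instructions("Summarize the conversation so far.")
+ //     .build();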
+
+ fun additionalBodyProperties(additionalBodyProperties: Map<String, JsonValue>) = apply {
+ body.additionalProperties(additionalBodyProperties)
+ }
+
+ fun putAdditionalBodyProperty(key: String, value: JsonValue) = apply {
+ body.putAdditionalProperty(key, value)
+ }
+
+ fun putAllAdditionalBodyProperties(additionalBodyProperties: Map<String, JsonValue>) =
+ apply {
+ body.putAllAdditionalProperties(additionalBodyProperties)
+ }
+
+ fun removeAdditionalBodyProperty(key: String) = apply { body.removeAdditionalProperty(key) }
+
+ fun removeAllAdditionalBodyProperties(keys: Set<String>) = apply {
+ body.removeAllAdditionalProperties(keys)
+ }
+
+ fun additionalHeaders(additionalHeaders: Headers) = apply {
+ this.additionalHeaders.clear()
+ putAllAdditionalHeaders(additionalHeaders)
+ }
+
+ fun additionalHeaders(additionalHeaders: Map<String, Iterable<String>>) = apply {
+ this.additionalHeaders.clear()
+ putAllAdditionalHeaders(additionalHeaders)
+ }
+
+ fun putAdditionalHeader(name: String, value: String) = apply {
+ additionalHeaders.put(name, value)
+ }
+
+ fun putAdditionalHeaders(name: String, values: Iterable