diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 17a9c5d6e..7c53a1b7c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "6.8.1"
+ ".": "6.9.0"
}
diff --git a/.stats.yml b/.stats.yml
index d697cee16..38f260dd9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 135
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-eeba8addf3a5f412e5ce8d22031e60c61650cee3f5d9e587a2533f6818a249ea.yml
-openapi_spec_hash: 0a4d8ad2469823ce24a3fd94f23f1c2b
-config_hash: 0bb1941a78ece0b610a2fbba7d74a84c
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ca24bc4d8125b5153514ce643c4e3220f25971b7d67ca384d56d493c72c0d977.yml
+openapi_spec_hash: c6f048c7b3d29f4de48fde0e845ba33f
+config_hash: b876221dfb213df9f0a999e75d38a65e
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 41127d099..027db0155 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## 6.9.0 (2025-11-13)
+
+Full Changelog: [v6.8.1...v6.9.0](https://github.com/openai/openai-node/compare/v6.8.1...v6.9.0)
+
+### Features
+
+* **api:** gpt 5.1 ([58e78a8](https://github.com/openai/openai-node/commit/58e78a86899b43eb9afdb6f0afd60b912eae41f2))
+
+
+### Chores
+
+* add typescript-estree dependency for jsr readme script ([3759514](https://github.com/openai/openai-node/commit/37595149720edd7bf4c11760084cad7753c2f289))
+
## 6.8.1 (2025-11-05)
Full Changelog: [v6.8.0...v6.8.1](https://github.com/openai/openai-node/compare/v6.8.0...v6.8.1)
diff --git a/api.md b/api.md
index 55645f057..6da5a8f46 100644
--- a/api.md
+++ b/api.md
@@ -695,12 +695,16 @@ Methods:
Types:
+- ApplyPatchTool
- ComputerTool
- CustomTool
- EasyInputMessage
- FileSearchTool
+- FunctionShellTool
- FunctionTool
- Response
+- ResponseApplyPatchToolCall
+- ResponseApplyPatchToolCallOutput
- ResponseAudioDeltaEvent
- ResponseAudioDoneEvent
- ResponseAudioTranscriptDeltaEvent
@@ -737,6 +741,9 @@ Types:
- ResponseFunctionCallArgumentsDoneEvent
- ResponseFunctionCallOutputItem
- ResponseFunctionCallOutputItemList
+- ResponseFunctionShellCallOutputContent
+- ResponseFunctionShellToolCall
+- ResponseFunctionShellToolCallOutput
- ResponseFunctionToolCall
- ResponseFunctionToolCallItem
- ResponseFunctionToolCallOutputItem
@@ -799,10 +806,12 @@ Types:
- ResponseWebSearchCallSearchingEvent
- Tool
- ToolChoiceAllowed
+- ToolChoiceApplyPatch
- ToolChoiceCustom
- ToolChoiceFunction
- ToolChoiceMcp
- ToolChoiceOptions
+- ToolChoiceShell
- ToolChoiceTypes
- WebSearchPreviewTool
- WebSearchTool
diff --git a/jsr.json b/jsr.json
index cbf4cfe5a..eb46274a9 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
{
"name": "@openai/openai",
- "version": "6.8.1",
+ "version": "6.9.0",
"exports": {
".": "./index.ts",
"./helpers/zod": "./helpers/zod.ts",
diff --git a/package.json b/package.json
index 81533fe3d..5a7ef2e68 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "6.8.1",
+ "version": "6.9.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
@@ -37,6 +37,7 @@
"@typescript-eslint/eslint-plugin": "8.31.1",
"@typescript-eslint/parser": "8.31.1",
"deep-object-diff": "^1.1.9",
+ "@typescript-eslint/typescript-estree": "8.31.1",
"eslint": "^9.20.1",
"eslint-plugin-prettier": "^5.4.1",
"eslint-plugin-unused-imports": "^4.1.4",
diff --git a/src/resources/batches.ts b/src/resources/batches.ts
index ca3989ef4..c3aabd4db 100644
--- a/src/resources/batches.ts
+++ b/src/resources/batches.ts
@@ -287,11 +287,17 @@ export interface BatchCreateParams {
/**
* The endpoint to be used for all requests in the batch. Currently
- * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- * are supported. Note that `/v1/embeddings` batches are also restricted to a
- * maximum of 50,000 embedding inputs across all requests in the batch.
+ * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+ * and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ * restricted to a maximum of 50,000 embedding inputs across all requests in the
+ * batch.
*/
- endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
+ endpoint:
+ | '/v1/responses'
+ | '/v1/chat/completions'
+ | '/v1/embeddings'
+ | '/v1/completions'
+ | '/v1/moderations';
/**
* The ID of an uploaded file that contains requests for the new batch.
diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts
index eff0a66f8..3d38c5550 100644
--- a/src/resources/beta/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -1160,12 +1160,16 @@ export interface AssistantCreateParams {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1416,12 +1420,16 @@ export interface AssistantUpdateParams {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 91eaa4fa9..5e5954c84 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -725,12 +725,16 @@ export interface RunCreateParamsBase {
/**
* Body param: Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts
index 935bc0849..80e426bdf 100644
--- a/src/resources/chat/completions/completions.ts
+++ b/src/resources/chat/completions/completions.ts
@@ -1638,15 +1638,27 @@ export interface ChatCompletionCreateParamsBase {
*/
prompt_cache_key?: string;
+ /**
+ * The retention policy for the prompt cache. Set to `24h` to enable extended
+ * prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ * of 24 hours.
+ * [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ */
+ prompt_cache_retention?: 'in-memory' | '24h' | null;
+
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/conversations/items.ts b/src/resources/conversations/items.ts
index dc660573b..0e3ae0140 100644
--- a/src/resources/conversations/items.ts
+++ b/src/resources/conversations/items.ts
@@ -89,6 +89,10 @@ export type ConversationItem =
| ResponsesAPI.ResponseCodeInterpreterToolCall
| ConversationItem.LocalShellCall
| ConversationItem.LocalShellCallOutput
+ | ResponsesAPI.ResponseFunctionShellToolCall
+ | ResponsesAPI.ResponseFunctionShellToolCallOutput
+ | ResponsesAPI.ResponseApplyPatchToolCall
+ | ResponsesAPI.ResponseApplyPatchToolCallOutput
| ConversationItem.McpListTools
| ConversationItem.McpApprovalRequest
| ConversationItem.McpApprovalResponse
diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts
index f26a162f9..3b5a5847d 100644
--- a/src/resources/evals/runs/runs.ts
+++ b/src/resources/evals/runs/runs.ts
@@ -288,12 +288,16 @@ export namespace CreateEvalCompletionsRunDataSource {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -592,12 +596,16 @@ export namespace RunCreateResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -739,12 +747,16 @@ export namespace RunCreateResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1084,12 +1096,16 @@ export namespace RunRetrieveResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1231,12 +1247,16 @@ export namespace RunRetrieveResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1573,12 +1593,16 @@ export namespace RunListResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1720,12 +1744,16 @@ export namespace RunListResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -2073,12 +2101,16 @@ export namespace RunCancelResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -2220,12 +2252,16 @@ export namespace RunCancelResponse {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -2512,12 +2548,16 @@ export namespace RunCreateParams {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -2659,12 +2699,16 @@ export namespace RunCreateParams {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/graders/grader-models.ts b/src/resources/graders/grader-models.ts
index e374a084a..9cb8c16c4 100644
--- a/src/resources/graders/grader-models.ts
+++ b/src/resources/graders/grader-models.ts
@@ -279,12 +279,16 @@ export namespace ScoreModelGrader {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/responses/input-tokens.ts b/src/resources/responses/input-tokens.ts
index da7b3079a..c3826ce1e 100644
--- a/src/resources/responses/input-tokens.ts
+++ b/src/resources/responses/input-tokens.ts
@@ -100,6 +100,8 @@ export interface InputTokenCountParams {
| ResponsesAPI.ToolChoiceFunction
| ResponsesAPI.ToolChoiceMcp
| ResponsesAPI.ToolChoiceCustom
+ | ResponsesAPI.ToolChoiceApplyPatch
+ | ResponsesAPI.ToolChoiceShell
| null;
/**
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 87035f731..374744cbc 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -45,6 +45,10 @@ export type ParsedResponseOutputItem =
| ResponseOutputItem.ImageGenerationCall
| ResponseCodeInterpreterToolCall
| ResponseOutputItem.LocalShellCall
+ | ResponseFunctionShellToolCall
+ | ResponseFunctionShellToolCallOutput
+ | ResponseApplyPatchToolCall
+ | ResponseApplyPatchToolCallOutput
| ResponseOutputItem.McpCall
| ResponseOutputItem.McpListTools
| ResponseOutputItem.McpApprovalRequest
@@ -206,6 +210,16 @@ export class Responses extends APIResource {
export type ResponseItemsPage = CursorPage;
+/**
+ * Allows the assistant to create, delete, or update files using unified diffs.
+ */
+export interface ApplyPatchTool {
+ /**
+ * The type of the tool. Always `apply_patch`.
+ */
+ type: 'apply_patch';
+}
+
/**
* A tool that controls a virtual computer. Learn more about the
* [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
@@ -360,6 +374,16 @@ export namespace FileSearchTool {
}
}
+/**
+ * A tool that allows the model to execute shell commands.
+ */
+export interface FunctionShellTool {
+ /**
+ * The type of the shell tool. Always `shell`.
+ */
+ type: 'shell';
+}
+
/**
* Defines a function in your own code the model can choose to call. Learn more
* about
@@ -484,7 +508,9 @@ export interface Response {
| ToolChoiceTypes
| ToolChoiceFunction
| ToolChoiceMcp
- | ToolChoiceCustom;
+ | ToolChoiceCustom
+ | ToolChoiceApplyPatch
+ | ToolChoiceShell;
/**
* An array of tools the model may call while generating a response. You can
@@ -558,6 +584,14 @@ export interface Response {
*/
prompt_cache_key?: string;
+ /**
+ * The retention policy for the prompt cache. Set to `24h` to enable extended
+ * prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ * of 24 hours.
+ * [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ */
+ prompt_cache_retention?: 'in-memory' | '24h' | null;
+
/**
* **gpt-5 and o-series models only**
*
@@ -660,6 +694,139 @@ export namespace Response {
}
}
+/**
+ * A tool call that applies file diffs by creating, deleting, or updating files.
+ */
+export interface ResponseApplyPatchToolCall {
+ /**
+ * The unique ID of the apply patch tool call. Populated when this item is returned
+ * via API.
+ */
+ id: string;
+
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The status of the apply patch tool call. One of `in_progress` or `completed`.
+ */
+ status: 'in_progress' | 'completed';
+
+ /**
+ * The type of the item. Always `apply_patch_call`.
+ */
+ type: 'apply_patch_call';
+
+ /**
+ * The ID of the entity that created this tool call.
+ */
+ created_by?: string;
+
+ /**
+ * One of the create_file, delete_file, or update_file operations applied via
+ * apply_patch.
+ */
+ operation?:
+ | ResponseApplyPatchToolCall.CreateFile
+ | ResponseApplyPatchToolCall.DeleteFile
+ | ResponseApplyPatchToolCall.UpdateFile;
+}
+
+export namespace ResponseApplyPatchToolCall {
+ /**
+ * Instruction describing how to create a file via the apply_patch tool.
+ */
+ export interface CreateFile {
+ /**
+ * Diff to apply.
+ */
+ diff: string;
+
+ /**
+ * Path of the file to create.
+ */
+ path: string;
+
+ /**
+ * Create a new file with the provided diff.
+ */
+ type: 'create_file';
+ }
+
+ /**
+ * Instruction describing how to delete a file via the apply_patch tool.
+ */
+ export interface DeleteFile {
+ /**
+ * Path of the file to delete.
+ */
+ path: string;
+
+ /**
+ * Delete the specified file.
+ */
+ type: 'delete_file';
+ }
+
+ /**
+ * Instruction describing how to update a file via the apply_patch tool.
+ */
+ export interface UpdateFile {
+ /**
+ * Diff to apply.
+ */
+ diff: string;
+
+ /**
+ * Path of the file to update.
+ */
+ path: string;
+
+ /**
+ * Update an existing file with the provided diff.
+ */
+ type: 'update_file';
+ }
+}
+
+/**
+ * The output emitted by an apply patch tool call.
+ */
+export interface ResponseApplyPatchToolCallOutput {
+ /**
+ * The unique ID of the apply patch tool call output. Populated when this item is
+ * returned via API.
+ */
+ id: string;
+
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * Optional textual output returned by the apply patch tool.
+ */
+ output: string | null;
+
+ /**
+ * The status of the apply patch tool call output. One of `completed` or `failed`.
+ */
+ status: 'completed' | 'failed';
+
+ /**
+ * The type of the item. Always `apply_patch_call_output`.
+ */
+ type: 'apply_patch_call_output';
+
+ /**
+ * The ID of the entity that created this tool call output.
+ */
+ created_by?: string;
+}
+
/**
* Emitted when there is a partial audio response.
*/
@@ -1932,6 +2099,189 @@ export type ResponseFunctionCallOutputItem =
export type ResponseFunctionCallOutputItemList = Array<ResponseFunctionCallOutputItem>;
+/**
+ * Captured stdout and stderr for a portion of a function shell tool call output.
+ */
+export interface ResponseFunctionShellCallOutputContent {
+ /**
+ * The exit or timeout outcome associated with this chunk.
+ */
+ outcome: ResponseFunctionShellCallOutputContent.Timeout | ResponseFunctionShellCallOutputContent.Exit;
+
+ /**
+ * Captured stderr output for this chunk of the shell call.
+ */
+ stderr: string;
+
+ /**
+ * Captured stdout output for this chunk of the shell call.
+ */
+ stdout: string;
+}
+
+export namespace ResponseFunctionShellCallOutputContent {
+ /**
+ * Indicates that the function shell call exceeded its configured time limit.
+ */
+ export interface Timeout {
+ /**
+ * The outcome type. Always `timeout`.
+ */
+ type: 'timeout';
+ }
+
+ /**
+ * Indicates that the shell commands finished and returned an exit code.
+ */
+ export interface Exit {
+ /**
+ * The exit code returned by the shell process.
+ */
+ exit_code: number;
+
+ /**
+ * The outcome type. Always `exit`.
+ */
+ type: 'exit';
+ }
+}
+
+/**
+ * A tool call that executes one or more shell commands in a managed environment.
+ */
+export interface ResponseFunctionShellToolCall {
+ /**
+ * The unique ID of the function shell tool call. Populated when this item is
+ * returned via API.
+ */
+ id: string;
+
+ /**
+ * The shell commands and limits that describe how to run the tool call.
+ */
+ action: ResponseFunctionShellToolCall.Action;
+
+ /**
+ * The unique ID of the function shell tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The status of the shell call. One of `in_progress`, `completed`, or
+ * `incomplete`.
+ */
+ status: 'in_progress' | 'completed' | 'incomplete';
+
+ /**
+ * The type of the item. Always `shell_call`.
+ */
+ type: 'shell_call';
+
+ /**
+ * The ID of the entity that created this tool call.
+ */
+ created_by?: string;
+}
+
+export namespace ResponseFunctionShellToolCall {
+ /**
+ * The shell commands and limits that describe how to run the tool call.
+ */
+ export interface Action {
+    commands: Array<string>;
+
+ /**
+ * Optional maximum number of characters to return from each command.
+ */
+ max_output_length: number | null;
+
+ /**
+ * Optional timeout in milliseconds for the commands.
+ */
+ timeout_ms: number | null;
+ }
+}
+
+/**
+ * The output of a shell tool call.
+ */
+export interface ResponseFunctionShellToolCallOutput {
+ /**
+ * The unique ID of the shell call output. Populated when this item is returned via
+ * API.
+ */
+ id: string;
+
+ /**
+ * The unique ID of the shell tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The maximum length of the shell command output. This is generated by the model
+ * and should be passed back with the raw output.
+ */
+ max_output_length: number | null;
+
+ /**
+ * An array of shell call output contents
+ */
+  output: Array<ResponseFunctionShellToolCallOutput.Output>;
+
+ /**
+ * The type of the shell call output. Always `shell_call_output`.
+ */
+ type: 'shell_call_output';
+
+ created_by?: string;
+}
+
+export namespace ResponseFunctionShellToolCallOutput {
+ /**
+ * The content of a shell call output.
+ */
+ export interface Output {
+ /**
+ * Represents either an exit outcome (with an exit code) or a timeout outcome for a
+ * shell call output chunk.
+ */
+ outcome: Output.Timeout | Output.Exit;
+
+ stderr: string;
+
+ stdout: string;
+
+ created_by?: string;
+ }
+
+ export namespace Output {
+ /**
+ * Indicates that the function shell call exceeded its configured time limit.
+ */
+ export interface Timeout {
+ /**
+ * The outcome type. Always `timeout`.
+ */
+ type: 'timeout';
+ }
+
+ /**
+ * Indicates that the shell commands finished and returned an exit code.
+ */
+ export interface Exit {
+ /**
+ * Exit code from the shell process.
+ */
+ exit_code: number;
+
+ /**
+ * The outcome type. Always `exit`.
+ */
+ type: 'exit';
+ }
+ }
+}
+
/**
* A tool call to run a function. See the
* [function calling guide](https://platform.openai.com/docs/guides/function-calling)
@@ -2468,6 +2818,10 @@ export type ResponseInputItem =
| ResponseCodeInterpreterToolCall
| ResponseInputItem.LocalShellCall
| ResponseInputItem.LocalShellCallOutput
+ | ResponseInputItem.ShellCall
+ | ResponseInputItem.ShellCallOutput
+ | ResponseInputItem.ApplyPatchCall
+ | ResponseInputItem.ApplyPatchCallOutput
| ResponseInputItem.McpListTools
| ResponseInputItem.McpApprovalRequest
| ResponseInputItem.McpApprovalResponse
@@ -2714,6 +3068,216 @@ export namespace ResponseInputItem {
status?: 'in_progress' | 'completed' | 'incomplete' | null;
}
+ /**
+ * A tool representing a request to execute one or more shell commands.
+ */
+ export interface ShellCall {
+ /**
+ * The shell commands and limits that describe how to run the tool call.
+ */
+ action: ShellCall.Action;
+
+ /**
+ * The unique ID of the function shell tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The type of the item. Always `function_shell_call`.
+ */
+ type: 'shell_call';
+
+ /**
+ * The unique ID of the function shell tool call. Populated when this item is
+ * returned via API.
+ */
+ id?: string | null;
+
+ /**
+ * The status of the shell call. One of `in_progress`, `completed`, or
+ * `incomplete`.
+ */
+ status?: 'in_progress' | 'completed' | 'incomplete' | null;
+ }
+
+ export namespace ShellCall {
+ /**
+ * The shell commands and limits that describe how to run the tool call.
+ */
+ export interface Action {
+ /**
+ * Ordered shell commands for the execution environment to run.
+ */
+      commands: Array<string>;
+
+ /**
+ * Maximum number of UTF-8 characters to capture from combined stdout and stderr
+ * output.
+ */
+ max_output_length?: number | null;
+
+ /**
+ * Maximum wall-clock time in milliseconds to allow the shell commands to run.
+ */
+ timeout_ms?: number | null;
+ }
+ }
+
+ /**
+ * The streamed output items emitted by a function shell tool call.
+ */
+ export interface ShellCallOutput {
+ /**
+ * The unique ID of the function shell tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * Captured chunks of stdout and stderr output, along with their associated
+ * outcomes.
+ */
+    output: Array<ResponseFunctionShellCallOutputContent>;
+
+ /**
+ * The type of the item. Always `function_shell_call_output`.
+ */
+ type: 'shell_call_output';
+
+ /**
+ * The unique ID of the function shell tool call output. Populated when this item
+ * is returned via API.
+ */
+ id?: string | null;
+
+ /**
+ * The maximum number of UTF-8 characters captured for this shell call's combined
+ * output.
+ */
+ max_output_length?: number | null;
+ }
+
+ /**
+ * A tool call representing a request to create, delete, or update files using diff
+ * patches.
+ */
+ export interface ApplyPatchCall {
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The specific create, delete, or update instruction for the apply_patch tool
+ * call.
+ */
+ operation: ApplyPatchCall.CreateFile | ApplyPatchCall.DeleteFile | ApplyPatchCall.UpdateFile;
+
+ /**
+ * The status of the apply patch tool call. One of `in_progress` or `completed`.
+ */
+ status: 'in_progress' | 'completed';
+
+ /**
+ * The type of the item. Always `apply_patch_call`.
+ */
+ type: 'apply_patch_call';
+
+ /**
+ * The unique ID of the apply patch tool call. Populated when this item is returned
+ * via API.
+ */
+ id?: string | null;
+ }
+
+ export namespace ApplyPatchCall {
+ /**
+ * Instruction for creating a new file via the apply_patch tool.
+ */
+ export interface CreateFile {
+ /**
+ * Unified diff content to apply when creating the file.
+ */
+ diff: string;
+
+ /**
+ * Path of the file to create relative to the workspace root.
+ */
+ path: string;
+
+ /**
+ * The operation type. Always `create_file`.
+ */
+ type: 'create_file';
+ }
+
+ /**
+ * Instruction for deleting an existing file via the apply_patch tool.
+ */
+ export interface DeleteFile {
+ /**
+ * Path of the file to delete relative to the workspace root.
+ */
+ path: string;
+
+ /**
+ * The operation type. Always `delete_file`.
+ */
+ type: 'delete_file';
+ }
+
+ /**
+ * Instruction for updating an existing file via the apply_patch tool.
+ */
+ export interface UpdateFile {
+ /**
+ * Unified diff content to apply to the existing file.
+ */
+ diff: string;
+
+ /**
+ * Path of the file to update relative to the workspace root.
+ */
+ path: string;
+
+ /**
+ * The operation type. Always `update_file`.
+ */
+ type: 'update_file';
+ }
+ }
+
+ /**
+ * The streamed output emitted by an apply patch tool call.
+ */
+ export interface ApplyPatchCallOutput {
+ /**
+ * The unique ID of the apply patch tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * The status of the apply patch tool call output. One of `completed` or `failed`.
+ */
+ status: 'completed' | 'failed';
+
+ /**
+ * The type of the item. Always `apply_patch_call_output`.
+ */
+ type: 'apply_patch_call_output';
+
+ /**
+ * The unique ID of the apply patch tool call output. Populated when this item is
+ * returned via API.
+ */
+ id?: string | null;
+
+ /**
+ * Optional human-readable log text from the apply patch tool (e.g., patch results
+ * or errors).
+ */
+ output?: string;
+ }
+
/**
* A list of tools available on an MCP server.
*/
@@ -2981,6 +3545,10 @@ export type ResponseItem =
| ResponseCodeInterpreterToolCall
| ResponseItem.LocalShellCall
| ResponseItem.LocalShellCallOutput
+ | ResponseFunctionShellToolCall
+ | ResponseFunctionShellToolCallOutput
+ | ResponseApplyPatchToolCall
+ | ResponseApplyPatchToolCallOutput
| ResponseItem.McpListTools
| ResponseItem.McpApprovalRequest
| ResponseItem.McpApprovalResponse
@@ -3521,6 +4089,10 @@ export type ResponseOutputItem =
| ResponseOutputItem.ImageGenerationCall
| ResponseCodeInterpreterToolCall
| ResponseOutputItem.LocalShellCall
+ | ResponseFunctionShellToolCall
+ | ResponseFunctionShellToolCallOutput
+ | ResponseApplyPatchToolCall
+ | ResponseApplyPatchToolCallOutput
| ResponseOutputItem.McpCall
| ResponseOutputItem.McpListTools
| ResponseOutputItem.McpApprovalRequest
@@ -4884,8 +5456,10 @@ export type Tool =
| Tool.CodeInterpreter
| Tool.ImageGeneration
| Tool.LocalShell
+ | FunctionShellTool
| CustomTool
- | WebSearchPreviewTool;
+ | WebSearchPreviewTool
+ | ApplyPatchTool;
export namespace Tool {
/**
@@ -5209,6 +5783,16 @@ export interface ToolChoiceAllowed {
type: 'allowed_tools';
}
+/**
+ * Forces the model to call the apply_patch tool when executing a tool call.
+ */
+export interface ToolChoiceApplyPatch {
+ /**
+ * The tool to call. Always `apply_patch`.
+ */
+ type: 'apply_patch';
+}
+
/**
* Use this option to force the model to call a specific custom tool.
*/
@@ -5272,6 +5856,16 @@ export interface ToolChoiceMcp {
*/
export type ToolChoiceOptions = 'none' | 'auto' | 'required';
+/**
+ * Forces the model to call the function shell tool when a tool call is required.
+ */
+export interface ToolChoiceShell {
+ /**
+ * The tool to call. Always `shell`.
+ */
+ type: 'shell';
+}
+
/**
* Indicates that the model should use a built-in tool to generate a response.
* [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
@@ -5548,6 +6142,14 @@ export interface ResponseCreateParamsBase {
*/
prompt_cache_key?: string;
+ /**
+ * The retention policy for the prompt cache. Set to `24h` to enable extended
+ * prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ * of 24 hours.
+ * [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ */
+ prompt_cache_retention?: 'in-memory' | '24h' | null;
+
/**
* **gpt-5 and o-series models only**
*
@@ -5632,7 +6234,9 @@ export interface ResponseCreateParamsBase {
| ToolChoiceTypes
| ToolChoiceFunction
| ToolChoiceMcp
- | ToolChoiceCustom;
+ | ToolChoiceCustom
+ | ToolChoiceApplyPatch
+ | ToolChoiceShell;
/**
* An array of tools the model may call while generating a response. You can
@@ -5801,12 +6405,16 @@ Responses.InputTokens = InputTokens;
export declare namespace Responses {
export {
+ type ApplyPatchTool as ApplyPatchTool,
type ComputerTool as ComputerTool,
type CustomTool as CustomTool,
type EasyInputMessage as EasyInputMessage,
type FileSearchTool as FileSearchTool,
+ type FunctionShellTool as FunctionShellTool,
type FunctionTool as FunctionTool,
type Response as Response,
+ type ResponseApplyPatchToolCall as ResponseApplyPatchToolCall,
+ type ResponseApplyPatchToolCallOutput as ResponseApplyPatchToolCallOutput,
type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent,
type ResponseAudioDoneEvent as ResponseAudioDoneEvent,
type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent,
@@ -5843,6 +6451,9 @@ export declare namespace Responses {
type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
type ResponseFunctionCallOutputItem as ResponseFunctionCallOutputItem,
type ResponseFunctionCallOutputItemList as ResponseFunctionCallOutputItemList,
+ type ResponseFunctionShellCallOutputContent as ResponseFunctionShellCallOutputContent,
+ type ResponseFunctionShellToolCall as ResponseFunctionShellToolCall,
+ type ResponseFunctionShellToolCallOutput as ResponseFunctionShellToolCallOutput,
type ResponseFunctionToolCall as ResponseFunctionToolCall,
type ResponseFunctionToolCallItem as ResponseFunctionToolCallItem,
type ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem,
@@ -5905,10 +6516,12 @@ export declare namespace Responses {
type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
type Tool as Tool,
type ToolChoiceAllowed as ToolChoiceAllowed,
+ type ToolChoiceApplyPatch as ToolChoiceApplyPatch,
type ToolChoiceCustom as ToolChoiceCustom,
type ToolChoiceFunction as ToolChoiceFunction,
type ToolChoiceMcp as ToolChoiceMcp,
type ToolChoiceOptions as ToolChoiceOptions,
+ type ToolChoiceShell as ToolChoiceShell,
type ToolChoiceTypes as ToolChoiceTypes,
type WebSearchPreviewTool as WebSearchPreviewTool,
type WebSearchTool as WebSearchTool,
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 1cc395127..1cec69428 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -18,6 +18,11 @@ export type AllModels =
| 'gpt-5-pro-2025-10-06';
export type ChatModel =
+ | 'gpt-5.1'
+ | 'gpt-5.1-2025-11-13'
+ | 'gpt-5.1-codex'
+ | 'gpt-5.1-mini'
+ | 'gpt-5.1-chat-latest'
| 'gpt-5'
| 'gpt-5-mini'
| 'gpt-5-nano'
@@ -241,12 +246,16 @@ export interface Reasoning {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
effort?: ReasoningEffort | null;
@@ -272,14 +281,18 @@ export interface Reasoning {
/**
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- * effort can result in faster responses and fewer tokens used on reasoning in a
- * response.
+ * supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ * reasoning effort can result in faster responses and fewer tokens used on
+ * reasoning in a response.
*
- * Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- * effort.
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ * reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ * calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ * support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
*/
-export type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | null;
+export type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | null;
/**
* JSON object response format. An older method of generating JSON responses. Using
diff --git a/src/version.ts b/src/version.ts
index 8bf880778..a56dd5aa4 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '6.8.1'; // x-release-please-version
+export const VERSION = '6.9.0'; // x-release-please-version
diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts
index 6551d2d9a..d6f3fd141 100644
--- a/tests/api-resources/beta/assistants.test.ts
+++ b/tests/api-resources/beta/assistants.test.ts
@@ -26,7 +26,7 @@ describe('resource assistants', () => {
instructions: 'instructions',
metadata: { foo: 'string' },
name: 'name',
- reasoning_effort: 'minimal',
+ reasoning_effort: 'none',
response_format: 'auto',
temperature: 1,
tool_resources: {
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index 1e537d676..1d9badfee 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -38,7 +38,7 @@ describe('resource runs', () => {
metadata: { foo: 'string' },
model: 'string',
parallel_tool_calls: true,
- reasoning_effort: 'minimal',
+ reasoning_effort: 'none',
response_format: 'auto',
stream: false,
temperature: 1,
diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts
index 3a2fc3670..ec4694b34 100644
--- a/tests/api-resources/chat/completions/completions.test.ts
+++ b/tests/api-resources/chat/completions/completions.test.ts
@@ -41,7 +41,8 @@ describe('resource completions', () => {
prediction: { content: 'string', type: 'content' },
presence_penalty: -2,
prompt_cache_key: 'prompt-cache-key-1234',
- reasoning_effort: 'minimal',
+ prompt_cache_retention: 'in-memory',
+ reasoning_effort: 'none',
response_format: { type: 'text' },
safety_identifier: 'safety-identifier-1234',
seed: -9007199254740991,
diff --git a/tests/api-resources/responses/input-tokens.test.ts b/tests/api-resources/responses/input-tokens.test.ts
index c613479cf..48e5372f1 100644
--- a/tests/api-resources/responses/input-tokens.test.ts
+++ b/tests/api-resources/responses/input-tokens.test.ts
@@ -30,7 +30,7 @@ describe('resource inputTokens', () => {
model: 'model',
parallel_tool_calls: true,
previous_response_id: 'resp_123',
- reasoning: { effort: 'minimal', generate_summary: 'auto', summary: 'auto' },
+ reasoning: { effort: 'none', generate_summary: 'auto', summary: 'auto' },
text: { format: { type: 'text' }, verbosity: 'low' },
tool_choice: 'none',
tools: [