From 72b68092c4f26f43019b0b6061dab1fe771d3e5c Mon Sep 17 00:00:00 2001
From: josh <144584931+dancer@users.noreply.github.com>
Date: Mon, 11 Aug 2025 09:56:17 +0100
Subject: [PATCH 1/4] docs(providers/openai): remove older openai model
variants (#7955)
## background
Remove older OpenAI model variants from the provider and model capability tables.
https://github.com/vercel/ai/pull/7938#discussion_r2265712315
---
.../docs/02-foundations/02-providers-and-models.mdx | 11 -----------
content/providers/01-ai-sdk-providers/index.mdx | 8 --------
2 files changed, 19 deletions(-)
diff --git a/content/docs/02-foundations/02-providers-and-models.mdx b/content/docs/02-foundations/02-providers-and-models.mdx
index a827a650bfe8..5ff2fc447967 100644
--- a/content/docs/02-foundations/02-providers-and-models.mdx
+++ b/content/docs/02-foundations/02-providers-and-models.mdx
@@ -112,17 +112,6 @@ Here are the capabilities of popular models:
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-mini` | | | | |
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-nano` | | | | |
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-chat-latest` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-nano` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o3-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o3` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o4-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-opus-4-latest` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-sonnet-4-latest` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3-7-sonnet-latest` | | | | |
diff --git a/content/providers/01-ai-sdk-providers/index.mdx b/content/providers/01-ai-sdk-providers/index.mdx
index e6f923e79b96..440842c5dbda 100644
--- a/content/providers/01-ai-sdk-providers/index.mdx
+++ b/content/providers/01-ai-sdk-providers/index.mdx
@@ -33,14 +33,6 @@ Not all providers support all AI SDK features. Here's a quick comparison of the
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-mini` | | | | |
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-nano` | | | | |
| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-5-chat-latest` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1-nano` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4o-mini` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4.1` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `gpt-4` | | | | |
-| [OpenAI](/providers/ai-sdk-providers/openai) | `o1` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3.7-sonnet-latest` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3.5-sonnet-latest` | | | | |
| [Anthropic](/providers/ai-sdk-providers/anthropic) | `claude-3.5-haiku-latest` | | | | |
From 82ed6c29a4392cb0ee0224452e806c3dfebb5392 Mon Sep 17 00:00:00 2001
From: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com>
Date: Mon, 11 Aug 2025 11:59:22 +0200
Subject: [PATCH 2/4] docs: document hasToolCall and stepCountIs (#7959)
Fixes #7954
---
.../01-ai-sdk-core/70-step-count-is.mdx | 84 ++++++++++++
.../01-ai-sdk-core/71-has-tool-call.mdx | 120 ++++++++++++++++++
2 files changed, 204 insertions(+)
create mode 100644 content/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx
create mode 100644 content/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx
diff --git a/content/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx b/content/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx
new file mode 100644
index 000000000000..2f9c7ca186b4
--- /dev/null
+++ b/content/docs/07-reference/01-ai-sdk-core/70-step-count-is.mdx
@@ -0,0 +1,84 @@
+---
+title: stepCountIs
+description: API Reference for stepCountIs.
+---
+
+# `stepCountIs()`
+
+Creates a stop condition that stops when the number of steps reaches a specified count.
+
+This function is used with `stopWhen` in `generateText` and `streamText` to control when a tool-calling loop should stop based on the number of steps executed.
+
+```ts
+import { openai } from '@ai-sdk/openai';
+import { generateText, stepCountIs } from 'ai';
+
+const result = await generateText({
+ model: openai('gpt-4o'),
+ tools: {
+ // your tools
+ },
+ // Stop after 5 steps
+ stopWhen: stepCountIs(5),
+});
+```
+
+## Import
+
+
+
+## API Signature
+
+### Parameters
+
+
+
+### Returns
+
+A `StopCondition` function that returns `true` when the step count reaches the specified number. The function can be used with the `stopWhen` parameter in `generateText` and `streamText`.
+
+## Examples
+
+### Basic Usage
+
+Stop after 3 steps:
+
+```ts
+import { generateText, stepCountIs } from 'ai';
+
+const result = await generateText({
+ model: yourModel,
+ tools: yourTools,
+ stopWhen: stepCountIs(3),
+});
+```
+
+### Combining with Other Conditions
+
+You can combine multiple stop conditions in an array:
+
+```ts
+import { generateText, stepCountIs, hasToolCall } from 'ai';
+
+const result = await generateText({
+ model: yourModel,
+ tools: yourTools,
+ // Stop after 10 steps OR when finalAnswer tool is called
+ stopWhen: [stepCountIs(10), hasToolCall('finalAnswer')],
+});
+```
+
+## See also
+
+- [`hasToolCall()`](/docs/reference/ai-sdk-core/has-tool-call)
+- [`generateText()`](/docs/reference/ai-sdk-core/generate-text)
+- [`streamText()`](/docs/reference/ai-sdk-core/stream-text)
diff --git a/content/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx b/content/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx
new file mode 100644
index 000000000000..a068d35bb864
--- /dev/null
+++ b/content/docs/07-reference/01-ai-sdk-core/71-has-tool-call.mdx
@@ -0,0 +1,120 @@
+---
+title: hasToolCall
+description: API Reference for hasToolCall.
+---
+
+# `hasToolCall()`
+
+Creates a stop condition that stops when a specific tool is called.
+
+This function is used with `stopWhen` in `generateText` and `streamText` to control when a tool-calling loop should stop based on whether a particular tool has been invoked.
+
+```ts
+import { openai } from '@ai-sdk/openai';
+import { generateText, hasToolCall } from 'ai';
+
+const result = await generateText({
+ model: openai('gpt-4o'),
+ tools: {
+ weather: weatherTool,
+ finalAnswer: finalAnswerTool,
+ },
+ // Stop when the finalAnswer tool is called
+ stopWhen: hasToolCall('finalAnswer'),
+});
+```
+
+## Import
+
+
+
+## API Signature
+
+### Parameters
+
+
+
+### Returns
+
+A `StopCondition` function that returns `true` when the specified tool is called in the current step. The function can be used with the `stopWhen` parameter in `generateText` and `streamText`.
+
+## Examples
+
+### Basic Usage
+
+Stop when a specific tool is called:
+
+```ts
+import { generateText, hasToolCall } from 'ai';
+
+const result = await generateText({
+ model: yourModel,
+ tools: {
+ submitAnswer: submitAnswerTool,
+ search: searchTool,
+ },
+ stopWhen: hasToolCall('submitAnswer'),
+});
+```
+
+### Combining with Other Conditions
+
+You can combine multiple stop conditions in an array:
+
+```ts
+import { generateText, hasToolCall, stepCountIs } from 'ai';
+
+const result = await generateText({
+ model: yourModel,
+ tools: {
+ weather: weatherTool,
+ search: searchTool,
+ finalAnswer: finalAnswerTool,
+ },
+ // Stop when weather tool is called OR finalAnswer is called OR after 5 steps
+ stopWhen: [
+ hasToolCall('weather'),
+ hasToolCall('finalAnswer'),
+ stepCountIs(5),
+ ],
+});
+```
+
+### Agent Pattern
+
+Common pattern for agents that run until they provide a final answer:
+
+```ts
+import { generateText, hasToolCall } from 'ai';
+import { z } from 'zod';
+
+const result = await generateText({
+ model: yourModel,
+ tools: {
+ search: searchTool,
+ calculate: calculateTool,
+ finalAnswer: {
+ description: 'Provide the final answer to the user',
+      inputSchema: z.object({
+ answer: z.string(),
+ }),
+ execute: async ({ answer }) => answer,
+ },
+ },
+ stopWhen: hasToolCall('finalAnswer'),
+});
+```
+
+## See also
+
+- [`stepCountIs()`](/docs/reference/ai-sdk-core/step-count-is)
+- [`generateText()`](/docs/reference/ai-sdk-core/generate-text)
+- [`streamText()`](/docs/reference/ai-sdk-core/stream-text)
From c2871e6e845e6e34cdb91e56e350e93a951f5faa Mon Sep 17 00:00:00 2001
From: josh <144584931+dancer@users.noreply.github.com>
Date: Mon, 11 Aug 2025 11:29:13 +0100
Subject: [PATCH 3/4] fix(provider/amazon-bedrock): resolve opus 4.1 reasoning
mode validation error (#7957)
## background
Users reported that Bedrock Claude Opus 4.1 with reasoning mode was
consistently failing with the validation error `max_tokens must be greater
than thinking.budget_tokens`, even when `maxOutputTokens` was set higher
than `budgetTokens`.
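For context, a minimal sketch of the call shape that was failing. The model id is illustrative, and the `reasoningConfig` provider option mirrors the field referenced in the diff below; treat the exact option names as an assumption.
```ts
import { bedrock } from '@ai-sdk/amazon-bedrock';
import { generateText } from 'ai';

const result = await generateText({
  // illustrative Bedrock model id for Claude Opus 4.1
  model: bedrock('us.anthropic.claude-opus-4-1-20250805-v1:0'),
  // set well above the thinking budget, yet the request was rejected,
  // because the value was sent as `maxOutputTokens` instead of `maxTokens`
  // in the Bedrock inferenceConfig
  maxOutputTokens: 8000,
  providerOptions: {
    bedrock: {
      reasoningConfig: { type: 'enabled', budgetTokens: 2000 },
    },
  },
  prompt: 'Summarize the trade-offs of event sourcing.',
});

console.log(result.text);
```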
## summary
- Fix the field name mapping from `maxOutputTokens` to `maxTokens` in
  `inferenceConfig`.
- Update the existing tests to expect the correct field name.
- Users still set `maxOutputTokens`; only the request sent to Bedrock changes.
- Tested locally to reproduce the error and confirm the issue.
- Tested locally to verify the fix.
## tasks
- [x] fix field mapping in `bedrock-chat-language-model.ts` line 162
- [x] update reasoning token calculation logic to use `maxTokens`
- [x] update test expectations in `bedrock-chat-language-model.test.ts`
- [x] add verification test for the Opus 4.1 specific scenario
Related issue: #7927
---
.changeset/afraid-worms-yell.md | 5 +++++
.../src/bedrock-chat-language-model.test.ts | 8 +++-----
.../amazon-bedrock/src/bedrock-chat-language-model.ts | 10 +++++-----
3 files changed, 13 insertions(+), 10 deletions(-)
create mode 100644 .changeset/afraid-worms-yell.md
diff --git a/.changeset/afraid-worms-yell.md b/.changeset/afraid-worms-yell.md
new file mode 100644
index 000000000000..683ec99e694c
--- /dev/null
+++ b/.changeset/afraid-worms-yell.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/amazon-bedrock': patch
+---
+
+fix(provider/amazon-bedrock): resolve opus 4.1 reasoning mode validation error
diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts
index ea60368e116b..1d543956e526 100644
--- a/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts
+++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts
@@ -1434,9 +1434,8 @@ describe('doStream', () => {
budget_tokens: 2000,
},
},
- // Should have adjusted maxOutputTokens (100 + 2000)
inferenceConfig: {
- maxOutputTokens: 2100,
+ maxTokens: 2100,
},
});
@@ -1667,7 +1666,7 @@ describe('doGenerate', () => {
expect(await server.calls[0].requestBodyJson).toMatchObject({
inferenceConfig: {
- maxOutputTokens: 100,
+ maxTokens: 100,
temperature: 0.5,
topP: 0.5,
topK: 1,
@@ -2044,9 +2043,8 @@ describe('doGenerate', () => {
budget_tokens: 2000,
},
},
- // Should have adjusted maxOutputTokens (100 + 2000)
inferenceConfig: {
- maxOutputTokens: 2100,
+ maxTokens: 2100,
},
});
diff --git a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts
index 0bb2b6a28f67..bf9956ea8d09 100644
--- a/packages/amazon-bedrock/src/bedrock-chat-language-model.ts
+++ b/packages/amazon-bedrock/src/bedrock-chat-language-model.ts
@@ -159,19 +159,19 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
const thinkingBudget = bedrockOptions.reasoningConfig?.budgetTokens;
const inferenceConfig = {
- ...(maxOutputTokens != null && { maxOutputTokens }),
+ ...(maxOutputTokens != null && { maxTokens: maxOutputTokens }),
...(temperature != null && { temperature }),
...(topP != null && { topP }),
...(topK != null && { topK }),
...(stopSequences != null && { stopSequences }),
};
- // Adjust maxOutputTokens if thinking is enabled
+ // Adjust maxTokens if thinking is enabled
if (isThinking && thinkingBudget != null) {
- if (inferenceConfig.maxOutputTokens != null) {
- inferenceConfig.maxOutputTokens += thinkingBudget;
+ if (inferenceConfig.maxTokens != null) {
+ inferenceConfig.maxTokens += thinkingBudget;
} else {
- inferenceConfig.maxOutputTokens = thinkingBudget + 4096; // Default + thinking budget maxOutputTokens = 4096, TODO update default in v5
+ inferenceConfig.maxTokens = thinkingBudget + 4096; // Default + thinking budget maxTokens = 4096, TODO update default in v5
}
// Add them to additional model request fields
// Add thinking config to additionalModelRequestFields
From b48e0ffbf5c32ba3be1fd6ceaef86d6bb5d9d3e1 Mon Sep 17 00:00:00 2001
From: Lars Grammel
Date: Mon, 11 Aug 2025 13:26:19 +0200
Subject: [PATCH 4/4] feat(provider/openai): add code interpreter tool
(responses api) (#7952)
## Background
The OpenAI Responses API supports a code interpreter tool that lets the
model write and run Python code.
## Summary
Implement code interpreter tool invocation.
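A minimal usage sketch (the prompt is illustrative; the tool must be registered under the `code_interpreter` key, as in the docs example added below):
```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'Run Python code to compute the factorial of 10.',
  tools: {
    // the tool key must be `code_interpreter`; container config is optional
    code_interpreter: openai.tools.codeInterpreter({}),
  },
});

console.log(result.text);
```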
## Manual Verification
- [x] run code interpreter example
## Future Work
Provider-executed tool calls/results.
## Related Issues
Continues #6997
---------
Co-authored-by: Nico Albanese <49612682+nicoalbanese@users.noreply.github.com>
Co-authored-by: cleiton couto
Co-authored-by: cleiton couto
Co-authored-by: nicoalbanese
---
.changeset/metal-shrimps-fix.md | 5 +
.../01-ai-sdk-providers/03-openai.mdx | 36 ++-
.../stream-text/openai-code-interpreter.ts | 22 ++
packages/openai/src/openai-provider.ts | 2 +-
packages/openai/src/openai-tools.ts | 13 +-
packages/openai/src/openai-types.ts | 15 +-
.../responses/openai-responses-api-types.ts | 4 +
.../openai-responses-prepare-tools.test.ts | 205 ++++++++++++++++++
.../openai-responses-prepare-tools.ts | 27 ++-
packages/openai/src/tool/code-interpreter.ts | 29 +++
10 files changed, 337 insertions(+), 21 deletions(-)
create mode 100644 .changeset/metal-shrimps-fix.md
create mode 100644 examples/ai-core/src/stream-text/openai-code-interpreter.ts
create mode 100644 packages/openai/src/responses/openai-responses-prepare-tools.test.ts
create mode 100644 packages/openai/src/tool/code-interpreter.ts
diff --git a/.changeset/metal-shrimps-fix.md b/.changeset/metal-shrimps-fix.md
new file mode 100644
index 000000000000..95fcececc8d7
--- /dev/null
+++ b/.changeset/metal-shrimps-fix.md
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/openai': patch
+---
+
+feat(provider/openai): add code interpreter tool (responses api)
diff --git a/content/providers/01-ai-sdk-providers/03-openai.mdx b/content/providers/01-ai-sdk-providers/03-openai.mdx
index 614527ff1af8..a6e8569ced7e 100644
--- a/content/providers/01-ai-sdk-providers/03-openai.mdx
+++ b/content/providers/01-ai-sdk-providers/03-openai.mdx
@@ -729,7 +729,7 @@ The following OpenAI-specific metadata is returned:
#### Web Search
-The OpenAI responses provider supports web search through the `openai.tools.webSearchPreview` tool.
+The OpenAI responses API supports web search through the `openai.tools.webSearchPreview` tool.
You can force the use of the web search tool by setting the `toolChoice` parameter to `{ type: 'tool', toolName: 'web_search_preview' }`.
@@ -830,7 +830,7 @@ The `textVerbosity` parameter scales output length without changing the underlyi
#### File Search
-The OpenAI responses provider supports file search through the `openai.tools.fileSearch` tool.
+The OpenAI responses API supports file search through the `openai.tools.fileSearch` tool.
You can force the use of the file search tool by setting the `toolChoice` parameter to `{ type: 'tool', toolName: 'file_search' }`.
@@ -866,6 +866,38 @@ const result = await generateText({
be customized.
+#### Code Interpreter
+
+The OpenAI responses API supports the code interpreter tool through the `openai.tools.codeInterpreter` tool. This allows models to write and execute Python code.
+
+```ts
+import { openai } from '@ai-sdk/openai';
+import { generateText } from 'ai';
+
+const result = await generateText({
+ model: openai.responses('gpt-5'),
+ prompt: 'Write and run Python code to calculate the factorial of 10',
+ tools: {
+ code_interpreter: openai.tools.codeInterpreter({
+ // optional configuration:
+ container: {
+ fileIds: ['file-123', 'file-456'], // optional file IDs to make available
+ },
+ }),
+ },
+});
+```
+
+The code interpreter tool can be configured with:
+
+- **container**: Either a container ID string or an object with `fileIds` to specify uploaded files that should be available to the code interpreter
+
+<Note>
+ The tool must be named `code_interpreter` when using OpenAI's code interpreter
+ functionality. This name is required by OpenAI's API specification and cannot
+ be customized.
+</Note>
+
#### Image Support
The OpenAI Responses API supports Image inputs for appropriate models.
diff --git a/examples/ai-core/src/stream-text/openai-code-interpreter.ts b/examples/ai-core/src/stream-text/openai-code-interpreter.ts
new file mode 100644
index 000000000000..84ade9e80d27
--- /dev/null
+++ b/examples/ai-core/src/stream-text/openai-code-interpreter.ts
@@ -0,0 +1,22 @@
+import { openai } from '@ai-sdk/openai';
+import { stepCountIs, streamText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+ const result = streamText({
+ model: openai.responses('gpt-5'),
+ stopWhen: stepCountIs(5),
+ tools: {
+ code_interpreter: openai.tools.codeInterpreter({}),
+ },
+ prompt:
+      'Write and run Python code to simulate rolling two dice 10000 times and show a table of the results. ' +
+ 'The table should have three columns: "Sum", "Count", and "Percentage".',
+ });
+
+ for await (const chunk of result.textStream) {
+ process.stdout.write(chunk);
+ }
+}
+
+main().catch(console.error);
diff --git a/packages/openai/src/openai-provider.ts b/packages/openai/src/openai-provider.ts
index 41677ba4eaaf..760be930551d 100644
--- a/packages/openai/src/openai-provider.ts
+++ b/packages/openai/src/openai-provider.ts
@@ -33,7 +33,7 @@ export interface OpenAIProvider extends ProviderV2 {
/**
Creates an OpenAI model for text generation.
*/
- languageModel(modelId: OpenAIResponsesModelId): OpenAIResponsesLanguageModel;
+ languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
/**
Creates an OpenAI chat model for text generation.
diff --git a/packages/openai/src/openai-tools.ts b/packages/openai/src/openai-tools.ts
index c9292fae5029..1789bebd89b6 100644
--- a/packages/openai/src/openai-tools.ts
+++ b/packages/openai/src/openai-tools.ts
@@ -1,20 +1,11 @@
+import { codeInterpreter } from './tool/code-interpreter';
import { fileSearch } from './tool/file-search';
import { webSearchPreview } from './tool/web-search-preview';
-
export { fileSearch } from './tool/file-search';
export { webSearchPreview } from './tool/web-search-preview';
-export type {
- OpenAITool,
- OpenAITools,
- OpenAIToolChoice,
- OpenAIFunctionTool,
- OpenAIFileSearchTool,
- OpenAIWebSearchPreviewTool,
- OpenAIWebSearchUserLocation,
-} from './openai-types';
-
export const openaiTools = {
+ codeInterpreter,
fileSearch,
webSearchPreview,
};
diff --git a/packages/openai/src/openai-types.ts b/packages/openai/src/openai-types.ts
index 7d098325453b..5b2aa83a6cdd 100644
--- a/packages/openai/src/openai-types.ts
+++ b/packages/openai/src/openai-types.ts
@@ -1,5 +1,6 @@
import { JSONSchema7 } from '@ai-sdk/provider';
+// TODO clean up this file and move the definitions into the tools
/**
* OpenAI function tool definition
*/
@@ -55,13 +56,25 @@ export interface OpenAIWebSearchPreviewTool {
user_location?: OpenAIWebSearchUserLocation;
}
+/**
+ * OpenAI code interpreter tool definition
+ */
+export interface OpenAICodeInterpreterTool {
+ type: 'code_interpreter';
+ container: {
+ type: 'auto';
+ file_ids: string[];
+ };
+}
+
/**
* Union type for all OpenAI tools
*/
export type OpenAITool =
| OpenAIFunctionTool
| OpenAIFileSearchTool
- | OpenAIWebSearchPreviewTool;
+ | OpenAIWebSearchPreviewTool
+ | OpenAICodeInterpreterTool;
/**
* OpenAI tool choice options
diff --git a/packages/openai/src/responses/openai-responses-api-types.ts b/packages/openai/src/responses/openai-responses-api-types.ts
index 0bae9ec8a343..7f6f711c3545 100644
--- a/packages/openai/src/responses/openai-responses-api-types.ts
+++ b/packages/openai/src/responses/openai-responses-api-types.ts
@@ -89,6 +89,10 @@ export type OpenAIResponsesTool =
region: string;
};
}
+ | {
+ type: 'code_interpreter';
+ container: string | { type: 'auto'; file_ids: string[] | undefined };
+ }
| {
type: 'file_search';
vector_store_ids?: string[];
diff --git a/packages/openai/src/responses/openai-responses-prepare-tools.test.ts b/packages/openai/src/responses/openai-responses-prepare-tools.test.ts
new file mode 100644
index 000000000000..7077320f5b78
--- /dev/null
+++ b/packages/openai/src/responses/openai-responses-prepare-tools.test.ts
@@ -0,0 +1,205 @@
+import { prepareResponsesTools } from './openai-responses-prepare-tools';
+
+describe('prepareResponsesTools', () => {
+ describe('code interpreter', () => {
+ it('should prepare code interpreter tool with no container (auto mode)', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {},
+ },
+ ],
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: { type: 'auto', file_ids: undefined },
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should prepare code interpreter tool with string container ID', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {
+ container: 'container-123',
+ },
+ },
+ ],
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: 'container-123',
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should prepare code interpreter tool with file IDs container', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {
+ container: {
+ fileIds: ['file-1', 'file-2', 'file-3'],
+ },
+ },
+ },
+ ],
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: { type: 'auto', file_ids: ['file-1', 'file-2', 'file-3'] },
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should prepare code interpreter tool with empty file IDs array', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {
+ container: {
+ fileIds: [],
+ },
+ },
+ },
+ ],
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: { type: 'auto', file_ids: [] },
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should prepare code interpreter tool with undefined file IDs', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {
+ container: {
+ fileIds: undefined,
+ },
+ },
+ },
+ ],
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: { type: 'auto', file_ids: undefined },
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should handle tool choice selection with code interpreter', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {},
+ },
+ ],
+ toolChoice: {
+ type: 'tool',
+ toolName: 'code_interpreter',
+ },
+ strictJsonSchema: false,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'code_interpreter',
+ container: { type: 'auto', file_ids: undefined },
+ },
+ ]);
+ expect(result.toolChoice).toEqual({
+ type: 'code_interpreter',
+ });
+ expect(result.toolWarnings).toEqual([]);
+ });
+
+ it('should handle multiple tools including code interpreter', () => {
+ const result = prepareResponsesTools({
+ tools: [
+ {
+ type: 'function',
+ name: 'testFunction',
+ description: 'A test function',
+ inputSchema: {
+ type: 'object',
+ properties: {
+ input: { type: 'string' },
+ },
+ },
+ },
+ {
+ type: 'provider-defined',
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ args: {
+ container: 'my-container',
+ },
+ },
+ ],
+ strictJsonSchema: true,
+ });
+
+ expect(result.tools).toEqual([
+ {
+ type: 'function',
+ name: 'testFunction',
+ description: 'A test function',
+ parameters: {
+ type: 'object',
+ properties: {
+ input: { type: 'string' },
+ },
+ },
+ strict: true,
+ },
+ {
+ type: 'code_interpreter',
+ container: 'my-container',
+ },
+ ]);
+ expect(result.toolWarnings).toEqual([]);
+ });
+ });
+});
diff --git a/packages/openai/src/responses/openai-responses-prepare-tools.ts b/packages/openai/src/responses/openai-responses-prepare-tools.ts
index eb5414d0906a..b70aa842d466 100644
--- a/packages/openai/src/responses/openai-responses-prepare-tools.ts
+++ b/packages/openai/src/responses/openai-responses-prepare-tools.ts
@@ -5,6 +5,7 @@ import {
} from '@ai-sdk/provider';
import { OpenAIResponsesTool } from './openai-responses-api-types';
import { fileSearchArgsSchema } from '../tool/file-search';
+import { codeInterpreterArgsSchema } from '../tool/code-interpreter';
export function prepareResponsesTools({
tools,
@@ -22,7 +23,8 @@ export function prepareResponsesTools({
| 'required'
| { type: 'file_search' }
| { type: 'web_search_preview' }
- | { type: 'function'; name: string };
+ | { type: 'function'; name: string }
+ | { type: 'code_interpreter' };
toolWarnings: LanguageModelV2CallWarning[];
} {
// when the tools array is empty, change it to undefined to prevent errors:
@@ -63,6 +65,7 @@ export function prepareResponsesTools({
break;
}
case 'openai.web_search_preview':
+ // TODO update this with proper validation
openaiTools.push({
type: 'web_search_preview',
search_context_size: tool.args.searchContextSize as
@@ -76,6 +79,18 @@ export function prepareResponsesTools({
},
});
break;
+ case 'openai.code_interpreter':
+ const args = codeInterpreterArgsSchema.parse(tool.args);
+ openaiTools.push({
+ type: 'code_interpreter',
+ container:
+ args.container == null
+ ? { type: 'auto', file_ids: undefined }
+ : typeof args.container === 'string'
+ ? args.container
+ : { type: 'auto', file_ids: args.container.fileIds },
+ });
+ break;
default:
toolWarnings.push({ type: 'unsupported-tool', tool });
break;
@@ -102,11 +117,11 @@ export function prepareResponsesTools({
return {
tools: openaiTools,
toolChoice:
- toolChoice.toolName === 'file_search'
- ? { type: 'file_search' }
- : toolChoice.toolName === 'web_search_preview'
- ? { type: 'web_search_preview' }
- : { type: 'function', name: toolChoice.toolName },
+ toolChoice.toolName === 'code_interpreter' ||
+ toolChoice.toolName === 'file_search' ||
+ toolChoice.toolName === 'web_search_preview'
+ ? { type: toolChoice.toolName }
+ : { type: 'function', name: toolChoice.toolName },
toolWarnings,
};
default: {
diff --git a/packages/openai/src/tool/code-interpreter.ts b/packages/openai/src/tool/code-interpreter.ts
new file mode 100644
index 000000000000..40f2c08eed50
--- /dev/null
+++ b/packages/openai/src/tool/code-interpreter.ts
@@ -0,0 +1,29 @@
+import { createProviderDefinedToolFactory } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
+
+export const codeInterpreterArgsSchema = z.object({
+ container: z
+ .union([
+ z.string(),
+ z.object({
+ fileIds: z.array(z.string()).optional(),
+ }),
+ ])
+ .optional(),
+});
+
+export const codeInterpreter = createProviderDefinedToolFactory<
+ {},
+ {
+ /**
+ * The code interpreter container.
+ * Can be a container ID
+ * or an object that specifies uploaded file IDs to make available to your code.
+ */
+ container?: string | { fileIds?: string[] };
+ }
+>({
+ id: 'openai.code_interpreter',
+ name: 'code_interpreter',
+ inputSchema: z.object({}),
+});