From f22ad852a9cf6e163bc1856c7a94f88a4284e931 Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Thu, 27 Nov 2025 17:14:30 -0800 Subject: [PATCH 1/6] first pass --- ai-docs/AGENT_LOOP_STRATEGIES.md | 382 ---------- ai-docs/CONNECTION_ADAPTERS_GUIDE.md | 694 ------------------ ai-docs/EVENT_CLIENT.md | 422 ----------- ai-docs/EVENT_CLIENT_INTEGRATION.md | 105 --- ai-docs/IMPLEMENTATION_SUMMARY.md | 435 ----------- ai-docs/MIGRATION_UNIFIED_CHAT.md | 237 ------ ai-docs/TOOL_EXECUTION_LOOP.md | 541 -------------- ai-docs/TOOL_REGISTRY.md | 474 ------------ ai-docs/TOOL_REGISTRY_IMPLEMENTATION.md | 439 ----------- ai-docs/TOOL_REGISTRY_QUICK_START.md | 208 ------ ai-docs/TOOL_STATES_MIGRATION.md | 290 -------- ai-docs/TYPE_NARROWING_SOLUTION.md | 165 ----- ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md | 225 ------ ai-docs/TYPE_SAFETY.md | 305 -------- ai-docs/UNIFIED_CHAT_API.md | 389 ---------- ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md | 257 ------- ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md | 329 --------- ...x-4a965c47-7c20-4a7c-acd5-317a2c7876cd.png | 0 docs/guides/client-tools.md | 4 + docs/guides/server-tools.md | 12 +- docs/guides/tool-approval.md | 13 +- docs/guides/tools.md | 9 +- .../ts-react-chat/src/lib/guitar-tools.ts | 97 +-- .../ts-react-chat/src/routes/api.tanchat.ts | 8 +- .../ts-solid-chat/src/lib/guitar-tools.ts | 97 +-- package.json | 2 +- .../ai-anthropic/live-tests/README.md | 51 ++ .../ai-anthropic/live-tests/package.json | 19 + .../live-tests/tool-test-empty-object.ts | 169 +++++ .../ai-anthropic/live-tests/tool-test.ts | 212 ++++++ packages/typescript/ai-anthropic/package.json | 3 +- .../ai-anthropic/src/anthropic-adapter.ts | 8 +- .../ai-anthropic/src/tools/bash-tool.ts | 10 +- .../src/tools/code-execution-tool.ts | 10 +- .../src/tools/computer-use-tool.ts | 10 +- .../ai-anthropic/src/tools/custom-tool.ts | 34 +- .../ai-anthropic/src/tools/memory-tool.ts | 10 +- .../src/tools/text-editor-tool.ts | 10 +- .../ai-anthropic/src/tools/tool-converter.ts | 2 +- .../ai-anthropic/src/tools/web-fetch-tool.ts | 10 +- .../ai-anthropic/src/tools/web-search-tool.ts | 10 +- .../typescript/ai-client/src/chat-client.ts | 19 +- .../ai-client/src/message-updaters.ts | 38 +- packages/typescript/ai-gemini/package.json | 3 +- .../src/tools/code-execution-tool.ts | 10 +- .../ai-gemini/src/tools/computer-use-tool.ts | 10 +- .../ai-gemini/src/tools/file-search-tool.ts | 10 +- .../ai-gemini/src/tools/google-maps-tool.ts | 10 +- .../src/tools/google-search-retriveal-tool.ts | 10 +- .../ai-gemini/src/tools/google-search-tool.ts | 10 +- .../ai-gemini/src/tools/tool-converter.ts | 33 +- .../ai-gemini/src/tools/url-context-tool.ts | 10 +- packages/typescript/ai-ollama/package.json | 3 +- .../ai-ollama/src/ollama-adapter.ts | 8 +- .../live-tests/tool-test-empty-object.ts | 128 ++++ .../live-tests/tool-test-optional.ts | 40 +- .../ai-openai/live-tests/tool-test.ts | 37 +- packages/typescript/ai-openai/package.json | 3 +- .../ai-openai/src/tools/apply-patch-tool.ts | 10 +- .../src/tools/code-interpreter-tool.ts | 10 +- .../ai-openai/src/tools/computer-use-tool.ts | 10 +- .../ai-openai/src/tools/custom-tool.ts | 10 +- .../ai-openai/src/tools/file-search-tool.ts | 10 +- .../ai-openai/src/tools/function-tool.ts | 45 +- .../src/tools/image-generation-tool.ts | 10 +- .../ai-openai/src/tools/local-shell-tool.ts | 10 +- .../ai-openai/src/tools/mcp-tool.ts | 10 +- .../ai-openai/src/tools/shell-tool.ts | 10 +- .../ai-openai/src/tools/tool-converter.ts | 2 +- .../src/tools/web-search-preview-tool.ts | 10 +- 
.../ai-openai/src/tools/web-search-tool.ts | 10 +- packages/typescript/ai/package.json | 9 +- packages/typescript/ai/src/core/chat.ts | 2 +- packages/typescript/ai/src/index.ts | 1 + .../typescript/ai/src/tools/tool-calls.ts | 84 ++- .../typescript/ai/src/tools/tool-utils.ts | 110 +-- .../typescript/ai/src/tools/zod-converter.ts | 88 +++ packages/typescript/ai/src/types.ts | 113 +-- packages/typescript/ai/tests/ai-abort.test.ts | 10 +- packages/typescript/ai/tests/ai-chat.test.ts | 242 +++--- .../ai/tests/tool-call-manager.test.ts | 41 +- .../smoke-tests/adapters/src/harness.ts | 16 +- pnpm-lock.yaml | 30 +- 83 files changed, 1340 insertions(+), 6652 deletions(-) delete mode 100644 ai-docs/AGENT_LOOP_STRATEGIES.md delete mode 100644 ai-docs/CONNECTION_ADAPTERS_GUIDE.md delete mode 100644 ai-docs/EVENT_CLIENT.md delete mode 100644 ai-docs/EVENT_CLIENT_INTEGRATION.md delete mode 100644 ai-docs/IMPLEMENTATION_SUMMARY.md delete mode 100644 ai-docs/MIGRATION_UNIFIED_CHAT.md delete mode 100644 ai-docs/TOOL_EXECUTION_LOOP.md delete mode 100644 ai-docs/TOOL_REGISTRY.md delete mode 100644 ai-docs/TOOL_REGISTRY_IMPLEMENTATION.md delete mode 100644 ai-docs/TOOL_REGISTRY_QUICK_START.md delete mode 100644 ai-docs/TOOL_STATES_MIGRATION.md delete mode 100644 ai-docs/TYPE_NARROWING_SOLUTION.md delete mode 100644 ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md delete mode 100644 ai-docs/TYPE_SAFETY.md delete mode 100644 ai-docs/UNIFIED_CHAT_API.md delete mode 100644 ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md delete mode 100644 ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md delete mode 100644 assets/CleanShot_2025-11-27_at_08.28.46_2x-4a965c47-7c20-4a7c-acd5-317a2c7876cd.png create mode 100644 packages/typescript/ai-anthropic/live-tests/README.md create mode 100644 packages/typescript/ai-anthropic/live-tests/package.json create mode 100644 packages/typescript/ai-anthropic/live-tests/tool-test-empty-object.ts create mode 100644 packages/typescript/ai-anthropic/live-tests/tool-test.ts create mode 100644 packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts create mode 100644 packages/typescript/ai/src/tools/zod-converter.ts diff --git a/ai-docs/AGENT_LOOP_STRATEGIES.md b/ai-docs/AGENT_LOOP_STRATEGIES.md deleted file mode 100644 index f861faec..00000000 --- a/ai-docs/AGENT_LOOP_STRATEGIES.md +++ /dev/null @@ -1,382 +0,0 @@ -# Agent Loop Strategies - -## Overview - -Agent loop strategies provide flexible control over when the tool execution loop in `chat()` should stop. Instead of a simple `maxIterations` number, you can now use strategy functions that decide whether to continue based on the current state. 
- -## Quick Start - -```typescript -import { chat, maxIterations } from "@tanstack/ai"; -import { openai } from "@tanstack/ai-openai"; - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [weatherTool], - agentLoopStrategy: maxIterations(5), // Control loop with strategy -}); -``` - -## Built-in Strategies - -### `maxIterations(max)` - -Continue for a maximum number of iterations: - -```typescript -import { maxIterations } from "@tanstack/ai"; - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: maxIterations(10), // Max 10 iterations -}); -``` - -### `untilFinishReason(stopReasons)` - -Continue until one of the specified finish reasons is encountered: - -```typescript -import { untilFinishReason } from "@tanstack/ai"; - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: untilFinishReason(["stop", "length"]), -}); -``` - -### `combineStrategies(strategies)` - -Combine multiple strategies with AND logic (all must return true to continue): - -```typescript -import { maxIterations, combineStrategies } from "@tanstack/ai"; - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: combineStrategies([ - maxIterations(10), - ({ messages }) => messages.length < 100, - ]), -}); -``` - -## Custom Strategies - -Create your own strategy function: - -```typescript -import type { AgentLoopStrategy } from "@tanstack/ai"; - -// Simple: based on iteration count -const simple: AgentLoopStrategy = ({ iterationCount }) => { - return iterationCount < 5; -}; - -// Advanced: based on multiple conditions -const advanced: AgentLoopStrategy = ({ - iterationCount, - messages, - finishReason -}) => { - // Stop after 10 iterations - if (iterationCount >= 10) return false; - - // Stop if conversation gets too long - if (messages.length > 50) return false; - - // Stop on specific finish reasons - if (finishReason === "length" || finishReason === "content_filter") { - return false; - } - - // Otherwise continue - return true; -}; - -// Use custom strategy -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: advanced, -}); -``` - -## AgentLoopState Interface - -The state object passed to your strategy function: - -```typescript -export interface AgentLoopState { - /** Current iteration count (0-indexed) */ - iterationCount: number - - /** Current messages in the conversation */ - messages: Message[] - - /** Finish reason from the last model response */ - finishReason: string | null -} -``` - -**Finish reasons:** - -- `"stop"` - Model finished naturally -- `"length"` - Hit token limit -- `"tool_calls"` - Model called tools (triggers tool execution) -- `"content_filter"` - Content filtered -- `null` - No finish reason yet - -## Use Cases - -### Prevent Runaway Loops - -```typescript -// Stop after 3 iterations OR 20 messages -const conservative: AgentLoopStrategy = ({ iterationCount, messages }) => { - return iterationCount < 3 && messages.length < 20 -} -``` - -### Budget Control - -```typescript -// Stop based on estimated token usage -const budgetAware: AgentLoopStrategy = ({ messages }) => { - const estimatedTokens = messages.reduce( - (sum, m) => sum + (m.content?.length || 0) / 4, // Rough estimate - 0, - ) - return estimatedTokens < 10000 // Stop before 10k tokens -} -``` - -### Conditional Execution - -```typescript 
-// Different limits for different scenarios -const conditional: AgentLoopStrategy = ({ iterationCount, messages }) => { - const hasToolCalls = messages.some( - (m) => m.toolCalls && m.toolCalls.length > 0, - ) - - // Allow more iterations if tools are being used - const maxIters = hasToolCalls ? 10 : 3 - - return iterationCount < maxIters -} -``` - -### Debug Mode - -```typescript -// Stop early during development -const debug: AgentLoopStrategy = ({ iterationCount }) => { - console.log(`Iteration ${iterationCount + 1}`) - return iterationCount < 2 // Only 2 iterations in debug mode -} -``` - -## Pattern: Strategy Factory - -Create reusable strategy factories: - -```typescript -function maxTokens(max: number): AgentLoopStrategy { - return ({ messages }) => { - const totalTokens = messages.reduce( - (sum, m) => sum + estimateTokens(m.content), - 0 - ); - return totalTokens < max; - }; -} - -function maxMessages(max: number): AgentLoopStrategy { - return ({ messages }) => messages.length < max; -} - -// Use factory -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: combineStrategies([ - maxIterations(10), - maxTokens(5000), - maxMessages(30), - ]), -}); -``` - -## Simplified Syntax - -For convenience, you can use the `maxIterations` option directly: - -```typescript -// Simplified syntax -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - maxIterations: 5, // Shorthand for agentLoopStrategy: maxIterations(5) -}); - -// Explicit strategy (more flexible) -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: maxIterations(5), -}); -``` - -Both are equivalent. The `maxIterations` number is automatically converted to `agentLoopStrategy: maxIterations(n)`. 
- -## Testing Strategies - -### Unit Test Example - -```typescript -import { describe, it, expect } from 'vitest' -import type { AgentLoopStrategy, AgentLoopState } from '@tanstack/ai' - -describe('Custom Strategy', () => { - it('should stop after 3 iterations', () => { - const strategy: AgentLoopStrategy = ({ iterationCount }) => { - return iterationCount < 3 - } - - expect( - strategy({ iterationCount: 0, messages: [], finishReason: null }), - ).toBe(true) - expect( - strategy({ iterationCount: 2, messages: [], finishReason: null }), - ).toBe(true) - expect( - strategy({ iterationCount: 3, messages: [], finishReason: null }), - ).toBe(false) - }) - - it('should stop when finish reason is length', () => { - const strategy: AgentLoopStrategy = ({ finishReason }) => { - return finishReason !== 'length' - } - - expect( - strategy({ iterationCount: 0, messages: [], finishReason: null }), - ).toBe(true) - expect( - strategy({ iterationCount: 0, messages: [], finishReason: 'stop' }), - ).toBe(true) - expect( - strategy({ iterationCount: 0, messages: [], finishReason: 'length' }), - ).toBe(false) - }) -}) -``` - -## Best Practices - -### ✅ DO - -- Use built-in strategies when possible (`maxIterations`, `combineStrategies`) -- Consider message count to prevent memory issues -- Handle all finish reasons appropriately -- Test your custom strategies -- Document complex strategy logic - -### ❌ DON'T - -- Create strategies that never return false (infinite loops) -- Ignore the `finishReason` - it contains important information -- Make strategies too complex - keep them simple and testable -- Forget to handle edge cases (null finishReason, empty messages) - -## Examples - -### Production-Ready Strategy - -```typescript -import { maxIterations, combineStrategies } from "@tanstack/ai"; - -// Combine safety limits -const productionStrategy = combineStrategies([ - maxIterations(15), // Hard limit on iterations - ({ messages }) => messages.length < 100, // Limit conversation length - ({ finishReason }) => finishReason !== "content_filter", // Stop on filter -]); - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: productionStrategy, -}); -``` - -### Development Strategy - -```typescript -// Aggressive limits during development -const devStrategy: AgentLoopStrategy = ({ iterationCount, messages }) => { - if (iterationCount >= 2) { - console.warn('DEV: Stopping at 2 iterations') - return false - } - if (messages.length >= 10) { - console.warn('DEV: Stopping at 10 messages') - return false - } - return true -} -``` - -## Migration from maxIterations - -Before: - -```typescript -chat({ ..., maxIterations: 10 }) -``` - -After: - -```typescript -import { maxIterations } from "@tanstack/ai"; -chat({ ..., agentLoopStrategy: maxIterations(10) }) -``` - -Or create a custom strategy: - -```typescript -chat({ - ..., - agentLoopStrategy: ({ iterationCount, messages }) => { - return iterationCount < 10 && messages.length < 50; - } -}) -``` - -## See Also - -- [Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md) -- [Unified Chat API](UNIFIED_CHAT_API.md) -- [Quick Reference](UNIFIED_CHAT_QUICK_REFERENCE.md) diff --git a/ai-docs/CONNECTION_ADAPTERS_GUIDE.md b/ai-docs/CONNECTION_ADAPTERS_GUIDE.md deleted file mode 100644 index 4b788be6..00000000 --- a/ai-docs/CONNECTION_ADAPTERS_GUIDE.md +++ /dev/null @@ -1,694 +0,0 @@ -# Connection Adapters - Complete Guide - -## Overview - -Connection adapters provide a flexible, pluggable way to connect 
`ChatClient` and `useChat` to different types of streaming backends. Instead of being hardcoded to fetch and API endpoints, you can now use adapters for any streaming scenario. - -## Why Connection Adapters? - -**Before (Hardcoded):** - -- ❌ Locked to HTTP fetch -- ❌ Locked to specific API format -- ❌ Hard to test -- ❌ Can't use with server functions -- ❌ Can't customize streaming logic - -**After (Adapters):** - -- ✅ Support any streaming source -- ✅ Easy to test with mocks -- ✅ Works with server functions -- ✅ Extensible for custom scenarios -- ✅ **Backward compatible** - -## Built-in Adapters - -### `fetchServerSentEvents(url, options?)` - -**For:** HTTP APIs using Server-Sent Events format - -**When to use:** - -- Your backend uses `toStreamResponse()` from `@tanstack/ai` -- Standard HTTP streaming API -- Most common use case - -**Example:** - -```typescript -import { useChat, fetchServerSentEvents } from "@tanstack/ai-react"; - -function Chat() { - const chat = useChat({ - connection: fetchServerSentEvents("/api/chat", { - headers: { "Authorization": "Bearer token" }, - credentials: "include", - }), - }); - - return ; -} -``` - -**Server format expected:** - -``` -data: {"type":"content","delta":"Hello","content":"Hello",...} -data: {"type":"content","delta":" world","content":"Hello world",...} -data: {"type":"done","finishReason":"stop",...} -data: [DONE] -``` - -### `fetchHttpStream(url, options?)` - -**For:** HTTP APIs using raw newline-delimited JSON - -**When to use:** - -- Your backend streams newline-delimited JSON directly -- Custom streaming format -- Not using SSE - -**Example:** - -```typescript -import { useChat, fetchHttpStream } from "@tanstack/ai-react"; - -function Chat() { - const chat = useChat({ - connection: fetchHttpStream("/api/chat", { - headers: { "X-Custom-Header": "value" }, - }), - }); - - return ; -} -``` - -**Server format expected:** - -``` -{"type":"content","delta":"Hello","content":"Hello",...} -{"type":"content","delta":" world","content":"Hello world",...} -{"type":"done","finishReason":"stop",...} -``` - -### `stream(factory)` - -**For:** Direct async iterables (no HTTP) - -**When to use:** - -- TanStack Start server functions -- Server-side rendering -- Testing with mock streams -- Direct function calls - -**Example with Server Function:** - -```typescript -import { useChat, stream } from "@tanstack/ai-react"; -import { serverChatFunction } from "./server"; - -function Chat() { - const chat = useChat({ - connection: stream((messages, data) => - serverChatFunction({ messages, data }) - ), - }); - - return ; -} -``` - -**Server function:** - -```typescript -// server.ts -import { chat } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' - -export async function* serverChatFunction({ - messages, -}: { - messages: Message[] -}) { - yield* chat({ - adapter: openai(), - model: 'gpt-4o', - messages, - }) -} -``` - -**Example with Mock for Testing:** - -```typescript -import { ChatClient, stream } from '@tanstack/ai-client' - -const mockStream = stream(async function* (messages) { - yield { type: 'content', delta: 'Hello', content: 'Hello' } - yield { type: 'content', delta: ' world', content: 'Hello world' } - yield { type: 'done', finishReason: 'stop' } -}) - -const client = new ChatClient({ connection: mockStream }) -``` - -## Custom Adapters - -You can create custom connection adapters for any streaming scenario: - -### WebSocket Example - -```typescript -import type { ConnectionAdapter } from '@tanstack/ai-client' -import type { 
StreamChunk } from '@tanstack/ai' - -function createWebSocketAdapter(url: string): ConnectionAdapter { - let ws: WebSocket | null = null - - return { - async *connect(messages, data) { - ws = new WebSocket(url) - - // Wait for connection - await new Promise((resolve, reject) => { - ws!.onopen = resolve - ws!.onerror = reject - }) - - // Send messages - ws.send(JSON.stringify({ messages, data })) - - // Yield chunks as they arrive - const queue: StreamChunk[] = [] - let resolveNext: ((chunk: StreamChunk) => void) | null = null - let done = false - - ws.onmessage = (event) => { - const chunk = JSON.parse(event.data) - if (resolveNext) { - resolveNext(chunk) - resolveNext = null - } else { - queue.push(chunk) - } - - if (chunk.type === 'done') { - done = true - ws!.close() - } - } - - while (!done || queue.length > 0) { - if (queue.length > 0) { - yield queue.shift()! - } else { - yield await new Promise((resolve) => { - resolveNext = resolve - }) - } - } - }, - - abort() { - if (ws) { - ws.close() - ws = null - } - }, - } -} - -// Use it -const chat = useChat({ - connection: createWebSocketAdapter('wss://api.example.com/chat'), -}) -``` - -### GraphQL Subscription Example - -```typescript -function createGraphQLSubscriptionAdapter( - client: GraphQLClient, - subscription: string, -): ConnectionAdapter { - let unsubscribe: (() => void) | null = null - - return { - async *connect(messages, data) { - const observable = client.subscribe({ - query: subscription, - variables: { messages, data }, - }) - - const queue: StreamChunk[] = [] - let resolveNext: ((chunk: StreamChunk) => void) | null = null - let done = false - - unsubscribe = observable.subscribe({ - next: (result) => { - const chunk = result.data.chatStream - if (resolveNext) { - resolveNext(chunk) - resolveNext = null - } else { - queue.push(chunk) - } - - if (chunk.type === 'done') { - done = true - } - }, - error: (error) => { - throw error - }, - }).unsubscribe - - while (!done || queue.length > 0) { - if (queue.length > 0) { - yield queue.shift()! - } else { - yield await new Promise((resolve) => { - resolveNext = resolve - }) - } - } - }, - - abort() { - if (unsubscribe) { - unsubscribe() - unsubscribe = null - } - }, - } -} -``` - -## Use Cases - -### 1. Standard HTTP API - -```typescript -const chat = useChat({ - connection: fetchServerSentEvents('/api/chat'), -}) -``` - -### 2. Authenticated API - -```typescript -const chat = useChat({ - connection: fetchServerSentEvents('/api/chat', { - headers: { - Authorization: `Bearer ${token}`, - 'X-User-ID': userId, - }, - credentials: 'include', - }), -}) -``` - -### 3. TanStack Start Server Function - -```typescript -// No HTTP overhead, direct function call -const chat = useChat({ - connection: stream((messages) => serverChat({ messages })), -}) -``` - -### 4. WebSocket Real-time - -```typescript -const chat = useChat({ - connection: createWebSocketAdapter('wss://api.example.com/chat'), -}) -``` - -### 5. Testing with Mocks - -```typescript -const mockAdapter = stream(async function* (messages) { - yield { type: 'content', delta: 'Test', content: 'Test' } - yield { type: 'done', finishReason: 'stop' } -}) - -const client = new ChatClient({ connection: mockAdapter }) -// Easy to test without real API! -``` - -## Benefits - -### 1. Flexibility - -Support any streaming source: - -- ✅ HTTP (SSE or raw) -- ✅ WebSockets -- ✅ GraphQL subscriptions -- ✅ Server functions -- ✅ gRPC streams -- ✅ Custom protocols - -### 2. 
Testability - -Easy to test with mock adapters: - -```typescript -const mockConnection = stream(async function* () { - yield { type: 'content', delta: 'Hello', content: 'Hello' } - yield { type: 'done', finishReason: 'stop' } -}) - -const client = new ChatClient({ connection: mockConnection }) -``` - -### 3. Type Safety - -Full TypeScript support with proper types: - -```typescript -interface ConnectionAdapter { - connect( - messages: any[], - data?: Record, - ): AsyncIterable - abort?(): void -} -``` - -### 4. Performance - -Direct streams bypass HTTP overhead: - -```typescript -// No HTTP serialization/deserialization -const chat = useChat({ - connection: stream((messages) => directServerFunction(messages)), -}) -``` - -## Advanced Examples - -### Retry Logic - -```typescript -function createRetryAdapter( - baseAdapter: ConnectionAdapter, - maxRetries: number = 3, -): ConnectionAdapter { - return { - async *connect(messages, data) { - let lastError: Error | null = null - - for (let attempt = 0; attempt < maxRetries; attempt++) { - try { - yield* baseAdapter.connect(messages, data) - return // Success - } catch (error) { - lastError = error as Error - if (attempt < maxRetries - 1) { - await new Promise((resolve) => - setTimeout(resolve, 1000 * (attempt + 1)), - ) - } - } - } - - throw lastError - }, - - abort() { - baseAdapter.abort?.() - }, - } -} - -// Use it -const chat = useChat({ - connection: createRetryAdapter(fetchServerSentEvents('/api/chat'), 3), -}) -``` - -### Caching Adapter - -```typescript -function createCachingAdapter( - baseAdapter: ConnectionAdapter, -): ConnectionAdapter { - const cache = new Map() - - return { - async *connect(messages, data) { - const cacheKey = JSON.stringify(messages) - - if (cache.has(cacheKey)) { - // Replay from cache - for (const chunk of cache.get(cacheKey)!) { - yield chunk - } - return - } - - // Cache chunks as they arrive - const chunks: StreamChunk[] = [] - for await (const chunk of baseAdapter.connect(messages, data)) { - chunks.push(chunk) - yield chunk - } - - cache.set(cacheKey, chunks) - }, - - abort() { - baseAdapter.abort?.() - }, - } -} -``` - -### Logging Adapter - -```typescript -function createLoggingAdapter( - baseAdapter: ConnectionAdapter, - logger: (message: string, data: any) => void, -): ConnectionAdapter { - return { - async *connect(messages, data) { - logger('Connection started', { messages, data }) - - try { - for await (const chunk of baseAdapter.connect(messages, data)) { - logger('Chunk received', chunk) - yield chunk - } - logger('Connection complete', {}) - } catch (error) { - logger('Connection error', error) - throw error - } - }, - - abort() { - logger('Connection aborted', {}) - baseAdapter.abort?.() - }, - } -} - -// Use it -const chat = useChat({ - connection: createLoggingAdapter( - fetchServerSentEvents('/api/chat'), - console.log, - ), -}) -``` - -## Best Practices - -### 1. Use Built-in Adapters When Possible - -```typescript -// ✅ Good - use built-in adapter -const chat = useChat({ - connection: fetchServerSentEvents('/api/chat'), -}) - -// ❌ Avoid - custom adapter for standard SSE -const chat = useChat({ - connection: { - connect: async function* () { - /* reimplementing SSE */ - }, - }, -}) -``` - -### 2. Compose Adapters - -```typescript -const chat = useChat({ - connection: createLoggingAdapter( - createRetryAdapter(fetchServerSentEvents('/api/chat'), 3), - console.log, - ), -}) -``` - -### 3. 
Handle Errors Gracefully - -```typescript -const connection: ConnectionAdapter = { - async *connect(messages, data) { - try { - yield* fetchServerSentEvents('/api/chat').connect(messages, data) - } catch (error) { - // Emit error chunk instead of throwing - yield { - type: 'error', - error: { message: error.message, code: 'CONNECTION_ERROR' }, - } - } - }, -} -``` - -### 4. Implement Abort Support - -```typescript -function createCustomAdapter(url: string): ConnectionAdapter { - return { - async *connect(messages, data, abortSignal) { - // Use the provided abortSignal from ChatClient - const response = await fetch(url, { - method: 'POST', - body: JSON.stringify({ messages, data }), - signal: abortSignal, // Pass abort signal to fetch - }) - - const reader = response.body?.getReader() - if (!reader) { - throw new Error('Response body is not readable') - } - - try { - const decoder = new TextDecoder() - - while (true) { - // Check if aborted before reading - if (abortSignal?.aborted) { - break - } - - const { done, value } = await reader.read() - if (done) break - - // Process chunks... - const chunk = decoder.decode(value, { stream: true }) - // Yield parsed chunks... - } - } finally { - reader.releaseLock() - } - }, - } -} -``` - -## Testing - -### Unit Testing ChatClient - -```typescript -import { ChatClient, stream } from '@tanstack/ai-client' -import { describe, it, expect } from 'vitest' - -describe('ChatClient with mock adapter', () => { - it('should process messages', async () => { - const mockAdapter = stream(async function* (messages) { - expect(messages).toHaveLength(1) - expect(messages[0].content).toBe('Hello') - - yield { type: 'content', delta: 'Hi', content: 'Hi' } - yield { type: 'done', finishReason: 'stop' } - }) - - const client = new ChatClient({ connection: mockAdapter }) - - await client.sendMessage('Hello') - - expect(client.getMessages()).toHaveLength(2) - expect(client.getMessages()[1].content).toBe('Hi') - }) -}) -``` - -### Integration Testing with React - -```typescript -import { renderHook, waitFor } from '@testing-library/react' -import { useChat, stream } from '@tanstack/ai-react' - -test('useChat with mock adapter', async () => { - const mockAdapter = stream(async function* () { - yield { type: 'content', delta: 'Test', content: 'Test' } - yield { type: 'done', finishReason: 'stop' } - }) - - const { result } = renderHook(() => useChat({ connection: mockAdapter })) - - await result.current.sendMessage('Hello') - - await waitFor(() => { - expect(result.current.messages).toHaveLength(2) - expect(result.current.messages[1].content).toBe('Test') - }) -}) -``` - -## Reference - -### ConnectionAdapter Interface - -```typescript -interface ConnectionAdapter { - /** - * Connect and return a stream of chunks - * @param messages - The conversation messages - * @param data - Additional data to send - * @returns AsyncIterable of StreamChunks - */ - connect( - messages: any[], - data?: Record, - ): AsyncIterable - - /** - * Optional: Abort the current connection - */ - abort?(): void -} -``` - -### FetchConnectionOptions - -```typescript -interface FetchConnectionOptions { - headers?: Record | Headers - credentials?: RequestCredentials // "omit" | "same-origin" | "include" - signal?: AbortSignal -} -``` - -## See Also - -- 📖 [ChatClient API](../packages/ai-client/README.md) -- 📖 [useChat Hook](../packages/ai-react/README.md) -- 📖 [Tool Execution Loop](TOOL_EXECUTION_LOOP.md) -- 📖 [Connection Adapters Examples](../packages/ai-client/CONNECTION_ADAPTERS.md) diff --git 
a/ai-docs/EVENT_CLIENT.md b/ai-docs/EVENT_CLIENT.md deleted file mode 100644 index be9065a0..00000000 --- a/ai-docs/EVENT_CLIENT.md +++ /dev/null @@ -1,422 +0,0 @@ -# AI Event Client - Observability & Debugging - -The `@tanstack/ai/event-client` provides a type-safe EventEmitter for monitoring and debugging AI operations in your application. - -## Installation - -The event client is included with `@tanstack/ai`: - -```bash -npm install @tanstack/ai -``` - -## Usage - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' - -// Subscribe to events -aiEventClient.on('stream:content', (data) => { - console.log('Content delta:', data.delta) -}) - -aiEventClient.on('usage:tokens', (data) => { - console.log('Tokens used:', data.usage.totalTokens) -}) -``` - -## Available Events - -### Chat Lifecycle Events - -#### `chat:started` - -Emitted when a chat completion or stream starts. - -```typescript -{ - timestamp: number - model: string - messageCount: number - hasTools: boolean - streaming: boolean -} -``` - -#### `chat:completed` - -Emitted when a non-streaming chat completion finishes. - -```typescript -{ - timestamp: number - model: string - result: ChatCompletionResult - duration: number -} -``` - -#### `chat:iteration` - -Emitted when the AI makes another iteration (e.g., for tool calling). - -```typescript -{ - timestamp: number - iteration: number - reason: string -} -``` - -### Stream Events - -#### `stream:started` - -Emitted when a streaming response begins. - -```typescript -{ - timestamp: number - messageId: string -} -``` - -#### `stream:ended` - -Emitted when a streaming response completes. - -```typescript -{ - timestamp: number - messageId: string - duration: number -} -``` - -#### `stream:chunk` - -Emitted for every stream chunk (includes all chunk types). - -```typescript -{ - timestamp: number - messageId: string - chunk: StreamChunk -} -``` - -#### `stream:content` - -Emitted for content delta chunks. - -```typescript -{ - timestamp: number - messageId: string - delta: string -} -``` - -#### `stream:tool-call` - -Emitted when a tool call is received. - -```typescript -{ - timestamp: number - messageId: string - toolCallId: string - toolName: string - arguments: string -} -``` - -#### `stream:tool-result` - -Emitted when a tool result is received. - -```typescript -{ - timestamp: number - messageId: string - toolCallId: string - content: string -} -``` - -#### `stream:done` - -Emitted when the stream completes with finish reason and usage. - -```typescript -{ - timestamp: number; - messageId: string; - finishReason: string | null; - usage?: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; -} -``` - -#### `stream:error` - -Emitted when a stream encounters an error. - -```typescript -{ - timestamp: number; - messageId: string; - error: { - message: string; - code?: string; - }; -} -``` - -### Tool Events - -#### `tool:approval-requested` - -Emitted when a tool requires user approval before execution. - -```typescript -{ - timestamp: number - messageId: string - toolCallId: string - toolName: string - input: any - approvalId: string -} -``` - -#### `tool:input-available` - -Emitted when a client-side tool has its input ready. - -```typescript -{ - timestamp: number - messageId: string - toolCallId: string - toolName: string - input: any -} -``` - -#### `tool:completed` - -Emitted when a tool execution completes. 
- -```typescript -{ - timestamp: number - toolCallId: string - toolName: string - result: any - duration: number -} -``` - -### Token Usage Events - -#### `usage:tokens` - -Emitted when token usage information is available (both streaming and non-streaming). - -```typescript -{ - timestamp: number; - messageId?: string; - model: string; - usage: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; -} -``` - -## Example Use Cases - -### 1. Token Usage Tracking & Cost Monitoring - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' - -let totalTokens = 0 -let totalCost = 0 - -const costPerToken = { - 'gpt-4o': 0.00003, // $30 per 1M tokens - 'gpt-4o-mini': 0.00000015, // $0.15 per 1M tokens -} - -aiEventClient.on('usage:tokens', (data) => { - totalTokens += data.usage.totalTokens - const cost = data.usage.totalTokens * (costPerToken[data.model] || 0) - totalCost += cost - - console.log({ - model: data.model, - tokens: data.usage.totalTokens, - cost: `$${cost.toFixed(6)}`, - totalCost: `$${totalCost.toFixed(6)}`, - }) -}) -``` - -### 2. Real-time Content Streaming Display - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' - -process.stdout.write('AI: ') -aiEventClient.on('stream:content', (data) => { - process.stdout.write(data.delta) -}) - -aiEventClient.on('stream:done', () => { - process.stdout.write('\n') -}) -``` - -### 3. Logging & Debugging - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' -import winston from 'winston' - -const logger = winston.createLogger({ - level: 'info', - format: winston.format.json(), - transports: [new winston.transports.File({ filename: 'ai-events.log' })], -}) - -// Log all events -aiEventClient.on('chat:started', (data) => { - logger.info('Chat started', data) -}) - -aiEventClient.on('stream:error', (data) => { - logger.error('Stream error', data) -}) - -aiEventClient.on('usage:tokens', (data) => { - logger.info('Token usage', data) -}) -``` - -### 4. Performance Monitoring - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' - -const chatMetrics = new Map() - -aiEventClient.on('chat:started', (data) => { - chatMetrics.set(data.timestamp, { - startTime: data.timestamp, - model: data.model, - }) -}) - -aiEventClient.on('chat:completed', (data) => { - const metrics = Array.from(chatMetrics.values()).find( - (m) => m.model === data.model, - ) - - if (metrics) { - console.log('Performance:', { - model: data.model, - duration: data.duration, - tokensPerSecond: data.result.usage.totalTokens / (data.duration / 1000), - }) - } -}) -``` - -### 5. Tool Execution Monitoring - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' - -aiEventClient.on('tool:input-available', (data) => { - console.log(`[${data.toolName}] Called with:`, data.input) -}) - -aiEventClient.on('tool:completed', (data) => { - console.log(`[${data.toolName}] Completed in ${data.duration}ms`) - console.log('Result:', data.result) -}) - -aiEventClient.on('tool:approval-requested', (data) => { - console.log(`[${data.toolName}] Needs approval:`, data.input) -}) -``` - -## API Reference - -### `aiEventClient.on(event, listener)` - -Subscribe to an event. - -```typescript -aiEventClient.on('stream:content', (data) => { - // Handle event -}) -``` - -### `aiEventClient.once(event, listener)` - -Subscribe to an event once (automatically unsubscribes after first emission). 
- -```typescript -aiEventClient.once('chat:completed', (data) => { - console.log('First chat completed:', data) -}) -``` - -### `aiEventClient.off(event, listener)` - -Unsubscribe from an event. - -```typescript -const handler = (data) => console.log(data) -aiEventClient.on('stream:content', handler) -// Later... -aiEventClient.off('stream:content', handler) -``` - -### `aiEventClient.removeAllListeners(event?)` - -Remove all listeners for a specific event or all events. - -```typescript -// Remove all listeners for a specific event -aiEventClient.removeAllListeners('stream:content') - -// Remove all listeners for all events -aiEventClient.removeAllListeners() -``` - -## Type Safety - -The event client is fully type-safe. TypeScript will autocomplete event names and infer the correct data types for each event: - -```typescript -aiEventClient.on('usage:tokens', (data) => { - // TypeScript knows data has: timestamp, model, usage - const totalTokens = data.usage.totalTokens // ✓ Type-safe -}) -``` - -## Notes - -- The event client uses Node.js `EventEmitter` under the hood -- Maximum listeners is set to 100 by default to prevent warnings in observability scenarios -- Events are emitted for both streaming and non-streaming operations -- All events include a `timestamp` field for tracking and analysis diff --git a/ai-docs/EVENT_CLIENT_INTEGRATION.md b/ai-docs/EVENT_CLIENT_INTEGRATION.md deleted file mode 100644 index 48fac56d..00000000 --- a/ai-docs/EVENT_CLIENT_INTEGRATION.md +++ /dev/null @@ -1,105 +0,0 @@ -# AI Event Client Integration Example - -This example demonstrates how the event client automatically captures events from AI operations. - -## Usage - -```typescript -import { aiEventClient } from '@tanstack/ai/event-client' -import { chat } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' - -// Set up event listeners BEFORE making AI calls -aiEventClient.on('usage:tokens', (data) => { - console.log(`Tokens used: ${data.usage.totalTokens}`) -}) - -aiEventClient.on('stream:content', (data) => { - process.stdout.write(data.delta) -}) - -// Now make AI calls - events will be automatically emitted -const adapter = openai({ apiKey: process.env.OPENAI_API_KEY! }) - -const stream = chat({ - adapter, - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Hello!' }], -}) - -for await (const chunk of stream) { - // Events are automatically emitted during streaming - // No need to manually emit anything -} -``` - -## How It Works - -1. The `aiEventClient` is a singleton EventEmitter that's automatically used by the AI core -2. When you call `chat()` or `chatCompletion()`, the AI core emits events to the client -3. Your event listeners receive these events in real-time -4. No configuration needed - just import and listen! 
- -## Event Flow - -``` -chat() called - ↓ -chat:started event - ↓ -stream:started event - ↓ -stream:content events (multiple) -stream:tool-call events (if tools used) -stream:done event - ↓ -usage:tokens event - ↓ -stream:ended event -``` - -## Common Patterns - -### Pattern 1: Real-time Content Display - -```typescript -aiEventClient.on('stream:content', (data) => { - document.getElementById('output').textContent += data.delta -}) -``` - -### Pattern 2: Token Usage Tracking - -```typescript -let totalCost = 0 -aiEventClient.on('usage:tokens', (data) => { - const cost = data.usage.totalTokens * 0.00003 // Example cost - totalCost += cost - console.log(`Cost: $${cost.toFixed(6)}, Total: $${totalCost.toFixed(6)}`) -}) -``` - -### Pattern 3: Error Handling - -```typescript -aiEventClient.on('stream:error', (data) => { - console.error('AI Error:', data.error.message) - // Show error to user -}) -``` - -### Pattern 4: Tool Monitoring - -```typescript -aiEventClient.on('tool:completed', (data) => { - console.log(`Tool ${data.toolName} completed in ${data.duration}ms`) -}) -``` - -## Benefits - -- ✅ **Zero Configuration**: Works automatically with all AI operations -- ✅ **Type-Safe**: Full TypeScript support with event type inference -- ✅ **Decoupled**: Observability doesn't affect your core AI logic -- ✅ **Flexible**: Subscribe to only the events you need -- ✅ **Performance**: Minimal overhead, designed for production use diff --git a/ai-docs/IMPLEMENTATION_SUMMARY.md b/ai-docs/IMPLEMENTATION_SUMMARY.md deleted file mode 100644 index 92abc0ef..00000000 --- a/ai-docs/IMPLEMENTATION_SUMMARY.md +++ /dev/null @@ -1,435 +0,0 @@ -# Implementation Summary: Type-Safe Multi-Adapter with Fallback - -## Overview - -This implementation adds two major features to the AI SDK: - -1. **Type-Safe Model Validation** - Models are validated against the selected adapter at compile-time -2. 
**Automatic Adapter Fallback** - Automatically tries multiple adapters in order when one fails - -## Key Features - -### ✅ Type Safety - -- Model names are validated based on the selected adapter -- Adapter names are type-checked -- Full IDE autocomplete support -- Compile-time error detection - -### ✅ Fallback System - -- Global fallback order configuration in constructor -- Per-request fallback order override -- Automatic retry on errors, rate limits, or service outages -- Detailed error reporting from all failed adapters -- Works with all methods (chat, stream, generate, summarize, embed) - -## Tool Execution Architecture - -The `chat()` method includes an automatic tool execution loop implemented via the `ToolCallManager` class: - -```typescript -// In AI.chat() method -const toolCallManager = new ToolCallManager(tools || []) - -while (iterationCount < maxIterations) { - // Stream chunks and accumulate tool calls - for await (const chunk of adapter.chatStream()) { - if (chunk.type === 'tool_call') { - toolCallManager.addToolCallChunk(chunk) - } - } - - // Execute tools if model requested them - if (shouldExecuteTools && toolCallManager.hasToolCalls()) { - const toolResults = yield * toolCallManager.executeTools(doneChunk) - messages = [...messages, ...toolResults] - continue // Next iteration - } - - break // No tools to execute, done -} -``` - -**ToolCallManager handles:** - -- ✅ Accumulating streaming tool call chunks -- ✅ Validating tool calls (ID and name present) -- ✅ Executing tool `execute` functions -- ✅ Yielding `tool_result` chunks -- ✅ Creating tool result messages - -## Architecture - -### Type System - -```typescript -// Adapter map with typed models -type AdapterMap = Record> - -// Extract model types from adapter -type ExtractModels = T extends AIAdapter ? M[number] : string - -// Single adapter mode: strict model validation -type ChatOptionsWithAdapter = { - adapter: K - model: ExtractModels // Models for this adapter only -} - -// Fallback mode: union of all models -type ChatOptionsWithFallback = { - adapters: ReadonlyArray - model: UnionOfModels // Models from any adapter -} -``` - -### Core Components - -1. **BaseAdapter** - Abstract class with generic model list -2. **AIAdapter Interface** - Includes `models` property with generic type -3. **AI Class** - Main class with fallback logic and tool execution loop -4. **ToolCallManager** - Handles tool call accumulation, validation, and execution -5. **Adapter Implementations** - OpenAI, Anthropic, Gemini, Ollama with model lists - -### Fallback Logic - -```typescript -private async tryWithFallback( - adapters: ReadonlyArray, - operation: (adapter: keyof T & string) => Promise, - operationName: string -): Promise { - const errors: Array<{ adapter: string; error: Error }> = []; - - for (const adapterName of adapters) { - try { - return await operation(adapterName); // Try operation - } catch (error: any) { - errors.push({ adapter: adapterName, error }); // Record error - console.warn(`[AI] Adapter "${adapterName}" failed for ${operationName}`); - } - } - - // All failed - throw comprehensive error - throw new Error(`All adapters failed for ${operationName}:\n${errorDetails}`); -} -``` - -## API Design - -### Constructor - -```typescript -const ai = new AI({ - adapters: { - primary: new OpenAIAdapter({ apiKey: '...' }), - secondary: new AnthropicAdapter({ apiKey: '...' 
}), - }, - fallbackOrder: ['primary', 'secondary'], // Optional global order -}) -``` - -### Single Adapter Mode (Strict Type Safety) - -```typescript -await ai.chat({ - adapter: "primary", // Type-safe: must exist in adapters - model: "gpt-4", // Type-safe: must be valid for primary - messages: [...], -}); -``` - -### Fallback Mode (Automatic Retry) - -```typescript -await ai.chat({ - adapters: ["primary", "secondary"], // Type-safe: all must exist - model: "gpt-4", // Must work with at least one adapter - messages: [...], -}); -``` - -## Files Modified - -### Core Package (`packages/ai/src/`) - -- **`ai.ts`** - Main AI class with fallback logic -- **`base-adapter.ts`** - Added generic models property -- **`types.ts`** - Added models to AIAdapter interface - -### Adapter Packages - -- **`packages/ai-openai/src/openai-adapter.ts`** - Added OpenAI model list -- **`packages/ai-anthropic/src/anthropic-adapter.ts`** - Added Anthropic model list -- **`packages/ai-gemini/src/gemini-adapter.ts`** - Added Gemini model list -- **`packages/ai-ollama/src/ollama-adapter.ts`** - Added Ollama model list - -### Documentation - -- **`docs/TYPE_SAFETY.md`** - Complete type safety guide -- **`docs/ADAPTER_FALLBACK.md`** - Complete fallback guide -- **`docs/QUICK_START.md`** - Quick reference for both features - -### Examples - -- **`examples/type-safety-demo.ts`** - Type safety examples -- **`examples/visual-error-examples.ts`** - Shows exact TypeScript errors -- **`examples/model-safety-demo.ts`** - Comprehensive type safety examples -- **`examples/adapter-fallback-demo.ts`** - Comprehensive fallback examples -- **`examples/all-adapters-type-safety.ts`** - All adapters together - -## Usage Examples - -### Example 1: Type Safety Only - -```typescript -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ apiKey: '...' }), - }, -}) - -// ✅ Valid -await ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] }) - -// ❌ TypeScript Error -await ai.chat({ adapter: 'openai', model: 'claude-3', messages: [] }) -``` - -### Example 2: Fallback Only - -```typescript -const ai = new AI({ - adapters: { - primary: new OpenAIAdapter({ apiKey: '...' }), - backup: new AnthropicAdapter({ apiKey: '...' }), - }, - fallbackOrder: ['primary', 'backup'], -}) - -// Automatically tries backup if primary fails -await ai.chat({ adapters: [], model: 'gpt-4', messages: [] }) -``` - -### Example 3: Combined Usage - -```typescript -const ai = new AI({ - adapters: { - fast: new OpenAIAdapter({ apiKey: '...' }), - reliable: new AnthropicAdapter({ apiKey: '...' }), - }, - fallbackOrder: ['fast', 'reliable'], -}) - -// Single adapter: strict type safety -await ai.chat({ - adapter: 'fast', - model: 'gpt-4', // ✅ Validated against fast adapter - messages: [], -}) - -// Fallback mode: automatic retry -await ai.chat({ - adapters: ['fast', 'reliable'], - model: 'gpt-4', // ⚠️ Less strict, but has fallback - messages: [], -}) -``` - -## Benefits - -### For Developers - -1. **Catch Errors Early** - Model mismatches caught at compile-time, not runtime -2. **Better IDE Experience** - Autocomplete shows only valid models per adapter -3. **Refactoring Safety** - Changing adapters immediately shows model incompatibilities -4. **Self-Documenting** - Types show exactly what's available - -### For Applications - -1. **Higher Reliability** - Automatic failover on service outages -2. **Rate Limit Protection** - Seamlessly switch to backup on rate limits -3. **Cost Optimization** - Try cheaper options first, fall back to expensive ones -4. 
**Better Observability** - Detailed error logs from all failed attempts - -## Trade-offs - -### Type Safety vs Flexibility - -- **Single adapter mode**: Maximum type safety, no fallback -- **Fallback mode**: Less strict types, automatic retry - -**Recommendation**: Use single adapter mode when possible, fallback mode when reliability is critical. - -### Model Compatibility - -In fallback mode, TypeScript allows any model from any adapter. This is necessary for flexibility but means you must ensure the model works with at least one adapter in your list. - -**Solution**: Define model mappings per adapter for strict control. - -## Migration Path - -### Existing Code (Single Adapter) - -```typescript -// Before -const ai = new AI(new OpenAIAdapter({ apiKey: '...' })) -await ai.chat('gpt-4', messages) - -// After (backwards compatible) -const ai = new AI({ - adapters: { openai: new OpenAIAdapter({ apiKey: '...' }) }, -}) -await ai.chat({ adapter: 'openai', model: 'gpt-4', messages }) -``` - -### Adding Fallback - -```typescript -// Step 1: Add more adapters -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ apiKey: '...' }), - anthropic: new AnthropicAdapter({ apiKey: '...' }), // New! - }, -}) - -// Step 2: Use fallback mode -await ai.chat({ - adapters: ['openai', 'anthropic'], // Fallback enabled - model: 'gpt-4', - messages: [], -}) - -// Step 3: Configure global fallback (optional) -const ai = new AI({ - adapters: { - /* ... */ - }, - fallbackOrder: ['openai', 'anthropic'], // Global default -}) - -await ai.chat({ adapters: [], model: 'gpt-4', messages: [] }) -``` - -## Testing Recommendations - -### Test Type Safety - -```typescript -// These should NOT compile -ai.chat({ adapter: 'openai', model: 'claude-3', messages: [] }) // ❌ -ai.chat({ adapter: 'invalid', model: 'gpt-4', messages: [] }) // ❌ -ai.chat({ adapter: 'openai', model: 'gpt-5', messages: [] }) // ❌ -``` - -### Test Fallback Behavior - -```typescript -// Mock adapters to simulate failures -const mockAdapter1 = { - chatCompletion: jest.fn().mockRejectedValue(new Error('Rate limit')), -} -const mockAdapter2 = { - chatCompletion: jest.fn().mockResolvedValue({ content: 'Success' }), -} - -const ai = new AI({ - adapters: { first: mockAdapter1, second: mockAdapter2 }, - fallbackOrder: ['first', 'second'], -}) - -// Should try first, fail, then succeed with second -await ai.chat({ adapters: [], model: 'gpt-4', messages: [] }) - -expect(mockAdapter1.chatCompletion).toHaveBeenCalled() -expect(mockAdapter2.chatCompletion).toHaveBeenCalled() -``` - -### Test ToolCallManager - -The `ToolCallManager` class has comprehensive unit tests: - -```bash -cd packages/ai -pnpm test -``` - -Test coverage includes: - -- ✅ Accumulating streaming tool call chunks (name, arguments) -- ✅ Filtering incomplete tool calls (missing ID or name) -- ✅ Executing tools with parsed arguments -- ✅ Handling tool execution errors gracefully -- ✅ Handling tools without execute functions -- ✅ Multiple tool calls in one iteration -- ✅ Clearing state between iterations -- ✅ Emitting tool_result chunks -- ✅ Creating tool result messages - -See `packages/ai/src/tool-call-manager.test.ts` for implementation. 
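For reference, here is a minimal sketch of what one such unit test could look like, based only on the `ToolCallManager` usage shown earlier in this summary (`addToolCallChunk`, `hasToolCalls`, `executeTools`); the import path, chunk field names, and tool shape are assumptions for illustration, not the contents of the actual test file:

```typescript
import { describe, expect, it, vi } from 'vitest'
// Assumed import path - ToolCallManager may be internal to @tanstack/ai
import { ToolCallManager } from '@tanstack/ai'

describe('ToolCallManager (sketch)', () => {
  it('executes a completed tool call and yields a tool_result chunk', async () => {
    const execute = vi.fn(async () => JSON.stringify({ ok: true }))
    const manager = new ToolCallManager([
      // Tool shape assumed from the tool() examples elsewhere in these docs
      {
        type: 'function',
        function: {
          name: 'ping',
          description: 'Ping the system',
          parameters: { type: 'object', properties: {} },
        },
        execute,
      },
    ])

    // Feed an accumulated tool_call chunk as it would arrive from the stream
    manager.addToolCallChunk({
      type: 'tool_call',
      toolCall: { id: 'call_1', type: 'function', function: { name: 'ping', arguments: '{}' } },
    })

    expect(manager.hasToolCalls()).toBe(true)

    // executeTools is used with yield* in AI.chat(), so iterate it and collect what it yields
    const chunks: Array<any> = []
    for await (const chunk of manager.executeTools({ type: 'done', finishReason: 'tool_calls' })) {
      chunks.push(chunk)
    }

    expect(execute).toHaveBeenCalled()
    expect(chunks.some((c) => c.type === 'tool_result')).toBe(true)
  })
})
```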
- -## Performance Considerations - -### Single Adapter Mode - -- **No overhead** - Direct call to adapter -- **Fast failure** - Error thrown immediately - -### Fallback Mode - -- **Sequential retry** - Tries each adapter in order -- **Additional latency** - On failure, waits for timeout before trying next -- **More robust** - Higher chance of success - -**Recommendation**: Use single adapter mode for performance-critical paths, fallback mode for user-facing features where reliability matters. - -## Future Enhancements - -### Possible Additions - -1. **Parallel fallback** - Try multiple adapters simultaneously -2. **Smart routing** - Choose adapter based on request characteristics -3. **Caching** - Remember which adapter succeeded for similar requests -4. **Circuit breaker** - Skip known-failing adapters temporarily -5. **Metrics** - Track success rate, latency per adapter -6. **Weighted fallback** - Probabilistic adapter selection - -### Extensibility - -The system is designed to be extended: - -```typescript -// Custom adapter with type-safe models -const MY_MODELS = ['model-1', 'model-2'] as const - -class MyAdapter extends BaseAdapter { - name = 'my-adapter' - models = MY_MODELS - // ... implement methods -} - -// Use with full type safety -const ai = new AI({ - adapters: { mine: new MyAdapter() }, -}) - -await ai.chat({ - adapter: 'mine', - model: 'model-1', // ✅ Type-safe - messages: [], -}) -``` - -## Conclusion - -This implementation provides: - -- ✅ **Compile-time safety** for model selection -- ✅ **Runtime reliability** with automatic fallback -- ✅ **Developer experience** improvements (autocomplete, error messages) -- ✅ **Production readiness** (error handling, logging) -- ✅ **Extensibility** for future enhancements - -The combination of type safety and fallback makes the SDK both safer and more reliable, suitable for production use cases where uptime and correctness are critical. diff --git a/ai-docs/MIGRATION_UNIFIED_CHAT.md b/ai-docs/MIGRATION_UNIFIED_CHAT.md deleted file mode 100644 index 23e87d96..00000000 --- a/ai-docs/MIGRATION_UNIFIED_CHAT.md +++ /dev/null @@ -1,237 +0,0 @@ -# Migration Guide: From `as` Option to Separate Methods - -## Overview - -The `as` option has been removed from the `chat()` method. 
Instead, use: - -- **`chat()`** - For streaming (returns `AsyncIterable`) -- **`chatCompletion()`** - For promise-based completion (returns `Promise`) - -## Migration Examples - -### Before (Using `as` option) - -```typescript -import { createAPIFileRoute } from '@tanstack/start/api' -import { ai } from '~/lib/ai-client' - -export const Route = createAPIFileRoute('/api/tanchat')({ - POST: async ({ request }) => { - const { messages, tools } = await request.json() - - // Old way: Using as: "response" - return ai.chat({ - model: 'gpt-4o', - adapter: 'openAi', - fallbacks: [ - { - adapter: 'ollama', - model: 'gpt-oss:20b', - }, - ], - messages: allMessages, - temperature: 0.7, - toolChoice: 'auto', - maxIterations: 5, - as: 'response', // ← Old way - }) - }, -}) -``` - -### After (Using separate methods) - -```typescript -import { createAPIFileRoute } from '@tanstack/start/api' -import { ai } from '~/lib/ai-client' -import { toStreamResponse } from '@tanstack/ai' - -export const Route = createAPIFileRoute('/api/tanchat')({ - POST: async ({ request }) => { - const { messages, tools } = await request.json() - - // New way: Use chat() + toStreamResponse() - const stream = ai.chat({ - model: 'gpt-4o', - adapter: 'openAi', - fallbacks: [ - { - adapter: 'ollama', - model: 'gpt-oss:20b', - }, - ], - messages: allMessages, - temperature: 0.7, - toolChoice: 'auto', - maxIterations: 5, - }) - - return toStreamResponse(stream) - }, -}) -``` - -## Key Changes - -1. **Removed**: `as: "response"` option -2. **Changed**: `chat()` now always returns `AsyncIterable` -3. **Added**: `chatCompletion()` method for promise-based calls -4. **Added**: Import `toStreamResponse()` helper for HTTP responses - -## Migration Patterns - -### Pattern 1: Non-streaming (Promise mode) - -**Before:** - -```typescript -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'promise', // or omit - it was the default -}) -``` - -**After:** - -```typescript -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) -``` - -### Pattern 2: Streaming - -**Before:** - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'stream', -}) -``` - -**After:** - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) -// No as option needed - chat() is now streaming-only -``` - -### Pattern 3: HTTP Response - -**Before:** - -```typescript -return ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'response', -}) -``` - -**After:** - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) - -return toStreamResponse(stream) -``` - -## Complete Example - -Here's a complete updated file: - -```typescript -import { createAPIFileRoute } from '@tanstack/start/api' -import { ai } from '~/lib/ai-client' -import { toStreamResponse } from '@tanstack/ai' - -const SYSTEM_PROMPT = `You are a helpful AI assistant...` - -export const Route = createAPIFileRoute('/api/tanchat')({ - POST: async ({ request }) => { - try { - const body = await request.json() - const { messages, tools } = body - - const allMessages = tools - ? 
messages - : [{ role: 'system', content: SYSTEM_PROMPT }, ...messages] - - // Use chat() for streaming, then convert to Response - const stream = ai.chat({ - adapter: 'openAi', - model: 'gpt-4o', - messages: allMessages, - temperature: 0.7, - tools, - toolChoice: 'auto', - maxIterations: 5, - fallbacks: [ - { - adapter: 'ollama', - model: 'gpt-oss:20b', - }, - ], - }) - - return toStreamResponse(stream) - } catch (error: any) { - return new Response(JSON.stringify({ error: error.message }), { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }) - } - }, -}) -``` - -## Benefits - -✅ **Simpler code**: Clearer intent with separate methods -✅ **Same functionality**: Still returns SSE-formatted Response -✅ **Same fallback behavior**: OpenAI → Ollama failover still works -✅ **Same tool execution**: Tools are still executed automatically -✅ **Type-safe**: TypeScript knows exact return types -✅ **Better naming**: `chatCompletion()` clearly indicates promise-based completion - -## Testing - -The client-side code doesn't need any changes! It still consumes the SSE stream the same way: - -```typescript -const response = await fetch('/api/tanchat', { - method: 'POST', - body: JSON.stringify({ messages, tools }), -}) - -const reader = response.body!.getReader() -const decoder = new TextDecoder() - -while (true) { - const { done, value } = await reader.read() - if (done) break - - const text = decoder.decode(value) - // Parse SSE format and handle chunks -} -``` - -Everything works exactly the same, just with a cleaner API! 🎉 diff --git a/ai-docs/TOOL_EXECUTION_LOOP.md b/ai-docs/TOOL_EXECUTION_LOOP.md deleted file mode 100644 index d7fb04b0..00000000 --- a/ai-docs/TOOL_EXECUTION_LOOP.md +++ /dev/null @@ -1,541 +0,0 @@ -# Automatic Tool Execution Loop - -## Overview - -The `chat()` method in TanStack AI includes an **automatic tool execution loop** that handles all tool calling internally. When you provide tools with `execute` functions, the SDK automatically: - -1. Detects when the model wants to call a tool -2. Executes the tool's function -3. Adds the result to the conversation -4. Continues the conversation with the model -5. Repeats until complete (up to `maxIterations`) - -**You don't need to manually execute tools or manage conversation state** - the SDK handles everything! - -## Architecture - -The tool execution loop is implemented using the `ToolCallManager` class, which: - -- **Accumulates tool calls** from streaming chunks -- **Validates tool calls** (ensures IDs and names are present) -- **Executes tools** and emits `tool_result` chunks -- **Returns tool result messages** for adding to conversation - -This separation makes the code maintainable and testable. - -## How It Works - -### Step-by-Step Flow - -``` -User Message - ↓ -Model Response (wants to call tool) - ↓ -SDK emits tool_call chunk ← You see this - ↓ -SDK executes tool.execute() ← Happens automatically - ↓ -SDK emits tool_result chunk ← You see this - ↓ -SDK adds result to messages ← Happens automatically - ↓ -SDK calls model again with updated messages ← Happens automatically - ↓ -Model responds with final answer - ↓ -SDK emits content chunks ← You see this - ↓ -Done! 
-``` - -### What You Do - -**You only handle the stream chunks for display:** - -```typescript -for await (const chunk of stream) { - if (chunk.type === 'content') { - // Display text to user - console.log(chunk.delta) - } else if (chunk.type === 'tool_call') { - // Show that a tool is being called - console.log(`Calling: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'tool_result') { - // Show the tool result - console.log(`Result: ${chunk.content}`) - } -} -``` - -### What the SDK Does Automatically - -1. **Tracks tool calls** from the stream -2. **Executes tools** when `finishReason === "tool_calls"` -3. **Adds messages** (assistant with tool calls + tool results) -4. **Continues conversation** by calling the model again -5. **Repeats** until no more tools are needed - -## Complete Example - -```typescript -import { chat, tool } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' - -// Define tools with execute functions -const tools = [ - tool({ - type: 'function', - function: { - name: 'get_weather', - description: 'Get current weather for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }, - }, - required: ['location'], - }, - }, - execute: async (args) => { - // This is called automatically by the SDK - const weather = await fetchWeatherAPI(args.location) - return JSON.stringify({ - temperature: weather.temp, - conditions: weather.conditions, - unit: args.unit || 'celsius', - }) - }, - }), - - tool({ - type: 'function', - function: { - name: 'calculate', - description: 'Perform mathematical calculations', - parameters: { - type: 'object', - properties: { - expression: { type: 'string' }, - }, - required: ['expression'], - }, - }, - execute: async (args) => { - // This is called automatically by the SDK - const result = evaluateExpression(args.expression) - return JSON.stringify({ result }) - }, - }), -] - -// Use with chat - tools are automatically executed -const stream = chat({ - adapter: openai(), - model: 'gpt-4o', - messages: [{ role: 'user', content: "What's the weather in Paris?" }], - tools, - agentLoopStrategy: maxIterations(5), // Control loop behavior - // Or use custom strategy: - // agentLoopStrategy: ({ iterationCount, messages }) => iterationCount < 10, -}) - -// Handle the stream -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) - } else if (chunk.type === 'tool_call') { - console.log(`\n🔧 Calling: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'tool_result') { - console.log(`✓ Result: ${chunk.content}\n`) - } else if (chunk.type === 'done') { - console.log(`\nDone! (${chunk.finishReason})`) - } -} -``` - -### Output - -``` -🔧 Calling: get_weather -✓ Result: {"temperature":15,"conditions":"cloudy","unit":"celsius"} - -The current weather in Paris is 15°C and cloudy. - -Done! (stop) -``` - -## Multi-Turn Tool Execution - -The loop can handle multiple rounds of tool execution: - -```typescript -// User asks: "What's the weather in Paris and what's 5 + 3?" - -// Round 1: Model calls get_weather -// → SDK executes get_weather -// → SDK adds result to messages - -// Round 2: Model calls calculate -// → SDK executes calculate -// → SDK adds result to messages - -// Round 3: Model responds with final answer using both results -// → "In Paris it's 15°C and cloudy. Also, 5 + 3 = 8." -``` - -All handled automatically by the SDK! 
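-
-To make the bookkeeping concrete, here is a rough sketch of the conversation state after the first tool round. It is illustrative only: the IDs and weather payload reuse the example above, and the message fields follow the shapes used in the manual `chatCompletion()` example later in this document.
-
-```typescript
-// Illustrative only: roughly what the accumulated messages look like after round 1
-const messagesAfterRound1 = [
-  { role: 'user', content: "What's the weather in Paris?" },
-  {
-    // Added by the SDK: the assistant turn that requested the tool
-    role: 'assistant',
-    content: '',
-    toolCalls: [
-      {
-        id: 'call_abc123',
-        type: 'function',
-        function: { name: 'get_weather', arguments: '{"location":"Paris"}' },
-      },
-    ],
-  },
-  {
-    // Added by the SDK: the executed tool's result
-    role: 'tool',
-    toolCallId: 'call_abc123',
-    content: '{"temperature":15,"conditions":"cloudy","unit":"celsius"}',
-  },
-]
-// The SDK sends this array back to the model, which then streams the final answer
-```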
- -## Configuration - -### Agent Loop Strategies - -Control when the tool execution loop stops using `agentLoopStrategy`: - -#### Built-in Strategies - -```typescript -import { maxIterations, untilFinishReason, combineStrategies } from "@tanstack/ai"; - -// 1. Max iterations (default behavior) -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: maxIterations(3), // Max 3 rounds -}); - -// 2. Until specific finish reason -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: untilFinishReason(["stop", "length"]), -}); - -// 3. Combine multiple strategies -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: combineStrategies([ - maxIterations(10), - ({ messages }) => messages.length < 100, // Custom condition - ]), -}); -``` - -#### Custom Strategies - -Create your own strategy function: - -```typescript -// Simple custom strategy -const myStrategy: AgentLoopStrategy = ({ iterationCount }) => { - return iterationCount < 5; -}; - -// Advanced custom strategy -const advancedStrategy: AgentLoopStrategy = ({ - iterationCount, - messages, - finishReason -}) => { - // Stop if too many iterations - if (iterationCount >= 10) return false; - - // Stop if conversation too long - if (messages.length > 50) return false; - - // Stop on specific finish reasons - if (finishReason === "length") return false; - - // Otherwise continue - return true; -}; - -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - agentLoopStrategy: advancedStrategy, -}); -``` - -#### Alternative: maxIterations - -You can also use `maxIterations` as a number for convenience: - -```typescript -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - maxIterations: 3, // Shorthand for agentLoopStrategy: maxIterations(3) -}); -``` - -This is equivalent to `agentLoopStrategy: maxIterations(3)`. 
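-
-Because a strategy is just a function, it can also close over its own state. The sketch below is illustrative rather than a built-in: it assumes only `combineStrategies` and `maxIterations` from above, plus the `AgentLoopStrategy` type being exported from `@tanstack/ai`.
-
-```typescript
-import { combineStrategies, maxIterations } from "@tanstack/ai";
-import type { AgentLoopStrategy } from "@tanstack/ai";
-
-// Hypothetical helper: keep looping only while a wall-clock budget remains
-function withinTimeBudget(ms: number): AgentLoopStrategy {
-  const deadline = Date.now() + ms;
-  return () => Date.now() < deadline;
-}
-
-const stream = chat({
-  adapter: openai(),
-  model: "gpt-4o",
-  messages: [{ role: "user", content: "What's the weather in Paris?" }],
-  tools, // the tools array defined earlier
-  agentLoopStrategy: combineStrategies([
-    maxIterations(5), // never more than 5 rounds...
-    withinTimeBudget(30_000), // ...and never longer than roughly 30 seconds
-  ]),
-});
-```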
- -### Agent Loop Strategy Types - -```typescript -export interface AgentLoopState { - iterationCount: number // Current iteration (0-indexed) - messages: Message[] // Current conversation messages - finishReason: string | null // Last finish reason from model -} - -export type AgentLoopStrategy = (state: AgentLoopState) => boolean -``` - -### `toolChoice` - -Control when tools are used: - -```typescript -const stream = chat({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [...], - toolChoice: "auto", // Let model decide (default) - // toolChoice: "required", // Force model to call a tool - // toolChoice: "none", // Prevent tool calling -}); -``` - -## Stream Chunk Types - -### `tool_call` - -Emitted when the model decides to call a tool: - -```typescript -{ - type: "tool_call", - toolCall: { - id: "call_abc123", - type: "function", - function: { - name: "get_weather", - arguments: '{"location":"Paris"}' // May be incomplete during streaming - } - }, - index: 0 // Index of this tool call if multiple -} -``` - -### `tool_result` - -Emitted after the SDK executes a tool: - -```typescript -{ - type: "tool_result", - toolCallId: "call_abc123", - content: '{"temperature":15,"conditions":"cloudy"}' -} -``` - -## Best Practices - -### ✅ DO - -- Provide tools with `execute` functions for automatic execution -- Handle chunk types for display/logging -- Use `maxIterations` to prevent infinite loops -- Return JSON strings from `execute` functions -- Handle errors in `execute` functions - -### ❌ DON'T - -- Try to execute tools manually (SDK does this) -- Manage conversation state manually (SDK does this) -- Add tool result messages yourself (SDK does this) -- Worry about message ordering (SDK handles this) - -## HTTP Streaming with Tools - -Perfect for API endpoints - tool execution happens on server, results stream to client: - -```typescript -import { chat } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' -import { toStreamResponse } from '@tanstack/ai' - -export async function POST(request: Request) { - const { messages } = await request.json() - - const stream = chat({ - adapter: openai(), - model: 'gpt-4o', - messages, - tools: [weatherTool, calculateTool], - maxIterations: 5, - }) - - // Client receives tool_call and tool_result chunks - return toStreamResponse(stream) -} -``` - -**Client-side:** - -```typescript -const response = await fetch('/api/chat', { - method: 'POST', - body: JSON.stringify({ messages }), -}) - -const reader = response.body.getReader() -// Receives: content chunks, tool_call chunks, tool_result chunks, done chunk -``` - -## Comparison: chat() vs chatCompletion() - -| Feature | `chat()` | `chatCompletion()` | -| ------------------------- | -------------------- | -------------------------------- | -| **Tool Execution** | ✅ Automatic loop | ❌ Manual (returns tool calls) | -| **Streaming** | ✅ Yes | ❌ No | -| **Tool Results** | ✅ Emitted as chunks | ❌ Not executed | -| **Conversation Continue** | ✅ Automatic | ❌ Manual | -| **Use Case** | Real-time UIs, APIs | Batch processing, manual control | - -### When to use `chatCompletion()` - -Use `chatCompletion()` if you need manual control over tool execution: - -```typescript -const result = await chatCompletion({ - adapter: openai(), - model: "gpt-4o", - messages: [...], - tools: [weatherTool], -}); - -// Model wants to call a tool, but SDK doesn't execute it -if (result.toolCalls) { - // You decide whether/how to execute - for (const toolCall of result.toolCalls) { - // Manual execution - 
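-    // (the steps below mirror what chat() does automatically: look up the
-    // matching registered tool, run its execute function, and append the results)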
const tool = tools.find(t => t.function.name === toolCall.function.name); - const result = await tool.execute(JSON.parse(toolCall.function.arguments)); - - // You must add result to messages and call chatCompletion again - messages.push({ - role: "assistant", - content: result.content, - toolCalls: result.toolCalls - }); - messages.push({ - role: "tool", - content: result, - toolCallId: toolCall.id - }); - - // Call again with updated messages - const nextResult = await chatCompletion({ - adapter: openai(), - model: "gpt-4o", - messages, - tools: [weatherTool], - }); - } -} -``` - -**For most use cases, use `chat()` with automatic tool execution!** - -## ToolCallManager Class - -The tool execution logic is implemented in the `ToolCallManager` class for better maintainability and testability. - -### Public API - -```typescript -class ToolCallManager { - constructor(tools: ReadonlyArray) - - // Add a streaming tool call chunk - addToolCallChunk(chunk: ToolCallChunk): void - - // Check if there are complete tool calls - hasToolCalls(): boolean - - // Get all validated tool calls - getToolCalls(): ToolCall[] - - // Execute tools and yield tool_result chunks - async *executeTools( - doneChunk, - ): AsyncGenerator - - // Clear for next iteration - clear(): void -} -``` - -### Usage in chat() method - -```typescript -async *chat(options) { - const toolCallManager = new ToolCallManager(options.tools || []); - - while (iterationCount < maxIterations) { - // Stream chunks - for await (const chunk of adapter.chatStream()) { - yield chunk; - - if (chunk.type === "tool_call") { - toolCallManager.addToolCallChunk(chunk); // Accumulate - } - } - - // Execute if needed - if (toolCallManager.hasToolCalls()) { - const toolResults = yield* toolCallManager.executeTools(doneChunk); - messages = [...messages, ...toolResults]; - toolCallManager.clear(); // Clear for next iteration - continue; - } - - break; - } -} -``` - -### Benefits - -- ✅ **Testable** - Unit tests for tool execution logic -- ✅ **Maintainable** - Tool logic separate from chat logic -- ✅ **Reusable** - Can be used in other contexts -- ✅ **Clean** - Single responsibility principle - -### Unit Tests - -The `ToolCallManager` has comprehensive unit tests. Run them with: - -```bash -cd packages/ai -pnpm test -``` - -See `packages/ai/src/tool-call-manager.test.ts` for test scenarios: - -- Accumulating streaming chunks -- Filtering incomplete tool calls -- Executing tools -- Error handling -- Multiple tool calls -- Clearing state - -## License - -MIT diff --git a/ai-docs/TOOL_REGISTRY.md b/ai-docs/TOOL_REGISTRY.md deleted file mode 100644 index efe8ca83..00000000 --- a/ai-docs/TOOL_REGISTRY.md +++ /dev/null @@ -1,474 +0,0 @@ -# Tool Registry API - -> **🔄 Automatic Tool Execution Loop:** The `chat()` method automatically executes tools in a loop. When the model decides to call a tool, the SDK: -> -> 1. Executes the tool's `execute` function -> 2. Emits `tool_result` chunks with the result -> 3. Adds tool results to messages automatically -> 4. Continues the conversation with the model -> 5. Repeats until no more tools are needed (up to `maxIterations`, default: 5) -> -> **You don't need to manually handle tool execution** - just provide tools with `execute` functions and the SDK handles everything! -> -> **📚 See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md) - -## Overview - -The Tool Registry API allows you to define tools once in the AI constructor and then reference them by name throughout your application. 
This provides better organization, type safety, and reusability. - -## Key Benefits - -✅ **Define Once, Use Everywhere** - Register tools in one place -✅ **Type-Safe Tool Names** - TypeScript autocomplete and validation -✅ **Better Organization** - Centralized tool management -✅ **No Duplication** - Reuse tools across different chats -✅ **Runtime Validation** - Errors if referencing non-existent tools - -## Basic Usage - -### 1. Define Tools Registry - -```typescript -import { AI } from '@ts-poc/ai' -import { OpenAIAdapter } from '@ts-poc/ai-openai' - -// Define all your tools in a registry -const tools = { - get_weather: { - type: 'function' as const, - function: { - name: 'get_weather', - description: 'Get current weather for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string', description: 'City name' }, - }, - required: ['location'], - }, - }, - execute: async (args: { location: string }) => { - // Your implementation - return JSON.stringify({ temp: 72, condition: 'sunny' }) - }, - }, - - calculate: { - type: 'function' as const, - function: { - name: 'calculate', - description: 'Perform mathematical calculations', - parameters: { - type: 'object', - properties: { - expression: { type: 'string' }, - }, - required: ['expression'], - }, - }, - execute: async (args: { expression: string }) => { - const result = eval(args.expression) // Use safe math parser in production! - return JSON.stringify({ result }) - }, - }, -} as const // ← Important: use "as const" for type safety! -``` - -### 2. Initialize AI with Tools - -```typescript -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ - apiKey: process.env.OPENAI_API_KEY, - }), - }, - tools, // ← Register tools here! -}) -``` - -### 3. Use Tools by Name (Type-Safe!) - -```typescript -// Use specific tools -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" }], - tools: ['get_weather'], // ← Type-safe! Only registered tool names - toolChoice: 'auto', - maxIterations: 5, -}) - -// Use multiple tools -const result2 = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [ - { role: 'user', content: "What's the weather in SF and what's 2+2?" }, - ], - tools: ['get_weather', 'calculate'], // ← Multiple tools, all type-safe! - toolChoice: 'auto', - maxIterations: 5, -}) - -// No tools (regular chat) -const result3 = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Tell me a joke' }], - // No tools specified -}) -``` - -## Type Safety - -TypeScript provides full autocomplete and validation: - -```typescript -const ai = new AI({ - adapters: { /* ... */ }, - tools: { - get_weather: { /* ... */ }, - calculate: { /* ... */ }, - }, -}); - -// ✅ Valid - TypeScript knows these tool names exist -ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather"], // ← Autocomplete works! -}); - -// ✅ Valid - multiple tools -ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "calculate"], // ← Both validated! -}); - -// ❌ TypeScript Error - invalid tool name -ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["invalid_tool"], // ← Compile error! 
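-  // e.g. Type '"invalid_tool"' is not assignable to type '"get_weather" | "calculate"'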
-}); -``` - -## Migration from Old API - -### Before (Tools Inline) - -```typescript -const result = await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: [ - { - type: "function", - function: { - name: "get_weather", - description: "Get weather", - parameters: { /* ... */ }, - }, - execute: async (args) => { /* ... */ }, - }, - { - type: "function", - function: { - name: "calculate", - description: "Calculate", - parameters: { /* ... */ }, - }, - execute: async (args) => { /* ... */ }, - }, - ], - toolChoice: "auto", -}); -``` - -### After (Tool Registry) - -```typescript -// Define once in constructor -const ai = new AI({ - adapters: { /* ... */ }, - tools: { - get_weather: { - type: "function", - function: { - name: "get_weather", - description: "Get weather", - parameters: { /* ... */ }, - }, - execute: async (args) => { /* ... */ }, - }, - calculate: { - type: "function", - function: { - name: "calculate", - description: "Calculate", - parameters: { /* ... */ }, - }, - execute: async (args) => { /* ... */ }, - }, - }, -}); - -// Use by name (type-safe!) -const result = await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "calculate"], // ← Much cleaner! - toolChoice: "auto", -}); -``` - -## Working with Tools - -### Get Tool by Name - -```typescript -const weatherTool = ai.getTool('get_weather') -console.log(weatherTool.function.description) -``` - -### List All Tool Names - -```typescript -const toolNames = ai.toolNames -console.log('Available tools:', toolNames) -// Output: ["get_weather", "calculate"] -``` - -### Check if Tool Exists - -```typescript -try { - const tool = ai.getTool('some_tool') - console.log('Tool exists!') -} catch (error) { - console.log('Tool not found') -} -``` - -## Streaming with Tools - -Tools work seamlessly with streaming: - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [ - { role: 'user', content: "What's the weather in Paris and what's 100*5?" 
}, - ], - tools: ['get_weather', 'calculate'], - toolChoice: 'auto', - maxIterations: 5, -}) - -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) - } else if (chunk.type === 'tool_call') { - console.log(`\n→ Calling: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'done') { - console.log('\n✓ Done') - } -} -``` - -## HTTP Streaming with Tools - -Perfect for API endpoints: - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -export const Route = createAPIFileRoute('/api/chat')({ - POST: async ({ request }): Promise => { - const { messages } = await request.json() - - const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4o', - messages, - tools: ['get_weather', 'search_database', 'send_email'], - toolChoice: 'auto', - maxIterations: 5, - }) - - return toStreamResponse(stream) - }, -}) -``` - -## Real-World Example: E-commerce Assistant - -```typescript -const tools = { - search_products: { - type: 'function' as const, - function: { - name: 'search_products', - description: 'Search for products in the catalog', - parameters: { - type: 'object', - properties: { - query: { type: 'string' }, - category: { type: 'string' }, - maxPrice: { type: 'number' }, - }, - required: ['query'], - }, - }, - execute: async (args: { - query: string - category?: string - maxPrice?: number - }) => { - const results = await db.products.search(args) - return JSON.stringify(results) - }, - }, - - get_product_details: { - type: 'function' as const, - function: { - name: 'get_product_details', - description: 'Get detailed information about a product', - parameters: { - type: 'object', - properties: { - productId: { type: 'string' }, - }, - required: ['productId'], - }, - }, - execute: async (args: { productId: string }) => { - const product = await db.products.findById(args.productId) - return JSON.stringify(product) - }, - }, - - check_inventory: { - type: 'function' as const, - function: { - name: 'check_inventory', - description: 'Check if a product is in stock', - parameters: { - type: 'object', - properties: { - productId: { type: 'string' }, - quantity: { type: 'number', default: 1 }, - }, - required: ['productId'], - }, - }, - execute: async (args: { productId: string; quantity?: number }) => { - const available = await inventory.check( - args.productId, - args.quantity || 1, - ) - return JSON.stringify({ available, productId: args.productId }) - }, - }, - - add_to_cart: { - type: 'function' as const, - function: { - name: 'add_to_cart', - description: 'Add a product to the shopping cart', - parameters: { - type: 'object', - properties: { - productId: { type: 'string' }, - quantity: { type: 'number', default: 1 }, - }, - required: ['productId'], - }, - }, - execute: async (args: { productId: string; quantity?: number }) => { - await cart.add(args.productId, args.quantity || 1) - return JSON.stringify({ success: true, productId: args.productId }) - }, - }, -} as const - -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ apiKey: process.env.OPENAI_API_KEY }), - }, - tools, -}) - -// Now any chat can use these tools by name! 
-const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [ - { role: 'user', content: "I'm looking for a red guitar under $500" }, - ], - tools: [ - 'search_products', - 'get_product_details', - 'check_inventory', - 'add_to_cart', - ], - toolChoice: 'auto', - maxIterations: 10, -}) -``` - -## Advanced: Dynamic Tool Selection - -You can dynamically select which tools to use: - -```typescript -function getChatTools(userRole: string): string[] { - if (userRole === "admin") { - return ["search_products", "get_product_details", "check_inventory", "add_to_cart", "update_prices"]; - } else if (userRole === "customer") { - return ["search_products", "get_product_details", "add_to_cart"]; - } else { - return ["search_products"]; // Guest users - } -} - -const result = await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: getChatTools(user.role) as any, // Type assertion needed for dynamic arrays - toolChoice: "auto", -}); -``` - -## Best Practices - -1. **Use `as const`** when defining tools for maximum type safety -2. **Descriptive names** - Use clear, verb-based names like `get_weather`, `search_products` -3. **Comprehensive descriptions** - Help the AI understand when to use each tool -4. **Required parameters** - Mark parameters as required when appropriate -5. **Error handling** - Return error information in execute functions -6. **Validation** - Validate parameters in execute functions -7. **Centralize** - Keep all tool definitions in one place for maintainability - -## Summary - -The Tool Registry API provides: - -✅ **Type-Safe Tool References** - Autocomplete and validation -✅ **Centralized Management** - Define once, use everywhere -✅ **Cleaner Code** - Reference by name instead of inline definitions -✅ **Better Reusability** - Share tools across different chats -✅ **Runtime Validation** - Catch errors early - -**Migration Path**: Move inline tool definitions to the constructor registry, then reference them by name in your chat calls! diff --git a/ai-docs/TOOL_REGISTRY_IMPLEMENTATION.md b/ai-docs/TOOL_REGISTRY_IMPLEMENTATION.md deleted file mode 100644 index 6f741a6e..00000000 --- a/ai-docs/TOOL_REGISTRY_IMPLEMENTATION.md +++ /dev/null @@ -1,439 +0,0 @@ -# Tool Registry API - Implementation Summary - -> **🔄 Automatic Tool Execution Loop:** This document describes how tools are registered and referenced. Remember that the `chat()` method automatically executes tools in a loop - when the model calls a tool, the SDK executes it, adds the result to messages, and continues the conversation automatically (up to `maxIterations`, default: 5). - -## Overview - -Successfully refactored the AI API to support a **tool registry** where tools are defined once in the constructor and then referenced by name in a type-safe manner throughout the application. - -## Key Changes - -### 1. Tool Registry in Constructor - -**Before:** - -```typescript -const ai = new AI({ - adapters: { /* ... */ } -}); - -// Had to pass full tool definitions every time -ai.chat({ - messages: [...], - tools: [ - { type: "function", function: { name: "get_weather", ... }, execute: ... }, - { type: "function", function: { name: "calculate", ... }, execute: ... }, - ], -}); -``` - -**After:** - -```typescript -const ai = new AI({ - adapters: { /* ... */ }, - tools: { - get_weather: { - type: "function" as const, - function: { name: "get_weather", ... }, - execute: async (args) => { ... }, - }, - calculate: { - type: "function" as const, - function: { name: "calculate", ... 
}, - execute: async (args) => { ... }, - }, - }, -}); - -// Reference by name - type-safe! -ai.chat({ - messages: [...], - tools: ["get_weather", "calculate"], // ← Type-safe string array! -}); -``` - -### 2. Type System Updates - -Added new generic parameter to `AI` class: - -```typescript -class AI -``` - -Where: - -- `T` - Adapter map (existing) -- `TTools` - Tool registry (new!) - -### 3. Type-Safe Tool Names - -Tool names are extracted from the registry type: - -```typescript -type ToolNames = keyof TTools & string -``` - -TypeScript provides: - -- ✅ Autocomplete for tool names -- ✅ Compile-time validation -- ✅ Refactoring safety - -### 4. Updated Method Signatures - -All chat methods now accept tool names instead of full tool objects: - -```typescript -// ChatOptionsWithAdapter -type ChatOptionsWithAdapter = { - // ... other options - tools?: ReadonlyArray> // ← Type-safe tool names! -} -``` - -### 5. Internal Tool Resolution - -New helper methods: - -```typescript -class AI { - getTool(name: ToolNames): Tool // Get single tool - get toolNames(): Array> // List all tool names - private getToolsByNames(names: ToolNames[]): Tool[] // Convert names to objects -} -``` - -## API Examples - -### Basic Usage - -```typescript -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ apiKey: "..." }), - }, - tools: { - get_weather: { /* ... */ }, - calculate: { /* ... */ }, - }, -}); - -// Use specific tools -await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather"], // ← Type-safe! -}); - -// Use multiple tools -await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "calculate"], // ← Both validated! -}); - -// No tools -await ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - // No tools specified -}); -``` - -### With Streaming - -```typescript -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "calculate"], -}); - -for await (const chunk of stream) { - // Handle chunks -} -``` - -### With HTTP Response - -```typescript -import { toStreamResponse } from "@tanstack/ai"; - -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "search_products"], -}); - -return toStreamResponse(stream); -``` - -## Real-World Example: api.tanchat.ts - -**Before:** - -```typescript -const tools: Tool[] = [ - { type: "function", function: { name: "getGuitars", ... }, execute: ... }, - { type: "function", function: { name: "recommendGuitar", ... }, execute: ... }, -]; - -const ai = new AI({ adapters: { /* ... */ } }); - -ai.chat({ - messages: [...], - tools, // ← Pass array of Tool objects -}); -``` - -**After:** - -```typescript -const tools = { - getGuitars: { - type: "function" as const, - function: { name: "getGuitars", ... }, - execute: async () => { ... }, - }, - recommendGuitar: { - type: "function" as const, - function: { name: "recommendGuitar", ... }, - execute: async (args) => { ... }, - }, -} as const; - -const ai = new AI({ - adapters: { /* ... */ }, - tools, // ← Register once! -}); - -ai.chat({ - messages: [...], - tools: ["getGuitars", "recommendGuitar"], // ← Type-safe names! -}); -``` - -## Benefits - -### 1. Type Safety - -- ✅ Autocomplete for tool names in IDE -- ✅ Compile-time errors for invalid tool names -- ✅ Refactoring support (rename tools safely) - -### 2. 
Better Organization - -- ✅ Centralized tool definitions -- ✅ Single source of truth -- ✅ Easy to maintain and update - -### 3. Code Reusability - -- ✅ Define tools once, use everywhere -- ✅ Share tools across different chat calls -- ✅ No duplication - -### 4. Developer Experience - -- ✅ Cleaner code (tool names vs full objects) -- ✅ Less typing (just reference by name) -- ✅ Better readability - -### 5. Runtime Safety - -- ✅ Validation that tools exist -- ✅ Clear error messages -- ✅ No silent failures - -## Implementation Details - -### Type System - -```typescript -// Tool registry type -type ToolRegistry = Record - -// Extract tool names -type ToolNames = keyof TTools & string - -// AI class with tool registry -class AI { - private tools: TTools - - constructor(config: AIConfig) { - this.tools = config.tools || ({} as TTools) - } - - private getToolsByNames(names: ReadonlyArray>): Tool[] { - return names.map((name) => this.getTool(name)) - } -} -``` - -### Chat Options - -```typescript -type ChatOptionsWithAdapter = { - adapter: keyof TAdapters - model: ExtractModels - messages: Message[] - tools?: ReadonlyArray> // ← Tool names, not objects - // ... other options -} -``` - -### Internal Resolution - -When `chat()` is called: - -1. Extract tool names from options -2. Convert tool names to Tool objects using `getToolsByNames()` -3. Pass Tool objects to adapter methods -4. Adapters work with full Tool objects (no changes needed) - -## Files Changed - -### Core Implementation - -- ✅ `packages/ai/src/ai.ts` - - Added `TTools` generic parameter to `AI` class - - Added `ToolRegistry` and `ToolNames` types - - Updated `ChatOptionsWithAdapter` and `ChatOptionsWithFallback` - - Added `getTool()`, `toolNames`, and `getToolsByNames()` methods - - Updated `chatPromise()` and `chatStream()` to convert tool names - -### Documentation - -- ✅ `docs/TOOL_REGISTRY.md` - Comprehensive guide -- ✅ `docs/TOOL_REGISTRY_QUICK_START.md` - Quick reference -- ✅ `examples/tool-registry-example.ts` - Full examples - -### Example Updates - -- ✅ `examples/ts-chat/src/routes/demo/api.tanchat.ts` - Updated to use tool registry - -## Migration Guide - -### Step 1: Convert Tool Array to Registry - -```typescript -// Before -const tools: Tool[] = [ - { type: "function", function: { name: "tool1", ... } }, - { type: "function", function: { name: "tool2", ... } }, -]; - -// After -const tools = { - tool1: { type: "function" as const, function: { name: "tool1", ... } }, - tool2: { type: "function" as const, function: { name: "tool2", ... } }, -} as const; // ← Important! -``` - -### Step 2: Register Tools in Constructor - -```typescript -// Before -const ai = new AI({ - adapters: { - /* ... */ - }, -}) - -// After -const ai = new AI({ - adapters: { - /* ... */ - }, - tools, // ← Add tools here -}) -``` - -### Step 3: Use Tool Names in Chat Calls - -```typescript -// Before -ai.chat({ - messages: [...], - tools: tools, // ← Full array -}); - -// After -ai.chat({ - messages: [...], - tools: ["tool1", "tool2"], // ← Just names! -}); -``` - -## Testing - -Verify type safety: - -```typescript -const ai = new AI({ - adapters: { - /* ... */ - }, - tools: { - get_weather: { - /* ... */ - }, - calculate: { - /* ... 
*/ - }, - }, -}) - -// ✅ Should work -ai.chat({ messages: [], tools: ['get_weather'] }) - -// ❌ Should show TypeScript error -ai.chat({ messages: [], tools: ['invalid_tool'] }) -``` - -## Performance - -No performance impact: - -- Tool name resolution happens once per chat call -- Minimal overhead (simple object lookup) -- Tool execution unchanged - -## Backward Compatibility - -**Breaking Change**: This is a breaking change. Users must: - -1. Convert tool arrays to registries -2. Register tools in constructor -3. Use tool names instead of objects - -However, the migration path is straightforward and provides significant benefits. - -## Future Enhancements - -Potential improvements: - -- Tool namespaces (e.g., `weather.get`, `weather.forecast`) -- Tool permissions/access control -- Tool versioning -- Dynamic tool registration -- Tool composition/chaining - -## Summary - -The Tool Registry API provides: - -✅ **Type-Safe Tool References** - Autocomplete and validation -✅ **Centralized Management** - Define once, use everywhere -✅ **Cleaner Code** - Reference by name instead of objects -✅ **Better Reusability** - Share tools across chats -✅ **Runtime Validation** - Clear error messages -✅ **Developer Experience** - Improved DX with less code - -**Result**: More maintainable, type-safe, and developer-friendly tool management! 🎉 diff --git a/ai-docs/TOOL_REGISTRY_QUICK_START.md b/ai-docs/TOOL_REGISTRY_QUICK_START.md deleted file mode 100644 index 2ea166e5..00000000 --- a/ai-docs/TOOL_REGISTRY_QUICK_START.md +++ /dev/null @@ -1,208 +0,0 @@ -# Tool Registry API - Quick Start - -> **🔄 Automatic Tool Execution:** The `chat()` method automatically executes tools in a loop. When the model calls a tool, the SDK executes it, adds the result to messages, and continues the conversation automatically (controlled by `agentLoopStrategy`, default: `maxIterations(5)`). You don't need to manually handle tool execution! -> -> **📚 See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md) - -## In 3 Steps - -### 1. Define Tools in Constructor - -```typescript -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ apiKey: process.env.OPENAI_API_KEY }), - }, - tools: { - // ← Define all tools here! - get_weather: { - type: 'function' as const, - function: { - name: 'get_weather', - description: 'Get weather for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - required: ['location'], - }, - }, - execute: async (args: { location: string }) => { - return JSON.stringify({ temp: 72, condition: 'sunny' }) - }, - }, - calculate: { - type: 'function' as const, - function: { - name: 'calculate', - description: 'Perform calculations', - parameters: { - type: 'object', - properties: { - expression: { type: 'string' }, - }, - required: ['expression'], - }, - }, - execute: async (args: { expression: string }) => { - return JSON.stringify({ result: eval(args.expression) }) - }, - }, - }, -}) -``` - -### 2. Reference Tools by Name (Type-Safe!) - -```typescript -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" }], - tools: ['get_weather'], // ← Type-safe! Autocomplete works! - toolChoice: 'auto', -}) -``` - -### 3. Use Multiple Tools - -```typescript -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Weather in NYC and calculate 5*20' }], - tools: ['get_weather', 'calculate'], // ← Both tools! 
- toolChoice: 'auto', -}) -``` - -## Type Safety - -✅ **Autocomplete** - IDE suggests available tool names -✅ **Validation** - TypeScript catches typos at compile time -✅ **Runtime checks** - Errors if tool doesn't exist - -```typescript -// ✅ Valid -tools: ['get_weather', 'calculate'] - -// ❌ TypeScript Error -tools: ['invalid_tool'] -``` - -## Benefits vs Old API - -### Before (Inline Tools) - -```typescript -// Had to define tools every time! -ai.chat({ - messages: [...], - tools: [ - { type: "function", function: { name: "get_weather", ... }, execute: ... }, - { type: "function", function: { name: "calculate", ... }, execute: ... }, - ], -}); -``` - -### After (Tool Registry) - -```typescript -// Define once in constructor, use by name everywhere! -ai.chat({ - messages: [...], - tools: ["get_weather", "calculate"], // ← Much cleaner! -}); -``` - -## With Streaming - -The `chat()` method automatically executes tools and emits chunks for each step: - -```typescript -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "calculate"], - toolChoice: "auto", - agentLoopStrategy: maxIterations(5), // Optional: control loop -}); - -for await (const chunk of stream) { - if (chunk.type === "content") { - process.stdout.write(chunk.delta); // Stream text content - } else if (chunk.type === "tool_call") { - console.log(`→ Calling: ${chunk.toolCall.function.name}`); - } else if (chunk.type === "tool_result") { - console.log(`✓ Tool result: ${chunk.content}`); - } -} -``` - -**What happens internally:** - -1. Model decides to call a tool → `tool_call` chunk emitted -2. SDK executes the tool's `execute` function automatically -3. SDK emits `tool_result` chunk with the result -4. SDK adds tool result to messages and continues conversation -5. Model responds with final answer based on tool results - -## With HTTP Response - -```typescript -import { toStreamResponse } from "@tanstack/ai"; - -// Perfect for API endpoints! -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - tools: ["get_weather", "search_products", "send_email"], - toolChoice: "auto", -}); - -return toStreamResponse(stream); -``` - -## Pro Tips - -1. **Use `as const`** when defining tools for type safety -2. **Descriptive names** like `get_weather`, `search_products` -3. **Keep tools in one place** for easy maintenance -4. **List available tools**: `ai.toolNames` -5. **Get a tool**: `ai.getTool("get_weather")` - -## Common Pattern: Separate File - -```typescript -// tools.ts -export const tools = { - get_weather: { /* ... */ }, - calculate: { /* ... */ }, - search_products: { /* ... */ }, -} as const; - -// ai-client.ts -import { tools } from "./tools"; - -export const ai = new AI({ - adapters: { /* ... */ }, - tools, // ← Import from separate file! -}); - -// api.ts -import { ai } from "./ai-client"; - -ai.chat({ - messages: [...], - tools: ["get_weather"], // ← Type-safe across files! -}); -``` - -## Summary - -**Define once, use everywhere with full type safety!** 🎉 - -See full documentation: `docs/TOOL_REGISTRY.md` diff --git a/ai-docs/TOOL_STATES_MIGRATION.md b/ai-docs/TOOL_STATES_MIGRATION.md deleted file mode 100644 index 7b180fd8..00000000 --- a/ai-docs/TOOL_STATES_MIGRATION.md +++ /dev/null @@ -1,290 +0,0 @@ -# Tool States and Parts-Based Messages - Migration Guide - -## Overview - -This migration introduces comprehensive tool state tracking and a parts-based message structure to improve UI rendering and tool handling during streaming. 
- -## Key Changes - -### 1. Message Type Renamed: `Message` → `ModelMessage` - -**Package:** `@tanstack/ai` - -The `Message` interface has been renamed to `ModelMessage` to better reflect its purpose as the format used for LLM communication. - -**Before:** - -```typescript -import type { Message } from '@tanstack/ai' -const messages: Message[] = [] -``` - -**After:** - -```typescript -import type { ModelMessage } from '@tanstack/ai' -const messages: ModelMessage[] = [] -``` - -### 2. New UIMessage Type with Parts - -**Package:** `@tanstack/ai-client` - -A new `UIMessage` type has been introduced for client-side UI rendering. Messages are now composed of parts (text, tool calls, tool results) instead of flat content and toolCalls properties. - -**Structure:** - -```typescript -interface UIMessage { - id: string - role: 'system' | 'user' | 'assistant' - parts: MessagePart[] - createdAt?: Date -} - -type MessagePart = TextPart | ToolCallPart | ToolResultPart - -interface TextPart { - type: 'text' - content: string -} - -interface ToolCallPart { - type: 'tool-call' - id: string - name: string - arguments: string - state: ToolCallState // "awaiting-input" | "input-streaming" | "input-complete" -} - -interface ToolResultPart { - type: 'tool-result' - toolCallId: string - content: string - state: ToolResultState // "streaming" | "complete" | "error" - error?: string -} -``` - -### 3. Tool Call States - -Tool calls now track their lifecycle: - -- **awaiting-input**: Tool call started but no arguments received yet -- **input-streaming**: Partial arguments received (uses loose JSON parser) -- **input-complete**: All arguments received - -### 4. Partial JSON Parsing - -A new loose JSON parser has been integrated to handle incomplete tool arguments during streaming: - -```typescript -import { parsePartialJSON } from '@tanstack/ai-client' - -const partialArgs = '{"name": "John", "ag' -const parsed = parsePartialJSON(partialArgs) // { name: "John" } -``` - -### 5. Automatic Conversion - -Connection adapters automatically convert `UIMessage[]` to `ModelMessage[]` before sending to the server: - -```typescript -// Client code - works with UIMessages -const messages: UIMessage[] = [ - { - id: '1', - role: 'user', - parts: [{ type: 'text', content: 'Hello' }], - }, -] - -// Automatically converted to ModelMessages when sent -const connection = fetchServerSentEvents('/api/chat') -const stream = connection.connect(messages) // Converts internally -``` - -## Migration Steps - -### For CLI/Backend Code (using @tanstack/ai) - -**Step 1:** Update type imports - -```diff -- import type { Message } from "@tanstack/ai"; -+ import type { ModelMessage } from "@tanstack/ai"; -``` - -**Step 2:** Update variable types - -```diff -- const messages: Message[] = []; -+ const messages: ModelMessage[] = []; -``` - -### For React/Client Code (using @tanstack/ai-client or @tanstack/ai-react) - -**Step 1:** Update message rendering to use parts - -**Before:** - -```typescript -{messages.map(({ id, role, content, toolCalls }) => ( -
-  <div key={id} className={role}>
-    {content && (
-      <div className="content">{content}</div>
-    )}
-    {toolCalls?.map(tc => (
-      <div key={tc.id} className="tool-call">{tc.function.name}</div>
-    ))}
-  </div>
-))} -``` - -**After:** - -```typescript -{messages.map(({ id, role, parts }) => { - const textContent = parts - .filter(p => p.type === "text") - .map(p => p.content) - .join(""); - - const toolCallParts = parts.filter(p => p.type === "tool-call"); - - return ( -
-    <div key={id} className={role}>
-      {textContent && (
-        <div className="content">{textContent}</div>
-      )}
-      {toolCallParts.map(tc => (
-        <div key={tc.id} className="tool-call">{tc.name}</div>
-      ))}
-    </div>
- ); -})} -``` - -**Step 2:** Access tool call state - -```typescript -{toolCallParts.map(tc => ( -
-  <div key={tc.id} className="tool-call">
-    <span>{tc.name}</span>
-    {/* plain-text placeholders for the streaming/complete indicators */}
-    {tc.state === "input-streaming" && <span>streaming…</span>}
-    {tc.state === "input-complete" && <span>✓</span>}
-  </div>
-))} -``` - -### For Server Code (Python/PHP) - -**No changes required!** Servers receive ModelMessages (the same structure as before), so existing server code continues to work without modification. - -## New Features Available - -### 1. Tool State Tracking - -Monitor tool call progress in real-time: - -```typescript -const processor = new StreamProcessor({ - handlers: { - onToolCallStateChange: (index, id, name, state, args, parsedArgs) => { - console.log(`Tool ${name} is now ${state}`) - if (parsedArgs) { - console.log('Parsed arguments so far:', parsedArgs) - } - }, - }, -}) -``` - -### 2. Message Converters - -Convert between UIMessages and ModelMessages: - -```typescript -import { - uiMessageToModelMessages, - modelMessageToUIMessage, - modelMessagesToUIMessages, -} from '@tanstack/ai-client' - -// Convert UI message to model message(s) -const modelMessages = uiMessageToModelMessages(uiMessage) - -// Convert model message to UI message -const uiMessage = modelMessageToUIMessage(modelMessage, 'msg-123') - -// Convert array of model messages to UI messages -const uiMessages = modelMessagesToUIMessages(modelMessages) -``` - -### 3. Custom JSON Parser - -Provide your own parser for incomplete JSON: - -```typescript -const customParser = { - parse: (jsonString: string) => { - // Your custom parsing logic - return myPartialJSONParser(jsonString) - }, -} - -const processor = new StreamProcessor({ - jsonParser: customParser, - handlers: { - /* ... */ - }, -}) -``` - -## Updated Exports - -### @tanstack/ai - -- ✅ `ModelMessage` (renamed from `Message`) -- All other exports unchanged - -### @tanstack/ai-client - -- ✅ `UIMessage` - New parts-based message type -- ✅ `MessagePart`, `TextPart`, `ToolCallPart`, `ToolResultPart` - Part types -- ✅ `ToolCallState`, `ToolResultState` - State types -- ✅ `uiMessageToModelMessages`, `modelMessageToUIMessage`, `modelMessagesToUIMessages` - Converters -- ✅ `parsePartialJSON`, `PartialJSONParser`, `JSONParser` - JSON parsing utilities -- ✅ `UIMessage` - Domain-specific message format optimized for building chat UIs - -## Breaking Changes - -### ❗️ Message Type Rename - -- `Message` is now `ModelMessage` in `@tanstack/ai` -- Update all type imports and variable declarations - -### ❗️ UIMessage Structure Change - -- Messages now have `parts: MessagePart[]` instead of `content` and `toolCalls` -- Update UI rendering code to iterate over parts -- Access text via `parts.filter(p => p.type === "text")` -- Access tool calls via `parts.filter(p => p.type === "tool-call")` - -### ✅ No Breaking Changes For - -- Server-side code (Python, PHP) - continues to work as-is -- Connection adapters - automatically convert UIMessages to ModelMessages -- Core AI functionality - ModelMessage has same structure as old Message - -## Benefits - -1. **Better UI State Management**: Track tool call progress in real-time -2. **Partial JSON Parsing**: Handle incomplete tool arguments during streaming -3. **Cleaner Domain Separation**: UIMessages for UI, ModelMessages for LLMs -4. **Tool Result Integration**: Tool results are now parts of the message stream -5. 
**Enhanced Developer Experience**: Clear state transitions and type safety - -## Examples - -See the updated examples: - -- **CLI Example**: `/examples/cli/src/index.ts` - Uses ModelMessage -- **React Chat Example**: `/examples/ts-chat/src/routes/demo/tanchat.tsx` - Uses UIMessage with parts -- **AI Assistant Component**: `/examples/ts-chat/src/components/example-AIAssistant.tsx` - Uses UIMessage with parts - -## Support - -For questions or issues related to this migration, please refer to the TanStack AI documentation or open an issue on GitHub. diff --git a/ai-docs/TYPE_NARROWING_SOLUTION.md b/ai-docs/TYPE_NARROWING_SOLUTION.md deleted file mode 100644 index 2f618e06..00000000 --- a/ai-docs/TYPE_NARROWING_SOLUTION.md +++ /dev/null @@ -1,165 +0,0 @@ -# Type Narrowing with Separate Methods ✅ - -> **Note**: This document describes type narrowing with the current API. The previous `as` option approach has been replaced with separate methods. - -## The Solution - -With separate methods, type narrowing is automatic and simple: - -```typescript -// Streaming - returns AsyncIterable -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], -}); -// Type: AsyncIterable ✅ - -// Promise-based - returns Promise -const result = ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], -}); -// Type: Promise ✅ -``` - -No need for `as const` assertions or discriminated unions - TypeScript automatically knows the return type! - -## How to Use - -### ✅ Correct Usage - Type is Automatically Narrowed - -```typescript -// Returns AsyncIterable -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], -}); - -for await (const chunk of stream) { - // TypeScript knows chunk is StreamChunk ✅ - console.log(chunk.type); -} - -// Returns Promise -const result = await ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], -}); - -// TypeScript knows result is ChatCompletionResult ✅ -console.log(result.content); -console.log(result.usage.totalTokens); -``` - -### Type Inference Examples - -```typescript -// 1. Stream mode - returns AsyncIterable -const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] }) -// Type: AsyncIterable ✅ - -// 2. Promise mode - returns Promise -const promise = ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) -// Type: Promise ✅ - -// 3. 
After await - ChatCompletionResult -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) -// Type: ChatCompletionResult ✅ -``` - -## Real-World Example: API Handler - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -export const Route = createAPIFileRoute('/api/chat')({ - POST: async ({ request }): Promise => { - const { messages } = await request.json() - - // TypeScript knows this returns AsyncIterable ✅ - const stream = ai.chat({ - adapter: 'openAi', - model: 'gpt-4o', - messages, - fallbacks: [{ adapter: 'ollama', model: 'llama2' }], - }) - - // Convert to Response - return toStreamResponse(stream) - }, -}) -``` - -## Why Separate Methods Are Better - -With the old `as` option approach: - -```typescript -const as = 'response' // Type: string -const result = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [], as }) -// Return type: Promise | AsyncIterable | Response -// ❌ TypeScript doesn't know which specific type -// Need: as: "response" as const -``` - -With separate methods: - -```typescript -const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] }) -// Return type: AsyncIterable -// ✅ TypeScript knows exact type automatically! -``` - -## Technical Explanation - -The separate methods approach is simpler: - -```typescript -class AI { - chat(options: ChatOptions): AsyncIterable { - // Implementation... - } - - async chatCompletion(options: ChatOptions): Promise { - // Implementation... - } -} -``` - -TypeScript's type inference: - -1. Call `chat()` → method signature says it returns `AsyncIterable` -2. Call `chatCompletion()` → method signature says it returns `Promise` -3. No conditional types needed - just straightforward method signatures! - -## Benefits - -✅ **Type Safety**: TypeScript knows exact return type at compile time -✅ **IntelliSense**: Autocomplete shows correct properties for each method -✅ **Compile-Time Errors**: Catch type mismatches before runtime -✅ **Refactoring Safety**: Changes are caught automatically -✅ **Self-Documenting**: Methods serve as inline documentation -✅ **Simpler**: No `as const` needed, no overloads needed - -## Summary - -The separate methods API provides perfect type narrowing without any special syntax: - -| Method | Return Type | -| ------------------ | ------------------------------- | -| `chat()` | `AsyncIterable` | -| `chatCompletion()` | `Promise` | - -**Pro Tip**: Just call the method you need - TypeScript handles the rest! 🎉 diff --git a/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md b/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md deleted file mode 100644 index adc0554d..00000000 --- a/ai-docs/TYPE_NARROWING_UNIFIED_CHAT.md +++ /dev/null @@ -1,225 +0,0 @@ -# Type Narrowing in Chat API - -> **Note**: This document describes type narrowing with the current API using separate methods. The previous `as` option approach has been replaced with `chat()` for streaming and `chatCompletion()` for promise-based completion. - -## Overview - -The chat API uses separate methods, which provides automatic type narrowing without needing discriminated unions or const assertions: - -- **`chat()`** - Always returns `AsyncIterable` -- **`chatCompletion()`** - Always returns `Promise` - -TypeScript automatically knows the exact return type based on which method you call! 
- -## Type Narrowing Rules - -| Method | Return Type | Usage | -| ------------------ | ------------------------------- | ---------------------------------------------- | -| `chat()` | `AsyncIterable` | Can use `for await...of`, iterate chunks | -| `chatCompletion()` | `Promise` | Can `await`, access `.content`, `.usage`, etc. | - -## Examples with Type Checking - -### 1. Promise Mode (chatCompletion) - Type is `Promise` - -```typescript -const result = ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -// TypeScript knows result is Promise -const resolved = await result - -// ✅ These work - properties exist on ChatCompletionResult -console.log(resolved.content) -console.log(resolved.role) -console.log(resolved.usage.totalTokens) - -// ❌ TypeScript error - headers doesn't exist on ChatCompletionResult -console.log(resolved.headers) // Type error! -``` - -### 2. Stream Mode (chat) - Type is `AsyncIterable` - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -// TypeScript knows stream is AsyncIterable -// ✅ This works - can iterate async iterable -for await (const chunk of stream) { - console.log(chunk.type) - console.log(chunk.id) - console.log(chunk.model) -} - -// ❌ TypeScript error - content doesn't exist on AsyncIterable -console.log(stream.content) // Type error! - -// ❌ TypeScript error - headers doesn't exist on AsyncIterable -console.log(stream.headers) // Type error! -``` - -### 3. HTTP Response Mode - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -const response = toStreamResponse(stream) - -// TypeScript knows response is Response -// ✅ These work - properties exist on Response -console.log(response.headers) -console.log(response.body) -console.log(response.status) -console.log(response.ok) - -const contentType = response.headers.get('Content-Type') - -// ❌ TypeScript error - content doesn't exist on Response -console.log(response.content) // Type error! 
-``` - -## Function Return Type Inference - -TypeScript correctly infers return types in functions: - -### API Handler - Returns `Response` - -```typescript -import { toStreamResponse } from "@tanstack/ai"; - -function apiHandler() { - const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - }); - - return toStreamResponse(stream); - // TypeScript infers: function apiHandler(): Response ✅ -} -``` - -### Type-safe API Handler - -```typescript -import { toStreamResponse } from "@tanstack/ai"; - -function apiHandler(): Response { - const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - }); - - return toStreamResponse(stream); // ✅ Correct - returns Response -} - -function wrongApiHandler(): Response { - const result = ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], - }); - - return result; // ❌ TypeScript error - returns Promise, not Response -} -``` - -### Streaming Handler - -```typescript -async function* streamHandler() { - const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - }); - - // TypeScript knows stream is AsyncIterable - for await (const chunk of stream) { - yield chunk; // ✅ Works perfectly - } -} -``` - -## With Fallbacks - Type Narrowing Still Works - -```typescript -// Promise with fallbacks - Type: Promise -const promise = ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [{ adapter: "ollama", model: "llama2" }] -}); -const resolved = await promise; -console.log(resolved.content); // ✅ Works - -// Stream with fallbacks - Type: AsyncIterable -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [{ adapter: "ollama", model: "llama2" }] -}); -for await (const chunk of stream) { - console.log(chunk.type); // ✅ Works -} -``` - -## How It Works (Technical Details) - -With separate methods, TypeScript doesn't need function overloads or conditional types: - -```typescript -class AI { - // Simple method signatures - no overloads needed! - chat(options: ChatOptions): AsyncIterable { - return this.adapter.chatStream(options) - } - - async chatCompletion(options: ChatOptions): Promise { - return this.adapter.chatCompletion(options) - } -} -``` - -TypeScript's type inference is straightforward: - -- Call `chat()` → get `AsyncIterable` -- Call `chatCompletion()` → get `Promise` - -No need for `as const` assertions or discriminated unions! - -## Benefits - -✅ **Type Safety**: TypeScript knows exact return type at compile time -✅ **IntelliSense**: Autocomplete shows correct properties for each method -✅ **Compile-Time Errors**: Catch type mismatches before runtime -✅ **Refactoring Safety**: Changes are caught automatically -✅ **Self-Documenting**: Methods serve as inline documentation -✅ **Simpler**: No need for const assertions or overloads - -## Summary - -The separate methods API provides perfect type narrowing automatically: - -| Code | Return Type | -| ------------------ | ------------------------------- | -| `chat()` | `AsyncIterable` | -| `chatCompletion()` | `Promise` | - -TypeScript enforces these types at compile time, providing complete type safety without any special syntax! 
🎉 diff --git a/ai-docs/TYPE_SAFETY.md b/ai-docs/TYPE_SAFETY.md deleted file mode 100644 index 48cf95ee..00000000 --- a/ai-docs/TYPE_SAFETY.md +++ /dev/null @@ -1,305 +0,0 @@ -# Type-Safe Multi-Adapter AI API - -This package provides complete TypeScript type safety for working with multiple AI providers, ensuring that you can only use models that are supported by each adapter. - -## Features - -- ✅ **Adapter-specific model validation** - TypeScript prevents using GPT models with Anthropic and vice versa -- ✅ **Full autocomplete support** - Your IDE suggests only valid models for the selected adapter -- ✅ **Compile-time safety** - Catch model incompatibilities before runtime -- ✅ **Multi-adapter support** - Use multiple AI providers in a single application -- ✅ **Type inference** - Model types are automatically inferred from adapter configuration - -## Installation - -```bash -npm install @tanstack/ai @tanstack/ai-openai @tanstack/ai-anthropic -``` - -## Basic Usage - -### Creating an AI instance with multiple adapters - -```typescript -import { AI } from '@tanstack/ai' -import { OpenAIAdapter } from '@tanstack/ai-openai' -import { AnthropicAdapter } from '@tanstack/ai-anthropic' - -const ai = new AI({ - adapters: { - openai: new OpenAIAdapter({ - apiKey: process.env.OPENAI_API_KEY!, - }), - anthropic: new AnthropicAdapter({ - apiKey: process.env.ANTHROPIC_API_KEY!, - }), - }, -}) -``` - -### Type-safe model selection - -```typescript -// ✅ VALID - OpenAI with GPT model -await ai.chat({ - adapter: 'openai', - model: 'gpt-4', // TypeScript knows this is valid - messages: [{ role: 'user', content: 'Hello!' }], -}) - -// ✅ VALID - Anthropic with Claude model -await ai.chat({ - adapter: 'anthropic', - model: 'claude-3-5-sonnet-20241022', // TypeScript knows this is valid - messages: [{ role: 'user', content: 'Hello!' }], -}) - -// ❌ COMPILE ERROR - Wrong model for adapter -await ai.chat({ - adapter: 'anthropic', - model: 'gpt-4', // TypeScript error: "gpt-4" not valid for Anthropic! - messages: [{ role: 'user', content: 'Hello!' }], -}) - -// ❌ COMPILE ERROR - Wrong model for adapter -await ai.chat({ - adapter: 'openai', - model: 'claude-3-5-sonnet-20241022', // TypeScript error: Claude not valid for OpenAI! - messages: [{ role: 'user', content: 'Hello!' }], -}) -``` - -## Available Models - -### OpenAI Models - -```typescript -type OpenAIModel = - | 'gpt-4' - | 'gpt-4-turbo' - | 'gpt-4-turbo-preview' - | 'gpt-4o' - | 'gpt-4o-mini' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-instruct' - | 'text-embedding-ada-002' - | 'text-embedding-3-small' - | 'text-embedding-3-large' -``` - -### Anthropic Models - -```typescript -type AnthropicModel = - | 'claude-3-5-sonnet-20241022' - | 'claude-3-5-sonnet-20240620' - | 'claude-3-opus-20240229' - | 'claude-3-sonnet-20240229' - | 'claude-3-haiku-20240307' - | 'claude-2.1' - | 'claude-2.0' - | 'claude-instant-1.2' -``` - -## API Methods - -All methods support the same type-safe adapter and model selection: - -### Chat Completion - -```typescript -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [ - { role: 'system', content: 'You are a helpful assistant.' }, - { role: 'user', content: 'What is TypeScript?' 
}, - ], - temperature: 0.7, - maxTokens: 500, -}) -``` - -### Streaming Chat - -```typescript -for await (const chunk of ai.streamChat({ - adapter: 'anthropic', - model: 'claude-3-5-sonnet-20241022', - messages: [{ role: 'user', content: 'Count from 1 to 5' }], -})) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) - } -} -``` - -### Text Generation - -```typescript -const result = await ai.generateText({ - adapter: 'openai', - model: 'gpt-3.5-turbo-instruct', - prompt: 'Write a haiku about TypeScript', - maxTokens: 100, -}) -``` - -### Summarization - -```typescript -const result = await ai.summarize({ - adapter: 'anthropic', - model: 'claude-3-haiku-20240307', - text: 'Long text to summarize...', - style: 'bullet-points', - maxLength: 200, -}) -``` - -### Embeddings - -```typescript -const result = await ai.embed({ - adapter: 'openai', - model: 'text-embedding-3-small', - input: 'Text to embed', -}) -``` - -## Advanced Features - -### Dynamic Adapter Addition - -```typescript -const aiWithGemini = ai.addAdapter( - 'gemini', - new GeminiAdapter({ apiKey: '...' }), -) - -// Now "gemini" is available with full type safety -await aiWithGemini.chat({ - adapter: 'gemini', - model: 'gemini-pro', // Types updated automatically - messages: [{ role: 'user', content: 'Hello!' }], -}) -``` - -### Getting Available Adapters - -```typescript -console.log(ai.adapterNames) // ["openai", "anthropic"] -``` - -### Direct Adapter Access - -```typescript -const openai = ai.getAdapter('openai') -console.log(openai.models) // Array of OpenAI models -``` - -## Benefits - -### 1. Compile-Time Safety - -**Before:** - -```typescript -// Runtime error when deployed -await ai.chat({ - provider: 'anthropic', - model: 'gpt-4', // Oops! Wrong model -}) -// Error: Model 'gpt-4' not found for provider 'anthropic' -``` - -**After:** - -```typescript -// Compile-time error in your editor -await ai.chat({ - adapter: 'anthropic', - model: 'gpt-4', // TypeScript error immediately -}) -// Error: Type '"gpt-4"' is not assignable to type 'claude-...' -``` - -### 2. IDE Autocomplete - -When you type `model:`, your IDE will show you **only** the models available for the selected adapter: - -- Select `openai` → See GPT models -- Select `anthropic` → See Claude models - -### 3. Refactoring Safety - -If you switch adapters, TypeScript will immediately flag any incompatible models: - -```typescript -// Change from OpenAI to Anthropic -await ai.chat({ - adapter: 'anthropic', // Changed this - model: 'gpt-4', // TypeScript immediately flags this as an error - messages: [], -}) -``` - -### 4. Self-Documenting Code - -The types serve as documentation - you can see all available models without checking docs: - -```typescript -// Hover over "model" to see all valid options -ai.chat({ adapter: "openai", model: /* hover here */ }); -``` - -## Creating Custom Adapters - -To create a custom adapter with type safety: - -```typescript -import { BaseAdapter } from '@tanstack/ai' - -const MY_MODELS = ['my-model-1', 'my-model-2', 'my-model-3'] as const - -export class MyAdapter extends BaseAdapter { - name = 'my-adapter' - models = MY_MODELS - - // Implement required methods... -} -``` - -Then use it with full type safety: - -```typescript -const ai = new AI({ - adapters: { - 'my-adapter': new MyAdapter({ apiKey: '...' }), - }, -}) - -// TypeScript now knows about "my-model-1", "my-model-2", etc. -await ai.chat({ - adapter: 'my-adapter', - model: 'my-model-1', // Autocomplete works! 
- messages: [], -}) -``` - -## Examples - -See the `/examples` directory for complete working examples: - -- `model-safety-demo.ts` - Comprehensive demonstration of type safety -- `type-safety-demo.ts` - Quick reference showing valid and invalid usage -- `multi-adapter-example.ts` - Real-world multi-adapter usage - -## TypeScript Configuration - -This package requires TypeScript 4.5 or higher for full type inference support. - -## License - -MIT diff --git a/ai-docs/UNIFIED_CHAT_API.md b/ai-docs/UNIFIED_CHAT_API.md deleted file mode 100644 index 300fa93e..00000000 --- a/ai-docs/UNIFIED_CHAT_API.md +++ /dev/null @@ -1,389 +0,0 @@ -# Unified Chat API - -## Overview - -The chat API provides two methods for different use cases: - -- **`chat()`** - Returns `AsyncIterable` - streaming with **automatic tool execution loop** -- **`chatCompletion()`** - Returns `Promise` - standard non-streaming chat with optional structured output - -### 🔄 Automatic Tool Execution in `chat()` - -**IMPORTANT:** The `chat()` method runs an automatic tool execution loop. When you provide tools with `execute` functions: - -1. **Model calls a tool** → SDK executes it automatically -2. **SDK emits chunks** for tool calls and results (`tool_call`, `tool_result`) -3. **SDK adds results** to messages and continues conversation -4. **Loop repeats** until stopped by `agentLoopStrategy` (default: `maxIterations(5)`) - -**You don't need to manually execute tools or manage conversation state** - the SDK handles everything internally! - -**📚 See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md) - -## Migration Guide - -### Before (Using `as` option) - -```typescript -// For non-streaming -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - as: 'promise', -}) - -// For streaming -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - as: 'stream', -}) -for await (const chunk of stream) { - console.log(chunk) -} - -// For HTTP response -const response = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - as: 'response', -}) -return response -``` - -### After (Separate Methods) - -```typescript -// For non-streaming - use chatCompletion() -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -// For streaming - use chat() -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) -for await (const chunk of stream) { - console.log(chunk) -} - -// For HTTP response - use chat() + toStreamResponse() -import { toStreamResponse } from '@tanstack/ai' - -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) -return toStreamResponse(stream) -``` - -## Usage Examples - -### 1. Promise Mode (chatCompletion) - -Standard non-streaming chat completion: - -```typescript -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [ - { role: 'system', content: 'You are a helpful assistant.' }, - { role: 'user', content: 'What is TypeScript?' }, - ], - temperature: 0.7, -}) - -console.log(result.content) -console.log(`Tokens used: ${result.usage.totalTokens}`) -``` - -### 2. 
Stream Mode (chat) - -Streaming with automatic tool execution loop: - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Write a story' }], - tools: [weatherTool], // Optional: tools are auto-executed - agentLoopStrategy: maxIterations(5), // Optional: control loop -}) - -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) // Stream text response - } else if (chunk.type === 'tool_call') { - console.log(`→ Calling tool: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'tool_result') { - console.log(`✓ Tool result: ${chunk.content}`) - } else if (chunk.type === 'done') { - console.log(`\nFinished: ${chunk.finishReason}`) - console.log(`Tokens: ${chunk.usage?.totalTokens}`) - } -} -``` - -**Chunk Types:** - -- `content` - Text content from the model (use `chunk.delta` for streaming) -- `tool_call` - Model is calling a tool (emitted by model, auto-executed by SDK) -- `tool_result` - Tool execution result (emitted after SDK executes tool) -- `done` - Stream complete (includes `finishReason` and token usage) -- `error` - An error occurred - -### 3. HTTP Response Mode - -Perfect for API endpoints: - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -// TanStack Start API Route -export const POST = async ({ request }: { request: Request }) => { - const { messages } = await request.json() - - const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4o', - messages, - temperature: 0.7, - }) - - // Convert stream to Response with SSE headers - return toStreamResponse(stream) -} -``` - -## With Fallbacks - -Both methods support fallbacks: - -```typescript -// Promise mode with fallbacks -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - fallbacks: [ - { adapter: 'anthropic', model: 'claude-3-sonnet-20240229' }, - { adapter: 'ollama', model: 'llama2' }, - ], -}) - -// Stream mode with fallbacks -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - fallbacks: [{ adapter: 'anthropic', model: 'claude-3-sonnet-20240229' }], -}) - -// HTTP response with fallbacks (seamless failover in HTTP streaming!) -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - fallbacks: [{ adapter: 'ollama', model: 'llama2' }], -}) -return toStreamResponse(stream) -``` - -## Tool Execution with Automatic Loop - -**The `chat()` method automatically executes tools in a loop** - no manual management needed! - -```typescript -const tools = [ - { - type: 'function' as const, - function: { - name: 'get_weather', - description: 'Get weather for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - required: ['location'], - }, - }, - execute: async (args: { location: string }) => { - // This function is automatically called by the SDK - const weather = await fetchWeatherAPI(args.location) - return JSON.stringify(weather) - }, - }, -] - -// Streaming chat with automatic tool execution -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" 
}], - tools, // Tools with execute functions are auto-executed - toolChoice: 'auto', - agentLoopStrategy: maxIterations(5), // Control loop behavior -}) - -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) // Stream text response - } else if (chunk.type === 'tool_call') { - // Model decided to call a tool - SDK will execute it automatically - console.log(`→ Calling: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'tool_result') { - // SDK executed the tool and got a result - console.log(`✓ Result: ${chunk.content}`) - } else if (chunk.type === 'done') { - console.log(`Finished: ${chunk.finishReason}`) - } -} -``` - -**🔄 What Happens Internally:** - -1. User asks: "What's the weather in SF?" -2. Model decides to call `get_weather` tool - - SDK emits `tool_call` chunk -3. **SDK automatically executes** `tools[0].execute({ location: "SF" })` - - SDK emits `tool_result` chunk -4. SDK adds assistant message (with tool call) + tool result to messages -5. **SDK automatically continues** conversation by calling model again -6. Model responds: "The weather in SF is sunny, 72°F" - - SDK emits `content` chunks -7. SDK emits `done` chunk - -**Key Points:** - -- ✅ Tools are **automatically executed** by the SDK (you don't call `execute`) -- ✅ Tool results are **automatically added** to messages -- ✅ Conversation **automatically continues** after tool execution -- ✅ Loop controlled by `agentLoopStrategy` (default: `maxIterations(5)`) -- ✅ All you do is handle chunks for display -- ✅ Custom strategies available for advanced control - -**Promise Mode (No Tool Execution):** - -The `chatCompletion()` method does NOT execute tools - it returns the model's response immediately: - -```typescript -// chatCompletion does not execute tools -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" }], - tools, -}) - -// If model wanted to call a tool, result.toolCalls will contain the calls -// but they won't be executed. This is useful if you want manual control. -if (result.toolCalls) { - console.log('Model wants to call:', result.toolCalls) - // You would execute manually and call chatCompletion again -} -``` - -## Type Safety - -TypeScript automatically infers the correct return type: - -```typescript -// Type: Promise -const promise = ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) - -// Type: AsyncIterable -const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] }) -``` - -## Benefits - -1. **Clearer API**: Separate methods for different use cases -2. **Consistent Interface**: Same options across both methods -3. **HTTP Streaming Made Easy**: Use `toStreamResponse()` helper -4. **Fallbacks Everywhere**: Both methods support the same fallback mechanism -5. **Type Safety**: TypeScript infers the correct return type -6. 
**Structured Outputs**: Available in `chatCompletion()` method - -## Real-World Example: TanStack Start API - -```typescript -import { createAPIFileRoute } from '@tanstack/start/api' -import { ai } from '~/lib/ai-client' -import { toStreamResponse } from '@tanstack/ai' - -export const Route = createAPIFileRoute('/api/chat')({ - POST: async ({ request }) => { - const { messages, tools } = await request.json() - - const stream = ai.chat({ - adapter: 'openAi', - model: 'gpt-4o', - messages, - tools, - toolChoice: 'auto', - maxIterations: 5, - temperature: 0.7, - fallbacks: [{ adapter: 'ollama', model: 'llama2' }], - }) - - return toStreamResponse(stream) - }, -}) -``` - -Client-side consumption: - -```typescript -const response = await fetch('/api/chat', { - method: 'POST', - body: JSON.stringify({ messages, tools }), -}) - -const reader = response.body!.getReader() -const decoder = new TextDecoder() - -while (true) { - const { done, value } = await reader.read() - if (done) break - - const text = decoder.decode(value) - const lines = text.split('\n\n') - - for (const line of lines) { - if (line.startsWith('data: ')) { - const data = line.slice(6) - if (data === '[DONE]') continue - - const chunk = JSON.parse(data) - if (chunk.type === 'content') { - console.log(chunk.delta) // Stream content to UI - } - } - } -} -``` - -## Summary - -The unified chat API provides: - -- **Two methods**: `chat()` for streaming, `chatCompletion()` for promises -- **Same options** across both methods -- **Built-in HTTP streaming** helper (`toStreamResponse`) -- **Full fallback support** in both methods -- **Type-safe** return types -- **Simpler code** for common patterns diff --git a/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md b/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md deleted file mode 100644 index a75ad3c2..00000000 --- a/ai-docs/UNIFIED_CHAT_IMPLEMENTATION.md +++ /dev/null @@ -1,257 +0,0 @@ -# Unified Chat API - Implementation Summary - -> **Note**: This document describes the historical implementation with the `as` option. The current API uses separate methods: `chat()` for streaming and `chatCompletion()` for promise-based completion. See `docs/UNIFIED_CHAT_API.md` for current API documentation. - -## Overview - -The chat API was previously unified using an `as` configuration option. 
The current implementation separates streaming and promise-based completion into distinct methods: - -- **`chat()`** - Always returns `AsyncIterable` (streaming) -- **`chatCompletion()`** - Always returns `Promise` (promise-based) - -## Current API Design - -### Method Separation - -```typescript -class AI { - // Streaming method with automatic tool execution loop - async *chat(options): AsyncIterable { - // Manages tool execution internally using ToolCallManager - const toolCallManager = new ToolCallManager(options.tools || []) - - while (iterationCount < maxIterations) { - // Stream from adapter - for await (const chunk of this.adapter.chatStream(options)) { - yield chunk - - // Track tool calls - if (chunk.type === 'tool_call') { - toolCallManager.addToolCallChunk(chunk) - } - } - - // Execute tools if needed - if (shouldExecuteTools && toolCallManager.hasToolCalls()) { - const toolResults = yield* toolCallManager.executeTools(doneChunk) - messages = [...messages, ...toolResults] - continue // Next iteration - } - - break // Done - } - } - - // Promise-based method (no tool execution loop) - async chatCompletion(options): Promise { - return this.adapter.chatCompletion(options) - } -} -``` - -### ToolCallManager Class - -The tool execution logic is extracted into a dedicated `ToolCallManager` class: - -```typescript -class ToolCallManager { - // Accumulate tool calls from streaming chunks - addToolCallChunk(chunk): void - - // Check if there are tool calls to execute - hasToolCalls(): boolean - - // Get all complete tool calls - getToolCalls(): ToolCall[] - - // Execute tools and yield tool_result chunks - async *executeTools( - doneChunk, - ): AsyncGenerator - - // Clear for next iteration - clear(): void -} -``` - -**Benefits:** - -- ✅ **Separation of concerns** - tool logic isolated from chat logic -- ✅ **Testable** - ToolCallManager can be unit tested independently -- ✅ **Maintainable** - changes to tool execution don't affect chat method -- ✅ **Reusable** - can be used in other contexts if needed - -### Benefits of Separate Methods - -✅ **Clearer API**: Method names indicate return type -✅ **Better Type Inference**: TypeScript knows exact return type without overloads -✅ **Simpler Implementation**: No need for discriminated unions -✅ **Easier to Use**: Less cognitive overhead - -## Usage Examples - -### 1. Promise Mode (chatCompletion) - -```typescript -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) -``` - -### 2. Stream Mode (chat) - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -for await (const chunk of stream) { - console.log(chunk) -} -``` - -### 3. HTTP Response Mode - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -return toStreamResponse(stream) -``` - -## Historical Context - -The `as` option approach was implemented to unify `chat()` and `streamChat()` methods. However, separate methods provide better developer experience and type safety. - -### Migration Path - -See `docs/MIGRATION_UNIFIED_CHAT.md` for migration guide from the `as` option API to the current separate methods API. 
- -## Features Preserved - -✅ **All features still supported**: - -- Discriminated union types for adapter-model pairs -- Fallback mechanism (single-with-fallbacks or fallbacks-only) -- **Automatic tool execution loop** (via `ToolCallManager`) -- Error chunk detection for streaming -- Type-safe model selection - -✅ **No breaking changes** to core functionality: - -- Streaming behavior matches old `streamChat()` method -- Promise behavior matches old `chat()` method -- Error handling and fallbacks work identically -- **Tool execution now handled by `ToolCallManager` class** - -## Files Changed - -### Core Implementation - -- ✅ `packages/ai/src/ai.ts` - - Removed `as` option from `chat()` method - - Made `chat()` streaming-only with automatic tool execution loop - - Added `chatCompletion()` method for promise-based calls - - Removed `streamToResponse()` private method (use `toStreamResponse()` from `stream-to-response.ts`) - - Refactored to use `ToolCallManager` for tool execution - -- ✅ `packages/ai/src/tool-call-manager.ts` (NEW) - - Encapsulates tool call accumulation, validation, and execution - - Independently testable - - Yields `tool_result` chunks during execution - - Returns tool result messages for conversation history - -- ✅ `packages/ai/src/types.ts` - - Added `ToolResultStreamChunk` type - - Added `"tool_result"` to `StreamChunkType` union - - Updated `StreamChunk` union to include `ToolResultStreamChunk` - -### Documentation - -- ✅ `docs/UNIFIED_CHAT_API.md` - Updated API documentation with tool execution details -- ✅ `docs/MIGRATION_UNIFIED_CHAT.md` - Migration guide -- ✅ `docs/UNIFIED_CHAT_QUICK_REFERENCE.md` - Quick reference updated -- ✅ `docs/TOOL_EXECUTION_LOOP.md` (NEW) - Comprehensive tool execution guide -- ✅ `README.md` - Updated with tool execution loop documentation -- ✅ `examples/cli/README.md` - Updated with automatic tool execution details -- ✅ `packages/ai-react/README.md` - Updated backend examples with tool execution -- ✅ `packages/ai-client/README.md` - Added backend example with tool execution - -## Benefits of Current Approach - -1. **Simpler API Surface** - Two clear methods instead of one with options -2. **Consistent Interface** - Same options across both methods -3. **HTTP Streaming Made Easy** - Use `toStreamResponse()` helper -4. **Better Developer Experience** - Clear intent with method names -5. **Type Safety Maintained** - All discriminated unions still work -6. **Backward Compatible Migration** - Easy to migrate from old API -7. **Fallbacks Everywhere** - Both methods support same fallback mechanism -8. **Automatic Tool Execution** - `chat()` handles tool calling in a loop via `ToolCallManager` -9. **Testable Architecture** - Tool execution logic isolated in separate class -10. **Clean Separation** - `chat()` for streaming+tools, `chatCompletion()` for promises+structured output - -## Testing Recommendations - -Test scenarios: - -1. ✅ Promise mode with primary adapter -2. ✅ Promise mode with fallbacks -3. ✅ Stream mode with primary adapter -4. ✅ Stream mode with fallbacks -5. ✅ HTTP response mode with primary adapter -6. ✅ HTTP response mode with fallbacks -7. ✅ Automatic tool execution in `chat()` (via `ToolCallManager`) -8. ✅ Manual tool handling in `chatCompletion()` -9. ✅ Error chunk detection triggers fallbacks -10. ✅ Type inference for both methods -11. ✅ Fallback-only mode (no primary adapter) -12. ✅ `ToolCallManager` unit tests (accumulation, validation, execution) -13. ✅ Multi-round tool execution (up to `maxIterations`) -14. 
✅ Tool execution error handling - -## Next Steps - -### For Users - -1. **Update method calls**: - - `chat({ as: "promise" })` → `chatCompletion()` - - `chat({ as: "stream" })` → `chat()` - - `chat({ as: "response" })` → `chat()` + `toStreamResponse()` -2. **Update imports**: Add `toStreamResponse` import if needed -3. **Test fallback behavior**: Verify seamless failover in all modes - -### Testing ToolCallManager - -The `ToolCallManager` class is independently testable. See `packages/ai/src/tool-call-manager.test.ts` for unit tests. - -Test scenarios: - -- ✅ Accumulating streaming tool call chunks -- ✅ Filtering incomplete tool calls -- ✅ Executing tools with valid arguments -- ✅ Handling tool execution errors -- ✅ Handling tools without execute functions -- ✅ Multiple tool calls in one iteration -- ✅ Clearing tool calls between iterations - -### Future Enhancements - -- Consider adding structured output support to streaming -- Add streaming response mode to embeddings -- Document SSE format for client-side consumption -- Add examples for different frameworks (Express, Fastify, etc.) - -## Conclusion - -Separating `chat()` and `chatCompletion()` provides a cleaner, more intuitive interface while maintaining all existing functionality. The two-method design covers all common use cases with clear, type-safe APIs. - -**Key Achievement**: Clear separation of concerns with `chat()` for streaming and `chatCompletion()` for promises, eliminating the need for a configuration option. diff --git a/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md b/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md deleted file mode 100644 index e0ec1235..00000000 --- a/ai-docs/UNIFIED_CHAT_QUICK_REFERENCE.md +++ /dev/null @@ -1,329 +0,0 @@ -# Unified Chat API - Quick Reference - -> **🔄 Automatic Tool Execution:** The `chat()` method runs an automatic tool execution loop. Tools with `execute` functions are automatically called, results are added to messages, and the conversation continues - all handled internally by the SDK! -> -> **📚 See also:** [Complete Tool Execution Loop Documentation](TOOL_EXECUTION_LOOP.md) - -## Two Methods for Different Use Cases - -```typescript -// 1. CHATCOMPLETION - Returns Promise -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], -}) - -// 2. 
CHAT - Returns AsyncIterable with automatic tool execution loop -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: 'Hello' }], - tools: [weatherTool], // Optional: auto-executed when called - agentLoopStrategy: maxIterations(5), // Optional: control loop -}) -for await (const chunk of stream) { - if (chunk.type === 'content') process.stdout.write(chunk.delta) - else if (chunk.type === 'tool_call') console.log('Calling tool...') - else if (chunk.type === 'tool_result') console.log('Tool executed!') -} -``` - -## Quick Comparison - -| Feature | chatCompletion | chat | -| --------------------- | ------------------------------- | ---------------------------- | -| **Return Type** | `Promise` | `AsyncIterable` | -| **When to Use** | Need complete response | Real-time streaming | -| **Async/Await** | ✅ Yes | ✅ Yes (for await) | -| **Fallbacks** | ✅ Yes | ✅ Yes | -| **Tool Execution** | ❌ No (manual) | ✅ **Automatic loop** | -| **Type-Safe Models** | ✅ Yes | ✅ Yes | -| **Structured Output** | ✅ Yes | ❌ No | - -## Common Patterns - -### API Endpoint (TanStack Start) - -```typescript -import { toStreamResponse } from '@tanstack/ai' - -export const Route = createAPIFileRoute('/api/chat')({ - POST: async ({ request }) => { - const { messages } = await request.json() - - const stream = ai.chat({ - adapter: 'openAi', - model: 'gpt-4o', - messages, - fallbacks: [{ adapter: 'ollama', model: 'llama2' }], - }) - - return toStreamResponse(stream) - }, -}) -``` - -### CLI Application - -```typescript -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: userInput }], -}) - -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) - } -} -``` - -### Batch Processing - -```typescript -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: document }], -}) - -await saveToDatabase(result.content) -``` - -## With Tools - -### Automatic Execution (chat) - -The `chat()` method **automatically executes tools in a loop**: - -```typescript -const tools = [ - { - type: 'function' as const, - function: { - name: 'get_weather', - description: 'Get weather for a location', - parameters: { - /* ... */ - }, - }, - execute: async (args: any) => { - // SDK automatically calls this when model calls the tool - return JSON.stringify({ temp: 72, condition: 'sunny' }) - }, - }, -] - -// Stream mode with automatic tool execution -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" }], - tools, // Tools with execute functions are auto-executed - toolChoice: 'auto', - agentLoopStrategy: maxIterations(5), // Control loop behavior -}) - -for await (const chunk of stream) { - if (chunk.type === 'content') { - process.stdout.write(chunk.delta) - } else if (chunk.type === 'tool_call') { - console.log(`→ Calling: ${chunk.toolCall.function.name}`) - } else if (chunk.type === 'tool_result') { - console.log(`✓ Result: ${chunk.content}`) - } -} -``` - -**How it works:** - -1. Model decides to call a tool → `tool_call` chunk -2. SDK executes `tool.execute()` → `tool_result` chunk -3. SDK adds result to messages → continues conversation -4. 
Repeats until complete (up to `maxIterations`) - -### Manual Execution (chatCompletion) - -The `chatCompletion()` method does NOT execute tools automatically: - -```typescript -// chatCompletion returns tool calls but doesn't execute them -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [{ role: 'user', content: "What's the weather in SF?" }], - tools, -}) - -// Check if model wants to call tools -if (result.toolCalls) { - console.log('Model wants to call:', result.toolCalls) - // You must execute manually and call chatCompletion again -} -``` - -## With Fallbacks - -Both methods support the same fallback mechanism: - -```typescript -// Promise with fallbacks -const result = await ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [ - { adapter: "anthropic", model: "claude-3-sonnet-20240229" }, - { adapter: "ollama", model: "llama2" } - ] -}); - -// Stream with fallbacks -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [ - { adapter: "ollama", model: "llama2" } - ] -}); - -// HTTP response with fallbacks (seamless HTTP failover!) -import { toStreamResponse } from "@tanstack/ai"; - -const stream = ai.chat({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [ - { adapter: "ollama", model: "llama2" } - ] -}); -return toStreamResponse(stream); -``` - -## Fallback-Only Mode - -No primary adapter, just try fallbacks in order: - -```typescript -const result = await ai.chatCompletion({ - messages: [...], - fallbacks: [ - { adapter: "openai", model: "gpt-4" }, - { adapter: "anthropic", model: "claude-3-sonnet-20240229" }, - { adapter: "ollama", model: "llama2" } - ], -}); -``` - -## Migration from Old API - -### Before (using `as` option) - -```typescript -// Non-streaming -const result = await ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'promise', -}) - -// Streaming -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'stream', -}) - -// HTTP Response -const response = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], - as: 'response', -}) -``` - -### After (separate methods) - -```typescript -// Non-streaming - use chatCompletion() -const result = await ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) - -// Streaming - use chat() -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) - -// HTTP Response - use chat() + toStreamResponse() -import { toStreamResponse } from '@tanstack/ai' - -const stream = ai.chat({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) -return toStreamResponse(stream) -``` - -## Type Inference - -TypeScript automatically infers the correct return type: - -```typescript -// Type: Promise -const promise = ai.chatCompletion({ - adapter: 'openai', - model: 'gpt-4', - messages: [], -}) - -// Type: AsyncIterable -const stream = ai.chat({ adapter: 'openai', model: 'gpt-4', messages: [] }) -``` - -## Error Handling - -Both methods throw errors if all adapters fail: - -```typescript -try { - const result = await ai.chatCompletion({ - adapter: "openai", - model: "gpt-4", - messages: [...], - fallbacks: [{ adapter: "ollama", model: "llama2" }] - }); -} catch (error: any) { - console.error("All adapters failed:", error.message); -} -``` - -## Cheat Sheet - -| What You Want | Use This | Example | -| ----------------- | ------------------------------------ | 
----------------------------------------------------- | -| Complete response | `chatCompletion()` | `const result = await ai.chatCompletion({...})` | -| Custom streaming | `chat()` | `for await (const chunk of ai.chat({...}))` | -| API endpoint | `chat()` + `toStreamResponse()` | `return toStreamResponse(ai.chat({...}))` | -| With fallbacks | Add `fallbacks: [...]` | `fallbacks: [{ adapter: "ollama", model: "llama2" }]` | -| With tools | Add `tools: [...]` | `tools: [{...}, {...}], toolChoice: "auto"` | -| Multiple adapters | Use `fallbacks` only | `fallbacks: [{ adapter: "a", model: "m1" }, {...}]` | -| Structured output | Use `chatCompletion()` with `output` | `chatCompletion({..., output: schema })` | - -## Documentation - -- **Full API Docs**: `docs/UNIFIED_CHAT_API.md` -- **Migration Guide**: `docs/MIGRATION_UNIFIED_CHAT.md` -- **Implementation**: `docs/UNIFIED_CHAT_IMPLEMENTATION.md` diff --git a/assets/CleanShot_2025-11-27_at_08.28.46_2x-4a965c47-7c20-4a7c-acd5-317a2c7876cd.png b/assets/CleanShot_2025-11-27_at_08.28.46_2x-4a965c47-7c20-4a7c-acd5-317a2c7876cd.png deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/guides/client-tools.md b/docs/guides/client-tools.md index 6490a0b1..10dbabdc 100644 --- a/docs/guides/client-tools.md +++ b/docs/guides/client-tools.md @@ -13,6 +13,7 @@ import { z } from "zod"; // Client tool - no execute function (executes on client) export const updateUI = tool({ + name: "update_ui", description: "Update the UI with new information", inputSchema: z.object({ message: z.string().describe("Message to display"), @@ -22,6 +23,7 @@ export const updateUI = tool({ }); export const saveToLocalStorage = tool({ + name: "save_to_local_storage", description: "Save data to browser local storage", inputSchema: z.object({ key: z.string().describe("Storage key"), @@ -127,6 +129,7 @@ Some tools might need both server and client execution: ```typescript // Server: Fetch data const fetchUserPreferences = tool({ + name: "fetch_user_preferences", description: "Get user preferences from server", inputSchema: z.object({ userId: z.string(), @@ -140,6 +143,7 @@ const fetchUserPreferences = tool({ // Client: Display preferences in UI const displayPreferences = tool({ + name: "display_preferences", description: "Display user preferences in the UI", inputSchema: z.object({ preferences: z.any(), diff --git a/docs/guides/server-tools.md b/docs/guides/server-tools.md index c4154542..9e4f37d7 100644 --- a/docs/guides/server-tools.md +++ b/docs/guides/server-tools.md @@ -4,7 +4,7 @@ Server tools execute on the backend, giving you secure access to databases, APIs ## Defining Server Tools -Server tools are defined using the `tool` utility and passed to the AI instance: +Server tools are defined using the `tool` utility with Zod schemas for type safety: ```typescript import { tool } from "@tanstack/ai"; @@ -12,10 +12,16 @@ import { z } from "zod"; // Example: Database query tool const getUserData = tool({ + name: "get_user_data", description: "Get user information from the database", inputSchema: z.object({ userId: z.string().describe("The user ID to look up"), }), + outputSchema: z.object({ + name: z.string(), + email: z.string().email(), + createdAt: z.string(), + }), execute: async ({ userId }) => { // This runs on the server - can access database, APIs, etc. 
const user = await db.users.findUnique({ where: { id: userId } }); @@ -29,6 +35,7 @@ const getUserData = tool({ // Example: API call tool const searchProducts = tool({ + name: "search_products", description: "Search for products in the catalog", inputSchema: z.object({ query: z.string().describe("Search query"), @@ -82,6 +89,7 @@ import { z } from "zod"; export const tools = { getUserData: tool({ + name: "get_user_data", description: "Get user information", inputSchema: z.object({ userId: z.string(), @@ -91,6 +99,7 @@ export const tools = { }, }), searchProducts: tool({ + name: "search_products", description: "Search products", inputSchema: z.object({ query: z.string(), @@ -131,6 +140,7 @@ Tools should handle errors gracefully: ```typescript const getUserData = tool({ + name: "get_user_data", description: "Get user information", inputSchema: z.object({ userId: z.string(), diff --git a/docs/guides/tool-approval.md b/docs/guides/tool-approval.md index 21866db9..d798f7a2 100644 --- a/docs/guides/tool-approval.md +++ b/docs/guides/tool-approval.md @@ -14,20 +14,21 @@ When a tool requires approval: ## Enabling Approval -Tools can be marked as requiring approval by setting `requiresApproval: true`: +Tools can be marked as requiring approval by setting `needsApproval: true`: ```typescript import { tool } from "@tanstack/ai"; import { z } from "zod"; const sendEmail = tool({ + name: "send_email", description: "Send an email to a recipient", inputSchema: z.object({ to: z.string().email(), subject: z.string(), body: z.string(), }), - requiresApproval: true, // This tool requires approval + needsApproval: true, // This tool requires approval execute: async ({ to, subject, body }) => { // Only executes if approved await emailService.send({ to, subject, body }); @@ -38,7 +39,7 @@ const sendEmail = tool({ ## Server-Side Approval -On the server, tools with `requiresApproval: true` will pause execution and wait for approval: +On the server, tools with `needsApproval: true` will pause execution and wait for approval: ```typescript import { chat, toStreamResponse } from "@tanstack/ai"; @@ -162,11 +163,12 @@ Client tools can also require approval: ```typescript // Server: Define client tool with approval const deleteLocalData = tool({ + name: "delete_local_data", description: "Delete data from local storage", inputSchema: z.object({ key: z.string(), }), - requiresApproval: true, // Requires approval even on client + needsApproval: true, // Requires approval even on client }); // Client: Handle approval @@ -204,13 +206,14 @@ Tools go through these states during approval: ```typescript const purchaseItem = tool({ + name: "purchase_item", description: "Purchase an item from the store", inputSchema: z.object({ itemId: z.string(), quantity: z.number(), price: z.number(), }), - requiresApproval: true, + needsApproval: true, execute: async ({ itemId, quantity, price }) => { const order = await createOrder({ itemId, quantity, price }); return { orderId: order.id, total: price * quantity }; diff --git a/docs/guides/tools.md b/docs/guides/tools.md index 0cdc6e19..042942c0 100644 --- a/docs/guides/tools.md +++ b/docs/guides/tools.md @@ -34,18 +34,24 @@ Tools that execute in the browser. 
These are useful for: ## Basic Tool Definition -Tools are defined using the `tool` utility from `@tanstack/ai`: +Tools are defined using the `tool` utility from `@tanstack/ai` with Zod schemas: ```typescript import { tool } from "@tanstack/ai"; import { z } from "zod"; const getWeather = tool({ + name: "get_weather", description: "Get the current weather for a location", inputSchema: z.object({ location: z.string().describe("The city and state, e.g. San Francisco, CA"), unit: z.enum(["celsius", "fahrenheit"]).optional(), }), + outputSchema: z.object({ + temperature: z.number(), + conditions: z.string(), + location: z.string(), + }), execute: async ({ location, unit }) => { // Fetch weather data const response = await fetch( @@ -54,6 +60,7 @@ const getWeather = tool({ }` ); const data = await response.json(); + // Return value is validated against outputSchema return { temperature: data.temperature, conditions: data.conditions, diff --git a/examples/ts-react-chat/src/lib/guitar-tools.ts b/examples/ts-react-chat/src/lib/guitar-tools.ts index c5030334..0933c9f3 100644 --- a/examples/ts-react-chat/src/lib/guitar-tools.ts +++ b/examples/ts-react-chat/src/lib/guitar-tools.ts @@ -1,96 +1,63 @@ import { tool } from '@tanstack/ai' +import { z } from 'zod' import guitars from '@/data/example-guitars' export const getGuitarsTool = tool({ - type: 'function', - function: { - name: 'getGuitars', - description: 'Get all products from the database', - parameters: { - type: 'object', - properties: {}, - required: [], - }, - }, + name: 'getGuitars', + description: 'Get all products from the database', + inputSchema: z.object({}), execute: async () => { - return JSON.stringify(guitars) + return guitars }, }) export const recommendGuitarTool = tool({ - type: 'function', - function: { - name: 'recommendGuitar', - description: - 'REQUIRED tool to display a guitar recommendation to the user. This tool MUST be used whenever recommending a guitar - do NOT write recommendations yourself. This displays the guitar in a special appealing format with a buy button.', - parameters: { - type: 'object', - properties: { - id: { - type: 'string', - description: - 'The ID of the guitar to recommend (from the getGuitars results)', - }, - }, - required: ['id'], - }, - }, + name: 'recommendGuitar', + description: + 'REQUIRED tool to display a guitar recommendation to the user. This tool MUST be used whenever recommending a guitar - do NOT write recommendations yourself. 
This displays the guitar in a special appealing format with a buy button.', + inputSchema: z.object({ + id: z + .string() + .describe( + 'The ID of the guitar to recommend (from the getGuitars results)', + ), + }), }) export const getPersonalGuitarPreferenceTool = tool({ - type: 'function', - function: { - name: 'getPersonalGuitarPreference', - description: - "Get the user's guitar preference from their local browser storage", - parameters: { - type: 'object', - properties: {}, - }, - }, + name: 'getPersonalGuitarPreference', + description: + "Get the user's guitar preference from their local browser storage", + inputSchema: z.object({}), // No execute = client-side tool }) export const addToWishListTool = tool({ - type: 'function', - function: { - name: 'addToWishList', - description: "Add a guitar to the user's wish list (requires approval)", - parameters: { - type: 'object', - properties: { - guitarId: { type: 'string' }, - }, - required: ['guitarId'], - }, - }, + name: 'addToWishList', + description: "Add a guitar to the user's wish list (requires approval)", + inputSchema: z.object({ + guitarId: z.string(), + }), needsApproval: true, // No execute = client-side but needs approval }) export const addToCartTool = tool({ - type: 'function', - function: { - name: 'addToCart', - description: 'Add a guitar to the shopping cart (requires approval)', - parameters: { - type: 'object', - properties: { - guitarId: { type: 'string' }, - quantity: { type: 'number' }, - }, - required: ['guitarId', 'quantity'], - }, - }, + name: 'addToCart', + description: 'Add a guitar to the shopping cart (requires approval)', + inputSchema: z.object({ + guitarId: z.string(), + quantity: z.number(), + }), needsApproval: true, execute: async (args) => { - return JSON.stringify({ + return { success: true, cartId: 'CART_' + Date.now(), guitarId: args.guitarId, quantity: args.quantity, totalItems: args.quantity, - }) + } }, }) diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index fa6adaf9..3519f1ac 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -1,8 +1,8 @@ import { createFileRoute } from '@tanstack/react-router' import { chat, maxIterations, toStreamResponse } from '@tanstack/ai' -import { openai } from '@tanstack/ai-openai' +// import { openai } from '@tanstack/ai-openai' // import { ollama } from "@tanstack/ai-ollama"; -// import { anthropic } from "@tanstack/ai-anthropic"; +import { anthropic } from '@tanstack/ai-anthropic' // import { gemini } from "@tanstack/ai-gemini"; import { allTools } from '@/lib/guitar-tools' @@ -47,12 +47,12 @@ export const Route = createFileRoute('/api/tanchat')({ try { // Use the stream abort signal for proper cancellation handling const stream = chat({ - adapter: openai(), + adapter: anthropic(), // For thinking/reasoning support, use one of these models: // - OpenAI: "gpt-5", "o3", "o3-pro", "o3-mini" (with reasoning option) // - Anthropic: "claude-sonnet-4-5-20250929", "claude-opus-4-5-20251101" (with thinking option) // - Gemini: "gemini-3-pro-preview", "gemini-2.5-pro" (with thinkingConfig option) - model: 'gpt-5', + model: 'claude-sonnet-4-5-20250929', // model: "claude-sonnet-4-5-20250929", // model: "smollm", // model: "gemini-2.5-flash", diff --git a/examples/ts-solid-chat/src/lib/guitar-tools.ts b/examples/ts-solid-chat/src/lib/guitar-tools.ts index 41a9bcf1..7ab609a9 100644 --- a/examples/ts-solid-chat/src/lib/guitar-tools.ts +++ 
b/examples/ts-solid-chat/src/lib/guitar-tools.ts @@ -1,97 +1,64 @@ import { tool } from '@tanstack/ai' +import { z } from 'zod' import guitars from '@/data/example-guitars' export const getGuitarsTool = tool({ - type: 'function', - function: { - name: 'getGuitars', - description: 'Get all products from the database', - parameters: { - type: 'object', - properties: {}, - required: [], - }, - }, + name: 'getGuitars', + description: 'Get all products from the database', + inputSchema: z.object({}), execute: async () => { - return JSON.stringify(guitars) + return guitars }, }) export const recommendGuitarTool = tool({ - type: 'function', - function: { - name: 'recommendGuitar', - description: - 'REQUIRED tool to display a guitar recommendation to the user. This tool MUST be used whenever recommending a guitar - do NOT write recommendations yourself. This displays the guitar in a special appealing format with a buy button.', - parameters: { - type: 'object', - properties: { - id: { - type: 'string', - description: - 'The ID of the guitar to recommend (from the getGuitars results)', - }, - }, - required: ['id'], - }, - }, + name: 'recommendGuitar', + description: + 'REQUIRED tool to display a guitar recommendation to the user. This tool MUST be used whenever recommending a guitar - do NOT write recommendations yourself. This displays the guitar in a special appealing format with a buy button.', + inputSchema: z.object({ + id: z + .string() + .describe( + 'The ID of the guitar to recommend (from the getGuitars results)', + ), + }), // No execute = client-side tool }) export const getPersonalGuitarPreferenceTool = tool({ - type: 'function', - function: { - name: 'getPersonalGuitarPreference', - description: - "Get the user's guitar preference from their local browser storage", - parameters: { - type: 'object', - properties: {}, - }, - }, + name: 'getPersonalGuitarPreference', + description: + "Get the user's guitar preference from their local browser storage", + inputSchema: z.object({}), // No execute = client-side tool }) export const addToWishListTool = tool({ - type: 'function', - function: { - name: 'addToWishList', - description: "Add a guitar to the user's wish list (requires approval)", - parameters: { - type: 'object', - properties: { - guitarId: { type: 'string' }, - }, - required: ['guitarId'], - }, - }, + name: 'addToWishList', + description: "Add a guitar to the user's wish list (requires approval)", + inputSchema: z.object({ + guitarId: z.string(), + }), needsApproval: true, // No execute = client-side but needs approval }) export const addToCartTool = tool({ - type: 'function', - function: { - name: 'addToCart', - description: 'Add a guitar to the shopping cart (requires approval)', - parameters: { - type: 'object', - properties: { - guitarId: { type: 'string' }, - quantity: { type: 'number' }, - }, - required: ['guitarId', 'quantity'], - }, - }, + name: 'addToCart', + description: 'Add a guitar to the shopping cart (requires approval)', + inputSchema: z.object({ + guitarId: z.string(), + quantity: z.number(), + }), needsApproval: true, execute: async (args) => { - return JSON.stringify({ + return { success: true, cartId: 'CART_' + Date.now(), guitarId: args.guitarId, quantity: args.quantity, totalItems: args.quantity, - }) + } }, }) diff --git a/package.json b/package.json index 74406482..13b1ada0 100644 --- a/package.json +++ b/package.json @@ -53,7 +53,7 @@ "size-limit": [ { "path": "packages/typescript/ai/dist/esm/index.js", - "limit": "10 KB" + "limit": "21 KB" } ], 
"devDependencies": { diff --git a/packages/typescript/ai-anthropic/live-tests/README.md b/packages/typescript/ai-anthropic/live-tests/README.md new file mode 100644 index 00000000..0cd5f2fd --- /dev/null +++ b/packages/typescript/ai-anthropic/live-tests/README.md @@ -0,0 +1,51 @@ +# Anthropic Live Tests + +These tests verify that the Anthropic adapter correctly handles tool calling with various parameter configurations. + +## Setup + +1. Create a `.env.local` file in this directory with your Anthropic API key: + + ``` + ANTHROPIC_API_KEY=sk-ant-... + ``` + +2. Install dependencies: + ```bash + pnpm install + ``` + +## Tests + +### `tool-test-empty-object.ts` + +Tests tools with empty object schemas (`z.object({})`). Verifies that: + +- Tool calls are made correctly +- Arguments are normalized to `{}` (not empty strings) +- Tools execute successfully + +### `tool-test.ts` + +Tests tools with required parameters. Verifies that: + +- Tool calls include all required parameters +- Arguments are valid JSON (not empty strings) +- Tools execute with correct arguments + +## Running Tests + +```bash +# Run all tests +pnpm test:all + +# Run individual tests +pnpm test # tool-test.ts +pnpm test:empty # tool-test-empty-object.ts +``` + +## Expected Behavior + +- **Arguments should NEVER be empty strings** - they should be: + - `"{}"` for empty object schemas + - Valid JSON strings for tools with parameters (e.g., `"{\"id\":\"1\"}"`) diff --git a/packages/typescript/ai-anthropic/live-tests/package.json b/packages/typescript/ai-anthropic/live-tests/package.json new file mode 100644 index 00000000..b21a1f97 --- /dev/null +++ b/packages/typescript/ai-anthropic/live-tests/package.json @@ -0,0 +1,19 @@ +{ + "name": "ai-anthropic-live-tests", + "version": "0.0.0", + "private": true, + "type": "module", + "scripts": { + "test": "tsx tool-test.ts", + "test:empty": "tsx tool-test-empty-object.ts", + "test:all": "tsx tool-test.ts && tsx tool-test-empty-object.ts" + }, + "dependencies": { + "@tanstack/ai": "workspace:*", + "@tanstack/ai-anthropic": "workspace:*", + "zod": "^3.24.1" + }, + "devDependencies": { + "tsx": "^4.19.2" + } +} diff --git a/packages/typescript/ai-anthropic/live-tests/tool-test-empty-object.ts b/packages/typescript/ai-anthropic/live-tests/tool-test-empty-object.ts new file mode 100644 index 00000000..93628f56 --- /dev/null +++ b/packages/typescript/ai-anthropic/live-tests/tool-test-empty-object.ts @@ -0,0 +1,169 @@ +import { createAnthropic } from '../src/index' +import { z } from 'zod' +import { readFileSync } from 'fs' +import { join, dirname } from 'path' +import { fileURLToPath } from 'url' + +// Load environment variables from .env.local manually +const __dirname = dirname(fileURLToPath(import.meta.url)) +try { + const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8') + envContent.split('\n').forEach((line) => { + const match = line.match(/^([^=]+)=(.*)$/) + if (match) { + process.env[match[1].trim()] = match[2].trim() + } + }) +} catch (e) { + // .env.local not found, will use process.env +} + +const apiKey = process.env.ANTHROPIC_API_KEY + +if (!apiKey) { + console.error('❌ ANTHROPIC_API_KEY not found in .env.local') + process.exit(1) +} + +async function testToolWithEmptyObjectSchema() { + console.log('🚀 Testing Anthropic tool calling with empty object schema\n') + + const adapter = createAnthropic(apiKey) + + // Create a tool with empty object schema (like getGuitars) + const getGuitarsTool = { + name: 'getGuitars', + description: 'Get all products from the 
database', + inputSchema: z.object({}), + execute: async () => { + console.log('✅ Tool executed successfully') + return [ + { id: '1', name: 'Guitar 1' }, + { id: '2', name: 'Guitar 2' }, + ] + }, + } + + const messages = [ + { + role: 'user' as const, + content: 'Get me all the guitars', + }, + ] + + console.log('📤 Sending request with tool:') + console.log(' Tool name:', getGuitarsTool.name) + console.log(' Input schema:', getGuitarsTool.inputSchema.toString()) + console.log(' User message:', messages[0].content) + console.log() + + try { + console.log('📥 Streaming response...\n') + + let toolCallFound = false + let toolExecuted = false + let finalResponse = '' + let toolCallArguments: string | null = null + + // @ts-ignore - using internal chat method + const stream = adapter.chatStream({ + model: 'claude-3-5-sonnet-20241022', + messages, + tools: [getGuitarsTool], + }) + + for await (const chunk of stream) { + if (chunk.type === 'tool_call') { + toolCallFound = true + toolCallArguments = chunk.toolCall.function.arguments + console.log('\n🔧 Tool call detected!') + console.log(' Name:', chunk.toolCall.function.name) + console.log(' Arguments (raw):', toolCallArguments) + console.log(' Arguments (type):', typeof toolCallArguments) + + // Validate arguments are not empty string + if (toolCallArguments === '') { + console.error(' ❌ ERROR: Arguments are empty string!') + console.error(' Expected: "{}" or valid JSON') + } else if (toolCallArguments === '{}') { + console.log(' ✅ Arguments are correctly normalized to {}') + } + + // Try to parse if it's a string + if (typeof toolCallArguments === 'string') { + try { + const parsed = JSON.parse(toolCallArguments) + console.log( + ' Arguments (parsed):', + JSON.stringify(parsed, null, 2), + ) + } catch (e) { + console.error(' ❌ Failed to parse arguments as JSON:', e) + } + } + + // Execute the tool + if (getGuitarsTool.execute) { + console.log('\n🔨 Executing tool...') + try { + const parsedArgs = + typeof toolCallArguments === 'string' + ? JSON.parse(toolCallArguments) + : toolCallArguments + const result = await getGuitarsTool.execute(parsedArgs) + toolExecuted = true + console.log(' Result:', JSON.stringify(result, null, 2)) + } catch (error) { + console.error(' ❌ Tool execution error:', error) + } + } + } + + if (chunk.type === 'content') { + finalResponse += chunk.delta + } + } + + console.log('\n' + '='.repeat(60)) + console.log('📊 Test Summary:') + console.log(' Tool call found:', toolCallFound ? '✅' : '❌') + console.log(' Arguments received:', toolCallArguments ? '✅' : '❌') + console.log(' Arguments value:', JSON.stringify(toolCallArguments)) + console.log( + ' Arguments not empty string:', + toolCallArguments !== '' ? '✅' : '❌', + ) + console.log(' Tool executed:', toolExecuted ? 
'✅' : '❌') + console.log(' Final response:', finalResponse || '(none)') + console.log('='.repeat(60)) + + if (!toolCallFound) { + console.error('\n❌ FAIL: No tool call was detected in the stream') + process.exit(1) + } + + if (toolCallArguments === '') { + console.error( + '\n❌ FAIL: Tool call arguments are empty string (should be "{}" or valid JSON)', + ) + process.exit(1) + } + + if (!toolExecuted) { + console.error('\n❌ FAIL: Tool was not executed successfully') + process.exit(1) + } + + console.log('\n✅ SUCCESS: Tool with empty object schema works correctly!') + process.exit(0) + } catch (error: any) { + console.error('\n❌ ERROR:', error.message) + if (error.error) { + console.error(' Error details:', JSON.stringify(error.error, null, 2)) + } + console.error('Stack:', error.stack) + process.exit(1) + } +} + +testToolWithEmptyObjectSchema() diff --git a/packages/typescript/ai-anthropic/live-tests/tool-test.ts b/packages/typescript/ai-anthropic/live-tests/tool-test.ts new file mode 100644 index 00000000..5e5846e9 --- /dev/null +++ b/packages/typescript/ai-anthropic/live-tests/tool-test.ts @@ -0,0 +1,212 @@ +import { createAnthropic } from '../src/index' +import { z } from 'zod' +import { readFileSync } from 'fs' +import { join, dirname } from 'path' +import { fileURLToPath } from 'url' + +// Load environment variables from .env.local manually +const __dirname = dirname(fileURLToPath(import.meta.url)) +try { + const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8') + envContent.split('\n').forEach((line) => { + const match = line.match(/^([^=]+)=(.*)$/) + if (match) { + process.env[match[1].trim()] = match[2].trim() + } + }) +} catch (e) { + // .env.local not found, will use process.env +} + +const apiKey = process.env.ANTHROPIC_API_KEY + +if (!apiKey) { + console.error('❌ ANTHROPIC_API_KEY not found in .env.local') + process.exit(1) +} + +async function testToolCallingWithArguments() { + console.log('🚀 Testing Anthropic tool calling with required arguments\n') + + const adapter = createAnthropic(apiKey) + + // Create a tool that requires arguments (like recommendGuitar) + const recommendGuitarTool = { + name: 'recommendGuitar', + description: + 'REQUIRED tool to display a guitar recommendation to the user. This tool MUST be used whenever recommending a guitar - do NOT write recommendations yourself. 
This displays the guitar in a special appealing format with a buy button.', + inputSchema: z.object({ + id: z + .string() + .describe( + 'The ID of the guitar to recommend (from the getGuitars results)', + ), + }), + execute: async (args: any) => { + console.log( + '✅ Tool executed with arguments:', + JSON.stringify(args, null, 2), + ) + + // Validate arguments were passed correctly + if (!args) { + console.error('❌ ERROR: Arguments are undefined!') + return 'Error: No arguments received' + } + + if (typeof args !== 'object') { + console.error('❌ ERROR: Arguments are not an object:', typeof args) + return 'Error: Invalid arguments type' + } + + if (!args.id) { + console.error('❌ ERROR: ID argument is missing!') + return 'Error: ID is required' + } + + console.log(` - id: "${args.id}" (type: ${typeof args.id})`) + + return `Recommended guitar with ID: ${args.id}` + }, + } + + const messages = [ + { + role: 'user' as const, + content: 'Recommend guitar ID 1 to me', + }, + ] + + console.log('📤 Sending request with tool:') + console.log(' Tool name:', recommendGuitarTool.name) + console.log(' Input schema:', recommendGuitarTool.inputSchema.toString()) + console.log(' User message:', messages[0].content) + console.log() + + try { + console.log('📥 Streaming response...\n') + + let toolCallFound = false + let toolCallArguments: string | null = null + let toolExecuted = false + let finalResponse = '' + + // @ts-ignore - using internal chat method + const stream = adapter.chatStream({ + model: 'claude-3-5-sonnet-20241022', + messages, + tools: [recommendGuitarTool], + }) + + for await (const chunk of stream) { + if (chunk.type === 'tool_call') { + toolCallFound = true + toolCallArguments = chunk.toolCall.function.arguments + console.log('\n🔧 Tool call detected!') + console.log(' Name:', chunk.toolCall.function.name) + console.log(' Arguments (raw):', toolCallArguments) + console.log(' Arguments (type):', typeof toolCallArguments) + + // Validate arguments are not empty string + if (toolCallArguments === '') { + console.error(' ❌ ERROR: Arguments are empty string!') + console.error(' Expected: Valid JSON with required parameters') + } + + // Try to parse if it's a string + if (typeof toolCallArguments === 'string') { + try { + const parsed = JSON.parse(toolCallArguments) + console.log( + ' Arguments (parsed):', + JSON.stringify(parsed, null, 2), + ) + toolCallArguments = parsed as any + } catch (e) { + console.error(' ❌ Failed to parse arguments as JSON:', e) + } + } + + // Execute the tool + if (recommendGuitarTool.execute) { + console.log('\n🔨 Executing tool...') + try { + const parsedArgs = + typeof toolCallArguments === 'string' + ? JSON.parse(toolCallArguments) + : toolCallArguments + const result = await recommendGuitarTool.execute(parsedArgs) + toolExecuted = true + console.log(' Result:', result) + } catch (error) { + console.error(' ❌ Tool execution error:', error) + } + } + } + + if (chunk.type === 'content') { + finalResponse += chunk.delta + } + } + + console.log('\n' + '='.repeat(60)) + console.log('📊 Test Summary:') + console.log(' Tool call found:', toolCallFound ? '✅' : '❌') + console.log(' Arguments received:', toolCallArguments ? '✅' : '❌') + console.log(' Arguments value:', JSON.stringify(toolCallArguments)) + console.log( + ' Arguments not empty string:', + toolCallArguments !== '' ? '✅' : '❌', + ) + console.log(' Tool executed:', toolExecuted ? 
'✅' : '❌') + console.log(' Final response:', finalResponse) + console.log('='.repeat(60)) + + if (!toolCallFound) { + console.error('\n❌ FAIL: No tool call was detected in the stream') + process.exit(1) + } + + if (toolCallArguments === '') { + console.error( + '\n❌ FAIL: Tool call arguments are empty string (should be valid JSON)', + ) + process.exit(1) + } + + if (typeof toolCallArguments === 'string') { + try { + const parsed = JSON.parse(toolCallArguments) + if (!parsed.id) { + console.error('\n❌ FAIL: ID parameter is missing from arguments') + process.exit(1) + } + } catch (e) { + console.error('\n❌ FAIL: Tool arguments are not valid JSON') + process.exit(1) + } + } else if (toolCallArguments && typeof toolCallArguments === 'object') { + if (!toolCallArguments.id) { + console.error('\n❌ FAIL: ID parameter is missing from arguments') + process.exit(1) + } + } + + if (!toolExecuted) { + console.error('\n❌ FAIL: Tool was not executed successfully') + process.exit(1) + } + + console.log('\n✅ SUCCESS: Tool calling with arguments works correctly!') + process.exit(0) + } catch (error: any) { + console.error('\n❌ ERROR:', error.message) + if (error.error) { + console.error(' Error details:', JSON.stringify(error.error, null, 2)) + } + console.error('Stack:', error.stack) + process.exit(1) + } +} + +testToolCallingWithArguments() diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index d1728903..1e9272c2 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -41,7 +41,8 @@ }, "dependencies": { "@anthropic-ai/sdk": "^0.71.0", - "@tanstack/ai": "workspace:*" + "@tanstack/ai": "workspace:*", + "zod": "^4.1.13" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14" diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts index a84580f7..d11f3724 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts @@ -369,6 +369,12 @@ export class Anthropic extends BaseAdapter< if (existing) { existing.input += event.delta.partial_json + // Normalize arguments: empty string -> {} for empty object schemas + let normalizedArgs = existing.input.trim() + if (normalizedArgs === '') { + normalizedArgs = '{}' + } + yield { type: 'tool_call', id: generateId(), @@ -379,7 +385,7 @@ export class Anthropic extends BaseAdapter< type: 'function', function: { name: existing.name, - arguments: event.delta.partial_json, + arguments: normalizedArgs, }, }, index: currentToolIndex, diff --git a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts index 7afcfa64..9a7d3ba2 100644 --- a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts @@ -2,6 +2,7 @@ import type { BetaToolBash20241022, BetaToolBash20250124, } from '@anthropic-ai/sdk/resources/beta' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type BashTool = BetaToolBash20241022 | BetaToolBash20250124 @@ -12,12 +13,9 @@ export function convertBashToolToAdapterFormat(tool: Tool): BashTool { } export function bashTool(config: BashTool): Tool { return { - type: 'function', - function: { - name: 'bash', - description: '', - parameters: {}, - }, + name: 'bash', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git 
a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts index cb2dc105..aadce668 100644 --- a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts @@ -2,6 +2,7 @@ import type { BetaCodeExecutionTool20250522, BetaCodeExecutionTool20250825, } from '@anthropic-ai/sdk/resources/beta' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type CodeExecutionTool = @@ -17,12 +18,9 @@ export function convertCodeExecutionToolToAdapterFormat( export function codeExecutionTool(config: CodeExecutionTool): Tool { return { - type: 'function', - function: { - name: 'code_execution', - description: '', - parameters: {}, - }, + name: 'code_execution', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts index bdd4110a..8954ac1b 100644 --- a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts @@ -2,6 +2,7 @@ import type { BetaToolComputerUse20241022, BetaToolComputerUse20250124, } from '@anthropic-ai/sdk/resources/beta' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = @@ -17,12 +18,9 @@ export function convertComputerUseToolToAdapterFormat( export function computerUseTool(config: ComputerUseTool): Tool { return { - type: 'function', - function: { - name: 'computer', - description: '', - parameters: {}, - }, + name: 'computer', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts index 26e1021e..34543583 100644 --- a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts @@ -1,5 +1,6 @@ import type { CacheControl } from '../text/text-provider-options' -import type { Tool } from '@tanstack/ai' +import { z } from 'zod' +import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' export interface CustomTool { /** @@ -26,15 +27,21 @@ export interface CustomTool { export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { const metadata = (tool.metadata as { cacheControl?: CacheControl | null } | undefined) || {} + + // Convert Zod schema to JSON Schema + const jsonSchema = convertZodToJsonSchema(tool.inputSchema) + + const inputSchema = { + type: 'object' as const, + properties: jsonSchema.properties || null, + required: jsonSchema.required || null, + } + return { - name: tool.function.name, + name: tool.name, type: 'custom', - description: tool.function.description, - input_schema: { - type: 'object', - properties: tool.function.parameters.properties || null, - required: tool.function.parameters.required || null, - }, + description: tool.description, + input_schema: inputSchema, cache_control: metadata.cacheControl || null, } } @@ -42,16 +49,13 @@ export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { export function customTool( name: string, description: string, - parameters: Record, + inputSchema: z.ZodType, cacheControl?: CacheControl | null, ): Tool { return { - type: 'function', - function: { - name, - description, - parameters, - }, + name, + description, + inputSchema, metadata: { 
cacheControl, }, diff --git a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts index 3318a48e..4e730f3a 100644 --- a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts @@ -1,4 +1,5 @@ import type { BetaMemoryTool20250818 } from '@anthropic-ai/sdk/resources/beta' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type MemoryTool = BetaMemoryTool20250818 @@ -10,12 +11,9 @@ export function convertMemoryToolToAdapterFormat(tool: Tool): MemoryTool { export function memoryTool(cacheControl?: MemoryTool): Tool { return { - type: 'function', - function: { - name: 'memory', - description: '', - parameters: {}, - }, + name: 'memory', + description: '', + inputSchema: z.object({}), metadata: { cacheControl, }, diff --git a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts index 401b0923..5608d265 100644 --- a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts @@ -3,6 +3,7 @@ import type { ToolTextEditor20250429, ToolTextEditor20250728, } from '@anthropic-ai/sdk/resources/messages' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type TextEditorTool = @@ -21,12 +22,9 @@ export function convertTextEditorToolToAdapterFormat( export function textEditorTool(config: T): Tool { return { - type: 'function', - function: { - name: 'str_replace_editor', - description: '', - parameters: {}, - }, + name: 'str_replace_editor', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/tool-converter.ts b/packages/typescript/ai-anthropic/src/tools/tool-converter.ts index 0b3e08f9..4ca43c38 100644 --- a/packages/typescript/ai-anthropic/src/tools/tool-converter.ts +++ b/packages/typescript/ai-anthropic/src/tools/tool-converter.ts @@ -37,7 +37,7 @@ export function convertToolsToProviderFormat( tools: Array, ): Array { return tools.map((tool) => { - const name = tool.function.name + const name = tool.name switch (name) { case 'bash': diff --git a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts index 196eb579..e34a2bb3 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts @@ -1,5 +1,6 @@ import type { BetaWebFetchTool20250910 } from '@anthropic-ai/sdk/resources/beta' import type { CacheControl } from '../text/text-provider-options' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebFetchTool = BetaWebFetchTool20250910 @@ -34,12 +35,9 @@ export function webFetchTool(config?: { cacheControl?: CacheControl | null }): Tool { return { - type: 'function', - function: { - name: 'web_fetch', - description: '', - parameters: {}, - }, + name: 'web_fetch', + description: '', + inputSchema: z.object({}), metadata: { allowedDomains: config?.allowedDomains, blockedDomains: config?.blockedDomains, diff --git a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts index 2aea4a09..f8c69ffb 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts @@ -1,5 +1,6 @@ import type { 
WebSearchTool20250305 } from '@anthropic-ai/sdk/resources/messages' import type { CacheControl } from '../text/text-provider-options' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebSearchTool = WebSearchTool20250305 @@ -74,12 +75,9 @@ export function webSearchTool(config: WebSearchTool): Tool { validateDomains(config) validateUserLocation(config) return { - type: 'function', - function: { - name: 'web_search', - description: '', - parameters: {}, - }, + name: 'web_search', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-client/src/chat-client.ts b/packages/typescript/ai-client/src/chat-client.ts index 2a06c8b5..2d2709c5 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -122,21 +122,22 @@ export class ChatClient { // Collect raw chunks for debugging const rawChunks: Array = [] const streamId = this.generateUniqueId('stream') + const currentMessageId = assistantMessageId const processor = new StreamProcessor({ chunkStrategy: this.streamProcessorConfig?.chunkStrategy, parser: this.streamProcessorConfig?.parser, handlers: { onTextUpdate: (content) => { - this.events.textUpdated(streamId, assistantMessageId, content) + this.events.textUpdated(streamId, currentMessageId, content) this.setMessages( - updateTextPart(this.messages, assistantMessageId, content), + updateTextPart(this.messages, currentMessageId, content), ) }, onToolCallStateChange: (_index, id, name, state, args) => { this.events.toolCallStateChanged( streamId, - assistantMessageId, + currentMessageId, id, name, state, @@ -145,7 +146,7 @@ export class ChatClient { // Update or create tool call part with state this.setMessages( - updateToolCallPart(this.messages, assistantMessageId, { + updateToolCallPart(this.messages, currentMessageId, { id, name, arguments: args, @@ -166,7 +167,7 @@ export class ChatClient { this.setMessages( updateToolResultPart( this.messages, - assistantMessageId, + currentMessageId, toolCallId, content, state, @@ -176,7 +177,7 @@ export class ChatClient { }, onApprovalRequested: (toolCallId, toolName, input, approvalId) => { this.events.approvalRequested( - assistantMessageId, + currentMessageId, toolCallId, toolName, input, @@ -187,7 +188,7 @@ export class ChatClient { this.setMessages( updateToolCallApproval( this.messages, - assistantMessageId, + currentMessageId, toolCallId, approvalId, ), @@ -232,9 +233,9 @@ export class ChatClient { } }, onThinkingUpdate: (content) => { - this.events.textUpdated(streamId, assistantMessageId, content) + this.events.textUpdated(streamId, currentMessageId, content) this.setMessages( - updateThinkingPart(this.messages, assistantMessageId, content), + updateThinkingPart(this.messages, currentMessageId, content), ) }, onStreamEnd: () => { diff --git a/packages/typescript/ai-client/src/message-updaters.ts b/packages/typescript/ai-client/src/message-updaters.ts index 564bf3de..af17fe70 100644 --- a/packages/typescript/ai-client/src/message-updaters.ts +++ b/packages/typescript/ai-client/src/message-updaters.ts @@ -8,8 +8,7 @@ import type { } from './types' /** - * Update or add a text part to a message, ensuring tool calls come before text. - * Text parts are always placed at the end (after tool calls). + * Update or add a text part to a message. 
*/ export function updateTextPart( messages: Array, @@ -21,21 +20,15 @@ export function updateTextPart( return msg } - let parts = [...msg.parts] + const parts = [...msg.parts] const textPartIndex = parts.findIndex((p) => p.type === 'text') - // Always add/update text part at the end (after tool calls) if (textPartIndex >= 0) { + // Update existing text part parts[textPartIndex] = { type: 'text', content } } else { - // Remove existing parts temporarily to ensure order - const toolCallParts = parts.filter((p) => p.type === 'tool-call') - const otherParts = parts.filter( - (p) => p.type !== 'tool-call' && p.type !== 'text', - ) - - // Rebuild: tool calls first, then other parts, then text - parts = [...toolCallParts, ...otherParts, { type: 'text', content }] + // Add new text part at the end (preserve natural streaming order) + parts.push({ type: 'text', content }) } return { ...msg, parts } @@ -44,7 +37,6 @@ export function updateTextPart( /** * Update or add a tool call part to a message. - * Tool calls are inserted before any text parts. */ export function updateToolCallPart( messages: Array, @@ -79,13 +71,8 @@ export function updateToolCallPart( // Update existing tool call parts[existingPartIndex] = toolCallPart } else { - // Insert tool call before any text parts - const textPartIndex = parts.findIndex((p) => p.type === 'text') - if (textPartIndex >= 0) { - parts.splice(textPartIndex, 0, toolCallPart) - } else { - parts.push(toolCallPart) - } + // Add new tool call at the end (preserve natural streaming order) + parts.push(toolCallPart) } return { ...msg, parts } @@ -247,7 +234,6 @@ export function updateToolCallApprovalResponse( /** * Update or add a thinking part to a message. - * Thinking parts are typically placed before text parts. */ export function updateThinkingPart( messages: Array, @@ -271,14 +257,8 @@ export function updateThinkingPart( // Update existing thinking part parts[thinkingPartIndex] = thinkingPart } else { - // Insert thinking part before text parts (but after tool calls) - const textPartIndex = parts.findIndex((p) => p.type === 'text') - if (textPartIndex >= 0) { - parts.splice(textPartIndex, 0, thinkingPart) - } else { - // No text part, add at end - parts.push(thinkingPart) - } + // Add new thinking part at the end (preserve natural streaming order) + parts.push(thinkingPart) } return { ...msg, parts } diff --git a/packages/typescript/ai-gemini/package.json b/packages/typescript/ai-gemini/package.json index 5ec5c90f..28902d75 100644 --- a/packages/typescript/ai-gemini/package.json +++ b/packages/typescript/ai-gemini/package.json @@ -41,7 +41,8 @@ ], "dependencies": { "@google/genai": "^1.30.0", - "@tanstack/ai": "workspace:*" + "@tanstack/ai": "workspace:*", + "zod": "^4.1.13" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts b/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts index f7eadd93..e3f35d05 100644 --- a/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts @@ -1,3 +1,4 @@ +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export interface CodeExecutionTool {} @@ -10,12 +11,9 @@ export function convertCodeExecutionToolToAdapterFormat(_tool: Tool) { export function codeExecutionTool(): Tool { return { - type: 'function', - function: { - name: 'code_execution', - description: '', - parameters: {}, - }, + name: 'code_execution', + description: '', + inputSchema: 
z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts index 9a1b65f6..43ceef01 100644 --- a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts @@ -1,4 +1,5 @@ import type { ComputerUse } from '@google/genai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = ComputerUse @@ -15,12 +16,9 @@ export function convertComputerUseToolToAdapterFormat(tool: Tool) { export function computerUseTool(config: ComputerUseTool): Tool { return { - type: 'function', - function: { - name: 'computer_use', - description: '', - parameters: {}, - }, + name: 'computer_use', + description: '', + inputSchema: z.object({}), metadata: { environment: config.environment, excludedPredefinedFunctions: config.excludedPredefinedFunctions, diff --git a/packages/typescript/ai-gemini/src/tools/file-search-tool.ts b/packages/typescript/ai-gemini/src/tools/file-search-tool.ts index 2c3816a1..0e8c33b0 100644 --- a/packages/typescript/ai-gemini/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/file-search-tool.ts @@ -1,3 +1,4 @@ +import { z } from 'zod' import type { Tool } from '@tanstack/ai' import type { FileSearch } from '@google/genai' @@ -12,12 +13,9 @@ export function convertFileSearchToolToAdapterFormat(tool: Tool) { export function fileSearchTool(config: FileSearchTool): Tool { return { - type: 'function', - function: { - name: 'file_search', - description: '', - parameters: {}, - }, + name: 'file_search', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts index 00305f6d..46a9de7c 100644 --- a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts @@ -1,4 +1,5 @@ import type { GoogleMaps } from '@google/genai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type GoogleMapsTool = GoogleMaps @@ -12,12 +13,9 @@ export function convertGoogleMapsToolToAdapterFormat(tool: Tool) { export function googleMapsTool(config?: GoogleMapsTool): Tool { return { - type: 'function', - function: { - name: 'google_maps', - description: '', - parameters: {}, - }, + name: 'google_maps', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts index 4f235d70..67269f7c 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts @@ -1,4 +1,5 @@ import type { GoogleSearchRetrieval } from '@google/genai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type GoogleSearchRetrievalTool = GoogleSearchRetrieval @@ -14,12 +15,9 @@ export function googleSearchRetrievalTool( config?: GoogleSearchRetrievalTool, ): Tool { return { - type: 'function', - function: { - name: 'google_search_retrieval', - description: '', - parameters: {}, - }, + name: 'google_search_retrieval', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts 
b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts index bc41b22e..7fae8497 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts @@ -1,4 +1,5 @@ import type { GoogleSearch } from '@google/genai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type GoogleSearchTool = GoogleSearch @@ -12,12 +13,9 @@ export function convertGoogleSearchToolToAdapterFormat(tool: Tool) { export function googleSearchTool(config?: GoogleSearchTool): Tool { return { - type: 'function', - function: { - name: 'google_search', - description: '', - parameters: {}, - }, + name: 'google_search', + description: '', + inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/tool-converter.ts b/packages/typescript/ai-gemini/src/tools/tool-converter.ts index 6f34b0cd..1d89414f 100644 --- a/packages/typescript/ai-gemini/src/tools/tool-converter.ts +++ b/packages/typescript/ai-gemini/src/tools/tool-converter.ts @@ -5,7 +5,7 @@ import { convertGoogleMapsToolToAdapterFormat } from './google-maps-tool' import { convertGoogleSearchRetrievalToolToAdapterFormat } from './google-search-retriveal-tool' import { convertGoogleSearchToolToAdapterFormat } from './google-search-tool' import { convertUrlContextToolToAdapterFormat } from './url-context-tool' -import type { Tool } from '@tanstack/ai' +import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' import type { ToolUnion } from '@google/genai' /** @@ -17,16 +17,11 @@ import type { ToolUnion } from '@google/genai' * @example * ```typescript * const tools: Tool[] = [{ - * type: "function", - * function: { - * name: "get_weather", - * description: "Get weather for a location", - * parameters: { - * type: "object", - * properties: { location: { type: "string" } }, - * required: ["location"] - * } - * } + * name: "get_weather", + * description: "Get weather for a location", + * inputSchema: z.object({ + * location: z.string() + * }) * }]; * * const geminiTools = convertToolsToProviderFormat(tools); @@ -47,7 +42,7 @@ export function convertToolsToProviderFormat( // Process each tool and group function declarations together for (const tool of tools) { - const name = tool.function.name + const name = tool.name switch (name) { case 'code_execution': @@ -74,15 +69,19 @@ export function convertToolsToProviderFormat( default: // Collect function declarations to group together // Description is required for Gemini function declarations - if (!tool.function.description) { + if (!tool.description) { throw new Error( - `Tool ${tool.function.name} requires a description for Gemini adapter`, + `Tool ${tool.name} requires a description for Gemini adapter`, ) } + + // Convert Zod schema to JSON Schema + const jsonSchema = convertZodToJsonSchema(tool.inputSchema) + functionDeclarations.push({ - name: tool.function.name, - description: tool.function.description, - parameters: tool.function.parameters, + name: tool.name, + description: tool.description, + parameters: jsonSchema, }) break } diff --git a/packages/typescript/ai-gemini/src/tools/url-context-tool.ts b/packages/typescript/ai-gemini/src/tools/url-context-tool.ts index dc3d69b7..9a6796b8 100644 --- a/packages/typescript/ai-gemini/src/tools/url-context-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/url-context-tool.ts @@ -1,3 +1,4 @@ +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export interface UrlContextTool {} @@ -10,12 +11,9 @@ export 
function convertUrlContextToolToAdapterFormat(_tool: Tool) { export function urlContextTool(): Tool { return { - type: 'function', - function: { - name: 'url_context', - description: '', - parameters: {}, - }, + name: 'url_context', + description: '', + inputSchema: z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-ollama/package.json b/packages/typescript/ai-ollama/package.json index 64645f1e..097cf571 100644 --- a/packages/typescript/ai-ollama/package.json +++ b/packages/typescript/ai-ollama/package.json @@ -42,7 +42,8 @@ ], "dependencies": { "@tanstack/ai": "workspace:*", - "ollama": "^0.6.3" + "ollama": "^0.6.3", + "zod": "^4.1.13" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts index a8f04397..1162e52d 100644 --- a/packages/typescript/ai-ollama/src/ollama-adapter.ts +++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts @@ -1,5 +1,5 @@ import { Ollama as OllamaSDK } from 'ollama' -import { BaseAdapter } from '@tanstack/ai' +import { BaseAdapter, convertZodToJsonSchema } from '@tanstack/ai' import type { ChatOptions, EmbeddingOptions, @@ -175,9 +175,9 @@ function convertToolsToOllamaFormat( return tools.map((tool) => ({ type: 'function', function: { - name: tool.function.name, - description: tool.function.description, - parameters: tool.function.parameters, + name: tool.name, + description: tool.description, + parameters: convertZodToJsonSchema(tool.inputSchema), }, })) } diff --git a/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts b/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts new file mode 100644 index 00000000..ed2f2989 --- /dev/null +++ b/packages/typescript/ai-openai/live-tests/tool-test-empty-object.ts @@ -0,0 +1,128 @@ +import { createOpenAI } from '../src/index' +import { z } from 'zod' +import { readFileSync } from 'fs' +import { join, dirname } from 'path' +import { fileURLToPath } from 'url' + +// Load environment variables from .env.local manually +const __dirname = dirname(fileURLToPath(import.meta.url)) +try { + const envContent = readFileSync(join(__dirname, '.env.local'), 'utf-8') + envContent.split('\n').forEach((line) => { + const match = line.match(/^([^=]+)=(.*)$/) + if (match) { + process.env[match[1].trim()] = match[2].trim() + } + }) +} catch (e) { + // .env.local not found, will use process.env +} + +const apiKey = process.env.OPENAI_API_KEY + +if (!apiKey) { + console.error('❌ OPENAI_API_KEY not found in .env.local') + process.exit(1) +} + +async function testToolWithEmptyObjectSchema() { + console.log('🚀 Testing OpenAI tool calling with empty object schema\n') + + const adapter = createOpenAI(apiKey) + + // Create a tool with empty object schema (like getGuitars) + const getGuitarsTool = { + name: 'getGuitars', + description: 'Get all products from the database', + inputSchema: z.object({}), + execute: async () => { + console.log('✅ Tool executed successfully') + return [ + { id: '1', name: 'Guitar 1' }, + { id: '2', name: 'Guitar 2' }, + ] + }, + } + + const messages = [ + { + role: 'user' as const, + content: 'Get me all the guitars', + }, + ] + + console.log('📤 Sending request with tool:') + console.log(' Tool name:', getGuitarsTool.name) + console.log(' Input schema:', getGuitarsTool.inputSchema.toString()) + console.log(' User message:', messages[0].content) + console.log() + + try { + console.log('📥 Streaming response...\n') + + let toolCallFound = false + let 
toolExecuted = false + let finalResponse = '' + + // @ts-ignore - using internal chat method + const stream = adapter.chatStream({ + model: 'gpt-4o-mini', + messages, + tools: [getGuitarsTool], + }) + + for await (const chunk of stream) { + if (chunk.type === 'tool_call') { + toolCallFound = true + console.log('\n🔧 Tool call detected!') + console.log(' Name:', chunk.toolCall.function.name) + console.log(' Arguments:', chunk.toolCall.function.arguments) + + // Execute the tool + if (getGuitarsTool.execute) { + console.log('\n🔨 Executing tool...') + try { + const result = await getGuitarsTool.execute({}) + toolExecuted = true + console.log(' Result:', JSON.stringify(result, null, 2)) + } catch (error) { + console.error(' ❌ Tool execution error:', error) + } + } + } + + if (chunk.type === 'content') { + finalResponse += chunk.delta + } + } + + console.log('\n' + '='.repeat(60)) + console.log('📊 Test Summary:') + console.log(' Tool call found:', toolCallFound ? '✅' : '❌') + console.log(' Tool executed:', toolExecuted ? '✅' : '❌') + console.log(' Final response:', finalResponse || '(none)') + console.log('='.repeat(60)) + + if (!toolCallFound) { + console.error('\n❌ FAIL: No tool call was detected in the stream') + process.exit(1) + } + + if (!toolExecuted) { + console.error('\n❌ FAIL: Tool was not executed successfully') + process.exit(1) + } + + console.log('\n✅ SUCCESS: Tool with empty object schema works correctly!') + process.exit(0) + } catch (error: any) { + console.error('\n❌ ERROR:', error.message) + if (error.error) { + console.error(' Error details:', JSON.stringify(error.error, null, 2)) + } + console.error('Stack:', error.stack) + process.exit(1) + } +} + +testToolWithEmptyObjectSchema() diff --git a/packages/typescript/ai-openai/live-tests/tool-test-optional.ts b/packages/typescript/ai-openai/live-tests/tool-test-optional.ts index 36a59f62..042ba337 100644 --- a/packages/typescript/ai-openai/live-tests/tool-test-optional.ts +++ b/packages/typescript/ai-openai/live-tests/tool-test-optional.ts @@ -1,4 +1,5 @@ import { createOpenAI } from '../src/index' +import { z } from 'zod' import { readFileSync } from 'fs' import { join, dirname } from 'path' import { fileURLToPath } from 'url' @@ -31,27 +32,17 @@ async function testToolWithOptionalParameters() { // Create a tool with optional parameters (unit is optional) const getTemperatureTool = { - type: 'function' as const, - function: { - name: 'get_temperature', - description: 'Get the current temperature for a specific location', - parameters: { - type: 'object', - properties: { - location: { - type: 'string', - description: 'The city or location to get the temperature for', - }, - unit: { - type: 'string', - enum: ['celsius', 'fahrenheit'], - description: - 'The temperature unit (optional, defaults to fahrenheit)', - }, - }, - required: ['location'], // unit is optional - }, - }, + name: 'get_temperature', + description: 'Get the current temperature for a specific location', + inputSchema: z.object({ + location: z + .string() + .describe('The city or location to get the temperature for'), + unit: z + .enum(['celsius', 'fahrenheit']) + .optional() + .describe('The temperature unit (optional, defaults to fahrenheit)'), + }), execute: async (args: any) => { console.log( '✅ Tool executed with arguments:', @@ -82,11 +73,8 @@ async function testToolWithOptionalParameters() { ] console.log('📤 Sending request with tool:') - console.log(' Tool name:', getTemperatureTool.function.name) - console.log( - ' Required params:', - 
getTemperatureTool.function.parameters.required, - ) + console.log(' Tool name:', getTemperatureTool.name) + console.log(' Input schema:', getTemperatureTool.inputSchema.toString()) console.log(' Optional params:', ['unit']) console.log(' User message:', messages[0].content) console.log() diff --git a/packages/typescript/ai-openai/live-tests/tool-test.ts b/packages/typescript/ai-openai/live-tests/tool-test.ts index f8e4bb0f..e702dc5a 100644 --- a/packages/typescript/ai-openai/live-tests/tool-test.ts +++ b/packages/typescript/ai-openai/live-tests/tool-test.ts @@ -1,4 +1,5 @@ import { createOpenAI } from '../src/index' +import { z } from 'zod' import { readFileSync } from 'fs' import { join, dirname } from 'path' import { fileURLToPath } from 'url' @@ -30,28 +31,15 @@ async function testToolCallingWithArguments() { const adapter = createOpenAI(apiKey) // Create a simple tool that requires arguments - // Note: Using strict mode which requires ALL properties to be in required array const getTemperatureTool = { - type: 'function' as const, - function: { - name: 'get_temperature', - description: 'Get the current temperature for a specific location', - parameters: { - type: 'object', - properties: { - location: { - type: 'string', - description: 'The city or location to get the temperature for', - }, - unit: { - type: 'string', - enum: ['celsius', 'fahrenheit'], - description: 'The temperature unit', - }, - }, - required: ['location', 'unit'], // strict mode requires ALL properties in required - }, - }, + name: 'get_temperature', + description: 'Get the current temperature for a specific location', + inputSchema: z.object({ + location: z + .string() + .describe('The city or location to get the temperature for'), + unit: z.enum(['celsius', 'fahrenheit']).describe('The temperature unit'), + }), execute: async (args: any) => { console.log( '✅ Tool executed with arguments:', @@ -91,11 +79,8 @@ async function testToolCallingWithArguments() { ] console.log('📤 Sending request with tool:') - console.log(' Tool name:', getTemperatureTool.function.name) - console.log( - ' Required params:', - getTemperatureTool.function.parameters.required, - ) + console.log(' Tool name:', getTemperatureTool.name) + console.log(' Input schema:', getTemperatureTool.inputSchema.toString()) console.log(' User message:', messages[0].content) console.log() diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json index 164c3457..30092a0d 100644 --- a/packages/typescript/ai-openai/package.json +++ b/packages/typescript/ai-openai/package.json @@ -41,7 +41,8 @@ ], "dependencies": { "@tanstack/ai": "workspace:*", - "openai": "^6.9.1" + "openai": "^6.9.1", + "zod": "^4.1.13" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts index 08b28fa1..a816d0c9 100644 --- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ApplyPatchTool = OpenAI.Responses.ApplyPatchTool @@ -19,12 +20,9 @@ export function convertApplyPatchToolToAdapterFormat( */ export function applyPatchTool(): Tool { return { - type: 'function', - function: { - name: 'apply_patch', - description: 'Apply a patch to modify files', - parameters: {}, - }, + name: 'apply_patch', + description: 'Apply 
a patch to modify files', + inputSchema: z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts index c03e9793..529fd22c 100644 --- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts +++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts @@ -1,3 +1,4 @@ +import { z } from 'zod' import type { Tool } from '@tanstack/ai' import type OpenAI from 'openai' @@ -21,12 +22,9 @@ export function convertCodeInterpreterToolToAdapterFormat( */ export function codeInterpreterTool(container: CodeInterpreterTool): Tool { return { - type: 'function', - function: { - name: 'code_interpreter', - description: 'Execute code in a sandboxed environment', - parameters: {}, - }, + name: 'code_interpreter', + description: 'Execute code in a sandboxed environment', + inputSchema: z.object({}), metadata: { type: 'code_interpreter', container, diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts index 0bce5ad3..b3b0e6e5 100644 --- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = OpenAI.Responses.ComputerTool @@ -22,12 +23,9 @@ export function convertComputerUseToolToAdapterFormat( */ export function computerUseTool(toolData: ComputerUseTool): Tool { return { - type: 'function', - function: { - name: 'computer_use_preview', - description: 'Control a virtual computer', - parameters: {}, - }, + name: 'computer_use_preview', + description: 'Control a virtual computer', + inputSchema: z.object({}), metadata: { ...toolData, }, diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts index 5e365d85..b663703e 100644 --- a/packages/typescript/ai-openai/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type CustomTool = OpenAI.Responses.CustomTool @@ -21,12 +22,9 @@ export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { */ export function customTool(toolData: CustomTool): Tool { return { - type: 'function', - function: { - name: 'custom', - description: toolData.description || 'A custom tool', - parameters: {}, - }, + name: 'custom', + description: toolData.description || 'A custom tool', + inputSchema: z.object({}), metadata: { ...toolData, }, diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts index 0aae3671..6fdc5500 100644 --- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' const validateMaxNumResults = (maxNumResults: number | undefined) => { @@ -33,12 +34,9 @@ export function fileSearchTool( ): Tool { validateMaxNumResults(toolData.max_num_results) return { - type: 'function', - function: { - name: 'file_search', - description: 'Search files in vector stores', - parameters: {}, - }, + name: 'file_search', + description: 'Search files in vector stores', + 
inputSchema: z.object({}), metadata: { ...toolData, }, diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index 006324f9..c6796808 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -1,5 +1,5 @@ import type OpenAI from 'openai' -import type { Tool } from '@tanstack/ai' +import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' export type FunctionTool = OpenAI.Responses.FunctionTool @@ -7,56 +7,29 @@ export type FunctionTool = OpenAI.Responses.FunctionTool * Converts a standard Tool to OpenAI FunctionTool format */ export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { - // If tool has metadata (created via functionTool helper), use that - if (tool.metadata) { - const metadata = tool.metadata as Omit - return { - type: 'function', - ...metadata, - } - } - - // Otherwise, convert directly from tool.function (regular Tool structure) - // For Responses API, FunctionTool has name at top level, with function containing description and parameters + // Convert Zod schema to JSON Schema + const jsonSchema = convertZodToJsonSchema(tool.inputSchema) // Determine if we can use strict mode // Strict mode requires all properties to be in the required array - const parameters = tool.function.parameters - const properties = parameters.properties || {} - const required = parameters.required || [] + const properties = jsonSchema.properties || {} + const required = jsonSchema.required || [] const propertyNames = Object.keys(properties) // Only enable strict mode if all properties are required // This ensures compatibility with tools that have optional parameters const canUseStrict = propertyNames.length > 0 && - propertyNames.every((prop) => required.includes(prop)) + propertyNames.every((prop: string) => required.includes(prop)) return { type: 'function', - name: tool.function.name, - description: tool.function.description, + name: tool.name, + description: tool.description, parameters: { - ...tool.function.parameters, + ...jsonSchema, additionalProperties: false, }, strict: canUseStrict, } satisfies FunctionTool } - -/** - * Creates a standard Tool from FunctionTool parameters - */ -export function functionTool(config: Omit): Tool { - return { - type: 'function', - function: { - name: config.name, - description: config.description ?? '', - parameters: config.parameters ?? 
{}, - }, - metadata: { - ...config, - }, - } -} diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts index 3ed45c92..1a165fd2 100644 --- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ImageGenerationTool = OpenAI.Responses.Tool.ImageGeneration @@ -30,12 +31,9 @@ export function imageGenerationTool( ): Tool { validatePartialImages(toolData.partial_images) return { - type: 'function', - function: { - name: 'image_generation', - description: 'Generate images based on text descriptions', - parameters: {}, - }, + name: 'image_generation', + description: 'Generate images based on text descriptions', + inputSchema: z.object({}), metadata: { ...toolData, }, diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts index 40c0bc3b..a19c761b 100644 --- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type LocalShellTool = OpenAI.Responses.Tool.LocalShell @@ -19,12 +20,9 @@ export function convertLocalShellToolToAdapterFormat( */ export function localShellTool(): Tool { return { - type: 'function', - function: { - name: 'local_shell', - description: 'Execute local shell commands', - parameters: {}, - }, + name: 'local_shell', + description: 'Execute local shell commands', + inputSchema: z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts index 0792a099..123f198c 100644 --- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type MCPTool = OpenAI.Responses.Tool.Mcp @@ -34,12 +35,9 @@ export function mcpTool(toolData: Omit): Tool { validateMCPtool({ ...toolData, type: 'mcp' }) return { - type: 'function', - function: { - name: 'mcp', - description: toolData.server_description || '', - parameters: {}, - }, + name: 'mcp', + description: toolData.server_description || '', + inputSchema: z.object({}), metadata: toolData, } } diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts index 30a1b57b..de8bf459 100644 --- a/packages/typescript/ai-openai/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ShellTool = OpenAI.Responses.FunctionShellTool @@ -17,12 +18,9 @@ export function convertShellToolToAdapterFormat(_tool: Tool): ShellTool { */ export function shellTool(): Tool { return { - type: 'function', - function: { - name: 'shell', - description: 'Execute shell commands', - parameters: {}, - }, + name: 'shell', + description: 'Execute shell commands', + inputSchema: z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-openai/src/tools/tool-converter.ts b/packages/typescript/ai-openai/src/tools/tool-converter.ts index 
9262789c..c4ac5909 100644 --- a/packages/typescript/ai-openai/src/tools/tool-converter.ts +++ b/packages/typescript/ai-openai/src/tools/tool-converter.ts @@ -35,7 +35,7 @@ export function convertToolsToProviderFormat( 'custom', ]) - const toolName = tool.function.name + const toolName = tool.name // If it's a special tool name, route to the appropriate converter if (specialToolNames.has(toolName)) { diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts index e68e21e2..795f0fb0 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebSearchPreviewTool = OpenAI.Responses.WebSearchPreviewTool @@ -22,12 +23,9 @@ export function convertWebSearchPreviewToolToAdapterFormat( */ export function webSearchPreviewTool(toolData: WebSearchPreviewTool): Tool { return { - type: 'function', - function: { - name: 'web_search_preview', - description: 'Search the web (preview version)', - parameters: {}, - }, + name: 'web_search_preview', + description: 'Search the web (preview version)', + inputSchema: z.object({}), metadata: toolData, } } diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts index baa0d771..e2ca606d 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts @@ -1,4 +1,5 @@ import type OpenAI from 'openai' +import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebSearchTool = OpenAI.Responses.WebSearchTool @@ -16,12 +17,9 @@ export function convertWebSearchToolToAdapterFormat(tool: Tool): WebSearchTool { */ export function webSearchTool(toolData: WebSearchTool): Tool { return { - type: 'function', - function: { - name: 'web_search', - description: 'Search the web', - parameters: {}, - }, + name: 'web_search', + description: 'Search the web', + inputSchema: z.object({}), metadata: toolData, } } diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index e52bfc37..b33cf81d 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -49,9 +49,14 @@ "embeddings" ], "dependencies": { - "@tanstack/devtools-event-client": "^0.3.5" + "@tanstack/devtools-event-client": "^0.3.5", + "zod-to-json-schema": "^3.23.0" + }, + "peerDependencies": { + "zod": "^3.0.0 || ^4.0.0" }, "devDependencies": { - "@vitest/coverage-v8": "4.0.14" + "@vitest/coverage-v8": "4.0.14", + "zod": "^4.1.13" } } diff --git a/packages/typescript/ai/src/core/chat.ts b/packages/typescript/ai/src/core/chat.ts index 79bcf585..f88862d7 100644 --- a/packages/typescript/ai/src/core/chat.ts +++ b/packages/typescript/ai/src/core/chat.ts @@ -141,7 +141,7 @@ class ChatEngine< hasTools: !!tools && tools.length > 0, streaming: true, timestamp: Date.now(), - toolNames: tools?.map((t) => t.function.name), + toolNames: tools?.map((t) => t.name), options: options as Record | undefined, providerOptions: providerOptions as Record | undefined, }) diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index 941c1450..74419732 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -2,6 +2,7 @@ export { chat } from 
'./core/chat' export { summarize } from './core/summarize' export { embedding } from './core/embedding' export { tool } from './tools/tool-utils' +export { convertZodToJsonSchema } from './tools/zod-converter' export { toServerSentEventsStream, toStreamResponse, diff --git a/packages/typescript/ai/src/tools/tool-calls.ts b/packages/typescript/ai/src/tools/tool-calls.ts index a7adad3c..207ba3ac 100644 --- a/packages/typescript/ai/src/tools/tool-calls.ts +++ b/packages/typescript/ai/src/tools/tool-calls.ts @@ -115,9 +115,7 @@ export class ToolCallManager { const toolResults: Array = [] for (const toolCall of toolCallsArray) { - const tool = this.tools.find( - (t) => t.function.name === toolCall.function.name, - ) + const tool = this.tools.find((t) => t.name === toolCall.function.name) let toolResultContent: string if (tool?.execute) { @@ -132,7 +130,31 @@ export class ToolCallManager { ) } - const result = await tool.execute(args) + // Validate input against inputSchema + if (tool.inputSchema) { + try { + args = tool.inputSchema.parse(args) + } catch (validationError: any) { + throw new Error( + `Input validation failed for tool ${tool.name}: ${validationError.message}`, + ) + } + } + + // Execute the tool + let result = await tool.execute(args) + + // Validate output against outputSchema if provided + if (tool.outputSchema && result !== undefined && result !== null) { + try { + result = tool.outputSchema.parse(result) + } catch (validationError: any) { + throw new Error( + `Output validation failed for tool ${tool.name}: ${validationError.message}`, + ) + } + } + toolResultContent = typeof result === 'string' ? result : JSON.stringify(result) } catch (error: any) { @@ -227,7 +249,7 @@ export async function executeToolCalls( // Create tool lookup map const toolMap = new Map() for (const tool of tools) { - toolMap.set(tool.function.name, tool) + toolMap.set(tool.name, tool) } for (const toolCall of toolCalls) { @@ -255,6 +277,22 @@ export async function executeToolCalls( } } + // Validate input against inputSchema + if (tool.inputSchema) { + try { + input = tool.inputSchema.parse(input) + } catch (validationError: any) { + results.push({ + toolCallId: toolCall.id, + result: { + error: `Input validation failed for tool ${tool.name}: ${validationError.message}`, + }, + state: 'output-error', + }) + continue + } + } + // CASE 1: Client-side tool (no execute function) if (!tool.execute) { // Check if tool needs approval @@ -327,10 +365,25 @@ export async function executeToolCalls( if (approved) { // Execute after approval try { - const result = await tool.execute(input) + let result = await tool.execute(input) + + // Validate output against outputSchema if provided + if (tool.outputSchema && result !== undefined && result !== null) { + try { + result = tool.outputSchema.parse(result) + } catch (validationError: any) { + throw new Error( + `Output validation failed for tool ${tool.name}: ${validationError.message}`, + ) + } + } + results.push({ toolCallId: toolCall.id, - result: result ? JSON.parse(result) : null, + result: + typeof result === 'string' + ? 
JSON.parse(result) + : result || null, }) } catch (error: any) { results.push({ @@ -361,10 +414,23 @@ export async function executeToolCalls( // CASE 3: Normal server tool - execute immediately try { - const result = await tool.execute(input) + let result = await tool.execute(input) + + // Validate output against outputSchema if provided + if (tool.outputSchema && result !== undefined && result !== null) { + try { + result = tool.outputSchema.parse(result) + } catch (validationError: any) { + throw new Error( + `Output validation failed for tool ${tool.name}: ${validationError.message}`, + ) + } + } + results.push({ toolCallId: toolCall.id, - result: result ? JSON.parse(result) : null, + result: + typeof result === 'string' ? JSON.parse(result) : result || null, }) } catch (error: any) { results.push({ diff --git a/packages/typescript/ai/src/tools/tool-utils.ts b/packages/typescript/ai/src/tools/tool-utils.ts index 184f4288..5498167a 100644 --- a/packages/typescript/ai/src/tools/tool-utils.ts +++ b/packages/typescript/ai/src/tools/tool-utils.ts @@ -1,87 +1,49 @@ +import type { z } from 'zod' import type { Tool } from '../types' -/** - * Infer TypeScript type from JSON Schema property type - */ -type InferPropertyType = T extends { type: 'string' } - ? string - : T extends { type: 'number' } - ? number - : T extends { type: 'boolean' } - ? boolean - : T extends { type: 'array' } - ? Array - : T extends { type: 'object' } - ? Record - : any - -/** - * Infer argument types from parameters schema - * Makes properties optional unless they're in the required array - */ -type InferArgs< - TProps extends Record, - TRequired extends ReadonlyArray | undefined, -> = - TRequired extends ReadonlyArray - ? { - [K in keyof TProps as K extends TRequired[number] - ? K - : never]: InferPropertyType - } & { - [K in keyof TProps as K extends TRequired[number] - ? never - : K]?: InferPropertyType - } - : { - [K in keyof TProps]?: InferPropertyType - } - /** * Helper to define a tool with enforced type safety. - * Automatically infers the execute function argument types from the parameters schema. - * User must provide the full Tool structure with type: "function" and function: {...} + * + * Automatically infers TypeScript types from Zod schemas, providing + * full type safety for tool inputs and outputs. * * @example * ```typescript - * const tools = { - * myTool: tool({ - * type: "function", - * function: { - * name: "myTool", - * description: "My tool description", - * parameters: { - * type: "object", - * properties: { - * id: { type: "string", description: "The ID" }, - * optional: { type: "number", description: "Optional param" }, - * }, - * required: ["id"], - * }, - * }, - * execute: async (args) => { - * // ✅ args is automatically typed as { id: string; optional?: number } - * return args.id; - * }, + * import { tool } from '@tanstack/ai'; + * import { z } from 'zod'; + * + * const getWeather = tool({ + * name: 'get_weather', + * description: 'Get the current weather for a location', + * inputSchema: z.object({ + * location: z.string().describe('The city and state, e.g. 
San Francisco, CA'), + * unit: z.enum(['celsius', 'fahrenheit']).optional(), + * }), + * outputSchema: z.object({ + * temperature: z.number(), + * conditions: z.string(), * }), - * }; + * execute: async ({ location, unit }) => { + * // args are fully typed: { location: string; unit?: "celsius" | "fahrenheit" } + * const data = await fetchWeather(location, unit); + * return data; // validated against outputSchema + * }, + * }); * ``` */ export function tool< - const TProps extends Record, - const TRequired extends ReadonlyArray | undefined, + TInput extends z.ZodType, + TOutput extends z.ZodType = z.ZodAny, >(config: { - type: 'function' - function: { - name: string - description: string - parameters: { - type: 'object' - properties: TProps - required?: TRequired - } - } - execute: (args: InferArgs) => Promise | string -}): Tool { - return config as Tool + name: string + description: string + inputSchema: TInput + outputSchema?: TOutput + execute?: ( + args: z.infer, + ) => Promise> | z.infer + needsApproval?: boolean + metadata?: Record +}): Tool { + return config as Tool } diff --git a/packages/typescript/ai/src/tools/zod-converter.ts b/packages/typescript/ai/src/tools/zod-converter.ts new file mode 100644 index 00000000..e5e83211 --- /dev/null +++ b/packages/typescript/ai/src/tools/zod-converter.ts @@ -0,0 +1,88 @@ +import type { z } from 'zod' +import { zodToJsonSchema } from 'zod-to-json-schema' + +/** + * Converts a Zod schema to JSON Schema format compatible with LLM providers. + * + * Uses OpenAPI 3.0 format which is supported by OpenAI, Anthropic, Gemini, and other providers. + * + * @param schema - Zod schema to convert + * @returns JSON Schema object that can be sent to LLM providers + * + * @example + * ```typescript + * import { z } from 'zod'; + * + * const schema = z.object({ + * location: z.string().describe('City name'), + * unit: z.enum(['celsius', 'fahrenheit']).optional() + * }); + * + * const jsonSchema = convertZodToJsonSchema(schema); + * // Returns: + * // { + * // type: 'object', + * // properties: { + * // location: { type: 'string', description: 'City name' }, + * // unit: { type: 'string', enum: ['celsius', 'fahrenheit'] } + * // }, + * // required: ['location'] + * // } + * ``` + */ +export function convertZodToJsonSchema(schema: z.ZodType): Record { + const jsonSchema = zodToJsonSchema(schema as any, { + target: 'openApi3', + $refStrategy: 'none', // Inline all references for LLM compatibility + }) + + // Remove $schema property as it's not needed for LLM providers + let result = jsonSchema as Record + if (result && typeof result === 'object' && '$schema' in result) { + const { $schema, ...rest } = result + result = rest + } + + // Ensure object schemas always have type: "object" + // This fixes cases where zod-to-json-schema doesn't set type for empty objects + if (result && typeof result === 'object') { + // Check if the input schema is a ZodObject by inspecting its internal structure + const isZodObject = + schema && + typeof schema === 'object' && + '_def' in schema && + (schema as any)._def?.typeName === 'ZodObject' + + // If type is explicitly "None", fix it + if (result.type === 'None') { + result.type = 'object' + } + + // If we know it's a ZodObject but result doesn't have type, set it + if (isZodObject && !result.type) { + result.type = 'object' + } + + // If result is completely empty (no keys), it's likely an empty object schema + if (Object.keys(result).length === 0) { + result.type = 'object' + } + + // If it has properties (even empty), it should 
be an object type + if ('properties' in result && !result.type) { + result.type = 'object' + } + + // Ensure properties exists for object types (even if empty) + if (result.type === 'object' && !('properties' in result)) { + result.properties = {} + } + + // Ensure required exists for object types (even if empty array) + if (result.type === 'object' && !('required' in result)) { + result.required = [] + } + } + + return result +} diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index d929c9d4..a6c807df 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -1,4 +1,5 @@ import type { CommonOptions } from './core/chat-common-options' +import type { z } from 'zod' export interface ToolCall { id: string @@ -23,61 +24,71 @@ export interface ModelMessage { * Tools allow the model to interact with external systems, APIs, or perform computations. * The model will decide when to call tools based on the user's request and the tool descriptions. * + * Tools use Zod schemas for runtime validation and type safety. + * * @see https://platform.openai.com/docs/guides/function-calling * @see https://docs.anthropic.com/claude/docs/tool-use */ -export interface Tool { +export interface Tool< + TInput extends z.ZodType = z.ZodType, + TOutput extends z.ZodType = z.ZodType, +> { /** - * Type of tool - currently only "function" is supported. + * Unique name of the tool (used by the model to call it). + * + * Should be descriptive and follow naming conventions (e.g., snake_case or camelCase). + * Must be unique within the tools array. * - * Future versions may support additional tool types. + * @example "get_weather", "search_database", "sendEmail" */ - type: 'function' + name: string /** - * Function definition and metadata. + * Clear description of what the tool does. + * + * This is crucial - the model uses this to decide when to call the tool. + * Be specific about what the tool does, what parameters it needs, and what it returns. + * + * @example "Get the current weather in a given location. Returns temperature, conditions, and forecast." */ - function: { - /** - * Unique name of the function (used by the model to call it). - * - * Should be descriptive and follow naming conventions (e.g., snake_case or camelCase). - * Must be unique within the tools array. - * - * @example "get_weather", "search_database", "sendEmail" - */ - name: string + description: string - /** - * Clear description of what the function does. - * - * This is crucial - the model uses this to decide when to call the function. - * Be specific about what the function does, what parameters it needs, and what it returns. - * - * @example "Get the current weather in a given location. Returns temperature, conditions, and forecast." - */ - description: string + /** + * Zod schema describing the tool's input parameters. + * + * Defines the structure and types of arguments the tool accepts. + * The model will generate arguments matching this schema. + * The schema is converted to JSON Schema for LLM providers. + * + * @see https://zod.dev/ + * + * @example + * import { z } from 'zod'; + * + * z.object({ + * location: z.string().describe("City name or coordinates"), + * unit: z.enum(["celsius", "fahrenheit"]).optional() + * }) + */ + inputSchema: TInput - /** - * JSON Schema describing the function's parameters. - * - * Defines the structure and types of arguments the function accepts. - * The model will generate arguments matching this schema. 
- * - * @see https://json-schema.org/ - * - * @example - * - * { - * type: "object", - * properties: { - * location: { type: "string", description: "City name or coordinates" }, - * unit: { type: "string", enum: ["celsius", "fahrenheit"] } - * }, - * required: ["location"] - * } - */ - parameters: Record - } + /** + * Optional Zod schema for validating tool output. + * + * If provided, tool results will be validated against this schema before + * being sent back to the model. This catches bugs in tool implementations + * and ensures consistent output formatting. + * + * Note: This is client-side validation only - not sent to LLM providers. + * + * @example + * z.object({ + * temperature: z.number(), + * conditions: z.string(), + * forecast: z.array(z.string()).optional() + * }) + */ + outputSchema?: TOutput /** * Optional function to execute when the model calls this tool. @@ -85,21 +96,25 @@ export interface Tool { * If provided, the SDK will automatically execute the function with the model's arguments * and feed the result back to the model. This enables autonomous tool use loops. * - * Returns the result as a string (or Promise) to send back to the model. + * Can return any value - will be automatically stringified if needed. * - * @param args - The arguments parsed from the model's tool call (matches the parameters schema) - * @returns Result string to send back to the model + * @param args - The arguments parsed from the model's tool call (validated against inputSchema) + * @returns Result to send back to the model (validated against outputSchema if provided) * * @example * execute: async (args) => { * const weather = await fetchWeather(args.location); - * return JSON.stringify(weather); + * return weather; // Can return object or string + * } */ - execute?: (args: any) => Promise | string + execute?: ( + args: z.infer<TInput>, + ) => Promise<z.infer<TOutput>> | z.infer<TOutput> + /** If true, tool execution requires user approval before running. Works with both server and client tools.
*/ needsApproval?: boolean + /** Additional metadata for adapters or custom extensions */ metadata?: Record } diff --git a/packages/typescript/ai/tests/ai-abort.test.ts b/packages/typescript/ai/tests/ai-abort.test.ts index c624909d..78c3cad5 100644 --- a/packages/typescript/ai/tests/ai-abort.test.ts +++ b/packages/typescript/ai/tests/ai-abort.test.ts @@ -1,4 +1,5 @@ import { describe, it, expect } from 'vitest' +import { z } from 'zod' import { chat } from '../src/core/chat' import type { ChatOptions, StreamChunk } from '../src/types' import { BaseAdapter } from '../src/base-adapter' @@ -187,12 +188,9 @@ describe('chat() - Abort Signal Handling', () => { messages: [{ role: 'user', content: 'Hello' }], tools: [ { - type: 'function', - function: { - name: 'test_tool', - description: 'Test tool', - parameters: {}, - }, + name: 'test_tool', + description: 'Test tool', + inputSchema: z.object({}), }, ], abortController, diff --git a/packages/typescript/ai/tests/ai-chat.test.ts b/packages/typescript/ai/tests/ai-chat.test.ts index 8a8b771b..ec2295b3 100644 --- a/packages/typescript/ai/tests/ai-chat.test.ts +++ b/packages/typescript/ai/tests/ai-chat.test.ts @@ -1,5 +1,6 @@ /* eslint-disable @typescript-eslint/require-await */ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' +import { z } from 'zod' import { chat } from '../src/core/chat' import { BaseAdapter } from '../src/base-adapter' import { aiEventClient } from '../src/event-client.js' @@ -148,8 +149,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { ], tools: [ { - type: 'function', - function: { name: 'test', description: 'test', parameters: {} }, + name: 'test', + description: 'test', + inputSchema: z.object({}), }, ], }), @@ -371,12 +373,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { describe('Tool Call Paths', () => { it('should handle single tool call and execute it', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'get_weather', - description: 'Get weather', - parameters: {}, - }, + name: 'get_weather', + description: 'Get weather', + inputSchema: z.object({ + location: z.string().optional(), + }), execute: vi.fn(async (args: any) => JSON.stringify({ temp: 72, location: args.location }), ), @@ -461,12 +462,12 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle streaming tool call arguments (incremental JSON)', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'calculate', - description: 'Calculate', - parameters: {}, - }, + name: 'calculate', + description: 'Calculate', + inputSchema: z.object({ + a: z.number(), + b: z.number(), + }), execute: vi.fn(async (args: any) => JSON.stringify({ result: args.a + args.b }), ), @@ -557,14 +558,16 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle multiple tool calls in same iteration', async () => { const tool1: Tool = { - type: 'function', - function: { name: 'tool1', description: 'Tool 1', parameters: {} }, + name: 'tool1', + description: 'Tool 1', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 1 })), } const tool2: Tool = { - type: 'function', - function: { name: 'tool2', description: 'Tool 2', parameters: {} }, + name: 'tool2', + description: 'Tool 2', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 2 })), } @@ -654,8 +657,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle tool calls with accumulated content', 
async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -739,8 +743,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle tool calls without accumulated content', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -815,12 +820,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle incomplete tool calls (empty name)', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'test_tool', - description: 'Test', - parameters: {}, - }, + name: 'test_tool', + + description: 'Test', + + inputSchema: z.object({}), execute: vi.fn(), } @@ -875,8 +879,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { describe('Tool Execution Result Paths', () => { it('should emit tool_result chunks after execution', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'success' })), } @@ -953,8 +958,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should add tool result messages to conversation', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -1025,8 +1031,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle tool execution errors gracefully', async () => { const tool: Tool = { - type: 'function', - function: { name: 'error_tool', description: 'Error', parameters: {} }, + name: 'error_tool', + description: 'Error', + inputSchema: z.object({}), execute: vi.fn(async () => { throw new Error('Tool execution failed') }), @@ -1132,12 +1139,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { messages: [{ role: 'user', content: 'Test' }], tools: [ { - type: 'function', - function: { - name: 'known_tool', - description: 'Known', - parameters: {}, - }, + name: 'known_tool', + + description: 'Known', + + inputSchema: z.object({}), }, ], }), @@ -1156,12 +1162,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { describe('Approval & Client Tool Paths', () => { it('should handle approval-required tools', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'delete_file', - description: 'Delete', - parameters: {}, - }, + name: 'delete_file', + + description: 'Delete', + + inputSchema: z.object({}), needsApproval: true, execute: vi.fn(async () => JSON.stringify({ success: true })), } @@ -1225,12 +1230,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle client-side tools (no execute)', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'client_tool', - description: 'Client', - parameters: {}, - }, + name: 'client_tool', + + description: 'Client', + + inputSchema: z.object({ + input: z.string(), + }), // No execute function } @@ 
-1287,21 +1293,24 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle mixed tools (approval + client + normal)', async () => { const normalTool: Tool = { - type: 'function', - function: { name: 'normal', description: 'Normal', parameters: {} }, + name: 'normal', + description: 'Normal', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } const approvalTool: Tool = { - type: 'function', - function: { name: 'approval', description: 'Approval', parameters: {} }, + name: 'approval', + description: 'Approval', + inputSchema: z.object({}), needsApproval: true, execute: vi.fn(async () => JSON.stringify({ success: true })), } const clientTool: Tool = { - type: 'function', - function: { name: 'client', description: 'Client', parameters: {} }, + name: 'client', + description: 'Client', + inputSchema: z.object({}), // No execute } @@ -1388,12 +1397,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { .mockResolvedValue(JSON.stringify({ success: true })) const approvalTool: Tool = { - type: 'function', - function: { - name: 'approval_tool', - description: 'Needs approval', - parameters: {}, - }, + name: 'approval_tool', + + description: 'Needs approval', + + inputSchema: z.object({ + path: z.string(), + }), needsApproval: true, execute: toolExecute, } @@ -1479,8 +1489,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { describe('Agent Loop Strategy Paths', () => { it('should respect custom agent loop strategy', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -1548,8 +1559,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should use default max iterations strategy (5)', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -1602,8 +1614,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it("should exit loop when finishReason is not 'tool_calls'", async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(), } @@ -1687,8 +1700,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should exit loop when toolCallManager has no tool calls', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(), } @@ -1816,8 +1830,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should check abort signal before tool execution', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(), } @@ -2081,8 +2096,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should emit iteration events for tool calls', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + 
name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -2170,8 +2186,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should track total chunk count across iterations', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(async () => JSON.stringify({ result: 'ok' })), } @@ -2281,8 +2298,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle tool calls with missing ID', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(), } @@ -2328,8 +2346,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle tool call with invalid JSON arguments', async () => { const tool: Tool = { - type: 'function', - function: { name: 'test_tool', description: 'Test', parameters: {} }, + name: 'test_tool', + description: 'Test', + inputSchema: z.object({}), execute: vi.fn(), } @@ -2433,12 +2452,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { describe('Extract Approvals and Client Tool Results from Messages', () => { it('should extract approval responses from messages with parts', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'delete_file', - description: 'Delete file', - parameters: {}, - }, + name: 'delete_file', + + description: 'Delete file', + + inputSchema: z.object({ + path: z.string(), + }), needsApproval: true, execute: vi.fn(async () => JSON.stringify({ success: true })), } @@ -2578,12 +2598,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should extract client tool outputs from messages with parts', async () => { const tool: Tool = { - type: 'function', - function: { - name: 'client_tool', - description: 'Client tool', - parameters: {}, - }, + name: 'client_tool', + + description: 'Client tool', + + inputSchema: z.object({}), // No execute - client-side tool } @@ -2697,23 +2716,21 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should handle messages with both approval and client tool parts', async () => { const approvalTool: Tool = { - type: 'function', - function: { - name: 'approval_tool', - description: 'Approval', - parameters: {}, - }, + name: 'approval_tool', + + description: 'Approval', + + inputSchema: z.object({}), needsApproval: true, execute: vi.fn(async () => JSON.stringify({ success: true })), } const clientTool: Tool = { - type: 'function', - function: { - name: 'client_tool', - description: 'Client', - parameters: {}, - }, + name: 'client_tool', + + description: 'Client', + + inputSchema: z.object({}), // No execute } @@ -2840,16 +2857,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => { it('should execute tool and continue loop when receiving tool_calls finishReason with maxIterations(20)', async () => { // Create a tool that returns "70" like the failing test const temperatureTool: Tool = { - type: 'function', - function: { - name: 'get_temperature', - description: 'Get the current temperature in degrees', - parameters: { - type: 'object', - properties: {}, - required: [], - }, - }, + name: 'get_temperature', + description: 'Get the current temperature in degrees', + inputSchema: z.object({}), 
execute: vi.fn(async (_args: any) => { return '70' }), diff --git a/packages/typescript/ai/tests/tool-call-manager.test.ts b/packages/typescript/ai/tests/tool-call-manager.test.ts index e4781ded..9d74205c 100644 --- a/packages/typescript/ai/tests/tool-call-manager.test.ts +++ b/packages/typescript/ai/tests/tool-call-manager.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it, vi } from 'vitest' +import { z } from 'zod' import { ToolCallManager } from '../src/tools/tool-calls' import type { DoneStreamChunk, Tool } from '../src/types' @@ -12,12 +13,11 @@ describe('ToolCallManager', () => { } const mockWeatherTool: Tool = { - type: 'function', - function: { - name: 'get_weather', - description: 'Get weather', - parameters: {}, - }, + name: 'get_weather', + description: 'Get weather', + inputSchema: z.object({ + location: z.string().optional(), + }), execute: vi.fn((args: any) => { return JSON.stringify({ temp: 72, location: args.location }) }), @@ -126,12 +126,9 @@ describe('ToolCallManager', () => { it('should handle tool execution errors gracefully', async () => { const errorTool: Tool = { - type: 'function', - function: { - name: 'error_tool', - description: 'Throws error', - parameters: {}, - }, + name: 'error_tool', + description: 'Throws error', + inputSchema: z.object({}), execute: vi.fn(() => { throw new Error('Tool failed') }), @@ -164,12 +161,9 @@ describe('ToolCallManager', () => { it('should handle tools without execute function', async () => { const noExecuteTool: Tool = { - type: 'function', - function: { - name: 'no_execute', - description: 'No execute function', - parameters: {}, - }, + name: 'no_execute', + description: 'No execute function', + inputSchema: z.object({}), // No execute function } @@ -216,12 +210,11 @@ describe('ToolCallManager', () => { it('should handle multiple tool calls in same iteration', async () => { const calculateTool: Tool = { - type: 'function', - function: { - name: 'calculate', - description: 'Calculate', - parameters: {}, - }, + name: 'calculate', + description: 'Calculate', + inputSchema: z.object({ + expression: z.string(), + }), execute: vi.fn((args: any) => { return JSON.stringify({ result: eval(args.expression) }) }), diff --git a/packages/typescript/smoke-tests/adapters/src/harness.ts b/packages/typescript/smoke-tests/adapters/src/harness.ts index 7e2b8e31..25d01ee6 100644 --- a/packages/typescript/smoke-tests/adapters/src/harness.ts +++ b/packages/typescript/smoke-tests/adapters/src/harness.ts @@ -80,16 +80,12 @@ export async function writeDebugFile( function formatToolsForDebug(tools: Array = []) { return tools.map((t) => ({ - type: t.type, - function: t.function - ? 
{ - name: t.function.name, - description: t.function.description, - parameters: t.function.parameters, - } - : undefined, - needsApproval: (t as any).needsApproval, - hasExecute: Boolean((t as any).execute), + name: t.name, + description: t.description, + needsApproval: t.needsApproval, + hasExecute: Boolean(t.execute), + hasInputSchema: Boolean(t.inputSchema), + hasOutputSchema: Boolean(t.outputSchema), })) } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ba3766f7..c12c0f6b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -445,10 +445,16 @@ importers: '@tanstack/devtools-event-client': specifier: ^0.3.5 version: 0.3.5 + zod-to-json-schema: + specifier: ^3.23.0 + version: 3.25.0(zod@4.1.13) devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@types/node@24.10.1)(happy-dom@20.0.10)(jiti@2.6.1)(jsdom@27.2.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)) + zod: + specifier: ^4.1.13 + version: 4.1.13 packages/typescript/ai-anthropic: dependencies: @@ -458,6 +464,9 @@ importers: '@tanstack/ai': specifier: workspace:* version: link:../ai + zod: + specifier: ^4.1.13 + version: 4.1.13 devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 @@ -518,6 +527,9 @@ importers: '@tanstack/ai': specifier: workspace:* version: link:../ai + zod: + specifier: ^4.1.13 + version: 4.1.13 devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 @@ -534,6 +546,9 @@ importers: ollama: specifier: ^0.6.3 version: 0.6.3 + zod: + specifier: ^4.1.13 + version: 4.1.13 devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 @@ -550,6 +565,9 @@ importers: openai: specifier: ^6.9.1 version: 6.9.1(ws@8.18.3)(zod@4.1.13) + zod: + specifier: ^4.1.13 + version: 4.1.13 devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 @@ -557,9 +575,6 @@ importers: vite: specifier: ^7.2.4 version: 7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1) - zod: - specifier: ^4.1.13 - version: 4.1.13 packages/typescript/ai-react: dependencies: @@ -6565,6 +6580,11 @@ packages: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} + zod-to-json-schema@3.25.0: + resolution: {integrity: sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==} + peerDependencies: + zod: ^3.25 || ^4 + zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} @@ -13213,6 +13233,10 @@ snapshots: compress-commons: 6.0.2 readable-stream: 4.7.0 + zod-to-json-schema@3.25.0(zod@4.1.13): + dependencies: + zod: 4.1.13 + zod@3.25.76: {} zod@4.1.13: {} From 6d403ee077f3829500ef4a287e6396d9e8366396 Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Thu, 27 Nov 2025 17:50:27 -0800 Subject: [PATCH 2/6] fixed zod conversion --- .../ai-anthropic/src/anthropic-adapter.ts | 111 +++++++++++++++--- packages/typescript/ai/package.json | 2 +- .../typescript/ai/src/tools/zod-converter.ts | 9 +- pnpm-lock.yaml | 24 ++-- 4 files changed, 111 insertions(+), 35 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts index d11f3724..096541a0 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts @@ -316,16 +316,34 @@ export class Anthropic extends BaseAdapter< generateId: () => string, ): 
AsyncIterable { let accumulatedContent = '' + const allChunks: Array<{ + input?: any + output?: any + iteration_start?: any + }> = [] let accumulatedThinking = '' const timestamp = Date.now() + const iterationId = generateId() const toolCallsMap = new Map< number, { id: string; name: string; input: string } >() let currentToolIndex = -1 + // Helper to log output chunks + const logAndYield = (chunk: StreamChunk) => { + allChunks.push({ output: chunk }) + return chunk + } + + // Log iteration start + allChunks.push({ iteration_start: { id: iterationId, timestamp } }) + try { for await (const event of stream) { + // Log input event + allChunks.push({ input: event }) + if (event.type === 'content_block_start') { if (event.content_block.type === 'tool_use') { currentToolIndex++ @@ -342,7 +360,7 @@ export class Anthropic extends BaseAdapter< if (event.delta.type === 'text_delta') { const delta = event.delta.text accumulatedContent += delta - yield { + yield logAndYield({ type: 'content', id: generateId(), model: model, @@ -350,32 +368,29 @@ export class Anthropic extends BaseAdapter< delta, content: accumulatedContent, role: 'assistant', - } + }) } else if (event.delta.type === 'thinking_delta') { // Handle thinking content const delta = event.delta.thinking accumulatedThinking += delta - yield { + yield logAndYield({ type: 'thinking', id: generateId(), model: model, timestamp, delta, content: accumulatedThinking, - } + }) } else if (event.delta.type === 'input_json_delta') { // Tool input is being streamed const existing = toolCallsMap.get(currentToolIndex) if (existing) { + // Accumulate the input for final processing existing.input += event.delta.partial_json - // Normalize arguments: empty string -> {} for empty object schemas - let normalizedArgs = existing.input.trim() - if (normalizedArgs === '') { - normalizedArgs = '{}' - } - - yield { + // Yield the DELTA (partial_json), not the full accumulated input + // The stream processor will concatenate these deltas + yield logAndYield({ type: 'tool_call', id: generateId(), model: model, @@ -385,24 +400,46 @@ export class Anthropic extends BaseAdapter< type: 'function', function: { name: existing.name, - arguments: normalizedArgs, + arguments: event.delta.partial_json, }, }, index: currentToolIndex, - } + }) } } + } else if (event.type === 'content_block_stop') { + // If this is a tool call and we haven't received any input deltas, + // emit a tool_call chunk with empty arguments + const existing = toolCallsMap.get(currentToolIndex) + if (existing && existing.input === '') { + // No input_json_delta events received, emit empty arguments + yield logAndYield({ + type: 'tool_call', + id: generateId(), + model: model, + timestamp, + toolCall: { + id: existing.id, + type: 'function', + function: { + name: existing.name, + arguments: '{}', + }, + }, + index: currentToolIndex, + }) + } } else if (event.type === 'message_stop') { - yield { + yield logAndYield({ type: 'done', id: generateId(), model: model, timestamp, finishReason: 'stop', - } + }) } else if (event.type === 'message_delta') { if (event.delta.stop_reason) { - yield { + yield logAndYield({ type: 'done', id: generateId(), model: model, @@ -420,7 +457,7 @@ export class Anthropic extends BaseAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - } + }) } } } @@ -435,7 +472,7 @@ export class Anthropic extends BaseAdapter< stack: error?.stack, }) - yield { + yield logAndYield({ type: 'error', id: generateId(), model: model, @@ -444,6 +481,44 @@ export class Anthropic 
extends BaseAdapter< message: error?.message || 'Unknown error occurred', code: error?.code || error?.status, }, + }) + } finally { + // Append all chunks to /tmp/chunks.txt for debugging (to capture all iterations) + try { + const fs = await import('fs/promises') + // Read existing content + let existingContent = '[]' + try { + existingContent = await fs.readFile('/tmp/chunks.txt', 'utf-8') + } catch { + // File doesn't exist yet, start fresh + } + + // Parse existing array and append new chunks + let allIterations = [] + try { + allIterations = JSON.parse(existingContent) + if (!Array.isArray(allIterations)) { + allIterations = [] + } + } catch { + allIterations = [] + } + + // Add this iteration's chunks + allIterations.push(...allChunks) + + // Write back + await fs.writeFile( + '/tmp/chunks.txt', + JSON.stringify(allIterations, null, 2), + 'utf-8', + ) + console.log( + `[Anthropic] Appended ${allChunks.length} chunks to /tmp/chunks.txt (iteration ${iterationId})`, + ) + } catch (writeError) { + console.error('[Anthropic] Failed to write chunks:', writeError) } } } diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index b33cf81d..667fd1f7 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -50,7 +50,7 @@ ], "dependencies": { "@tanstack/devtools-event-client": "^0.3.5", - "zod-to-json-schema": "^3.23.0" + "@alcyone-labs/zod-to-json-schema": "^4.0.10" }, "peerDependencies": { "zod": "^3.0.0 || ^4.0.0" diff --git a/packages/typescript/ai/src/tools/zod-converter.ts b/packages/typescript/ai/src/tools/zod-converter.ts index e5e83211..e424adb2 100644 --- a/packages/typescript/ai/src/tools/zod-converter.ts +++ b/packages/typescript/ai/src/tools/zod-converter.ts @@ -1,10 +1,10 @@ import type { z } from 'zod' -import { zodToJsonSchema } from 'zod-to-json-schema' +import { zodToJsonSchema } from '@alcyone-labs/zod-to-json-schema' /** * Converts a Zod schema to JSON Schema format compatible with LLM providers. * - * Uses OpenAPI 3.0 format which is supported by OpenAI, Anthropic, Gemini, and other providers. + * Uses @alcyone-labs/zod-to-json-schema which is compatible with Zod v4. 
* * @param schema - Zod schema to convert * @returns JSON Schema object that can be sent to LLM providers @@ -31,6 +31,7 @@ import { zodToJsonSchema } from 'zod-to-json-schema' * ``` */ export function convertZodToJsonSchema(schema: z.ZodType): Record { + // Use Alcyone Labs fork which is compatible with Zod v4 const jsonSchema = zodToJsonSchema(schema as any, { target: 'openApi3', $refStrategy: 'none', // Inline all references for LLM compatibility @@ -50,8 +51,8 @@ export function convertZodToJsonSchema(schema: z.ZodType): Record { const isZodObject = schema && typeof schema === 'object' && - '_def' in schema && - (schema as any)._def?.typeName === 'ZodObject' + 'def' in schema && + (schema as any).def?.type === 'object' // If type is explicitly "None", fix it if (result.type === 'None') { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c12c0f6b..9ab48d90 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -442,12 +442,12 @@ importers: packages/typescript/ai: dependencies: + '@alcyone-labs/zod-to-json-schema': + specifier: ^4.0.10 + version: 4.0.10(zod@4.1.13) '@tanstack/devtools-event-client': specifier: ^0.3.5 version: 0.3.5 - zod-to-json-schema: - specifier: ^3.23.0 - version: 3.25.0(zod@4.1.13) devDependencies: '@vitest/coverage-v8': specifier: 4.0.14 @@ -730,6 +730,11 @@ packages: '@adobe/css-tools@4.4.4': resolution: {integrity: sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==} + '@alcyone-labs/zod-to-json-schema@4.0.10': + resolution: {integrity: sha512-TFsSpAPToqmqmT85SGHXuxoCwEeK9zUDvn512O9aBVvWRhSuy+VvAXZkifzsdllD3ncF0ZjUrf4MpBwIEixdWQ==} + peerDependencies: + zod: ^4.0.5 + '@anthropic-ai/sdk@0.71.0': resolution: {integrity: sha512-go1XeWXmpxuiTkosSXpb8tokLk2ZLkIRcXpbWVwJM6gH5OBtHOVsfPfGuqI1oW7RRt4qc59EmYbrXRZ0Ng06Jw==} hasBin: true @@ -6580,11 +6585,6 @@ packages: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} - zod-to-json-schema@3.25.0: - resolution: {integrity: sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==} - peerDependencies: - zod: ^3.25 || ^4 - zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} @@ -6601,6 +6601,10 @@ snapshots: '@adobe/css-tools@4.4.4': optional: true + '@alcyone-labs/zod-to-json-schema@4.0.10(zod@4.1.13)': + dependencies: + zod: 4.1.13 + '@anthropic-ai/sdk@0.71.0(zod@4.1.13)': dependencies: json-schema-to-ts: 3.1.1 @@ -13233,10 +13237,6 @@ snapshots: compress-commons: 6.0.2 readable-stream: 4.7.0 - zod-to-json-schema@3.25.0(zod@4.1.13): - dependencies: - zod: 4.1.13 - zod@3.25.76: {} zod@4.1.13: {} From 0ed86b391bbbf3c92a3717c14c9335abcb065af4 Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Thu, 27 Nov 2025 18:18:13 -0800 Subject: [PATCH 3/6] fixed part ordering --- package.json | 5 +- .../ai-anthropic/src/anthropic-adapter.ts | 82 ++------- .../ai-client/src/message-updaters.ts | 13 +- .../ai-client/src/stream/processor.ts | 24 +++ .../ai-client/tests/message-updaters.test.ts | 6 +- .../stream/message-parts-ordering.test.ts | 165 ++++++++++++++++++ packages/typescript/ai/package.json | 7 +- packages/typescript/ai/vite.config.ts | 26 ++- pnpm-lock.yaml | 6 +- 9 files changed, 246 insertions(+), 88 deletions(-) create mode 100644 packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts diff --git a/package.json b/package.json index 13b1ada0..07ed3bf8 
100644 --- a/package.json +++ b/package.json @@ -53,7 +53,10 @@ "size-limit": [ { "path": "packages/typescript/ai/dist/esm/index.js", - "limit": "21 KB" + "limit": "21 KB", + "ignore": [ + "@alcyone-labs/zod-to-json-schema" + ] } ], "devDependencies": { diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts index 096541a0..599c16de 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts @@ -316,34 +316,16 @@ export class Anthropic extends BaseAdapter< generateId: () => string, ): AsyncIterable { let accumulatedContent = '' - const allChunks: Array<{ - input?: any - output?: any - iteration_start?: any - }> = [] let accumulatedThinking = '' const timestamp = Date.now() - const iterationId = generateId() const toolCallsMap = new Map< number, { id: string; name: string; input: string } >() let currentToolIndex = -1 - // Helper to log output chunks - const logAndYield = (chunk: StreamChunk) => { - allChunks.push({ output: chunk }) - return chunk - } - - // Log iteration start - allChunks.push({ iteration_start: { id: iterationId, timestamp } }) - try { for await (const event of stream) { - // Log input event - allChunks.push({ input: event }) - if (event.type === 'content_block_start') { if (event.content_block.type === 'tool_use') { currentToolIndex++ @@ -360,7 +342,7 @@ export class Anthropic extends BaseAdapter< if (event.delta.type === 'text_delta') { const delta = event.delta.text accumulatedContent += delta - yield logAndYield({ + yield { type: 'content', id: generateId(), model: model, @@ -368,19 +350,19 @@ export class Anthropic extends BaseAdapter< delta, content: accumulatedContent, role: 'assistant', - }) + } } else if (event.delta.type === 'thinking_delta') { // Handle thinking content const delta = event.delta.thinking accumulatedThinking += delta - yield logAndYield({ + yield { type: 'thinking', id: generateId(), model: model, timestamp, delta, content: accumulatedThinking, - }) + } } else if (event.delta.type === 'input_json_delta') { // Tool input is being streamed const existing = toolCallsMap.get(currentToolIndex) @@ -390,7 +372,7 @@ export class Anthropic extends BaseAdapter< // Yield the DELTA (partial_json), not the full accumulated input // The stream processor will concatenate these deltas - yield logAndYield({ + yield { type: 'tool_call', id: generateId(), model: model, @@ -404,7 +386,7 @@ export class Anthropic extends BaseAdapter< }, }, index: currentToolIndex, - }) + } } } } else if (event.type === 'content_block_stop') { @@ -413,7 +395,7 @@ export class Anthropic extends BaseAdapter< const existing = toolCallsMap.get(currentToolIndex) if (existing && existing.input === '') { // No input_json_delta events received, emit empty arguments - yield logAndYield({ + yield { type: 'tool_call', id: generateId(), model: model, @@ -427,19 +409,19 @@ export class Anthropic extends BaseAdapter< }, }, index: currentToolIndex, - }) + } } } else if (event.type === 'message_stop') { - yield logAndYield({ + yield { type: 'done', id: generateId(), model: model, timestamp, finishReason: 'stop', - }) + } } else if (event.type === 'message_delta') { if (event.delta.stop_reason) { - yield logAndYield({ + yield { type: 'done', id: generateId(), model: model, @@ -457,7 +439,7 @@ export class Anthropic extends BaseAdapter< (event.usage.input_tokens || 0) + (event.usage.output_tokens || 0), }, - }) + } } } } @@ -472,7 +454,7 @@ export class 
Anthropic extends BaseAdapter< stack: error?.stack, }) - yield logAndYield({ + yield { type: 'error', id: generateId(), model: model, @@ -481,44 +463,6 @@ export class Anthropic extends BaseAdapter< message: error?.message || 'Unknown error occurred', code: error?.code || error?.status, }, - }) - } finally { - // Append all chunks to /tmp/chunks.txt for debugging (to capture all iterations) - try { - const fs = await import('fs/promises') - // Read existing content - let existingContent = '[]' - try { - existingContent = await fs.readFile('/tmp/chunks.txt', 'utf-8') - } catch { - // File doesn't exist yet, start fresh - } - - // Parse existing array and append new chunks - let allIterations = [] - try { - allIterations = JSON.parse(existingContent) - if (!Array.isArray(allIterations)) { - allIterations = [] - } - } catch { - allIterations = [] - } - - // Add this iteration's chunks - allIterations.push(...allChunks) - - // Write back - await fs.writeFile( - '/tmp/chunks.txt', - JSON.stringify(allIterations, null, 2), - 'utf-8', - ) - console.log( - `[Anthropic] Appended ${allChunks.length} chunks to /tmp/chunks.txt (iteration ${iterationId})`, - ) - } catch (writeError) { - console.error('[Anthropic] Failed to write chunks:', writeError) } } } diff --git a/packages/typescript/ai-client/src/message-updaters.ts b/packages/typescript/ai-client/src/message-updaters.ts index af17fe70..671446c7 100644 --- a/packages/typescript/ai-client/src/message-updaters.ts +++ b/packages/typescript/ai-client/src/message-updaters.ts @@ -9,6 +9,9 @@ import type { /** * Update or add a text part to a message. + * + * If the last part is a text part, update it (continuing the same text segment). + * Otherwise, create a new text part (starting a new text segment after tool calls). */ export function updateTextPart( messages: Array, @@ -21,13 +24,13 @@ export function updateTextPart( } const parts = [...msg.parts] - const textPartIndex = parts.findIndex((p) => p.type === 'text') + const lastPart = parts.length > 0 ? 
parts[parts.length - 1] : null - if (textPartIndex >= 0) { - // Update existing text part - parts[textPartIndex] = { type: 'text', content } + if (lastPart && lastPart.type === 'text') { + // Update the last text part (continuing same text segment) + parts[parts.length - 1] = { type: 'text', content } } else { - // Add new text part at the end (preserve natural streaming order) + // Create new text part (starting new text segment after tool calls/results) parts.push({ type: 'text', content }) } diff --git a/packages/typescript/ai-client/src/stream/processor.ts b/packages/typescript/ai-client/src/stream/processor.ts index e9adc8d3..420e4817 100644 --- a/packages/typescript/ai-client/src/stream/processor.ts +++ b/packages/typescript/ai-client/src/stream/processor.ts @@ -219,6 +219,18 @@ export class StreamProcessor { */ private handleTextChunk(content?: string, delta?: string): void { // Text arriving means all current tool calls are complete + // If we have pending tool calls, emit current text and reset before completing them + const hadPendingToolCalls = this.hasPendingToolCalls() + if (hadPendingToolCalls && this.textContent) { + // Emit any accumulated text before completing tool calls + if (this.textContent !== this.lastEmittedText) { + this.emitTextUpdate() + } + // Reset text accumulation for the new text segment after tool calls + this.textContent = '' + this.lastEmittedText = '' + } + this.completeAllToolCalls() const previous = this.textContent @@ -356,6 +368,18 @@ export class StreamProcessor { } } + /** + * Check if there are any pending tool calls (not yet complete) + */ + private hasPendingToolCalls(): boolean { + for (const toolCall of this.toolCalls.values()) { + if (toolCall.state !== 'input-complete') { + return true + } + } + return false + } + /** * Complete all tool calls */ diff --git a/packages/typescript/ai-client/tests/message-updaters.test.ts b/packages/typescript/ai-client/tests/message-updaters.test.ts index 99b4d27c..2faf829c 100644 --- a/packages/typescript/ai-client/tests/message-updaters.test.ts +++ b/packages/typescript/ai-client/tests/message-updaters.test.ts @@ -201,7 +201,7 @@ describe('message-updaters', () => { }) }) - it('should insert tool call before text parts', () => { + it('should append tool call after existing parts (preserve streaming order)', () => { const messages: Array = [ { id: 'msg-1', @@ -218,8 +218,8 @@ describe('message-updaters', () => { }) expect(result[0]?.parts).toHaveLength(2) - expect(result[0]?.parts[0]?.type).toBe('tool-call') - expect(result[0]?.parts[1]?.type).toBe('text') + expect(result[0]?.parts[0]?.type).toBe('text') + expect(result[0]?.parts[1]?.type).toBe('tool-call') }) it('should not modify other messages', () => { diff --git a/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts b/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts new file mode 100644 index 00000000..1878fd3b --- /dev/null +++ b/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts @@ -0,0 +1,165 @@ +/** + * Test for message parts ordering + * + * This test ensures that when an assistant message contains: + * - Text content before tool calls + * - Tool calls + * - Tool results + * - Text content after tool results + * + * The parts array maintains the correct order instead of concatenating all text into one part. 
+ */ + +import { describe, it, expect } from 'vitest' +import { StreamProcessor } from '../../src/stream/processor' +import { updateTextPart, updateToolCallPart } from '../../src/message-updaters' +import type { UIMessage } from '../../src/types' + +describe('Message Parts Ordering', () => { + it('should create separate text parts before and after tool calls', async () => { + // Track the message state as it's built + let messages: UIMessage[] = [ + { + id: 'test-msg-1', + role: 'assistant', + parts: [], + createdAt: new Date().toISOString(), + }, + ] + + // Simulate the streaming chunks from Anthropic + const chunks = [ + // Initial text: "I'll search the product catalog for acoustic guitars and recommend a good option for you." + { + type: 'content', + delta: "I'll search the product catalog", + content: "I'll search the product catalog", + }, + { + type: 'content', + delta: ' for acoustic guitars', + content: "I'll search the product catalog for acoustic guitars", + }, + { + type: 'content', + delta: ' and recommend a good option for you.', + content: + "I'll search the product catalog for acoustic guitars and recommend a good option for you.", + }, + + // Tool call: getGuitars + { + type: 'tool_call', + toolCall: { + id: 'tool-1', + type: 'function', + function: { name: 'getGuitars', arguments: '{}' }, + }, + index: 0, + }, + + // Text after tool call: "Great! I found several guitars in the catalog..." + { + type: 'content', + delta: 'Great! I found several guitars', + content: 'Great! I found several guitars', + }, + { + type: 'content', + delta: ' in the catalog.', + content: 'Great! I found several guitars in the catalog.', + }, + + // Second tool call: recommendGuitar + { + type: 'tool_call', + toolCall: { + id: 'tool-2', + type: 'function', + function: { name: 'recommendGuitar', arguments: '{"id": "6"}' }, + }, + index: 1, + }, + + // Done + { type: 'done' }, + ] + + const processor = new StreamProcessor({ + handlers: { + onTextUpdate: (content: string) => { + // Use the actual updateTextPart function + messages = updateTextPart(messages, 'test-msg-1', content) + }, + onToolCallStart: (index: number, id: string, name: string) => { + messages = updateToolCallPart(messages, 'test-msg-1', { + id, + name, + arguments: '', + state: 'awaiting-input', + }) + }, + onToolCallStateChange: ( + index: number, + id: string, + name: string, + state: any, + args: string, + ) => { + messages = updateToolCallPart(messages, 'test-msg-1', { + id, + name, + arguments: args, + state, + }) + }, + }, + }) + + // Process the stream + await processor.process( + (async function* () { + for (const chunk of chunks) { + yield chunk + } + })(), + ) + + const currentMessage = messages[0] + + // Verify the structure + console.log('Message parts:', JSON.stringify(currentMessage.parts, null, 2)) + + // EXPECTED: parts should be [text, tool-call, text, tool-call] + // ACTUAL (BUG): parts are [text (concatenated), tool-call, tool-call] + expect(currentMessage.parts.length).toBeGreaterThan(3) + + // First part should be text + expect(currentMessage.parts[0].type).toBe('text') + if (currentMessage.parts[0].type === 'text') { + expect(currentMessage.parts[0].content).toBe( + "I'll search the product catalog for acoustic guitars and recommend a good option for you.", + ) + } + + // Second part should be tool-call + expect(currentMessage.parts[1].type).toBe('tool-call') + if (currentMessage.parts[1].type === 'tool-call') { + expect(currentMessage.parts[1].name).toBe('getGuitars') + } + + // Third part should be text (NOT 
concatenated with first text) + expect(currentMessage.parts[2].type).toBe('text') + if (currentMessage.parts[2].type === 'text') { + expect(currentMessage.parts[2].content).toBe( + 'Great! I found several guitars in the catalog.', + ) + } + + // Fourth part should be tool-call + expect(currentMessage.parts[3].type).toBe('tool-call') + if (currentMessage.parts[3].type === 'tool-call') { + expect(currentMessage.parts[3].name).toBe('recommendGuitar') + } + }) +}) diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 667fd1f7..644ec7ef 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -49,13 +49,14 @@ "embeddings" ], "dependencies": { - "@tanstack/devtools-event-client": "^0.3.5", - "@alcyone-labs/zod-to-json-schema": "^4.0.10" + "@tanstack/devtools-event-client": "^0.3.5" }, "peerDependencies": { - "zod": "^3.0.0 || ^4.0.0" + "zod": "^3.0.0 || ^4.0.0", + "@alcyone-labs/zod-to-json-schema": "^4.0.0" }, "devDependencies": { + "@alcyone-labs/zod-to-json-schema": "^4.0.10", "@vitest/coverage-v8": "4.0.14", "zod": "^4.1.13" } diff --git a/packages/typescript/ai/vite.config.ts b/packages/typescript/ai/vite.config.ts index 37703b37..a14bae7f 100644 --- a/packages/typescript/ai/vite.config.ts +++ b/packages/typescript/ai/vite.config.ts @@ -27,9 +27,27 @@ const config = defineConfig({ }) export default mergeConfig( - config, - tanstackViteConfig({ - entry: ['./src/index.ts', './src/event-client.ts'], - srcDir: './src', + mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts', './src/event-client.ts'], + srcDir: './src', + }), + ), + defineConfig({ + build: { + rollupOptions: { + external: (id) => { + // Mark @alcyone-labs/zod-to-json-schema as external + if ( + id === '@alcyone-labs/zod-to-json-schema' || + id.startsWith('@alcyone-labs/zod-to-json-schema/') + ) { + return true + } + return false + }, + }, + }, }), ) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9ab48d90..2c73a677 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -442,13 +442,13 @@ importers: packages/typescript/ai: dependencies: - '@alcyone-labs/zod-to-json-schema': - specifier: ^4.0.10 - version: 4.0.10(zod@4.1.13) '@tanstack/devtools-event-client': specifier: ^0.3.5 version: 0.3.5 devDependencies: + '@alcyone-labs/zod-to-json-schema': + specifier: ^4.0.10 + version: 4.0.10(zod@4.1.13) '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.0.14(@types/node@24.10.1)(happy-dom@20.0.10)(jiti@2.6.1)(jsdom@27.2.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)) From 26c222cb1e64aec80eb9603b45c026f84e2832f5 Mon Sep 17 00:00:00 2001 From: Jack Herrington Date: Thu, 27 Nov 2025 18:30:50 -0800 Subject: [PATCH 4/6] moving to inputSchema and outputSchema for tools --- .../ai-anthropic/src/tools/bash-tool.ts | 2 +- .../src/tools/code-execution-tool.ts | 2 +- .../src/tools/computer-use-tool.ts | 2 +- .../ai-anthropic/src/tools/custom-tool.ts | 5 +-- .../ai-anthropic/src/tools/memory-tool.ts | 2 +- .../src/tools/text-editor-tool.ts | 2 +- .../ai-anthropic/src/tools/web-fetch-tool.ts | 2 +- .../ai-anthropic/src/tools/web-search-tool.ts | 2 +- .../tests/anthropic-adapter.test.ts | 18 ++++------ .../stream/message-parts-ordering.test.ts | 34 +++++++++---------- .../ai-gemini/src/tools/computer-use-tool.ts | 2 +- .../ai-gemini/src/tools/google-maps-tool.ts | 2 +- .../src/tools/google-search-retriveal-tool.ts | 2 +- .../ai-gemini/src/tools/google-search-tool.ts | 2 +- 
.../ai-gemini/src/tools/tool-converter.ts | 3 +- .../ai-gemini/tests/gemini-adapter.test.ts | 18 ++++------ .../ai-openai/src/tools/apply-patch-tool.ts | 2 +- .../ai-openai/src/tools/computer-use-tool.ts | 2 +- .../ai-openai/src/tools/custom-tool.ts | 2 +- .../ai-openai/src/tools/file-search-tool.ts | 2 +- .../ai-openai/src/tools/function-tool.ts | 3 +- .../src/tools/image-generation-tool.ts | 2 +- .../ai-openai/src/tools/local-shell-tool.ts | 2 +- .../ai-openai/src/tools/mcp-tool.ts | 2 +- .../ai-openai/src/tools/shell-tool.ts | 2 +- .../src/tools/web-search-preview-tool.ts | 2 +- .../ai-openai/src/tools/web-search-tool.ts | 2 +- .../ai-openai/tests/openai-adapter.test.ts | 18 ++++------ packages/typescript/ai/package.json | 4 +-- .../typescript/ai/src/tools/tool-calls.ts | 2 ++ .../typescript/ai/src/tools/zod-converter.ts | 7 ++-- 31 files changed, 69 insertions(+), 85 deletions(-) diff --git a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts index 9a7d3ba2..fd97b815 100644 --- a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts @@ -1,8 +1,8 @@ +import { z } from 'zod' import type { BetaToolBash20241022, BetaToolBash20250124, } from '@anthropic-ai/sdk/resources/beta' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type BashTool = BetaToolBash20241022 | BetaToolBash20250124 diff --git a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts index aadce668..8fc58fef 100644 --- a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts @@ -1,8 +1,8 @@ +import { z } from 'zod' import type { BetaCodeExecutionTool20250522, BetaCodeExecutionTool20250825, } from '@anthropic-ai/sdk/resources/beta' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type CodeExecutionTool = diff --git a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts index 8954ac1b..2f895a9c 100644 --- a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts @@ -1,8 +1,8 @@ +import { z } from 'zod' import type { BetaToolComputerUse20241022, BetaToolComputerUse20250124, } from '@anthropic-ai/sdk/resources/beta' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = diff --git a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts index 34543583..14c3ee11 100644 --- a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts @@ -1,6 +1,7 @@ +import { convertZodToJsonSchema } from '@tanstack/ai' +import type {Tool} from '@tanstack/ai'; +import type { z } from 'zod' import type { CacheControl } from '../text/text-provider-options' -import { z } from 'zod' -import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' export interface CustomTool { /** diff --git a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts index 4e730f3a..a35ecaa7 100644 --- a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts @@ -1,5 +1,5 @@ -import type { 
BetaMemoryTool20250818 } from '@anthropic-ai/sdk/resources/beta' import { z } from 'zod' +import type { BetaMemoryTool20250818 } from '@anthropic-ai/sdk/resources/beta' import type { Tool } from '@tanstack/ai' export type MemoryTool = BetaMemoryTool20250818 diff --git a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts index 5608d265..e784d52d 100644 --- a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts @@ -1,9 +1,9 @@ +import { z } from 'zod' import type { ToolTextEditor20250124, ToolTextEditor20250429, ToolTextEditor20250728, } from '@anthropic-ai/sdk/resources/messages' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type TextEditorTool = diff --git a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts index e34a2bb3..b1f3edad 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts @@ -1,6 +1,6 @@ +import { z } from 'zod' import type { BetaWebFetchTool20250910 } from '@anthropic-ai/sdk/resources/beta' import type { CacheControl } from '../text/text-provider-options' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebFetchTool = BetaWebFetchTool20250910 diff --git a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts index f8c69ffb..a764acb7 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts @@ -1,6 +1,6 @@ +import { z } from 'zod' import type { WebSearchTool20250305 } from '@anthropic-ai/sdk/resources/messages' import type { CacheControl } from '../text/text-provider-options' -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export type WebSearchTool = WebSearchTool20250305 diff --git a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts index 6958f386..b7dee9f3 100644 --- a/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts +++ b/packages/typescript/ai-anthropic/tests/anthropic-adapter.test.ts @@ -4,6 +4,7 @@ import { Anthropic, type AnthropicProviderOptions, } from '../src/anthropic-adapter' +import { z } from 'zod' const mocks = vi.hoisted(() => { const betaMessagesCreate = vi.fn() @@ -41,18 +42,11 @@ const createAdapter = () => new Anthropic({ apiKey: 'test-key' }) const toolArguments = JSON.stringify({ location: 'Berlin' }) const weatherTool: Tool = { - type: 'function', - function: { - name: 'lookup_weather', - description: 'Return the weather for a city', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - required: ['location'], - }, - }, + name: 'lookup_weather', + description: 'Return the weather for a city', + inputSchema: z.object({ + location: z.string(), + }), } describe('Anthropic adapter option mapping', () => { diff --git a/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts b/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts index 1878fd3b..65fb5ed9 100644 --- a/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts +++ b/packages/typescript/ai-client/tests/stream/message-parts-ordering.test.ts @@ -23,7 +23,7 @@ describe('Message 
Parts Ordering', () => { id: 'test-msg-1', role: 'assistant', parts: [], - createdAt: new Date().toISOString(), + createdAt: new Date(), }, ] @@ -91,7 +91,7 @@ describe('Message Parts Ordering', () => { // Use the actual updateTextPart function messages = updateTextPart(messages, 'test-msg-1', content) }, - onToolCallStart: (index: number, id: string, name: string) => { + onToolCallStart: (_index: number, id: string, name: string) => { messages = updateToolCallPart(messages, 'test-msg-1', { id, name, @@ -100,7 +100,7 @@ describe('Message Parts Ordering', () => { }) }, onToolCallStateChange: ( - index: number, + _index: number, id: string, name: string, state: any, @@ -125,41 +125,39 @@ describe('Message Parts Ordering', () => { })(), ) - const currentMessage = messages[0] + const currentMessage = messages[0]! // Verify the structure - console.log('Message parts:', JSON.stringify(currentMessage.parts, null, 2)) - // EXPECTED: parts should be [text, tool-call, text, tool-call] // ACTUAL (BUG): parts are [text (concatenated), tool-call, tool-call] expect(currentMessage.parts.length).toBeGreaterThan(3) // First part should be text - expect(currentMessage.parts[0].type).toBe('text') - if (currentMessage.parts[0].type === 'text') { - expect(currentMessage.parts[0].content).toBe( + expect(currentMessage.parts[0]!.type).toBe('text') + if (currentMessage.parts[0]!.type === 'text') { + expect(currentMessage.parts[0]!.content).toBe( "I'll search the product catalog for acoustic guitars and recommend a good option for you.", ) } // Second part should be tool-call - expect(currentMessage.parts[1].type).toBe('tool-call') - if (currentMessage.parts[1].type === 'tool-call') { - expect(currentMessage.parts[1].name).toBe('getGuitars') + expect(currentMessage.parts[1]!.type).toBe('tool-call') + if (currentMessage.parts[1]!.type === 'tool-call') { + expect(currentMessage.parts[1]!.name).toBe('getGuitars') } // Third part should be text (NOT concatenated with first text) - expect(currentMessage.parts[2].type).toBe('text') - if (currentMessage.parts[2].type === 'text') { - expect(currentMessage.parts[2].content).toBe( + expect(currentMessage.parts[2]!.type).toBe('text') + if (currentMessage.parts[2]!.type === 'text') { + expect(currentMessage.parts[2]!.content).toBe( 'Great! 
I found several guitars in the catalog.', ) } // Fourth part should be tool-call - expect(currentMessage.parts[3].type).toBe('tool-call') - if (currentMessage.parts[3].type === 'tool-call') { - expect(currentMessage.parts[3].name).toBe('recommendGuitar') + expect(currentMessage.parts[3]!.type).toBe('tool-call') + if (currentMessage.parts[3]!.type === 'tool-call') { + expect(currentMessage.parts[3]!.name).toBe('recommendGuitar') } }) }) diff --git a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts index 43ceef01..e54a8b3e 100644 --- a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts @@ -1,5 +1,5 @@ -import type { ComputerUse } from '@google/genai' import { z } from 'zod' +import type { ComputerUse } from '@google/genai' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = ComputerUse diff --git a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts index 46a9de7c..edc60044 100644 --- a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts @@ -1,5 +1,5 @@ -import type { GoogleMaps } from '@google/genai' import { z } from 'zod' +import type { GoogleMaps } from '@google/genai' import type { Tool } from '@tanstack/ai' export type GoogleMapsTool = GoogleMaps diff --git a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts index 67269f7c..6d511b6e 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts @@ -1,5 +1,5 @@ -import type { GoogleSearchRetrieval } from '@google/genai' import { z } from 'zod' +import type { GoogleSearchRetrieval } from '@google/genai' import type { Tool } from '@tanstack/ai' export type GoogleSearchRetrievalTool = GoogleSearchRetrieval diff --git a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts index 7fae8497..6550eed2 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts @@ -1,5 +1,5 @@ -import type { GoogleSearch } from '@google/genai' import { z } from 'zod' +import type { GoogleSearch } from '@google/genai' import type { Tool } from '@tanstack/ai' export type GoogleSearchTool = GoogleSearch diff --git a/packages/typescript/ai-gemini/src/tools/tool-converter.ts b/packages/typescript/ai-gemini/src/tools/tool-converter.ts index 1d89414f..a31bd6c8 100644 --- a/packages/typescript/ai-gemini/src/tools/tool-converter.ts +++ b/packages/typescript/ai-gemini/src/tools/tool-converter.ts @@ -1,3 +1,4 @@ +import { convertZodToJsonSchema } from '@tanstack/ai' import { convertCodeExecutionToolToAdapterFormat } from './code-execution-tool' import { convertComputerUseToolToAdapterFormat } from './computer-use-tool' import { convertFileSearchToolToAdapterFormat } from './file-search-tool' @@ -5,7 +6,7 @@ import { convertGoogleMapsToolToAdapterFormat } from './google-maps-tool' import { convertGoogleSearchRetrievalToolToAdapterFormat } from './google-search-retriveal-tool' import { convertGoogleSearchToolToAdapterFormat } from './google-search-tool' import { convertUrlContextToolToAdapterFormat } from 
'./url-context-tool' -import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' +import type {Tool} from '@tanstack/ai'; import type { ToolUnion } from '@google/genai' /** diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts index 0516401c..ff7b5ab6 100644 --- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts +++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts @@ -11,6 +11,7 @@ import { GeminiAdapter, type GeminiProviderOptions, } from '../src/gemini-adapter' +import { z } from 'zod' const mocks = vi.hoisted(() => { return { @@ -51,18 +52,11 @@ vi.mock('@google/genai', () => { const createAdapter = () => new GeminiAdapter({ apiKey: 'test-key' }) const weatherTool: Tool = { - type: 'function', - function: { - name: 'lookup_weather', - description: 'Return the weather for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - required: ['location'], - }, - }, + name: 'lookup_weather', + description: 'Return the weather for a location', + inputSchema: z.object({ + location: z.string(), + }), } const createStream = (chunks: Array>) => { diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts index a816d0c9..61a664ce 100644 --- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts +++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type ApplyPatchTool = OpenAI.Responses.ApplyPatchTool diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts index b3b0e6e5..a266fd0c 100644 --- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type ComputerUseTool = OpenAI.Responses.ComputerTool diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts index b663703e..9cf52319 100644 --- a/packages/typescript/ai-openai/src/tools/custom-tool.ts +++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type CustomTool = OpenAI.Responses.CustomTool diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts index 6fdc5500..2bfd9a59 100644 --- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' const validateMaxNumResults = (maxNumResults: number | undefined) => { diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index c6796808..51702b70 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -1,5 +1,6 @@ +import { 
convertZodToJsonSchema } from '@tanstack/ai' +import type {Tool} from '@tanstack/ai'; import type OpenAI from 'openai' -import { convertZodToJsonSchema, type Tool } from '@tanstack/ai' export type FunctionTool = OpenAI.Responses.FunctionTool diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts index 1a165fd2..d9e89b6a 100644 --- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts +++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type ImageGenerationTool = OpenAI.Responses.Tool.ImageGeneration diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts index a19c761b..d6ca2543 100644 --- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type LocalShellTool = OpenAI.Responses.Tool.LocalShell diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts index 123f198c..d9398299 100644 --- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts +++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type MCPTool = OpenAI.Responses.Tool.Mcp diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts index de8bf459..83aff438 100644 --- a/packages/typescript/ai-openai/src/tools/shell-tool.ts +++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type ShellTool = OpenAI.Responses.FunctionShellTool diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts index 795f0fb0..1695afd8 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type WebSearchPreviewTool = OpenAI.Responses.WebSearchPreviewTool diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts index e2ca606d..7e567c70 100644 --- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts @@ -1,5 +1,5 @@ -import type OpenAI from 'openai' import { z } from 'zod' +import type OpenAI from 'openai' import type { Tool } from '@tanstack/ai' export type WebSearchTool = OpenAI.Responses.WebSearchTool diff --git a/packages/typescript/ai-openai/tests/openai-adapter.test.ts b/packages/typescript/ai-openai/tests/openai-adapter.test.ts index 9620438f..740f8722 100644 --- a/packages/typescript/ai-openai/tests/openai-adapter.test.ts +++ 
b/packages/typescript/ai-openai/tests/openai-adapter.test.ts @@ -1,24 +1,18 @@ import { describe, it, expect, beforeEach, vi } from 'vitest' import { chat, type Tool, type StreamChunk } from '@tanstack/ai' import { OpenAI, type OpenAIProviderOptions } from '../src/openai-adapter' +import { z } from 'zod' const createAdapter = () => new OpenAI({ apiKey: 'test-key' }) const toolArguments = JSON.stringify({ location: 'Berlin' }) const weatherTool: Tool = { - type: 'function', - function: { - name: 'lookup_weather', - description: 'Return the forecast for a location', - parameters: { - type: 'object', - properties: { - location: { type: 'string' }, - }, - required: ['location'], - }, - }, + name: 'lookup_weather', + description: 'Return the forecast for a location', + inputSchema: z.object({ + location: z.string(), + }), } function createMockChatCompletionsStream( diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 644ec7ef..2eeedd14 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -52,8 +52,8 @@ "@tanstack/devtools-event-client": "^0.3.5" }, "peerDependencies": { - "zod": "^3.0.0 || ^4.0.0", - "@alcyone-labs/zod-to-json-schema": "^4.0.0" + "@alcyone-labs/zod-to-json-schema": "^4.0.0", + "zod": "^3.0.0 || ^4.0.0" }, "devDependencies": { "@alcyone-labs/zod-to-json-schema": "^4.0.10", diff --git a/packages/typescript/ai/src/tools/tool-calls.ts b/packages/typescript/ai/src/tools/tool-calls.ts index 207ba3ac..802df0a4 100644 --- a/packages/typescript/ai/src/tools/tool-calls.ts +++ b/packages/typescript/ai/src/tools/tool-calls.ts @@ -131,6 +131,7 @@ export class ToolCallManager { } // Validate input against inputSchema + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition if (tool.inputSchema) { try { args = tool.inputSchema.parse(args) @@ -278,6 +279,7 @@ export async function executeToolCalls( } // Validate input against inputSchema + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition if (tool.inputSchema) { try { input = tool.inputSchema.parse(input) diff --git a/packages/typescript/ai/src/tools/zod-converter.ts b/packages/typescript/ai/src/tools/zod-converter.ts index e424adb2..e5011ec5 100644 --- a/packages/typescript/ai/src/tools/zod-converter.ts +++ b/packages/typescript/ai/src/tools/zod-converter.ts @@ -1,5 +1,5 @@ -import type { z } from 'zod' import { zodToJsonSchema } from '@alcyone-labs/zod-to-json-schema' +import type { z } from 'zod' /** * Converts a Zod schema to JSON Schema format compatible with LLM providers. 
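A note on how this converter is meant to be consumed: the adapter-side tool files earlier in this patch (e.g. the Anthropic `custom-tool.ts` and OpenAI `function-tool.ts`) now import `convertZodToJsonSchema` to turn a tool's Zod `inputSchema` into provider JSON Schema. A minimal sketch of that mapping follows; the `ProviderFunctionTool` shape and `toProviderTool` helper are hypothetical stand-ins for illustration, not APIs from this repo.

```ts
import { convertZodToJsonSchema } from '@tanstack/ai'
import type { Tool } from '@tanstack/ai'

// Hypothetical provider wire format for a function tool (stand-in only).
interface ProviderFunctionTool {
  type: 'function'
  name: string
  description: string
  parameters: Record<string, any>
}

// Map the unified Tool shape (name / description / Zod inputSchema)
// onto the provider format by converting the schema to JSON Schema.
function toProviderTool(tool: Tool): ProviderFunctionTool {
  return {
    type: 'function',
    name: tool.name,
    description: tool.description,
    parameters: convertZodToJsonSchema(tool.inputSchema),
  }
}
```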
@@ -39,17 +39,16 @@ export function convertZodToJsonSchema(schema: z.ZodType): Record { // Remove $schema property as it's not needed for LLM providers let result = jsonSchema as Record - if (result && typeof result === 'object' && '$schema' in result) { + if (typeof result === 'object' && '$schema' in result) { const { $schema, ...rest } = result result = rest } // Ensure object schemas always have type: "object" // This fixes cases where zod-to-json-schema doesn't set type for empty objects - if (result && typeof result === 'object') { + if (typeof result === 'object') { // Check if the input schema is a ZodObject by inspecting its internal structure const isZodObject = - schema && typeof schema === 'object' && 'def' in schema && (schema as any).def?.type === 'object' From 46ddf8a1ac63ec30d699e609d2f7b6c859a5488e Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 28 Nov 2025 02:33:21 +0000 Subject: [PATCH 5/6] ci: apply automated fixes --- docs/reference/classes/ToolCallManager.md | 4 +- .../functions/convertZodToJsonSchema.md | 52 ++++++ docs/reference/functions/tool.md | 104 +++++------ docs/reference/index.md | 1 + docs/reference/interfaces/AIAdapter.md | 22 +-- docs/reference/interfaces/AIAdapterConfig.md | 12 +- docs/reference/interfaces/AgentLoopState.md | 8 +- .../ApprovalRequestedStreamChunk.md | 18 +- docs/reference/interfaces/BaseStreamChunk.md | 10 +- .../interfaces/ChatCompletionChunk.md | 14 +- docs/reference/interfaces/ChatOptions.md | 24 +-- .../interfaces/ContentStreamChunk.md | 16 +- docs/reference/interfaces/DoneStreamChunk.md | 14 +- docs/reference/interfaces/EmbeddingOptions.md | 8 +- docs/reference/interfaces/EmbeddingResult.md | 10 +- docs/reference/interfaces/ErrorStreamChunk.md | 12 +- docs/reference/interfaces/ModelMessage.md | 12 +- docs/reference/interfaces/ResponseFormat.md | 8 +- .../interfaces/SummarizationOptions.md | 12 +- .../interfaces/SummarizationResult.md | 10 +- .../interfaces/ThinkingStreamChunk.md | 14 +- docs/reference/interfaces/Tool.md | 161 ++++++++++-------- docs/reference/interfaces/ToolCall.md | 8 +- .../interfaces/ToolCallStreamChunk.md | 14 +- docs/reference/interfaces/ToolConfig.md | 4 +- .../ToolInputAvailableStreamChunk.md | 16 +- .../interfaces/ToolResultStreamChunk.md | 14 +- .../type-aliases/AgentLoopStrategy.md | 2 +- .../type-aliases/ChatStreamOptionsUnion.md | 2 +- .../type-aliases/ExtractModelsFromAdapter.md | 2 +- docs/reference/type-aliases/StreamChunk.md | 2 +- .../reference/type-aliases/StreamChunkType.md | 2 +- .../ai-anthropic/src/tools/custom-tool.ts | 4 +- .../ai-gemini/src/tools/tool-converter.ts | 4 +- .../ai-openai/src/tools/function-tool.ts | 4 +- 35 files changed, 342 insertions(+), 282 deletions(-) create mode 100644 docs/reference/functions/convertZodToJsonSchema.md diff --git a/docs/reference/classes/ToolCallManager.md b/docs/reference/classes/ToolCallManager.md index 1d713c08..12622dc1 100644 --- a/docs/reference/classes/ToolCallManager.md +++ b/docs/reference/classes/ToolCallManager.md @@ -53,7 +53,7 @@ Defined in: [tools/tool-calls.ts:45](https://github.com/TanStack/ai/blob/main/pa ##### tools -readonly [`Tool`](../../interfaces/Tool.md)[] +readonly [`Tool`](../../interfaces/Tool.md)\<`ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\>, `ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\>\>[] #### Returns @@ -126,7 +126,7 @@ Handles streaming tool calls by accumulating arguments clear(): void; ``` 
-Defined in: [tools/tool-calls.ts:171](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L171) +Defined in: [tools/tool-calls.ts:194](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L194) Clear the tool calls map for the next iteration diff --git a/docs/reference/functions/convertZodToJsonSchema.md b/docs/reference/functions/convertZodToJsonSchema.md new file mode 100644 index 00000000..326e7233 --- /dev/null +++ b/docs/reference/functions/convertZodToJsonSchema.md @@ -0,0 +1,52 @@ +--- +id: convertZodToJsonSchema +title: convertZodToJsonSchema +--- + +# Function: convertZodToJsonSchema() + +```ts +function convertZodToJsonSchema(schema): Record; +``` + +Defined in: [tools/zod-converter.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/zod-converter.ts#L33) + +Converts a Zod schema to JSON Schema format compatible with LLM providers. + +Uses @alcyone-labs/zod-to-json-schema which is compatible with Zod v4. + +## Parameters + +### schema + +`ZodType` + +Zod schema to convert + +## Returns + +`Record`\<`string`, `any`\> + +JSON Schema object that can be sent to LLM providers + +## Example + +```typescript +import { z } from 'zod'; + +const schema = z.object({ + location: z.string().describe('City name'), + unit: z.enum(['celsius', 'fahrenheit']).optional() +}); + +const jsonSchema = convertZodToJsonSchema(schema); +// Returns: +// { +// type: 'object', +// properties: { +// location: { type: 'string', description: 'City name' }, +// unit: { type: 'string', enum: ['celsius', 'fahrenheit'] } +// }, +// required: ['location'] +// } +``` diff --git a/docs/reference/functions/tool.md b/docs/reference/functions/tool.md index f5fb6c7b..a0655a98 100644 --- a/docs/reference/functions/tool.md +++ b/docs/reference/functions/tool.md @@ -6,103 +6,83 @@ title: tool # Function: tool() ```ts -function tool(config): Tool; +function tool(config): Tool; ``` -Defined in: [tools/tool-utils.ts:70](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-utils.ts#L70) +Defined in: [tools/tool-utils.ts:34](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-utils.ts#L34) Helper to define a tool with enforced type safety. -Automatically infers the execute function argument types from the parameters schema. -User must provide the full Tool structure with type: "function" and function: {...} + +Automatically infers TypeScript types from Zod schemas, providing +full type safety for tool inputs and outputs. ## Type Parameters -### TProps +### TInput -`TProps` *extends* `Record`\<`string`, `any`\> +`TInput` *extends* `ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\> -### TRequired +### TOutput -`TRequired` *extends* readonly `string`[] \| `undefined` +`TOutput` *extends* `ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\> = `ZodAny` ## Parameters ### config -#### execute - -(`args`) => `string` \| `Promise`\<`string`\> - -#### function - -\{ - `description`: `string`; - `name`: `string`; - `parameters`: \{ - `properties`: `TProps`; - `required?`: `TRequired`; - `type`: `"object"`; - \}; -\} - -#### function.description +#### description `string` -#### function.name +#### execute? 
-`string` +(`args`) => `output`\<`TOutput`\> \| `Promise`\<`output`\<`TOutput`\>\> -#### function.parameters +#### inputSchema -\{ - `properties`: `TProps`; - `required?`: `TRequired`; - `type`: `"object"`; -\} +`TInput` -#### function.parameters.properties +#### metadata? -`TProps` +`Record`\<`string`, `any`\> -#### function.parameters.required? +#### name -`TRequired` +`string` -#### function.parameters.type +#### needsApproval? -`"object"` +`boolean` -#### type +#### outputSchema? -`"function"` +`TOutput` ## Returns -[`Tool`](../../interfaces/Tool.md) +[`Tool`](../../interfaces/Tool.md)\<`TInput`, `TOutput`\> ## Example ```typescript -const tools = { - myTool: tool({ - type: "function", - function: { - name: "myTool", - description: "My tool description", - parameters: { - type: "object", - properties: { - id: { type: "string", description: "The ID" }, - optional: { type: "number", description: "Optional param" }, - }, - required: ["id"], - }, - }, - execute: async (args) => { - // ✅ args is automatically typed as { id: string; optional?: number } - return args.id; - }, +import { tool } from '@tanstack/ai'; +import { z } from 'zod'; + +const getWeather = tool({ + name: 'get_weather', + description: 'Get the current weather for a location', + inputSchema: z.object({ + location: z.string().describe('The city and state, e.g. San Francisco, CA'), + unit: z.enum(['celsius', 'fahrenheit']).optional(), + }), + outputSchema: z.object({ + temperature: z.number(), + conditions: z.string(), }), -}; + execute: async ({ location, unit }) => { + // args are fully typed: { location: string; unit?: "celsius" | "fahrenheit" } + const data = await fetchWeather(location, unit); + return data; // validated against outputSchema + }, +}); ``` diff --git a/docs/reference/index.md b/docs/reference/index.md index a1632d55..0b79ad15 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -53,6 +53,7 @@ title: "@tanstack/ai" - [chat](../functions/chat.md) - [chatOptions](../functions/chatOptions.md) - [combineStrategies](../functions/combineStrategies.md) +- [convertZodToJsonSchema](../functions/convertZodToJsonSchema.md) - [embedding](../functions/embedding.md) - [maxIterations](../functions/maxIterations.md) - [summarize](../functions/summarize.md) diff --git a/docs/reference/interfaces/AIAdapter.md b/docs/reference/interfaces/AIAdapter.md index 6595d6dc..086d77e1 100644 --- a/docs/reference/interfaces/AIAdapter.md +++ b/docs/reference/interfaces/AIAdapter.md @@ -5,7 +5,7 @@ title: AIAdapter # Interface: AIAdapter\ -Defined in: [types.ts:425](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L425) +Defined in: [types.ts:440](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L440) AI adapter interface with support for endpoint-specific models and provider options. 
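Since the adapter contract below centers on `chatStream` returning an `AsyncIterable` of stream chunks, a small consumption sketch may help; it assumes `StreamChunk` is the discriminated union of the chunk interfaces documented in this reference (content, done, error, tool-call, ...), which matches how the adapter tests import and use it.

```ts
import type { StreamChunk } from '@tanstack/ai'

// Drain a chunk stream (e.g. the result of adapter.chatStream(...)),
// accumulating text deltas and surfacing errors and the finish reason.
async function collectText(stream: AsyncIterable<StreamChunk>): Promise<string> {
  let text = ''
  for await (const chunk of stream) {
    switch (chunk.type) {
      case 'content':
        text += chunk.delta // ContentStreamChunk carries delta + accumulated content
        break
      case 'done':
        console.log('finish reason:', chunk.finishReason) // DoneStreamChunk
        break
      case 'error':
        throw new Error(chunk.error.message) // ErrorStreamChunk
      default:
        break // tool-call, thinking, approval-requested, etc. ignored here
    }
  }
  return text
}
```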
@@ -51,7 +51,7 @@ Generic parameters: optional _chatProviderOptions: TChatProviderOptions; ``` -Defined in: [types.ts:441](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L441) +Defined in: [types.ts:456](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L456) *** @@ -61,7 +61,7 @@ Defined in: [types.ts:441](https://github.com/TanStack/ai/blob/main/packages/typ optional _embeddingProviderOptions: TEmbeddingProviderOptions; ``` -Defined in: [types.ts:442](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L442) +Defined in: [types.ts:457](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L457) *** @@ -71,7 +71,7 @@ Defined in: [types.ts:442](https://github.com/TanStack/ai/blob/main/packages/typ _modelProviderOptionsByName: TModelProviderOptionsByName; ``` -Defined in: [types.ts:448](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L448) +Defined in: [types.ts:463](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L463) Type-only map from model name to its specific provider options. Used by the core AI types to narrow providerOptions based on the selected model. @@ -85,7 +85,7 @@ Must be provided by all adapters. optional _providerOptions: TChatProviderOptions; ``` -Defined in: [types.ts:440](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L440) +Defined in: [types.ts:455](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L455) *** @@ -95,7 +95,7 @@ Defined in: [types.ts:440](https://github.com/TanStack/ai/blob/main/packages/typ chatStream: (options) => AsyncIterable; ``` -Defined in: [types.ts:451](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L451) +Defined in: [types.ts:466](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L466) #### Parameters @@ -115,7 +115,7 @@ Defined in: [types.ts:451](https://github.com/TanStack/ai/blob/main/packages/typ createEmbeddings: (options) => Promise; ``` -Defined in: [types.ts:459](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L459) +Defined in: [types.ts:474](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L474) #### Parameters @@ -135,7 +135,7 @@ Defined in: [types.ts:459](https://github.com/TanStack/ai/blob/main/packages/typ optional embeddingModels: TEmbeddingModels; ``` -Defined in: [types.ts:437](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L437) +Defined in: [types.ts:452](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L452) Models that support embeddings @@ -147,7 +147,7 @@ Models that support embeddings models: TChatModels; ``` -Defined in: [types.ts:434](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L434) +Defined in: [types.ts:449](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L449) Models that support chat/text completion @@ -159,7 +159,7 @@ Models that support chat/text completion name: string; ``` -Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L432) +Defined in: [types.ts:447](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L447) *** @@ -169,7 +169,7 @@ Defined in: [types.ts:432](https://github.com/TanStack/ai/blob/main/packages/typ summarize: (options) => Promise; ``` -Defined in: 
[types.ts:456](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L456) +Defined in: [types.ts:471](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L471) #### Parameters diff --git a/docs/reference/interfaces/AIAdapterConfig.md b/docs/reference/interfaces/AIAdapterConfig.md index aed5908b..1fbf7a92 100644 --- a/docs/reference/interfaces/AIAdapterConfig.md +++ b/docs/reference/interfaces/AIAdapterConfig.md @@ -5,7 +5,7 @@ title: AIAdapterConfig # Interface: AIAdapterConfig -Defined in: [types.ts:462](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L462) +Defined in: [types.ts:477](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L477) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:462](https://github.com/TanStack/ai/blob/main/packages/typ optional apiKey: string; ``` -Defined in: [types.ts:463](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L463) +Defined in: [types.ts:478](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L478) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:463](https://github.com/TanStack/ai/blob/main/packages/typ optional baseUrl: string; ``` -Defined in: [types.ts:464](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L464) +Defined in: [types.ts:479](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L479) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:464](https://github.com/TanStack/ai/blob/main/packages/typ optional headers: Record; ``` -Defined in: [types.ts:467](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L467) +Defined in: [types.ts:482](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L482) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:467](https://github.com/TanStack/ai/blob/main/packages/typ optional maxRetries: number; ``` -Defined in: [types.ts:466](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L466) +Defined in: [types.ts:481](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L481) *** @@ -55,4 +55,4 @@ Defined in: [types.ts:466](https://github.com/TanStack/ai/blob/main/packages/typ optional timeout: number; ``` -Defined in: [types.ts:465](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L465) +Defined in: [types.ts:480](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L480) diff --git a/docs/reference/interfaces/AgentLoopState.md b/docs/reference/interfaces/AgentLoopState.md index 14a78b4f..45e76224 100644 --- a/docs/reference/interfaces/AgentLoopState.md +++ b/docs/reference/interfaces/AgentLoopState.md @@ -5,7 +5,7 @@ title: AgentLoopState # Interface: AgentLoopState -Defined in: [types.ts:205](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L205) +Defined in: [types.ts:220](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L220) State passed to agent loop strategy for determining whether to continue @@ -17,7 +17,7 @@ State passed to agent loop strategy for determining whether to continue finishReason: string | null; ``` -Defined in: [types.ts:211](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L211) +Defined in: [types.ts:226](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L226) Finish reason from the last response @@ -29,7 +29,7 @@ Finish reason from 
the last response iterationCount: number; ``` -Defined in: [types.ts:207](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L207) +Defined in: [types.ts:222](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L222) Current iteration count (0-indexed) @@ -41,6 +41,6 @@ Current iteration count (0-indexed) messages: ModelMessage[]; ``` -Defined in: [types.ts:209](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L209) +Defined in: [types.ts:224](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L224) Current messages array diff --git a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md index 0d8e25f0..dacd2d52 100644 --- a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md +++ b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md @@ -5,7 +5,7 @@ title: ApprovalRequestedStreamChunk # Interface: ApprovalRequestedStreamChunk -Defined in: [types.ts:323](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L323) +Defined in: [types.ts:338](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L338) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:323](https://github.com/TanStack/ai/blob/main/packages/typ approval: object; ``` -Defined in: [types.ts:328](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L328) +Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) #### id @@ -41,7 +41,7 @@ needsApproval: true; id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -55,7 +55,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ input: any; ``` -Defined in: [types.ts:327](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L327) +Defined in: [types.ts:342](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L342) *** @@ -65,7 +65,7 @@ Defined in: [types.ts:327](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -79,7 +79,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -93,7 +93,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:325](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L325) +Defined in: [types.ts:340](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L340) *** @@ -103,7 +103,7 @@ Defined in: [types.ts:325](https://github.com/TanStack/ai/blob/main/packages/typ toolName: string; ``` -Defined in: [types.ts:326](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L326) 
+Defined in: [types.ts:341](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L341) *** @@ -113,7 +113,7 @@ Defined in: [types.ts:326](https://github.com/TanStack/ai/blob/main/packages/typ type: "approval-requested"; ``` -Defined in: [types.ts:324](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L324) +Defined in: [types.ts:339](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L339) #### Overrides diff --git a/docs/reference/interfaces/BaseStreamChunk.md b/docs/reference/interfaces/BaseStreamChunk.md index f8fc9143..ca4060e5 100644 --- a/docs/reference/interfaces/BaseStreamChunk.md +++ b/docs/reference/interfaces/BaseStreamChunk.md @@ -5,7 +5,7 @@ title: BaseStreamChunk # Interface: BaseStreamChunk -Defined in: [types.ts:272](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L272) +Defined in: [types.ts:287](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L287) ## Extended by @@ -26,7 +26,7 @@ Defined in: [types.ts:272](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) *** @@ -36,7 +36,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) *** @@ -46,7 +46,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) *** @@ -56,4 +56,4 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ type: StreamChunkType; ``` -Defined in: [types.ts:273](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L273) +Defined in: [types.ts:288](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L288) diff --git a/docs/reference/interfaces/ChatCompletionChunk.md b/docs/reference/interfaces/ChatCompletionChunk.md index ffe87890..09856dff 100644 --- a/docs/reference/interfaces/ChatCompletionChunk.md +++ b/docs/reference/interfaces/ChatCompletionChunk.md @@ -5,7 +5,7 @@ title: ChatCompletionChunk # Interface: ChatCompletionChunk -Defined in: [types.ts:362](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L362) +Defined in: [types.ts:377](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L377) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:362](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:365](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L365) +Defined in: [types.ts:380](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L380) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:365](https://github.com/TanStack/ai/blob/main/packages/typ optional finishReason: "stop" | "length" | "content_filter" | null; ``` -Defined in: 
[types.ts:367](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L367) +Defined in: [types.ts:382](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L382) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:367](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:363](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L363) +Defined in: [types.ts:378](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L378) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:363](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:364](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L364) +Defined in: [types.ts:379](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L379) *** @@ -55,7 +55,7 @@ Defined in: [types.ts:364](https://github.com/TanStack/ai/blob/main/packages/typ optional role: "assistant"; ``` -Defined in: [types.ts:366](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L366) +Defined in: [types.ts:381](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L381) *** @@ -65,7 +65,7 @@ Defined in: [types.ts:366](https://github.com/TanStack/ai/blob/main/packages/typ optional usage: object; ``` -Defined in: [types.ts:368](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L368) +Defined in: [types.ts:383](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L383) #### completionTokens diff --git a/docs/reference/interfaces/ChatOptions.md b/docs/reference/interfaces/ChatOptions.md index 5866ecd1..908b6f18 100644 --- a/docs/reference/interfaces/ChatOptions.md +++ b/docs/reference/interfaces/ChatOptions.md @@ -5,7 +5,7 @@ title: ChatOptions # Interface: ChatOptions\ -Defined in: [types.ts:231](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L231) +Defined in: [types.ts:246](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L246) Options passed into the SDK and further piped to the AI provider. @@ -35,7 +35,7 @@ Options passed into the SDK and further piped to the AI provider. optional abortController: AbortController; ``` -Defined in: [types.ts:259](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L259) +Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) AbortController for request cancellation. 
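Because `abortController` is a standard `AbortController` handed in through the chat options, caller-side cancellation looks roughly like the sketch below; the model id is a placeholder and the eventual `chat()`/`chatStream()` call that consumes these options is elided.

```ts
// Build chat options with an AbortController so the caller can cancel
// an in-flight request (e.g. from a "stop generating" button).
const abortController = new AbortController()

const chatOptions = {
  model: 'some-chat-model', // placeholder model id
  messages: [{ role: 'user' as const, content: 'Recommend an acoustic guitar' }],
  abortController, // ChatOptions.abortController
}

// ...pass chatOptions to chat()/chatStream(), then cancel when needed:
abortController.abort()
```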
@@ -62,7 +62,7 @@ https://developer.mozilla.org/en-US/docs/Web/API/AbortController optional agentLoopStrategy: AgentLoopStrategy; ``` -Defined in: [types.ts:241](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L241) +Defined in: [types.ts:256](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L256) *** @@ -72,7 +72,7 @@ Defined in: [types.ts:241](https://github.com/TanStack/ai/blob/main/packages/typ messages: ModelMessage[]; ``` -Defined in: [types.ts:238](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L238) +Defined in: [types.ts:253](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L253) *** @@ -82,7 +82,7 @@ Defined in: [types.ts:238](https://github.com/TanStack/ai/blob/main/packages/typ model: TModel; ``` -Defined in: [types.ts:237](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L237) +Defined in: [types.ts:252](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L252) *** @@ -92,7 +92,7 @@ Defined in: [types.ts:237](https://github.com/TanStack/ai/blob/main/packages/typ optional options: CommonOptions; ``` -Defined in: [types.ts:242](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L242) +Defined in: [types.ts:257](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L257) *** @@ -102,7 +102,7 @@ Defined in: [types.ts:242](https://github.com/TanStack/ai/blob/main/packages/typ optional output: TOutput; ``` -Defined in: [types.ts:245](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L245) +Defined in: [types.ts:260](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L260) *** @@ -112,7 +112,7 @@ Defined in: [types.ts:245](https://github.com/TanStack/ai/blob/main/packages/typ optional providerOptions: TProviderOptionsForModel; ``` -Defined in: [types.ts:243](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L243) +Defined in: [types.ts:258](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L258) *** @@ -122,7 +122,7 @@ Defined in: [types.ts:243](https://github.com/TanStack/ai/blob/main/packages/typ optional request: Request | RequestInit; ``` -Defined in: [types.ts:244](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L244) +Defined in: [types.ts:259](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L259) *** @@ -132,14 +132,14 @@ Defined in: [types.ts:244](https://github.com/TanStack/ai/blob/main/packages/typ optional systemPrompts: string[]; ``` -Defined in: [types.ts:240](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L240) +Defined in: [types.ts:255](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L255) *** ### tools? 
```ts -optional tools: Tool[]; +optional tools: Tool>, ZodType>>[]; ``` -Defined in: [types.ts:239](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L239) +Defined in: [types.ts:254](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L254) diff --git a/docs/reference/interfaces/ContentStreamChunk.md b/docs/reference/interfaces/ContentStreamChunk.md index 9f953f9a..37f2b11d 100644 --- a/docs/reference/interfaces/ContentStreamChunk.md +++ b/docs/reference/interfaces/ContentStreamChunk.md @@ -5,7 +5,7 @@ title: ContentStreamChunk # Interface: ContentStreamChunk -Defined in: [types.ts:279](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L279) +Defined in: [types.ts:294](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L294) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:279](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:282](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L282) +Defined in: [types.ts:297](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L297) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:282](https://github.com/TanStack/ai/blob/main/packages/typ delta: string; ``` -Defined in: [types.ts:281](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L281) +Defined in: [types.ts:296](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L296) *** @@ -39,7 +39,7 @@ Defined in: [types.ts:281](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -53,7 +53,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -67,7 +67,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ optional role: "assistant"; ``` -Defined in: [types.ts:283](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L283) +Defined in: [types.ts:298](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L298) *** @@ -77,7 +77,7 @@ Defined in: [types.ts:283](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -91,7 +91,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ type: "content"; ``` -Defined in: [types.ts:280](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L280) +Defined in: [types.ts:295](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L295) #### Overrides diff --git a/docs/reference/interfaces/DoneStreamChunk.md b/docs/reference/interfaces/DoneStreamChunk.md index 51689b8a..559a8ece 100644 --- a/docs/reference/interfaces/DoneStreamChunk.md +++ 
b/docs/reference/interfaces/DoneStreamChunk.md @@ -5,7 +5,7 @@ title: DoneStreamChunk # Interface: DoneStreamChunk -Defined in: [types.ts:305](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L305) +Defined in: [types.ts:320](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L320) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:305](https://github.com/TanStack/ai/blob/main/packages/typ finishReason: "stop" | "length" | "content_filter" | "tool_calls" | null; ``` -Defined in: [types.ts:307](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L307) +Defined in: [types.ts:322](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L322) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:307](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -43,7 +43,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ type: "done"; ``` -Defined in: [types.ts:306](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L306) +Defined in: [types.ts:321](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L321) #### Overrides @@ -85,7 +85,7 @@ Defined in: [types.ts:306](https://github.com/TanStack/ai/blob/main/packages/typ optional usage: object; ``` -Defined in: [types.ts:308](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L308) +Defined in: [types.ts:323](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L323) #### completionTokens diff --git a/docs/reference/interfaces/EmbeddingOptions.md b/docs/reference/interfaces/EmbeddingOptions.md index 0c73ef4d..ba088507 100644 --- a/docs/reference/interfaces/EmbeddingOptions.md +++ b/docs/reference/interfaces/EmbeddingOptions.md @@ -5,7 +5,7 @@ title: EmbeddingOptions # Interface: EmbeddingOptions -Defined in: [types.ts:394](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L394) +Defined in: [types.ts:409](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L409) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:394](https://github.com/TanStack/ai/blob/main/packages/typ optional dimensions: number; ``` -Defined in: [types.ts:397](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L397) +Defined in: [types.ts:412](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L412) *** @@ -25,7 +25,7 @@ Defined in: 
[types.ts:397](https://github.com/TanStack/ai/blob/main/packages/typ input: string | string[]; ``` -Defined in: [types.ts:396](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L396) +Defined in: [types.ts:411](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L411) *** @@ -35,4 +35,4 @@ Defined in: [types.ts:396](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) +Defined in: [types.ts:410](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L410) diff --git a/docs/reference/interfaces/EmbeddingResult.md b/docs/reference/interfaces/EmbeddingResult.md index f1ec170c..bc45f304 100644 --- a/docs/reference/interfaces/EmbeddingResult.md +++ b/docs/reference/interfaces/EmbeddingResult.md @@ -5,7 +5,7 @@ title: EmbeddingResult # Interface: EmbeddingResult -Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) +Defined in: [types.ts:415](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L415) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typ embeddings: number[][]; ``` -Defined in: [types.ts:403](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L403) +Defined in: [types.ts:418](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L418) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:403](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:401](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L401) +Defined in: [types.ts:416](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L416) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:401](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:402](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L402) +Defined in: [types.ts:417](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L417) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:402](https://github.com/TanStack/ai/blob/main/packages/typ usage: object; ``` -Defined in: [types.ts:404](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L404) +Defined in: [types.ts:419](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L419) #### promptTokens diff --git a/docs/reference/interfaces/ErrorStreamChunk.md b/docs/reference/interfaces/ErrorStreamChunk.md index a75aba12..00561e4d 100644 --- a/docs/reference/interfaces/ErrorStreamChunk.md +++ b/docs/reference/interfaces/ErrorStreamChunk.md @@ -5,7 +5,7 @@ title: ErrorStreamChunk # Interface: ErrorStreamChunk -Defined in: [types.ts:315](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L315) +Defined in: [types.ts:330](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L330) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:315](https://github.com/TanStack/ai/blob/main/packages/typ error: object; ``` -Defined in: [types.ts:317](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L317) +Defined in: [types.ts:332](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L332) #### code? 
@@ -41,7 +41,7 @@ message: string; id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -55,7 +55,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -69,7 +69,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -83,7 +83,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ type: "error"; ``` -Defined in: [types.ts:316](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L316) +Defined in: [types.ts:331](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L331) #### Overrides diff --git a/docs/reference/interfaces/ModelMessage.md b/docs/reference/interfaces/ModelMessage.md index 40369099..703f45e0 100644 --- a/docs/reference/interfaces/ModelMessage.md +++ b/docs/reference/interfaces/ModelMessage.md @@ -5,7 +5,7 @@ title: ModelMessage # Interface: ModelMessage -Defined in: [types.ts:12](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L12) +Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L13) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:12](https://github.com/TanStack/ai/blob/main/packages/type content: string | null; ``` -Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L14) +Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L15) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/type optional name: string; ``` -Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L15) +Defined in: [types.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L16) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:15](https://github.com/TanStack/ai/blob/main/packages/type role: "system" | "user" | "assistant" | "tool"; ``` -Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L13) +Defined in: [types.ts:14](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L14) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:13](https://github.com/TanStack/ai/blob/main/packages/type optional toolCallId: string; ``` -Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L17) +Defined in: [types.ts:18](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L18) *** @@ -55,4 +55,4 @@ Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/type optional toolCalls: ToolCall[]; ``` -Defined in: 
[types.ts:16](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L16) +Defined in: [types.ts:17](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L17) diff --git a/docs/reference/interfaces/ResponseFormat.md b/docs/reference/interfaces/ResponseFormat.md index 62f53a2f..c6a2f616 100644 --- a/docs/reference/interfaces/ResponseFormat.md +++ b/docs/reference/interfaces/ResponseFormat.md @@ -5,7 +5,7 @@ title: ResponseFormat # Interface: ResponseFormat\ -Defined in: [types.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L121) +Defined in: [types.ts:136](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L136) Structured output format specification. @@ -33,7 +33,7 @@ TypeScript type of the expected data structure (for type safety) optional __data: TData; ``` -Defined in: [types.ts:199](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L199) +Defined in: [types.ts:214](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L214) **`Internal`** @@ -50,7 +50,7 @@ Allows the SDK to know what type to expect when parsing the response. optional json_schema: object; ``` -Defined in: [types.ts:138](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L138) +Defined in: [types.ts:153](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L153) JSON schema specification (required when type is "json_schema"). @@ -139,7 +139,7 @@ https://platform.openai.com/docs/guides/structured-outputs#strict-mode type: "json_object" | "json_schema"; ``` -Defined in: [types.ts:130](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L130) +Defined in: [types.ts:145](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L145) Type of structured output. 
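
For context, the ModelMessage hunks above only shift the `types.ts` line references; the documented shape itself is unchanged. A minimal sketch of a conversation built from those fields, assuming `ModelMessage` is exported from `@tanstack/ai` and using the `ToolCall` shape documented further below:

```ts
import type { ModelMessage } from '@tanstack/ai'

// A short history exercising only the fields documented above.
const messages: Array<ModelMessage> = [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'What is the weather in Berlin?' },
  {
    role: 'assistant',
    content: null, // content may be null when the model only emits tool calls
    toolCalls: [
      {
        id: 'call_1',
        type: 'function',
        function: { name: 'lookup_weather', arguments: '{"location":"Berlin"}' },
      },
    ],
  },
  // Tool results are fed back with role "tool" and the matching toolCallId.
  { role: 'tool', content: '{"forecast":"sunny"}', toolCallId: 'call_1' },
]
```
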
diff --git a/docs/reference/interfaces/SummarizationOptions.md b/docs/reference/interfaces/SummarizationOptions.md index dcd67c89..39432593 100644 --- a/docs/reference/interfaces/SummarizationOptions.md +++ b/docs/reference/interfaces/SummarizationOptions.md @@ -5,7 +5,7 @@ title: SummarizationOptions # Interface: SummarizationOptions -Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L375) +Defined in: [types.ts:390](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L390) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:375](https://github.com/TanStack/ai/blob/main/packages/typ optional focus: string[]; ``` -Defined in: [types.ts:380](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L380) +Defined in: [types.ts:395](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L395) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:380](https://github.com/TanStack/ai/blob/main/packages/typ optional maxLength: number; ``` -Defined in: [types.ts:378](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L378) +Defined in: [types.ts:393](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L393) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:378](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:376](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L376) +Defined in: [types.ts:391](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L391) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:376](https://github.com/TanStack/ai/blob/main/packages/typ optional style: "bullet-points" | "paragraph" | "concise"; ``` -Defined in: [types.ts:379](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L379) +Defined in: [types.ts:394](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L394) *** @@ -55,4 +55,4 @@ Defined in: [types.ts:379](https://github.com/TanStack/ai/blob/main/packages/typ text: string; ``` -Defined in: [types.ts:377](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L377) +Defined in: [types.ts:392](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L392) diff --git a/docs/reference/interfaces/SummarizationResult.md b/docs/reference/interfaces/SummarizationResult.md index a926eb93..88d928ae 100644 --- a/docs/reference/interfaces/SummarizationResult.md +++ b/docs/reference/interfaces/SummarizationResult.md @@ -5,7 +5,7 @@ title: SummarizationResult # Interface: SummarizationResult -Defined in: [types.ts:383](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L383) +Defined in: [types.ts:398](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L398) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:383](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:384](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L384) +Defined in: [types.ts:399](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L399) *** @@ -25,7 +25,7 @@ Defined in: [types.ts:384](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:385](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L385) +Defined in: 
[types.ts:400](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L400) *** @@ -35,7 +35,7 @@ Defined in: [types.ts:385](https://github.com/TanStack/ai/blob/main/packages/typ summary: string; ``` -Defined in: [types.ts:386](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L386) +Defined in: [types.ts:401](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L401) *** @@ -45,7 +45,7 @@ Defined in: [types.ts:386](https://github.com/TanStack/ai/blob/main/packages/typ usage: object; ``` -Defined in: [types.ts:387](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L387) +Defined in: [types.ts:402](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L402) #### completionTokens diff --git a/docs/reference/interfaces/ThinkingStreamChunk.md b/docs/reference/interfaces/ThinkingStreamChunk.md index 91a5fd21..d0364a65 100644 --- a/docs/reference/interfaces/ThinkingStreamChunk.md +++ b/docs/reference/interfaces/ThinkingStreamChunk.md @@ -5,7 +5,7 @@ title: ThinkingStreamChunk # Interface: ThinkingStreamChunk -Defined in: [types.ts:341](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L341) +Defined in: [types.ts:356](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L356) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:341](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:344](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L344) +Defined in: [types.ts:359](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L359) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:344](https://github.com/TanStack/ai/blob/main/packages/typ optional delta: string; ``` -Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L343) +Defined in: [types.ts:358](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L358) *** @@ -39,7 +39,7 @@ Defined in: [types.ts:343](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -53,7 +53,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -67,7 +67,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -81,7 +81,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ type: "thinking"; ``` -Defined in: [types.ts:342](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L342) +Defined in: [types.ts:357](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L357) #### Overrides diff --git 
a/docs/reference/interfaces/Tool.md b/docs/reference/interfaces/Tool.md index a0668f0f..b3986c13 100644 --- a/docs/reference/interfaces/Tool.md +++ b/docs/reference/interfaces/Tool.md @@ -3,144 +3,157 @@ id: Tool title: Tool --- -# Interface: Tool +# Interface: Tool\ -Defined in: [types.ts:29](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L29) +Defined in: [types.ts:32](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L32) Tool/Function definition for function calling. Tools allow the model to interact with external systems, APIs, or perform computations. The model will decide when to call tools based on the user's request and the tool descriptions. +Tools use Zod schemas for runtime validation and type safety. + ## See - https://platform.openai.com/docs/guides/function-calling - https://docs.anthropic.com/claude/docs/tool-use +## Type Parameters + +### TInput + +`TInput` *extends* `z.ZodType` = `z.ZodType` + +### TOutput + +`TOutput` *extends* `z.ZodType` = `z.ZodType` + ## Properties +### description + +```ts +description: string; +``` + +Defined in: [types.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L54) + +Clear description of what the tool does. + +This is crucial - the model uses this to decide when to call the tool. +Be specific about what the tool does, what parameters it needs, and what it returns. + +#### Example + +```ts +"Get the current weather in a given location. Returns temperature, conditions, and forecast." +``` + +*** + ### execute()? ```ts -optional execute: (args) => string | Promise; +optional execute: (args) => output | Promise>; ``` -Defined in: [types.ts:99](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L99) +Defined in: [types.ts:110](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L110) Optional function to execute when the model calls this tool. If provided, the SDK will automatically execute the function with the model's arguments and feed the result back to the model. This enables autonomous tool use loops. -Returns the result as a string (or Promise) to send back to the model. +Can return any value - will be automatically stringified if needed. #### Parameters ##### args -`any` +`output`\<`TInput`\> -The arguments parsed from the model's tool call (matches the parameters schema) +The arguments parsed from the model's tool call (validated against inputSchema) #### Returns -`string` \| `Promise`\<`string`\> +`output`\<`TOutput`\> \| `Promise`\<`output`\<`TOutput`\>\> -Result string to send back to the model +Result to send back to the model (validated against outputSchema if provided) #### Example ```ts execute: async (args) => { const weather = await fetchWeather(args.location); - return JSON.stringify(weather); + return weather; // Can return object or string } ``` *** -### function +### inputSchema ```ts -function: object; +inputSchema: TInput; ``` -Defined in: [types.ts:40](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L40) +Defined in: [types.ts:73](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L73) -Function definition and metadata. +Zod schema describing the tool's input parameters. -#### description - -```ts -description: string; -``` +Defines the structure and types of arguments the tool accepts. +The model will generate arguments matching this schema. +The schema is converted to JSON Schema for LLM providers. 
-Clear description of what the function does. +#### See -This is crucial - the model uses this to decide when to call the function. -Be specific about what the function does, what parameters it needs, and what it returns. +https://zod.dev/ -##### Example +#### Example ```ts -"Get the current weather in a given location. Returns temperature, conditions, and forecast." -``` +import { z } from 'zod'; -#### name - -```ts -name: string; +z.object({ + location: z.string().describe("City name or coordinates"), + unit: z.enum(["celsius", "fahrenheit"]).optional() +}) ``` -Unique name of the function (used by the model to call it). - -Should be descriptive and follow naming conventions (e.g., snake_case or camelCase). -Must be unique within the tools array. - -##### Example - -```ts -"get_weather", "search_database", "sendEmail" -``` +*** -#### parameters +### metadata? ```ts -parameters: Record; +optional metadata: Record; ``` -JSON Schema describing the function's parameters. +Defined in: [types.ts:118](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L118) -Defines the structure and types of arguments the function accepts. -The model will generate arguments matching this schema. - -##### See +Additional metadata for adapters or custom extensions -https://json-schema.org/ +*** -##### Example +### name ```ts -{ - * type: "object", - * properties: { - * location: { type: "string", description: "City name or coordinates" }, - * unit: { type: "string", enum: ["celsius", "fahrenheit"] } - * }, - * required: ["location"] - * } +name: string; ``` -*** +Defined in: [types.ts:44](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L44) -### metadata? +Unique name of the tool (used by the model to call it). + +Should be descriptive and follow naming conventions (e.g., snake_case or camelCase). +Must be unique within the tools array. + +#### Example ```ts -optional metadata: Record; +"get_weather", "search_database", "sendEmail" ``` -Defined in: [types.ts:103](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L103) - *** ### needsApproval? @@ -149,20 +162,34 @@ Defined in: [types.ts:103](https://github.com/TanStack/ai/blob/main/packages/typ optional needsApproval: boolean; ``` -Defined in: [types.ts:101](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L101) +Defined in: [types.ts:115](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L115) If true, tool execution requires user approval before running. Works with both server and client tools. *** -### type +### outputSchema? ```ts -type: "function"; +optional outputSchema: TOutput; ``` -Defined in: [types.ts:35](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L35) +Defined in: [types.ts:91](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L91) + +Optional Zod schema for validating tool output. + +If provided, tool results will be validated against this schema before +being sent back to the model. This catches bugs in tool implementations +and ensures consistent output formatting. -Type of tool - currently only "function" is supported. +Note: This is client-side validation only - not sent to LLM providers. -Future versions may support additional tool types. 
+#### Example + +```ts +z.object({ + temperature: z.number(), + conditions: z.string(), + forecast: z.array(z.string()).optional() +}) +``` diff --git a/docs/reference/interfaces/ToolCall.md b/docs/reference/interfaces/ToolCall.md index 4843850d..a5215973 100644 --- a/docs/reference/interfaces/ToolCall.md +++ b/docs/reference/interfaces/ToolCall.md @@ -5,7 +5,7 @@ title: ToolCall # Interface: ToolCall -Defined in: [types.ts:3](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L3) +Defined in: [types.ts:4](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L4) ## Properties @@ -15,7 +15,7 @@ Defined in: [types.ts:3](https://github.com/TanStack/ai/blob/main/packages/types function: object; ``` -Defined in: [types.ts:6](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L6) +Defined in: [types.ts:7](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L7) #### arguments @@ -37,7 +37,7 @@ name: string; id: string; ``` -Defined in: [types.ts:4](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L4) +Defined in: [types.ts:5](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L5) *** @@ -47,4 +47,4 @@ Defined in: [types.ts:4](https://github.com/TanStack/ai/blob/main/packages/types type: "function"; ``` -Defined in: [types.ts:5](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L5) +Defined in: [types.ts:6](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L6) diff --git a/docs/reference/interfaces/ToolCallStreamChunk.md b/docs/reference/interfaces/ToolCallStreamChunk.md index 92507c37..aa4c435a 100644 --- a/docs/reference/interfaces/ToolCallStreamChunk.md +++ b/docs/reference/interfaces/ToolCallStreamChunk.md @@ -5,7 +5,7 @@ title: ToolCallStreamChunk # Interface: ToolCallStreamChunk -Defined in: [types.ts:286](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L286) +Defined in: [types.ts:301](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L301) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:286](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -33,7 +33,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ index: number; ``` -Defined in: [types.ts:296](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L296) +Defined in: [types.ts:311](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L311) *** @@ -43,7 +43,7 @@ Defined in: [types.ts:296](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) 
#### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ toolCall: object; ``` -Defined in: [types.ts:288](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L288) +Defined in: [types.ts:303](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L303) #### function @@ -111,7 +111,7 @@ type: "function"; type: "tool_call"; ``` -Defined in: [types.ts:287](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L287) +Defined in: [types.ts:302](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L302) #### Overrides diff --git a/docs/reference/interfaces/ToolConfig.md b/docs/reference/interfaces/ToolConfig.md index 0cecdd3d..6f397307 100644 --- a/docs/reference/interfaces/ToolConfig.md +++ b/docs/reference/interfaces/ToolConfig.md @@ -5,10 +5,10 @@ title: ToolConfig # Interface: ToolConfig -Defined in: [types.ts:106](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L106) +Defined in: [types.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L121) ## Indexable ```ts -[key: string]: Tool +[key: string]: Tool>, ZodType>> ``` diff --git a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md index c3f29478..dd8d5d67 100644 --- a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md +++ b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md @@ -5,7 +5,7 @@ title: ToolInputAvailableStreamChunk # Interface: ToolInputAvailableStreamChunk -Defined in: [types.ts:334](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L334) +Defined in: [types.ts:349](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L349) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:334](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -33,7 +33,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ input: any; ``` -Defined in: [types.ts:338](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L338) +Defined in: [types.ts:353](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L353) *** @@ -43,7 +43,7 @@ Defined in: [types.ts:338](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:336](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L336) +Defined in: 
[types.ts:351](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L351) *** @@ -81,7 +81,7 @@ Defined in: [types.ts:336](https://github.com/TanStack/ai/blob/main/packages/typ toolName: string; ``` -Defined in: [types.ts:337](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L337) +Defined in: [types.ts:352](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L352) *** @@ -91,7 +91,7 @@ Defined in: [types.ts:337](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool-input-available"; ``` -Defined in: [types.ts:335](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L335) +Defined in: [types.ts:350](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L350) #### Overrides diff --git a/docs/reference/interfaces/ToolResultStreamChunk.md b/docs/reference/interfaces/ToolResultStreamChunk.md index 58085881..5e82d356 100644 --- a/docs/reference/interfaces/ToolResultStreamChunk.md +++ b/docs/reference/interfaces/ToolResultStreamChunk.md @@ -5,7 +5,7 @@ title: ToolResultStreamChunk # Interface: ToolResultStreamChunk -Defined in: [types.ts:299](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L299) +Defined in: [types.ts:314](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L314) ## Extends @@ -19,7 +19,7 @@ Defined in: [types.ts:299](https://github.com/TanStack/ai/blob/main/packages/typ content: string; ``` -Defined in: [types.ts:302](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L302) +Defined in: [types.ts:317](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L317) *** @@ -29,7 +29,7 @@ Defined in: [types.ts:302](https://github.com/TanStack/ai/blob/main/packages/typ id: string; ``` -Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L274) +Defined in: [types.ts:289](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L289) #### Inherited from @@ -43,7 +43,7 @@ Defined in: [types.ts:274](https://github.com/TanStack/ai/blob/main/packages/typ model: string; ``` -Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L275) +Defined in: [types.ts:290](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L290) #### Inherited from @@ -57,7 +57,7 @@ Defined in: [types.ts:275](https://github.com/TanStack/ai/blob/main/packages/typ timestamp: number; ``` -Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L276) +Defined in: [types.ts:291](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L291) #### Inherited from @@ -71,7 +71,7 @@ Defined in: [types.ts:276](https://github.com/TanStack/ai/blob/main/packages/typ toolCallId: string; ``` -Defined in: [types.ts:301](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L301) +Defined in: [types.ts:316](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L316) *** @@ -81,7 +81,7 @@ Defined in: [types.ts:301](https://github.com/TanStack/ai/blob/main/packages/typ type: "tool_result"; ``` -Defined in: [types.ts:300](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L300) +Defined in: [types.ts:315](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L315) #### Overrides diff --git 
a/docs/reference/type-aliases/AgentLoopStrategy.md b/docs/reference/type-aliases/AgentLoopStrategy.md index 371cb3c3..b7b765af 100644 --- a/docs/reference/type-aliases/AgentLoopStrategy.md +++ b/docs/reference/type-aliases/AgentLoopStrategy.md @@ -9,7 +9,7 @@ title: AgentLoopStrategy type AgentLoopStrategy = (state) => boolean; ``` -Defined in: [types.ts:226](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L226) +Defined in: [types.ts:241](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L241) Strategy function that determines whether the agent loop should continue diff --git a/docs/reference/type-aliases/ChatStreamOptionsUnion.md b/docs/reference/type-aliases/ChatStreamOptionsUnion.md index 3599785a..91b00590 100644 --- a/docs/reference/type-aliases/ChatStreamOptionsUnion.md +++ b/docs/reference/type-aliases/ChatStreamOptionsUnion.md @@ -9,7 +9,7 @@ title: ChatStreamOptionsUnion type ChatStreamOptionsUnion = TAdapter extends AIAdapter ? Models[number] extends infer TModel ? TModel extends string ? Omit & object : never : never : never; ``` -Defined in: [types.ts:470](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L470) +Defined in: [types.ts:485](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L485) ## Type Parameters diff --git a/docs/reference/type-aliases/ExtractModelsFromAdapter.md b/docs/reference/type-aliases/ExtractModelsFromAdapter.md index 1e5ab252..2a030e63 100644 --- a/docs/reference/type-aliases/ExtractModelsFromAdapter.md +++ b/docs/reference/type-aliases/ExtractModelsFromAdapter.md @@ -9,7 +9,7 @@ title: ExtractModelsFromAdapter type ExtractModelsFromAdapter = T extends AIAdapter ? M[number] : never; ``` -Defined in: [types.ts:494](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L494) +Defined in: [types.ts:509](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L509) ## Type Parameters diff --git a/docs/reference/type-aliases/StreamChunk.md b/docs/reference/type-aliases/StreamChunk.md index 758a3973..b10b827b 100644 --- a/docs/reference/type-aliases/StreamChunk.md +++ b/docs/reference/type-aliases/StreamChunk.md @@ -17,6 +17,6 @@ type StreamChunk = | ThinkingStreamChunk; ``` -Defined in: [types.ts:350](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L350) +Defined in: [types.ts:365](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L365) Chunk returned by the sdk during streaming chat completions. 
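
The Tool.md rewrite above replaces the JSON Schema `function`/`parameters` block with Zod `inputSchema`/`outputSchema` fields and an `execute` callback typed by those schemas. A minimal sketch of a tool defined against that documented shape, assuming `tool()` is exported from `@tanstack/ai` (its reference page is updated in the later patch of this series) and treating `fetchWeather` as a purely hypothetical helper:

```ts
import { z } from 'zod'
import { tool } from '@tanstack/ai'

// Hypothetical weather lookup, stubbed for illustration only.
async function fetchWeather(location: string) {
  return { temperature: 21, conditions: `Sunny in ${location}` }
}

const getWeather = tool({
  name: 'get_weather',
  description:
    'Get the current weather in a given location. Returns temperature and conditions.',
  // The model's arguments are validated against inputSchema before execute runs.
  inputSchema: z.object({
    location: z.string().describe('City name or coordinates'),
    unit: z.enum(['celsius', 'fahrenheit']).optional(),
  }),
  // Optional client-side validation of the result before it is sent back to the model.
  outputSchema: z.object({
    temperature: z.number(),
    conditions: z.string(),
  }),
  // execute may return an object; it is stringified as needed for the provider.
  execute: async (args) => fetchWeather(args.location),
})
```

The adapters in this series convert `inputSchema` to JSON Schema via `convertZodToJsonSchema` before sending it to the provider, which is why the later patch can drop the placeholder `z.object({})` schemas from the provider-specific tool factories and make `inputSchema` optional.
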
diff --git a/docs/reference/type-aliases/StreamChunkType.md b/docs/reference/type-aliases/StreamChunkType.md index 1fface01..33980eed 100644 --- a/docs/reference/type-aliases/StreamChunkType.md +++ b/docs/reference/type-aliases/StreamChunkType.md @@ -17,4 +17,4 @@ type StreamChunkType = | "thinking"; ``` -Defined in: [types.ts:262](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L262) +Defined in: [types.ts:277](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L277) diff --git a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts index 14c3ee11..309a1b5c 100644 --- a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts @@ -1,5 +1,5 @@ -import { convertZodToJsonSchema } from '@tanstack/ai' -import type {Tool} from '@tanstack/ai'; +import { convertZodToJsonSchema } from '@tanstack/ai' +import type { Tool } from '@tanstack/ai' import type { z } from 'zod' import type { CacheControl } from '../text/text-provider-options' diff --git a/packages/typescript/ai-gemini/src/tools/tool-converter.ts b/packages/typescript/ai-gemini/src/tools/tool-converter.ts index a31bd6c8..f0a239ca 100644 --- a/packages/typescript/ai-gemini/src/tools/tool-converter.ts +++ b/packages/typescript/ai-gemini/src/tools/tool-converter.ts @@ -1,4 +1,4 @@ -import { convertZodToJsonSchema } from '@tanstack/ai' +import { convertZodToJsonSchema } from '@tanstack/ai' import { convertCodeExecutionToolToAdapterFormat } from './code-execution-tool' import { convertComputerUseToolToAdapterFormat } from './computer-use-tool' import { convertFileSearchToolToAdapterFormat } from './file-search-tool' @@ -6,7 +6,7 @@ import { convertGoogleMapsToolToAdapterFormat } from './google-maps-tool' import { convertGoogleSearchRetrievalToolToAdapterFormat } from './google-search-retriveal-tool' import { convertGoogleSearchToolToAdapterFormat } from './google-search-tool' import { convertUrlContextToolToAdapterFormat } from './url-context-tool' -import type {Tool} from '@tanstack/ai'; +import type { Tool } from '@tanstack/ai' import type { ToolUnion } from '@google/genai' /** diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts index 51702b70..e9f85129 100644 --- a/packages/typescript/ai-openai/src/tools/function-tool.ts +++ b/packages/typescript/ai-openai/src/tools/function-tool.ts @@ -1,5 +1,5 @@ -import { convertZodToJsonSchema } from '@tanstack/ai' -import type {Tool} from '@tanstack/ai'; +import { convertZodToJsonSchema } from '@tanstack/ai' +import type { Tool } from '@tanstack/ai' import type OpenAI from 'openai' export type FunctionTool = OpenAI.Responses.FunctionTool From 5f5c59c50293339df10e0a71bf8a4a1271463b0b Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Fri, 28 Nov 2025 12:28:20 +0100 Subject: [PATCH 6/6] add improvements to input/output schema (#38) * add improvements to input/output schema * remove old files * ci: apply automated fixes * remove old files * remove old files * fix types for anthropic and gemini * ci: apply automated fixes * lock fix --------- Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- docs/reference/classes/ToolCallManager.md | 2 +- .../functions/convertZodToJsonSchema.md | 12 +++--- docs/reference/functions/tool.md | 28 +------------ docs/reference/interfaces/Tool.md | 4 +- 
packages/typescript/ai-anthropic/package.json | 6 +-- .../ai-anthropic/src/anthropic-adapter.ts | 22 ++++------- .../ai-anthropic/src/tools/bash-tool.ts | 2 - .../src/tools/code-execution-tool.ts | 2 - .../src/tools/computer-use-tool.ts | 2 - .../ai-anthropic/src/tools/custom-tool.ts | 4 +- .../ai-anthropic/src/tools/memory-tool.ts | 15 ++++--- .../src/tools/text-editor-tool.ts | 2 - .../ai-anthropic/src/tools/web-fetch-tool.ts | 39 +++---------------- .../ai-anthropic/src/tools/web-search-tool.ts | 2 - packages/typescript/ai-gemini/package.json | 3 +- .../ai-gemini/src/gemini-adapter.ts | 18 +++------ .../src/tools/code-execution-tool.ts | 2 - .../ai-gemini/src/tools/computer-use-tool.ts | 2 - .../ai-gemini/src/tools/file-search-tool.ts | 2 - .../ai-gemini/src/tools/google-maps-tool.ts | 2 - .../src/tools/google-search-retriveal-tool.ts | 2 - .../ai-gemini/src/tools/google-search-tool.ts | 2 - .../ai-gemini/src/tools/url-context-tool.ts | 2 - .../ai-gemini/tests/gemini-adapter.test.ts | 28 +++++++------ packages/typescript/ai-openai/package.json | 6 +-- .../ai-openai/src/tools/apply-patch-tool.ts | 2 - .../src/tools/code-interpreter-tool.ts | 2 - .../ai-openai/src/tools/computer-use-tool.ts | 2 - .../ai-openai/src/tools/custom-tool.ts | 2 - .../ai-openai/src/tools/file-search-tool.ts | 2 - .../ai-openai/src/tools/function-tool.ts | 8 ++-- .../src/tools/image-generation-tool.ts | 2 - .../ai-openai/src/tools/local-shell-tool.ts | 2 - .../ai-openai/src/tools/mcp-tool.ts | 2 - .../ai-openai/src/tools/shell-tool.ts | 2 - .../src/tools/web-search-preview-tool.ts | 2 - .../ai-openai/src/tools/web-search-tool.ts | 2 - .../ai-openai/tests/openai-adapter.test.ts | 4 -- .../typescript/ai/src/tools/tool-calls.ts | 20 +++++----- .../typescript/ai/src/tools/tool-utils.ts | 14 +------ .../typescript/ai/src/tools/zod-converter.ts | 25 ++++++------ packages/typescript/ai/src/types.ts | 2 +- .../smoke-tests/adapters/tsconfig.json | 2 +- pnpm-lock.yaml | 12 ++---- 44 files changed, 89 insertions(+), 231 deletions(-) diff --git a/docs/reference/classes/ToolCallManager.md b/docs/reference/classes/ToolCallManager.md index 12622dc1..8c6c9a0a 100644 --- a/docs/reference/classes/ToolCallManager.md +++ b/docs/reference/classes/ToolCallManager.md @@ -126,7 +126,7 @@ Handles streaming tool calls by accumulating arguments clear(): void; ``` -Defined in: [tools/tool-calls.ts:194](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L194) +Defined in: [tools/tool-calls.ts:193](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L193) Clear the tool calls map for the next iteration diff --git a/docs/reference/functions/convertZodToJsonSchema.md b/docs/reference/functions/convertZodToJsonSchema.md index 326e7233..8b0526ab 100644 --- a/docs/reference/functions/convertZodToJsonSchema.md +++ b/docs/reference/functions/convertZodToJsonSchema.md @@ -6,26 +6,24 @@ title: convertZodToJsonSchema # Function: convertZodToJsonSchema() ```ts -function convertZodToJsonSchema(schema): Record; +function convertZodToJsonSchema(schema): Record | undefined; ``` -Defined in: [tools/zod-converter.ts:33](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/zod-converter.ts#L33) +Defined in: [tools/zod-converter.ts:31](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/zod-converter.ts#L31) Converts a Zod schema to JSON Schema format compatible with LLM providers. 
-Uses @alcyone-labs/zod-to-json-schema which is compatible with Zod v4. - ## Parameters ### schema -`ZodType` - Zod schema to convert +`ZodType`\<`unknown`, `unknown`, `$ZodTypeInternals`\<`unknown`, `unknown`\>\> | `undefined` + ## Returns -`Record`\<`string`, `any`\> +`Record`\<`string`, `any`\> \| `undefined` JSON Schema object that can be sent to LLM providers diff --git a/docs/reference/functions/tool.md b/docs/reference/functions/tool.md index a0655a98..fc4d1e70 100644 --- a/docs/reference/functions/tool.md +++ b/docs/reference/functions/tool.md @@ -30,33 +30,7 @@ full type safety for tool inputs and outputs. ### config -#### description - -`string` - -#### execute? - -(`args`) => `output`\<`TOutput`\> \| `Promise`\<`output`\<`TOutput`\>\> - -#### inputSchema - -`TInput` - -#### metadata? - -`Record`\<`string`, `any`\> - -#### name - -`string` - -#### needsApproval? - -`boolean` - -#### outputSchema? - -`TOutput` +[`Tool`](../../interfaces/Tool.md)\<`TInput`, `TOutput`\> ## Returns diff --git a/docs/reference/interfaces/Tool.md b/docs/reference/interfaces/Tool.md index b3986c13..9a63b5ff 100644 --- a/docs/reference/interfaces/Tool.md +++ b/docs/reference/interfaces/Tool.md @@ -92,10 +92,10 @@ execute: async (args) => { *** -### inputSchema +### inputSchema? ```ts -inputSchema: TInput; +optional inputSchema: TInput; ``` Defined in: [types.ts:73](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L73) diff --git a/packages/typescript/ai-anthropic/package.json b/packages/typescript/ai-anthropic/package.json index 1e9272c2..7f2b0650 100644 --- a/packages/typescript/ai-anthropic/package.json +++ b/packages/typescript/ai-anthropic/package.json @@ -41,11 +41,11 @@ }, "dependencies": { "@anthropic-ai/sdk": "^0.71.0", - "@tanstack/ai": "workspace:*", - "zod": "^4.1.13" + "@tanstack/ai": "workspace:*" }, "devDependencies": { - "@vitest/coverage-v8": "4.0.14" + "@vitest/coverage-v8": "4.0.14", + "zod": "^4.1.13" }, "peerDependencies": { "@tanstack/ai": "workspace:*" diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts index 599c16de..6a7f4577 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts @@ -4,7 +4,7 @@ import { ANTHROPIC_MODELS } from './model-meta' import { convertToolsToProviderFormat } from './tools/tool-converter' import { validateTextProviderOptions } from './text/text-provider-options' import type { - ChatStreamOptionsUnion, + ChatOptions, EmbeddingOptions, EmbeddingResult, ModelMessage, @@ -36,16 +36,6 @@ type AnthropicContentBlocks = type AnthropicContentBlock = AnthropicContentBlocks extends Array ? 
Block : never -type AnthropicChatOptions = ChatStreamOptionsUnion< - BaseAdapter< - typeof ANTHROPIC_MODELS, - [], - AnthropicProviderOptions, - Record, - AnthropicChatModelProviderOptionsByName - > -> - export class Anthropic extends BaseAdapter< typeof ANTHROPIC_MODELS, [], @@ -67,7 +57,9 @@ export class Anthropic extends BaseAdapter< }) } - async *chatStream(options: AnthropicChatOptions): AsyncIterable { + async *chatStream( + options: ChatOptions, + ): AsyncIterable { try { // Map common options to Anthropic format using the centralized mapping function const requestParams = this.mapCommonOptionsToAnthropic(options) @@ -136,7 +128,7 @@ export class Anthropic extends BaseAdapter< } } - async createEmbeddings(_options: EmbeddingOptions): Promise { + createEmbeddings(_options: EmbeddingOptions): Promise { // Note: Anthropic doesn't have a native embeddings API // You would need to use a different service or implement a workaround throw new Error( @@ -176,7 +168,9 @@ export class Anthropic extends BaseAdapter< * Maps common options to Anthropic-specific format * Handles translation of normalized options to Anthropic's API format */ - private mapCommonOptionsToAnthropic(options: AnthropicChatOptions) { + private mapCommonOptionsToAnthropic( + options: ChatOptions, + ) { const providerOptions = options.providerOptions as | InternalTextProviderOptions | undefined diff --git a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts index fd97b815..b1b6abea 100644 --- a/packages/typescript/ai-anthropic/src/tools/bash-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/bash-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { BetaToolBash20241022, BetaToolBash20250124, @@ -15,7 +14,6 @@ export function bashTool(config: BashTool): Tool { return { name: 'bash', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts index 8fc58fef..7fdb6f84 100644 --- a/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/code-execution-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { BetaCodeExecutionTool20250522, BetaCodeExecutionTool20250825, @@ -20,7 +19,6 @@ export function codeExecutionTool(config: CodeExecutionTool): Tool { return { name: 'code_execution', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts index 2f895a9c..18c2b7e3 100644 --- a/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/computer-use-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { BetaToolComputerUse20241022, BetaToolComputerUse20250124, @@ -20,7 +19,6 @@ export function computerUseTool(config: ComputerUseTool): Tool { return { name: 'computer', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts index 309a1b5c..07ac10a3 100644 --- a/packages/typescript/ai-anthropic/src/tools/custom-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/custom-tool.ts @@ -34,8 +34,8 @@ export function convertCustomToolToAdapterFormat(tool: Tool): CustomTool { const 
inputSchema = { type: 'object' as const, - properties: jsonSchema.properties || null, - required: jsonSchema.required || null, + properties: jsonSchema?.properties || null, + required: jsonSchema?.required || null, } return { diff --git a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts index a35ecaa7..f4fca307 100644 --- a/packages/typescript/ai-anthropic/src/tools/memory-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/memory-tool.ts @@ -1,21 +1,20 @@ -import { z } from 'zod' import type { BetaMemoryTool20250818 } from '@anthropic-ai/sdk/resources/beta' import type { Tool } from '@tanstack/ai' export type MemoryTool = BetaMemoryTool20250818 export function convertMemoryToolToAdapterFormat(tool: Tool): MemoryTool { - const metadata = tool.metadata as MemoryTool - return metadata + const metadata = tool.metadata as Omit + return { + type: 'memory_20250818', + ...metadata, + } } -export function memoryTool(cacheControl?: MemoryTool): Tool { +export function memoryTool(config?: MemoryTool): Tool { return { name: 'memory', description: '', - inputSchema: z.object({}), - metadata: { - cacheControl, - }, + metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts index e784d52d..5ac36122 100644 --- a/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/text-editor-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { ToolTextEditor20250124, ToolTextEditor20250429, @@ -24,7 +23,6 @@ export function textEditorTool(config: T): Tool { return { name: 'str_replace_editor', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts index b1f3edad..e0a03fcc 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-fetch-tool.ts @@ -1,50 +1,23 @@ -import { z } from 'zod' import type { BetaWebFetchTool20250910 } from '@anthropic-ai/sdk/resources/beta' -import type { CacheControl } from '../text/text-provider-options' import type { Tool } from '@tanstack/ai' export type WebFetchTool = BetaWebFetchTool20250910 export function convertWebFetchToolToAdapterFormat(tool: Tool): WebFetchTool { - const metadata = tool.metadata as { - allowedDomains?: Array | null - blockedDomains?: Array | null - maxUses?: number | null - citations?: { enabled?: boolean } | null - maxContentTokens?: number | null - cacheControl?: CacheControl | null - } + const metadata = tool.metadata as Omit return { name: 'web_fetch', type: 'web_fetch_20250910', - allowed_domains: metadata.allowedDomains, - blocked_domains: metadata.blockedDomains, - max_uses: metadata.maxUses, - citations: metadata.citations, - max_content_tokens: metadata.maxContentTokens, - cache_control: metadata.cacheControl || null, + ...metadata, } } -export function webFetchTool(config?: { - allowedDomains?: Array | null - blockedDomains?: Array | null - maxUses?: number | null - citations?: { enabled?: boolean } | null - maxContentTokens?: number | null - cacheControl?: CacheControl | null -}): Tool { +export function webFetchTool( + config?: Omit, +): Tool { return { name: 'web_fetch', description: '', - inputSchema: z.object({}), - metadata: { - allowedDomains: config?.allowedDomains, - blockedDomains: 
config?.blockedDomains, - maxUses: config?.maxUses, - citations: config?.citations, - maxContentTokens: config?.maxContentTokens, - cacheControl: config?.cacheControl, - }, + metadata: config, } } diff --git a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts index a764acb7..27f30695 100644 --- a/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts +++ b/packages/typescript/ai-anthropic/src/tools/web-search-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { WebSearchTool20250305 } from '@anthropic-ai/sdk/resources/messages' import type { CacheControl } from '../text/text-provider-options' import type { Tool } from '@tanstack/ai' @@ -77,7 +76,6 @@ export function webSearchTool(config: WebSearchTool): Tool { return { name: 'web_search', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/package.json b/packages/typescript/ai-gemini/package.json index 28902d75..5ec5c90f 100644 --- a/packages/typescript/ai-gemini/package.json +++ b/packages/typescript/ai-gemini/package.json @@ -41,8 +41,7 @@ ], "dependencies": { "@google/genai": "^1.30.0", - "@tanstack/ai": "workspace:*", - "zod": "^4.1.13" + "@tanstack/ai": "workspace:*" }, "devDependencies": { "@vitest/coverage-v8": "4.0.14", diff --git a/packages/typescript/ai-gemini/src/gemini-adapter.ts b/packages/typescript/ai-gemini/src/gemini-adapter.ts index 29ece983..61fea078 100644 --- a/packages/typescript/ai-gemini/src/gemini-adapter.ts +++ b/packages/typescript/ai-gemini/src/gemini-adapter.ts @@ -4,7 +4,7 @@ import { GEMINI_EMBEDDING_MODELS, GEMINI_MODELS } from './model-meta' import { convertToolsToProviderFormat } from './tools/tool-converter' import type { AIAdapterConfig, - ChatStreamOptionsUnion, + ChatOptions, EmbeddingOptions, EmbeddingResult, ModelMessage, @@ -27,16 +27,6 @@ export interface GeminiAdapterConfig extends AIAdapterConfig { */ export type GeminiProviderOptions = ExternalTextProviderOptions -type ChatOptions = ChatStreamOptionsUnion< - BaseAdapter< - typeof GEMINI_MODELS, - typeof GEMINI_EMBEDDING_MODELS, - GeminiProviderOptions, - Record, - GeminiChatModelProviderOptionsByName - > -> - export class GeminiAdapter extends BaseAdapter< typeof GEMINI_MODELS, typeof GEMINI_EMBEDDING_MODELS, @@ -57,7 +47,9 @@ export class GeminiAdapter extends BaseAdapter< }) } - async *chatStream(options: ChatOptions): AsyncIterable { + async *chatStream( + options: ChatOptions, + ): AsyncIterable { // Map common options to Gemini format const mappedOptions = this.mapCommonOptionsToGemini(options) @@ -99,7 +91,7 @@ export class GeminiAdapter extends BaseAdapter< return { id: this.generateId(), - model: options.model || 'gemini-pro', + model: options.model, summary, usage: { promptTokens, diff --git a/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts b/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts index e3f35d05..04a2ebba 100644 --- a/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/code-execution-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export interface CodeExecutionTool {} @@ -13,7 +12,6 @@ export function codeExecutionTool(): Tool { return { name: 'code_execution', description: '', - inputSchema: z.object({}), metadata: {}, } } diff --git a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts 
b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts index e54a8b3e..b4a49f0e 100644 --- a/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/computer-use-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { ComputerUse } from '@google/genai' import type { Tool } from '@tanstack/ai' @@ -18,7 +17,6 @@ export function computerUseTool(config: ComputerUseTool): Tool { return { name: 'computer_use', description: '', - inputSchema: z.object({}), metadata: { environment: config.environment, excludedPredefinedFunctions: config.excludedPredefinedFunctions, diff --git a/packages/typescript/ai-gemini/src/tools/file-search-tool.ts b/packages/typescript/ai-gemini/src/tools/file-search-tool.ts index 0e8c33b0..87b9286b 100644 --- a/packages/typescript/ai-gemini/src/tools/file-search-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/file-search-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { Tool } from '@tanstack/ai' import type { FileSearch } from '@google/genai' @@ -15,7 +14,6 @@ export function fileSearchTool(config: FileSearchTool): Tool { return { name: 'file_search', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts index edc60044..42e3c27c 100644 --- a/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-maps-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { GoogleMaps } from '@google/genai' import type { Tool } from '@tanstack/ai' @@ -15,7 +14,6 @@ export function googleMapsTool(config?: GoogleMapsTool): Tool { return { name: 'google_maps', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts index 6d511b6e..24611c6e 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-retriveal-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { GoogleSearchRetrieval } from '@google/genai' import type { Tool } from '@tanstack/ai' @@ -17,7 +16,6 @@ export function googleSearchRetrievalTool( return { name: 'google_search_retrieval', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts index 6550eed2..cd72f42d 100644 --- a/packages/typescript/ai-gemini/src/tools/google-search-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/google-search-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { GoogleSearch } from '@google/genai' import type { Tool } from '@tanstack/ai' @@ -15,7 +14,6 @@ export function googleSearchTool(config?: GoogleSearchTool): Tool { return { name: 'google_search', description: '', - inputSchema: z.object({}), metadata: config, } } diff --git a/packages/typescript/ai-gemini/src/tools/url-context-tool.ts b/packages/typescript/ai-gemini/src/tools/url-context-tool.ts index 9a6796b8..3518dd7f 100644 --- a/packages/typescript/ai-gemini/src/tools/url-context-tool.ts +++ b/packages/typescript/ai-gemini/src/tools/url-context-tool.ts @@ -1,4 +1,3 @@ -import { z } from 'zod' import type { Tool } from '@tanstack/ai' export interface UrlContextTool {} @@ 
-13,7 +12,6 @@ export function urlContextTool(): Tool {
   return {
     name: 'url_context',
     description: '',
-    inputSchema: z.object({}),
     metadata: {},
   }
 }
diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
index ff7b5ab6..f37d4475 100644
--- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
+++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
@@ -1,17 +1,17 @@
 import { describe, it, expect, beforeEach, vi } from 'vitest'
 import { chat, summarize, embedding } from '@tanstack/ai'
 import type { Tool, StreamChunk } from '@tanstack/ai'
-import type {
-  HarmBlockThreshold,
-  HarmCategory,
-  SafetySetting,
+import {
+  Type,
+  type HarmBlockThreshold,
+  type HarmCategory,
+  type SafetySetting,
 } from '@google/genai'
-import type { Schema } from '../src/tools/function-declaration-tool'
 import {
   GeminiAdapter,
   type GeminiProviderOptions,
 } from '../src/gemini-adapter'
-import { z } from 'zod'
+import type { Schema } from '@google/genai'
 
 const mocks = vi.hoisted(() => {
   return {
@@ -23,7 +23,7 @@ const mocks = vi.hoisted(() => {
   }
 })
 
-vi.mock('@google/genai', () => {
+vi.mock('@google/genai', async () => {
   const {
     constructorSpy,
     generateContentSpy,
@@ -32,6 +32,7 @@ vi.mock('@google/genai', () => {
     getGenerativeModelSpy,
   } = mocks
 
+  const actual = await vi.importActual('@google/genai')
   class MockGoogleGenAI {
     public models = {
       generateContent: generateContentSpy,
@@ -46,7 +47,7 @@ vi.mock('@google/genai', () => {
     }
   }
 
-  return { GoogleGenAI: MockGoogleGenAI }
+  return { GoogleGenAI: MockGoogleGenAI, Type: actual.Type }
 })
 
 const createAdapter = () => new GeminiAdapter({ apiKey: 'test-key' })
@@ -54,9 +55,6 @@ const createAdapter = () => new GeminiAdapter({ apiKey: 'test-key' })
 const weatherTool: Tool = {
   name: 'lookup_weather',
   description: 'Return the weather for a location',
-  inputSchema: z.object({
-    location: z.string(),
-  }),
 }
 
 const createStream = (chunks: Array>) => {
@@ -158,16 +156,16 @@ describe('GeminiAdapter through AI', () => {
     ]
 
     const responseSchema: Schema = {
-      type: 'OBJECT',
+      type: Type.OBJECT,
       properties: {
-        summary: { type: 'STRING' },
+        summary: { type: Type.STRING },
       },
     }
 
     const responseJsonSchema: Schema = {
-      type: 'OBJECT',
+      type: Type.OBJECT,
       properties: {
-        ok: { type: 'BOOLEAN' },
+        ok: { type: Type.BOOLEAN },
       },
     }
diff --git a/packages/typescript/ai-openai/package.json b/packages/typescript/ai-openai/package.json
index 30092a0d..cda0f794 100644
--- a/packages/typescript/ai-openai/package.json
+++ b/packages/typescript/ai-openai/package.json
@@ -41,13 +41,11 @@
   ],
   "dependencies": {
     "@tanstack/ai": "workspace:*",
-    "openai": "^6.9.1",
-    "zod": "^4.1.13"
+    "openai": "^6.9.1"
   },
   "devDependencies": {
     "@vitest/coverage-v8": "4.0.14",
-    "vite": "^7.2.4",
-    "zod": "^4.1.13"
+    "vite": "^7.2.4"
   },
   "peerDependencies": {
     "@tanstack/ai": "workspace:*"
diff --git a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
index 61a664ce..8e73cc89 100644
--- a/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/apply-patch-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -22,7 +21,6 @@ export function applyPatchTool(): Tool {
   return {
     name: 'apply_patch',
     description: 'Apply a patch to modify files',
-    inputSchema: z.object({}),
     metadata: {},
   }
 }
diff --git a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
index 529fd22c..15bd8e42 100644
--- a/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/code-interpreter-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type { Tool } from '@tanstack/ai'
 import type OpenAI from 'openai'
 
@@ -24,7 +23,6 @@ export function codeInterpreterTool(container: CodeInterpreterTool): Tool {
   return {
     name: 'code_interpreter',
     description: 'Execute code in a sandboxed environment',
-    inputSchema: z.object({}),
     metadata: {
       type: 'code_interpreter',
       container,
diff --git a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
index a266fd0c..1a19b573 100644
--- a/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/computer-use-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -25,7 +24,6 @@ export function computerUseTool(toolData: ComputerUseTool): Tool {
   return {
     name: 'computer_use_preview',
     description: 'Control a virtual computer',
-    inputSchema: z.object({}),
     metadata: {
       ...toolData,
     },
diff --git a/packages/typescript/ai-openai/src/tools/custom-tool.ts b/packages/typescript/ai-openai/src/tools/custom-tool.ts
index 9cf52319..ad7de4d2 100644
--- a/packages/typescript/ai-openai/src/tools/custom-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/custom-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -24,7 +23,6 @@ export function customTool(toolData: CustomTool): Tool {
   return {
     name: 'custom',
     description: toolData.description || 'A custom tool',
-    inputSchema: z.object({}),
     metadata: {
       ...toolData,
     },
diff --git a/packages/typescript/ai-openai/src/tools/file-search-tool.ts b/packages/typescript/ai-openai/src/tools/file-search-tool.ts
index 2bfd9a59..0fc85f06 100644
--- a/packages/typescript/ai-openai/src/tools/file-search-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/file-search-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -36,7 +35,6 @@ export function fileSearchTool(
   return {
     name: 'file_search',
     description: 'Search files in vector stores',
-    inputSchema: z.object({}),
     metadata: {
       ...toolData,
     },
diff --git a/packages/typescript/ai-openai/src/tools/function-tool.ts b/packages/typescript/ai-openai/src/tools/function-tool.ts
index e9f85129..60737b4c 100644
--- a/packages/typescript/ai-openai/src/tools/function-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/function-tool.ts
@@ -9,12 +9,14 @@ export type FunctionTool = OpenAI.Responses.FunctionTool
  */
 export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool {
   // Convert Zod schema to JSON Schema
-  const jsonSchema = convertZodToJsonSchema(tool.inputSchema)
+  const jsonSchema = tool.inputSchema
+    ? convertZodToJsonSchema(tool.inputSchema)
+    : undefined
 
   // Determine if we can use strict mode
   // Strict mode requires all properties to be in the required array
-  const properties = jsonSchema.properties || {}
-  const required = jsonSchema.required || []
+  const properties = jsonSchema?.properties || {}
+  const required = jsonSchema?.required || []
   const propertyNames = Object.keys(properties)
 
   // Only enable strict mode if all properties are required
diff --git a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
index d9e89b6a..c48ff1e0 100644
--- a/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/image-generation-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -33,7 +32,6 @@ export function imageGenerationTool(
   return {
     name: 'image_generation',
     description: 'Generate images based on text descriptions',
-    inputSchema: z.object({}),
     metadata: {
       ...toolData,
     },
diff --git a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
index d6ca2543..ed829cb2 100644
--- a/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/local-shell-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -22,7 +21,6 @@ export function localShellTool(): Tool {
   return {
     name: 'local_shell',
     description: 'Execute local shell commands',
-    inputSchema: z.object({}),
     metadata: {},
   }
 }
diff --git a/packages/typescript/ai-openai/src/tools/mcp-tool.ts b/packages/typescript/ai-openai/src/tools/mcp-tool.ts
index d9398299..64b94357 100644
--- a/packages/typescript/ai-openai/src/tools/mcp-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/mcp-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -37,7 +36,6 @@ export function mcpTool(toolData: Omit): Tool {
   return {
     name: 'mcp',
     description: toolData.server_description || '',
-    inputSchema: z.object({}),
     metadata: toolData,
   }
 }
diff --git a/packages/typescript/ai-openai/src/tools/shell-tool.ts b/packages/typescript/ai-openai/src/tools/shell-tool.ts
index 83aff438..83b301a2 100644
--- a/packages/typescript/ai-openai/src/tools/shell-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/shell-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -20,7 +19,6 @@ export function shellTool(): Tool {
   return {
     name: 'shell',
     description: 'Execute shell commands',
-    inputSchema: z.object({}),
     metadata: {},
   }
 }
diff --git a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
index 1695afd8..48942d43 100644
--- a/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/web-search-preview-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -25,7 +24,6 @@ export function webSearchPreviewTool(toolData: WebSearchPreviewTool): Tool {
   return {
     name: 'web_search_preview',
     description: 'Search the web (preview version)',
-    inputSchema: z.object({}),
     metadata: toolData,
   }
 }
diff --git a/packages/typescript/ai-openai/src/tools/web-search-tool.ts b/packages/typescript/ai-openai/src/tools/web-search-tool.ts
index 7e567c70..c7d5aef6 100644
--- a/packages/typescript/ai-openai/src/tools/web-search-tool.ts
+++ b/packages/typescript/ai-openai/src/tools/web-search-tool.ts
@@ -1,4 +1,3 @@
-import { z } from 'zod'
 import type OpenAI from 'openai'
 import type { Tool } from '@tanstack/ai'
 
@@ -19,7 +18,6 @@ export function webSearchTool(toolData: WebSearchTool): Tool {
   return {
     name: 'web_search',
     description: 'Search the web',
-    inputSchema: z.object({}),
     metadata: toolData,
   }
 }
diff --git a/packages/typescript/ai-openai/tests/openai-adapter.test.ts b/packages/typescript/ai-openai/tests/openai-adapter.test.ts
index 740f8722..febed8db 100644
--- a/packages/typescript/ai-openai/tests/openai-adapter.test.ts
+++ b/packages/typescript/ai-openai/tests/openai-adapter.test.ts
@@ -1,7 +1,6 @@
 import { describe, it, expect, beforeEach, vi } from 'vitest'
 import { chat, type Tool, type StreamChunk } from '@tanstack/ai'
 import { OpenAI, type OpenAIProviderOptions } from '../src/openai-adapter'
-import { z } from 'zod'
 
 const createAdapter = () => new OpenAI({ apiKey: 'test-key' })
 
@@ -10,9 +9,6 @@ const toolArguments = JSON.stringify({ location: 'Berlin' })
 const weatherTool: Tool = {
   name: 'lookup_weather',
   description: 'Return the forecast for a location',
-  inputSchema: z.object({
-    location: z.string(),
-  }),
 }
 
 function createMockChatCompletionsStream(
diff --git a/packages/typescript/ai/src/tools/tool-calls.ts b/packages/typescript/ai/src/tools/tool-calls.ts
index 802df0a4..ab43d86c 100644
--- a/packages/typescript/ai/src/tools/tool-calls.ts
+++ b/packages/typescript/ai/src/tools/tool-calls.ts
@@ -131,7 +131,6 @@ export class ToolCallManager {
     }
 
     // Validate input against inputSchema
-    // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
     if (tool.inputSchema) {
       try {
         args = tool.inputSchema.parse(args)
@@ -279,7 +278,6 @@ export async function executeToolCalls(
     }
 
     // Validate input against inputSchema
-    // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
     if (tool.inputSchema) {
       try {
         input = tool.inputSchema.parse(input)
@@ -371,11 +369,12 @@ export async function executeToolCalls(
 
       // Validate output against outputSchema if provided
       if (tool.outputSchema && result !== undefined && result !== null) {
-        try {
-          result = tool.outputSchema.parse(result)
-        } catch (validationError: any) {
+        const parsed = tool.outputSchema.safeParse(result)
+        if (parsed.success) {
+          result = parsed.data
+        } else {
           throw new Error(
-            `Output validation failed for tool ${tool.name}: ${validationError.message}`,
+            `Output validation failed for tool ${tool.name}: ${parsed.error.message}`,
           )
         }
       }
@@ -420,11 +419,12 @@
 
       // Validate output against outputSchema if provided
       if (tool.outputSchema && result !== undefined && result !== null) {
-        try {
-          result = tool.outputSchema.parse(result)
-        } catch (validationError: any) {
+        const parsed = tool.outputSchema.safeParse(result)
+        if (parsed.success) {
+          result = parsed.data
+        } else {
           throw new Error(
-            `Output validation failed for tool ${tool.name}: ${validationError.message}`,
+            `Output validation failed for tool ${tool.name}: ${parsed.error.message}`,
           )
         }
       }
diff --git a/packages/typescript/ai/src/tools/tool-utils.ts b/packages/typescript/ai/src/tools/tool-utils.ts
index 5498167a..12d87787 100644
--- a/packages/typescript/ai/src/tools/tool-utils.ts
+++ b/packages/typescript/ai/src/tools/tool-utils.ts
@@ -34,16 +34,6 @@ import type { Tool } from '../types'
 export function tool<
   TInput extends z.ZodType,
   TOutput extends z.ZodType = z.ZodAny,
->(config: {
-  name: string
-  description: string
-  inputSchema: TInput
-  outputSchema?: TOutput
-  execute?: (
-    args: z.infer,
-  ) => Promise> | z.infer
-  needsApproval?: boolean
-  metadata?: Record
-}): Tool {
-  return config as Tool
+>(config: Tool): Tool {
+  return config
 }
diff --git a/packages/typescript/ai/src/tools/zod-converter.ts b/packages/typescript/ai/src/tools/zod-converter.ts
index e5011ec5..6104a5f3 100644
--- a/packages/typescript/ai/src/tools/zod-converter.ts
+++ b/packages/typescript/ai/src/tools/zod-converter.ts
@@ -1,10 +1,8 @@
-import { zodToJsonSchema } from '@alcyone-labs/zod-to-json-schema'
+import { toJSONSchema } from 'zod'
 import type { z } from 'zod'
-
 /**
  * Converts a Zod schema to JSON Schema format compatible with LLM providers.
  *
- * Uses @alcyone-labs/zod-to-json-schema which is compatible with Zod v4.
  *
  * @param schema - Zod schema to convert
  * @returns JSON Schema object that can be sent to LLM providers
@@ -30,15 +28,19 @@ import type { z } from 'zod'
  * // }
  * ```
  */
-export function convertZodToJsonSchema(schema: z.ZodType): Record {
+export function convertZodToJsonSchema(
+  schema: z.ZodType | undefined,
+): Record | undefined {
+  if (!schema) return undefined
+
   // Use Alcyone Labs fork which is compatible with Zod v4
-  const jsonSchema = zodToJsonSchema(schema as any, {
-    target: 'openApi3',
-    $refStrategy: 'none', // Inline all references for LLM compatibility
+  const jsonSchema = toJSONSchema(schema, {
+    target: 'openapi-3.0',
+    reused: 'ref',
   })
 
   // Remove $schema property as it's not needed for LLM providers
-  let result = jsonSchema as Record
+  let result = jsonSchema
   if (typeof result === 'object' && '$schema' in result) {
     const { $schema, ...rest } = result
     result = rest
@@ -51,12 +53,7 @@ export function convertZodToJsonSchema(schema: z.ZodType): Record {
   const isZodObject =
     typeof schema === 'object' &&
     'def' in schema &&
-    (schema as any).def?.type === 'object'
-
-  // If type is explicitly "None", fix it
-  if (result.type === 'None') {
-    result.type = 'object'
-  }
+    schema.def.type === 'object'
 
   // If we know it's a ZodObject but result doesn't have type, set it
   if (isZodObject && !result.type) {
diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts
index a6c807df..2546a4b5 100644
--- a/packages/typescript/ai/src/types.ts
+++ b/packages/typescript/ai/src/types.ts
@@ -70,7 +70,7 @@ export interface Tool<
    *   unit: z.enum(["celsius", "fahrenheit"]).optional()
    * })
    */
-  inputSchema: TInput
+  inputSchema?: TInput
 
   /**
    * Optional Zod schema for validating tool output.
diff --git a/packages/typescript/smoke-tests/adapters/tsconfig.json b/packages/typescript/smoke-tests/adapters/tsconfig.json
index f2a38238..710d1414 100644
--- a/packages/typescript/smoke-tests/adapters/tsconfig.json
+++ b/packages/typescript/smoke-tests/adapters/tsconfig.json
@@ -1,5 +1,5 @@
 {
-  "extends": "../../../tsconfig.json",
+  "extends": "../../../../tsconfig.json",
   "compilerOptions": {
     "outDir": "dist",
     "rootDir": "."
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 2c73a677..663e0fea 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -464,13 +464,13 @@ importers:
       '@tanstack/ai':
         specifier: workspace:*
         version: link:../ai
-      zod:
-        specifier: ^4.1.13
-        version: 4.1.13
     devDependencies:
       '@vitest/coverage-v8':
         specifier: 4.0.14
         version: 4.0.14(vitest@4.0.14(@types/node@24.10.1)(happy-dom@20.0.10)(jiti@2.6.1)(jsdom@27.2.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+      zod:
+        specifier: ^4.1.13
+        version: 4.1.13
 
   packages/typescript/ai-client:
     dependencies:
@@ -527,9 +527,6 @@ importers:
       '@tanstack/ai':
         specifier: workspace:*
         version: link:../ai
-      zod:
-        specifier: ^4.1.13
-        version: 4.1.13
     devDependencies:
       '@vitest/coverage-v8':
         specifier: 4.0.14
@@ -565,9 +562,6 @@ importers:
       openai:
         specifier: ^6.9.1
         version: 6.9.1(ws@8.18.3)(zod@4.1.13)
-      zod:
-        specifier: ^4.1.13
-        version: 4.1.13
     devDependencies:
       '@vitest/coverage-v8':
         specifier: 4.0.14