From dbb9dfe32a602f28c4c4e138b135d726c4ff6da9 Mon Sep 17 00:00:00 2001 From: Khaliq Date: Tue, 17 Mar 2026 12:42:14 +0100 Subject: [PATCH 1/3] Add AI SDK relay adapter and workflow docs --- docs/communicate.mdx | 8 +- docs/communicate/ai-sdk.mdx | 135 ++++++++++++++ docs/introduction.mdx | 2 +- docs/markdown/communicate.md | 8 +- docs/markdown/communicate/ai-sdk.md | 90 ++++++++++ docs/markdown/introduction.md | 2 +- packages/sdk/package.json | 9 + .../communicate/adapters/ai-sdk.test.ts | 106 +++++++++++ .../sdk/src/communicate/adapters/ai-sdk.ts | 166 ++++++++++++++++++ .../sdk/src/communicate/adapters/index.ts | 1 + packages/sdk/src/communicate/index.ts | 2 +- packages/sdk/src/workflows/README.md | 53 ++++++ 12 files changed, 575 insertions(+), 7 deletions(-) create mode 100644 docs/communicate/ai-sdk.mdx create mode 100644 docs/markdown/communicate/ai-sdk.md create mode 100644 packages/sdk/src/__tests__/communicate/adapters/ai-sdk.test.ts create mode 100644 packages/sdk/src/communicate/adapters/ai-sdk.ts diff --git a/docs/communicate.mdx b/docs/communicate.mdx index 656c356a7..4273ab6f1 100644 --- a/docs/communicate.mdx +++ b/docs/communicate.mdx @@ -15,9 +15,11 @@ agent = on_relay(my_agent, relay) ``` ```typescript TypeScript +import { wrapLanguageModel } from 'ai'; import { Relay } from '@agent-relay/sdk/communicate'; -import { onRelay } from '@agent-relay/sdk/communicate/adapters/pi'; -const config = onRelay('MyAgent', piConfig, new Relay('MyAgent')); +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +const session = onRelay({ name: 'MyAgent' }, new Relay('MyAgent')); +const model = wrapLanguageModel({ model: baseModel, middleware: session.middleware }); ``` @@ -30,6 +32,7 @@ const config = onRelay('MyAgent', piConfig, new Relay('MyAgent')); | Claude Agent SDK | Python, TypeScript | Push (Tier 1) | Hooks: PostToolUse, Stop | | Google ADK | Python | Push (Tier 1) | before_model_callback injection | | Pi | TypeScript | Push (Tier 1) | 
session.steer / session.followUp | +| AI SDK | TypeScript | Poll (Tier 2) | Tools + middleware system injection | | OpenAI Agents | Python | Poll (Tier 2) | Tools + instructions wrapper | | Agno | Python | Poll (Tier 2) | Tools + instructions wrapper | | Swarms | Python | Poll (Tier 2) | Tools + on_message callback | @@ -73,6 +76,7 @@ await relay.close() ## Per-Framework Guides + TypeScript adapter for Vercel AI SDK apps Python adapter for OpenAI Agents SDK Python + TypeScript adapter Python adapter for Google ADK diff --git a/docs/communicate/ai-sdk.mdx b/docs/communicate/ai-sdk.mdx new file mode 100644 index 000000000..7a6c8061f --- /dev/null +++ b/docs/communicate/ai-sdk.mdx @@ -0,0 +1,135 @@ +--- +title: 'AI SDK' +description: 'Connect Vercel AI SDK apps to Relaycast with onRelay().' +--- + +Connect an [AI SDK](https://ai-sdk.dev/docs/introduction) app to Relaycast with a single `onRelay()` call. + +## Quick Start + +```typescript +import { streamText, wrapLanguageModel } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; + +const relay = new Relay('SupportLead'); +const relaySession = onRelay({ name: 'SupportLead' }, relay); + +const model = wrapLanguageModel({ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, +}); + +const result = await streamText({ + model, + system: 'You coordinate support specialists and keep the user informed.', + tools: relaySession.tools, + prompt: 'Triage the latest onboarding issue.', +}); +``` + +## What `onRelay()` Provides + +`onRelay()` returns a session object with: + +- `tools` — AI SDK-compatible relay tools for `generateText()` / `streamText()` +- `middleware` — language model middleware that injects newly received relay messages into the `system` prompt before each call +- `cleanup()` — unsubscribes from live relay delivery and clears buffered injections + +This fits the AI 
SDK model cleanly: tool calls remain explicit, while incoming relay messages show up as fresh coordination context on the next model turn. + +## Tools Added + +`onRelay()` exposes four tools: + +- `relay_send({ to, text })` +- `relay_inbox()` +- `relay_post({ channel, text })` +- `relay_agents()` + +These can be passed straight into `generateText()` or `streamText()`. + +## Workflow-Friendly Pattern + +For consumer-facing apps, the usual pattern is: + +1. **Frontend app** uses AI SDK UI (`useChat`, streamed responses, etc.) +2. **Server route** runs `streamText()` with Relay tools attached +3. **Specialists or reviewers** participate via Relay / workflow runner +4. **Workflow runner** handles longer multi-agent execution when the chat turn needs more than one model call + +### Next.js route that can escalate into a Relay workflow + +```typescript +import { streamText, wrapLanguageModel } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +import { runWorkflow } from '@agent-relay/sdk/workflows'; + +export async function POST(req: Request) { + const { prompt, repo, escalate } = await req.json(); + + const relay = new Relay('CustomerFacingLead'); + const relaySession = onRelay({ + name: 'CustomerFacingLead', + instructions: + 'If implementation needs multiple specialists, post status to the team and summarize clearly for the end user.', + }, relay); + + const model = wrapLanguageModel({ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, + }); + + if (escalate) { + const workflow = await runWorkflow('workflows/feature-dev.yaml', { + vars: { repo, task: prompt }, + }); + + return Response.json({ + status: workflow.status, + runId: workflow.runId, + }); + } + + const result = streamText({ + model, + tools: relaySession.tools, + system: 'You are the point person for the user. 
Coordinate internally via Relay when needed.', + prompt, + }); + + return result.toUIMessageStreamResponse({ + onFinish() { + relaySession.cleanup(); + void relay.close(); + }, + }); +} +``` + +## API + +### `onRelay(options, relay?)` + +**Parameters** + +- `options.name` — Relay agent name +- `options.instructions` — optional extra relay-specific instructions +- `options.includeDefaultInstructions` — set to `false` if you want full control over the injected relay guidance +- `relay` — optional pre-configured `Relay` client + +**Returns** + +- `tools` +- `middleware` +- `relay` +- `cleanup()` + +## Notes + +- Incoming relay messages are injected on the **next** model call, which matches AI SDK’s request/response model. +- `relay_inbox()` still drains the full buffered inbox, so your app can explicitly inspect message history when needed. +- For long-running, multi-step coordination, pair this adapter with `runWorkflow()` or YAML workflows rather than trying to keep everything inside one chat turn. diff --git a/docs/introduction.mdx b/docs/introduction.mdx index 97cff8dfe..4f1040794 100644 --- a/docs/introduction.mdx +++ b/docs/introduction.mdx @@ -6,7 +6,7 @@ description: 'Spawn, coordinate, and connect AI agents from TypeScript or Python The Agent Relay SDK has two modes: - **Orchestrate** — Spawn and manage AI agents (Claude, Codex, Gemini, OpenCode) from code. Send messages, listen for responses, and shut them down when done. -- **Communicate** — Put an existing framework agent "on the relay" with a single `on_relay()` call. Works with OpenAI Agents, Claude Agent SDK, Google ADK, Pi, Agno, Swarms, and CrewAI. +- **Communicate** — Put an existing framework agent "on the relay" with a single `on_relay()` / `onRelay()` call. Works with AI SDK, OpenAI Agents, Claude Agent SDK, Google ADK, Pi, Agno, Swarms, and CrewAI. 
```bash TypeScript diff --git a/docs/markdown/communicate.md b/docs/markdown/communicate.md index 27368dbfd..bb98319c6 100644 --- a/docs/markdown/communicate.md +++ b/docs/markdown/communicate.md @@ -15,9 +15,11 @@ agent = on_relay(my_agent, relay) ```typescript // TypeScript +import { wrapLanguageModel } from 'ai'; import { Relay } from '@agent-relay/sdk/communicate'; -import { onRelay } from '@agent-relay/sdk/communicate/adapters/pi'; -const config = onRelay('MyAgent', piConfig, new Relay('MyAgent')); +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +const session = onRelay({ name: 'MyAgent' }, new Relay('MyAgent')); +const model = wrapLanguageModel({ model: baseModel, middleware: session.middleware }); ``` `on_relay()` auto-detects the framework and applies the right adapter. No configuration needed. @@ -29,6 +31,7 @@ const config = onRelay('MyAgent', piConfig, new Relay('MyAgent')); | Claude Agent SDK | Python, TypeScript | Push (Tier 1) | Hooks: PostToolUse, Stop | | Google ADK | Python | Push (Tier 1) | before_model_callback injection | | Pi | TypeScript | Push (Tier 1) | session.steer / session.followUp | +| AI SDK | TypeScript | Poll (Tier 2) | Tools + middleware system injection | | OpenAI Agents | Python | Poll (Tier 2) | Tools + instructions wrapper | | Agno | Python | Poll (Tier 2) | Tools + instructions wrapper | | Swarms | Python | Poll (Tier 2) | Tools + on_message callback | @@ -70,6 +73,7 @@ await relay.close() ## Per-Framework Guides +- [AI SDK](/communicate/ai-sdk) — TypeScript adapter for Vercel AI SDK apps - [OpenAI Agents](/communicate/openai-agents) — Python adapter for OpenAI Agents SDK - [Claude Agent SDK](/communicate/claude-sdk) — Python + TypeScript adapter - [Google ADK](/communicate/google-adk) — Python adapter for Google ADK diff --git a/docs/markdown/communicate/ai-sdk.md b/docs/markdown/communicate/ai-sdk.md new file mode 100644 index 000000000..493dfe62e --- /dev/null +++ 
b/docs/markdown/communicate/ai-sdk.md @@ -0,0 +1,90 @@ +# AI SDK + +Connect an [AI SDK](https://ai-sdk.dev/docs/introduction) app to Relaycast with a single `onRelay()` call. + +## Quick Start + +```typescript +import { streamText, wrapLanguageModel } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; + +const relay = new Relay('SupportLead'); +const relaySession = onRelay({ name: 'SupportLead' }, relay); + +const model = wrapLanguageModel({ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, +}); + +const result = await streamText({ + model, + system: 'You coordinate support specialists and keep the user informed.', + tools: relaySession.tools, + prompt: 'Triage the latest onboarding issue.', +}); +``` + +## What `onRelay()` Provides + +`onRelay()` returns: + +- `tools` for `generateText()` / `streamText()` +- `middleware` that injects live relay messages into `system` +- `cleanup()` to unsubscribe and clear buffered injections + +## Workflow-Friendly Pattern + +Use AI SDK in the consumer-facing app, and Relay workflows for the longer internal coordination path: + +```typescript +import { streamText, wrapLanguageModel } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +import { runWorkflow } from '@agent-relay/sdk/workflows'; + +export async function POST(req: Request) { + const { prompt, repo, escalate } = await req.json(); + + const relay = new Relay('CustomerFacingLead'); + const relaySession = onRelay({ name: 'CustomerFacingLead' }, relay); + + const model = wrapLanguageModel({ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, + }); + + if (escalate) { + const workflow = await runWorkflow('workflows/feature-dev.yaml', { + vars: { repo, task: prompt }, + }); + + 
return Response.json({ status: workflow.status, runId: workflow.runId }); + } + + const result = streamText({ + model, + tools: relaySession.tools, + system: 'You are the point person for the user. Coordinate internally via Relay when needed.', + prompt, + }); + + return result.toUIMessageStreamResponse({ + onFinish() { + relaySession.cleanup(); + void relay.close(); + }, + }); +} +``` + +## API + +### `onRelay(options, relay?)` + +- `options.name` — Relay agent name +- `options.instructions` — optional extra instructions +- `options.includeDefaultInstructions` — disable built-in relay guidance if needed +- `relay` — optional pre-configured `Relay` diff --git a/docs/markdown/introduction.md b/docs/markdown/introduction.md index b0bb57b51..8cab10e0f 100644 --- a/docs/markdown/introduction.md +++ b/docs/markdown/introduction.md @@ -5,7 +5,7 @@ Spawn, coordinate, and connect AI agents from TypeScript or Python. The Agent Relay SDK has two modes: - **Orchestrate** — Spawn and manage AI agents (Claude, Codex, Gemini, OpenCode) from code. Send messages, listen for responses, and shut them down when done. -- **Communicate** — Put an existing framework agent "on the relay" with a single `on_relay()` call. Works with OpenAI Agents, Claude Agent SDK, Google ADK, Pi, Agno, Swarms, and CrewAI. +- **Communicate** — Put an existing framework agent "on the relay" with a single `on_relay()` / `onRelay()` call. Works with AI SDK, OpenAI Agents, Claude Agent SDK, Google ADK, Pi, Agno, Swarms, and CrewAI. 
## Install diff --git a/packages/sdk/package.json b/packages/sdk/package.json index 96f5d9244..bfd9e73b2 100644 --- a/packages/sdk/package.json +++ b/packages/sdk/package.json @@ -64,6 +64,11 @@ "types": "./dist/communicate/adapters/claude-sdk.d.ts", "import": "./dist/communicate/adapters/claude-sdk.js", "default": "./dist/communicate/adapters/claude-sdk.js" + }, + "./communicate/adapters/ai-sdk": { + "types": "./dist/communicate/adapters/ai-sdk.d.ts", + "import": "./dist/communicate/adapters/ai-sdk.js", + "default": "./dist/communicate/adapters/ai-sdk.js" } }, "files": [ @@ -109,6 +114,7 @@ "@langchain/langgraph": ">=1.2.0", "@mariozechner/pi-coding-agent": ">=0.50.0", "@openai/agents": ">=0.7.0", + "ai": ">=5.0.0", "crewai": ">=1.0.0" }, "peerDependenciesMeta": { @@ -127,6 +133,9 @@ "@openai/agents": { "optional": true }, + "ai": { + "optional": true + }, "crewai": { "optional": true } diff --git a/packages/sdk/src/__tests__/communicate/adapters/ai-sdk.test.ts b/packages/sdk/src/__tests__/communicate/adapters/ai-sdk.test.ts new file mode 100644 index 000000000..dffca6318 --- /dev/null +++ b/packages/sdk/src/__tests__/communicate/adapters/ai-sdk.test.ts @@ -0,0 +1,106 @@ +import assert from 'node:assert/strict'; +import test from 'node:test'; + +const adapterModulePath = '../../../communicate/adapters/ai-sdk.js'; + +async function loadModule(): Promise { + return import(adapterModulePath); +} + +class FakeRelay { + private callbacks: Array<(message: any) => void | Promise> = []; + + sent: Array<{ to: string; text: string }> = []; + posted: Array<{ channel: string; text: string }> = []; + inboxMessages: any[] = []; + + async send(to: string, text: string): Promise { + this.sent.push({ to, text }); + } + + async post(channel: string, text: string): Promise { + this.posted.push({ channel, text }); + } + + async inbox(): Promise { + const messages = [...this.inboxMessages]; + this.inboxMessages = []; + return messages; + } + + async agents(): Promise { + return 
['Lead', 'Researcher']; + } + + onMessage(callback: (message: any) => void | Promise): () => void { + this.callbacks.push(callback); + return () => { + this.callbacks = this.callbacks.filter((entry) => entry !== callback); + }; + } + + async emit(message: any): Promise { + for (const callback of [...this.callbacks]) { + await callback(message); + } + } +} + +test('AI SDK onRelay returns relay tool definitions', async () => { + const { onRelay } = await loadModule(); + const relay = new FakeRelay(); + + const session = onRelay({ name: 'AiSdkTester' }, relay); + const toolNames = Object.keys(session.tools); + + assert.deepEqual(toolNames, ['relay_send', 'relay_inbox', 'relay_post', 'relay_agents']); + assert.equal(typeof session.middleware.transformParams, 'function'); + assert.equal(typeof session.cleanup, 'function'); +}); + +test('AI SDK relay tools execute against the relay client', async () => { + const { onRelay } = await loadModule(); + const relay = new FakeRelay(); + relay.inboxMessages = [{ sender: 'Lead', text: 'Check status', messageId: 'msg-1' }]; + + const session = onRelay({ name: 'AiSdkTester' }, relay); + + await session.tools.relay_send.execute?.({ to: 'Lead', text: 'Working on it' }); + await session.tools.relay_post.execute?.({ channel: 'ops', text: 'status update' }); + const inboxResult = await session.tools.relay_inbox.execute?.({}); + const agentsResult = await session.tools.relay_agents.execute?.({}); + + assert.deepEqual(relay.sent, [{ to: 'Lead', text: 'Working on it' }]); + assert.deepEqual(relay.posted, [{ channel: 'ops', text: 'status update' }]); + assert.match(String((inboxResult as any).text), /Check status/); + assert.deepEqual((agentsResult as any).agents, ['Lead', 'Researcher']); +}); + +test('AI SDK middleware appends relay instructions and pending messages to system', async () => { + const { onRelay } = await loadModule(); + const relay = new FakeRelay(); + const session = onRelay({ name: 'AiSdkTester', instructions: 'Escalate 
blockers quickly.' }, relay); + + await relay.emit({ sender: 'Lead', text: 'Need an update', messageId: 'msg-2' }); + + const first = await session.middleware.transformParams?.({ params: { system: 'Base system.' } }); + const second = await session.middleware.transformParams?.({ params: { system: 'Base system.' } }); + + assert.match(String(first?.system), /Base system\./); + assert.match(String(first?.system), /Use relay_send/); + assert.match(String(first?.system), /Escalate blockers quickly\./); + assert.match(String(first?.system), /Need an update/); + assert.equal(second?.system?.includes('Need an update'), false); +}); + +test('AI SDK cleanup unsubscribes from live relay messages', async () => { + const { onRelay } = await loadModule(); + const relay = new FakeRelay(); + const session = onRelay({ name: 'AiSdkTester' }, relay); + + session.cleanup(); + await relay.emit({ sender: 'Lead', text: 'Late ping', messageId: 'msg-3' }); + const result = await session.middleware.transformParams?.({ params: {} }); + + assert.equal(String(result?.system).includes('Late ping'), false); +}); diff --git a/packages/sdk/src/communicate/adapters/ai-sdk.ts b/packages/sdk/src/communicate/adapters/ai-sdk.ts new file mode 100644 index 000000000..2e8661b81 --- /dev/null +++ b/packages/sdk/src/communicate/adapters/ai-sdk.ts @@ -0,0 +1,166 @@ +import { Relay } from '../core.js'; +import { formatRelayMessage, type Message, type MessageCallback } from '../types.js'; + +const DEFAULT_RELAY_SYSTEM_INSTRUCTIONS = [ + 'You are connected to Agent Relay.', + 'Use relay_send for direct messages, relay_post for channel updates, relay_agents to inspect who is online, and relay_inbox to fetch buffered messages.', + 'When relay messages are injected below, treat them as the latest coordination context and respond or delegate as needed.', +].join(' '); + +type JsonObjectSchema = { + type: 'object'; + properties: Record>; + required: string[]; + additionalProperties: boolean; +}; + +export type 
AiSdkToolLike = { + description?: string; + inputSchema: JsonObjectSchema; + execute?: (input: Record) => Promise; +}; + +export type AiSdkTools = Record; + +export type AiSdkCallParams = { + system?: string; + [key: string]: unknown; +}; + +export type AiSdkMiddlewareLike = { + transformParams?: (input: { params: AiSdkCallParams }) => Promise | AiSdkCallParams; +}; + +export type RelayLike = { + send(to: string, text: string): Promise; + post(channel: string, text: string): Promise; + inbox(): Promise; + agents(): Promise; + onMessage(callback: MessageCallback): () => void; +}; + +export interface AiSdkRelayOptions { + /** Agent name used when registering with Relaycast. */ + name: string; + /** Optional custom instructions appended to the system prompt on every model call. */ + instructions?: string; + /** Disable the default relay instructions if you want to provide your own. */ + includeDefaultInstructions?: boolean; +} + +export interface AiSdkRelaySession { + /** AI SDK-compatible tool map for generateText/streamText. */ + tools: AiSdkTools; + /** AI SDK language model middleware that injects pending relay messages into system. */ + middleware: AiSdkMiddlewareLike; + /** Underlying relay client. */ + relay: RelayLike; + /** Stop live routing and clear any injected-message state. 
*/ + cleanup: () => void; +} + +function schema(props: Record>, required: string[]): JsonObjectSchema { + return { type: 'object', properties: props, required, additionalProperties: false }; +} + +function createRelayTools(relay: RelayLike): AiSdkTools { + return { + relay_send: { + description: 'Send a direct message to another relay agent.', + inputSchema: schema({ to: { type: 'string' }, text: { type: 'string' } }, ['to', 'text']), + async execute(input) { + await relay.send(input.to, input.text); + return { ok: true, status: `Sent relay message to ${input.to}.` }; + }, + }, + relay_inbox: { + description: 'Drain and inspect newly received relay messages.', + inputSchema: schema({}, []), + async execute() { + const messages = await relay.inbox(); + return { + ok: true, + messages, + text: messages.length === 0 ? 'No new relay messages.' : messages.map(formatRelayMessage).join('\n'), + }; + }, + }, + relay_post: { + description: 'Post a message to a relay channel.', + inputSchema: schema({ channel: { type: 'string' }, text: { type: 'string' } }, ['channel', 'text']), + async execute(input) { + await relay.post(input.channel, input.text); + return { ok: true, status: `Posted relay message to #${input.channel}.` }; + }, + }, + relay_agents: { + description: 'List currently online relay agents.', + inputSchema: schema({}, []), + async execute() { + const agents = await relay.agents(); + return { ok: true, agents, text: agents.join('\n') }; + }, + }, + }; +} + +function composeSystemPrompt(baseSystem: string | undefined, pendingMessages: string[], options: AiSdkRelayOptions): string { + const sections: string[] = []; + + if (baseSystem && baseSystem.trim().length > 0) { + sections.push(baseSystem.trim()); + } + + const relayInstructions = [ + options.includeDefaultInstructions === false ? '' : DEFAULT_RELAY_SYSTEM_INSTRUCTIONS, + options.instructions?.trim() ?? 
'', + ] + .filter((value) => value.length > 0) + .join('\n\n'); + + if (relayInstructions.length > 0) { + sections.push(relayInstructions); + } + + if (pendingMessages.length > 0) { + sections.push(`--- Relay Messages ---\n${pendingMessages.join('\n')}`); + } + + return sections.join('\n\n'); +} + +/** + * Create AI SDK tools + middleware for putting a model-driven app on Agent Relay. + * + * Typical usage pairs the returned `middleware` with `wrapLanguageModel(...)` + * from `ai`, and the returned `tools` with `generateText(...)` or `streamText(...)`. + */ +export function onRelay( + options: AiSdkRelayOptions, + relay: RelayLike = new Relay(options.name), +): AiSdkRelaySession { + const tools = createRelayTools(relay); + const pendingMessages: string[] = []; + + const unsubscribe = relay.onMessage(async (message) => { + pendingMessages.push(formatRelayMessage(message)); + }); + + return { + tools, + relay, + middleware: { + async transformParams({ params }): Promise { + const liveMessages = pendingMessages.splice(0, pendingMessages.length); + return { + ...params, + system: composeSystemPrompt(typeof params.system === 'string' ? 
params.system : undefined, liveMessages, options), + }; + }, + }, + cleanup() { + unsubscribe(); + pendingMessages.splice(0, pendingMessages.length); + }, + }; +} diff --git a/packages/sdk/src/communicate/adapters/index.ts b/packages/sdk/src/communicate/adapters/index.ts index 7d198e67c..d3260b74c 100644 --- a/packages/sdk/src/communicate/adapters/index.ts +++ b/packages/sdk/src/communicate/adapters/index.ts @@ -4,3 +4,4 @@ export { onRelay as onCrewAIRelay, onCrewRelay } from './crewai.js'; export { onRelay as onOpenAIAgentsRelay } from './openai-agents.js'; export { onRelay as onLangGraphRelay } from './langgraph.js'; export { onRelay as onGoogleAdkRelay } from './google-adk.js'; +export { onRelay as onAiSdkRelay } from './ai-sdk.js'; diff --git a/packages/sdk/src/communicate/index.ts b/packages/sdk/src/communicate/index.ts index c0cfba9a7..d29085ab3 100644 --- a/packages/sdk/src/communicate/index.ts +++ b/packages/sdk/src/communicate/index.ts @@ -1,3 +1,3 @@ export * from './types.js'; export { Relay } from './core.js'; -export { onPiRelay, onClaudeRelay } from './adapters/index.js'; +export { onPiRelay, onClaudeRelay, onAiSdkRelay } from './adapters/index.js'; diff --git a/packages/sdk/src/workflows/README.md b/packages/sdk/src/workflows/README.md index 3e7ecbb48..ccd7568ec 100644 --- a/packages/sdk/src/workflows/README.md +++ b/packages/sdk/src/workflows/README.md @@ -67,6 +67,59 @@ result = ( ) ``` +## Consumer-Facing Apps + AI SDK Communicate Flows + +A good production split is: + +1. **AI SDK app** handles the user conversation and streaming UI +2. **Communicate / `onRelay()`** lets that point-person coordinate with specialists over Relay +3. 
**Workflows / `runWorkflow()`** take over when a request needs multi-step execution, verification, or handoffs + +```typescript +import { streamText, wrapLanguageModel } from 'ai'; +import { openai } from '@ai-sdk/openai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +import { runWorkflow } from '@agent-relay/sdk/workflows'; + +export async function POST(req: Request) { + const { prompt, escalate, repo } = await req.json(); + + const relay = new Relay('AppLead'); + const relaySession = onRelay({ + name: 'AppLead', + instructions: 'You are the customer-facing lead. Keep the user updated and delegate implementation via Relay when needed.', + }, relay); + + const model = wrapLanguageModel({ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, + }); + + if (escalate) { + const workflow = await runWorkflow('workflows/feature-dev.yaml', { + vars: { task: prompt, repo }, + }); + + return Response.json({ status: workflow.status, runId: workflow.runId }); + } + + return streamText({ + model, + tools: relaySession.tools, + system: 'Answer directly when possible; coordinate internally when the task needs specialists.', + prompt, + }).toUIMessageStreamResponse({ + onFinish() { + relaySession.cleanup(); + void relay.close(); + }, + }); +} +``` + +That pattern keeps the user experience snappy while still letting longer Relay workflows run with proper ownership, retries, and verification. 
+ ## YAML Format Workflows are defined as `relay.yaml` files: From 7ee4439b3cb937cfc49eef4134bfb72d5d8ca326 Mon Sep 17 00:00:00 2001 From: Khaliq Date: Tue, 17 Mar 2026 12:51:03 +0100 Subject: [PATCH 2/3] Add AI SDK message-array relay injection example --- docs/communicate/ai-sdk.mdx | 19 ++++++- docs/markdown/communicate/ai-sdk.md | 21 +++---- examples/ai-sdk-relay-helpdesk/README.md | 39 +++++++++++++ .../app/api/chat/route.ts | 56 +++++++++++++++++++ examples/ai-sdk-relay-helpdesk/app/page.tsx | 54 ++++++++++++++++++ examples/ai-sdk-relay-helpdesk/package.json | 18 ++++++ .../workflows/helpdesk-escalation.yaml | 55 ++++++++++++++++++ .../communicate/adapters/ai-sdk.test.ts | 21 +++++++ .../sdk/src/communicate/adapters/ai-sdk.ts | 52 +++++++++++++---- packages/sdk/src/workflows/README.md | 2 + 10 files changed, 314 insertions(+), 23 deletions(-) create mode 100644 examples/ai-sdk-relay-helpdesk/README.md create mode 100644 examples/ai-sdk-relay-helpdesk/app/api/chat/route.ts create mode 100644 examples/ai-sdk-relay-helpdesk/app/page.tsx create mode 100644 examples/ai-sdk-relay-helpdesk/package.json create mode 100644 examples/ai-sdk-relay-helpdesk/workflows/helpdesk-escalation.yaml diff --git a/docs/communicate/ai-sdk.mdx b/docs/communicate/ai-sdk.mdx index 7a6c8061f..d5d0b43fc 100644 --- a/docs/communicate/ai-sdk.mdx +++ b/docs/communicate/ai-sdk.mdx @@ -25,7 +25,7 @@ const result = await streamText({ model, system: 'You coordinate support specialists and keep the user informed.', tools: relaySession.tools, - prompt: 'Triage the latest onboarding issue.', + messages: [{ role: 'user', content: 'Triage the latest onboarding issue.' 
}], }); ``` @@ -34,9 +34,11 @@ const result = await streamText({ `onRelay()` returns a session object with: - `tools` — AI SDK-compatible relay tools for `generateText()` / `streamText()` -- `middleware` — language model middleware that injects newly received relay messages into the `system` prompt before each call +- `middleware` — language model middleware that injects newly received relay messages into the next model call - `cleanup()` — unsubscribes from live relay delivery and clears buffered injections +For string-style call sites, relay context is appended to `system`. For message-array-heavy call sites, the middleware also prepends a synthetic `system` message so `messages`-driven flows get the same relay context without needing a separate top-level `system` string. + This fits the AI SDK model cleanly: tool calls remain explicit, while incoming relay messages show up as fresh coordination context on the next model turn. ## Tools Added @@ -98,7 +100,7 @@ export async function POST(req: Request) { model, tools: relaySession.tools, system: 'You are the point person for the user. Coordinate internally via Relay when needed.', - prompt, + messages: [{ role: 'user', content: prompt }], }); return result.toUIMessageStreamResponse({ @@ -110,6 +112,17 @@ export async function POST(req: Request) { } ``` +## Example App + +A small end-to-end example lives at `examples/ai-sdk-relay-helpdesk/`. 
+ +It shows: + +- a tiny Next.js UI +- an AI SDK route using `onRelay()` +- `messages`-based model calls +- a simple escalation gate into `workflows/helpdesk-escalation.yaml` + ## API ### `onRelay(options, relay?)` diff --git a/docs/markdown/communicate/ai-sdk.md b/docs/markdown/communicate/ai-sdk.md index 493dfe62e..83ce36d73 100644 --- a/docs/markdown/communicate/ai-sdk.md +++ b/docs/markdown/communicate/ai-sdk.md @@ -22,7 +22,7 @@ const result = await streamText({ model, system: 'You coordinate support specialists and keep the user informed.', tools: relaySession.tools, - prompt: 'Triage the latest onboarding issue.', + messages: [{ role: 'user', content: 'Triage the latest onboarding issue.' }], }); ``` @@ -31,9 +31,11 @@ const result = await streamText({ `onRelay()` returns: - `tools` for `generateText()` / `streamText()` -- `middleware` that injects live relay messages into `system` +- `middleware` that injects live relay messages into the next model call - `cleanup()` to unsubscribe and clear buffered injections +For string-style call sites, relay context is appended to `system`. For message-array call sites, the middleware also prepends a synthetic `system` message so chat-heavy flows get the same relay context. + ## Workflow-Friendly Pattern Use AI SDK in the consumer-facing app, and Relay workflows for the longer internal coordination path: @@ -64,22 +66,21 @@ export async function POST(req: Request) { return Response.json({ status: workflow.status, runId: workflow.runId }); } - const result = streamText({ + const result = await streamText({ model, tools: relaySession.tools, system: 'You are the point person for the user. 
Coordinate internally via Relay when needed.', - prompt, + messages: [{ role: 'user', content: prompt }], }); - return result.toUIMessageStreamResponse({ - onFinish() { - relaySession.cleanup(); - void relay.close(); - }, - }); + return Response.json({ mode: 'chat', text: await result.text }); } ``` +## Example App + +See `examples/ai-sdk-relay-helpdesk/` for a compact Next.js example that pairs AI SDK chat with Relay workflow escalation. + ## API ### `onRelay(options, relay?)` diff --git a/examples/ai-sdk-relay-helpdesk/README.md b/examples/ai-sdk-relay-helpdesk/README.md new file mode 100644 index 000000000..8d641f76a --- /dev/null +++ b/examples/ai-sdk-relay-helpdesk/README.md @@ -0,0 +1,39 @@ +# AI SDK + Relay Helpdesk Example + +A small consumer-facing Next.js app that uses the AI SDK adapter as the point-person layer and escalates bigger requests into a Relay workflow. + +## What it demonstrates + +- `onRelay()` attached to an AI SDK model via `wrapLanguageModel()` +- normal user-facing chat turns through `streamText()` +- a simple escalation gate that kicks off `runWorkflow()` for longer multi-step work +- a workflow file that uses a lead + specialist review path + +## Files + +- `app/page.tsx` — tiny browser UI +- `app/api/chat/route.ts` — AI SDK route with Relay communicate middleware +- `workflows/helpdesk-escalation.yaml` — Relay workflow used for escalations + +## Run + +```bash +cd examples/ai-sdk-relay-helpdesk +npm install +npm run dev +``` + +Set the env vars your app needs first, for example: + +```bash +export OPENAI_API_KEY=... +export RELAY_API_KEY=... 
+export RELAY_BASE_URL=http://localhost:3888 +``` + +Then open `http://localhost:3000` and try: + +- a normal question like `Summarize the latest support issue` +- an escalation like `Please escalate: coordinate a migration plan for repo X` + +If the prompt begins with `Please escalate:`, the route starts the Relay workflow and returns the workflow run id instead of trying to finish everything in one chat turn. diff --git a/examples/ai-sdk-relay-helpdesk/app/api/chat/route.ts b/examples/ai-sdk-relay-helpdesk/app/api/chat/route.ts new file mode 100644 index 000000000..35a7c48b7 --- /dev/null +++ b/examples/ai-sdk-relay-helpdesk/app/api/chat/route.ts @@ -0,0 +1,56 @@ +import { openai } from '@ai-sdk/openai'; +import { streamText, wrapLanguageModel } from 'ai'; +import { Relay } from '@agent-relay/sdk/communicate'; +import { onRelay } from '@agent-relay/sdk/communicate/adapters/ai-sdk'; +import { runWorkflow } from '@agent-relay/sdk/workflows'; + +const ESCALATE_PREFIX = 'please escalate:'; /* prompts that begin with this (compared case-insensitively below) are routed to a Relay workflow instead of a chat turn */ + +export async function POST(request: Request) { /* chat endpoint: answers in a single streamText turn, or escalates multi-step requests to a Relay workflow; returns 400 when prompt is missing or empty */ + const { prompt } = (await request.json()) as { prompt?: string }; + const text = prompt?.trim() ?? ''; /* normalize an absent prompt to '' so one emptiness check covers both cases */ + + if (text.length === 0) { + return Response.json({ error: 'prompt is required' }, { status: 400 }); + } + + const relay = new Relay('HelpdeskLead'); + const relaySession = onRelay( + { + name: 'HelpdeskLead', + instructions: + 'You are the customer-facing lead. Answer directly when you can. 
When work needs specialists, use Relay tools, keep the user updated, and escalate to a workflow when the task is clearly multi-step.', + }, + relay, + ); + + const model = wrapLanguageModel({ /* wrap the base model so relaySession.middleware can inject received relay messages before each call */ + model: openai('gpt-4o-mini'), + middleware: relaySession.middleware, + }); + + try { + if (text.toLowerCase().startsWith(ESCALATE_PREFIX)) { /* escalation gate described in the README: hand multi-step work to the workflow */ + const workflow = await runWorkflow('workflows/helpdesk-escalation.yaml', { /* NOTE(review): path looks relative to the app working directory — confirm */ + vars: { + request: text.slice(ESCALATE_PREFIX.length).trim(), + }, + }); + + return Response.json({ mode: 'workflow', status: workflow.status, runId: workflow.runId }); + } + + const result = await streamText({ + model, + tools: relaySession.tools, + system: + 'You are the point person for the user. Coordinate through Relay when needed, but keep the final answer concise and user-facing.', + messages: [{ role: 'user', content: text }], + }); + + return Response.json({ mode: 'chat', text: await result.text }); /* awaits the fully streamed text; nothing is streamed to the client from this route */ + } finally { + relaySession.cleanup(); /* detach live relay delivery and drop buffered injections, even when an error is thrown above */ + await relay.close(); + } +} diff --git a/examples/ai-sdk-relay-helpdesk/app/page.tsx b/examples/ai-sdk-relay-helpdesk/app/page.tsx new file mode 100644 index 000000000..ae1cd3a2e --- /dev/null +++ b/examples/ai-sdk-relay-helpdesk/app/page.tsx @@ -0,0 +1,54 @@ +'use client'; + +import { useState } from 'react'; + +type ChatResponse = + | { mode: 'chat'; text: string } + | { mode: 'workflow'; status: string; runId: string }; + +export default function Page() { + const [prompt, setPrompt] = useState('Summarize the latest support issue.'); + const [result, setResult] = useState(null); + const [loading, setLoading] = useState(false); + + async function onSubmit(event: React.FormEvent) { + event.preventDefault(); + setLoading(true); + + try { + const response = await fetch('/api/chat', { + method: 'POST', + headers: { 'content-type': 'application/json' }, + body: JSON.stringify({ prompt }), + }); + + const data = (await response.json()) as ChatResponse; /* server replies with either a chat answer or a workflow run reference */ + setResult(data); + } finally { + setLoading(false); + } + }
return ( +
+

AI SDK + Relay Helpdesk

+

Normal prompts stay in the chat loop. Prompts starting with Please escalate: hand work to a Relay workflow.

+ +
+