From 9f0fd1ed5bdeac2d28f6741798303238ab5e8a52 Mon Sep 17 00:00:00 2001 From: Hugo Dutka Date: Tue, 21 Oct 2025 15:39:00 +0200 Subject: [PATCH 1/2] fix: update init templates to use openai.chat --- .github/ISSUE_TEMPLATE/bug_report.md | 8 +++++--- packages/blink/src/cli/init-templates/index.ts | 4 ++-- .../blink/src/cli/init-templates/scratch/agent.ts.hbs | 7 +------ .../blink/src/cli/init-templates/slack-bot/agent.ts.hbs | 7 +------ packages/blink/src/cli/init.test.ts | 2 +- 5 files changed, 10 insertions(+), 18 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index a329383..4024205 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,20 +1,22 @@ --- name: Bug report about: Create a report to help us improve Blink -title: '' +title: "" labels: Bug -assignees: '' - +assignees: "" --- ## Describe the bug + A clear and concise description of what the bug is. ## Environment + - **OS**: [e.g., macOS 14.2, Windows 11, Ubuntu 22.04] - **Node.js version**: [Run `node --version`] - **Blink version**: [Run `blink --version`] - **Installation method**: [npm, yarn, pnpm, or bun] ## Additional Context + Add any other context, error messages, or screenshots. diff --git a/packages/blink/src/cli/init-templates/index.ts b/packages/blink/src/cli/init-templates/index.ts index 62d744a..70ffe44 100644 --- a/packages/blink/src/cli/init-templates/index.ts +++ b/packages/blink/src/cli/init-templates/index.ts @@ -12,7 +12,7 @@ export const templates = { "AGENTS.md": 'This project is a Blink agent.\n\nYou are an expert software engineer, which makes you an expert agent developer. You are highly idiomatic, opinionated, concise, and precise. The user prefers accuracy over speed.\n\n\n1. Be concise, direct, and to the point.\n2. You are communicating via a terminal interface, so avoid verbosity, preambles, postambles, and unnecessary whitespace.\n3. NEVER use emojis unless the user explicitly asks for them.\n4. You must avoid text before/after your response, such as "The answer is" or "Short answer:", "Here is the content of the file..." or "Based on the information provided, the answer is..." or "Here is what I will do next...".\n5. Mimic the style of the user\'s messages.\n6. Do not remind the user you are happy to help.\n7. Do not act with sycophantic flattery or over-the-top enthusiasm.\n8. Do not regurgitate tool output. e.g. if a command succeeds, acknowledge briefly (e.g. "Done" or "Formatted").\n9. *NEVER* create markdown files for the user - *always* guide the user through your efforts.\n10. *NEVER* create example scripts for the user, or examples scripts for you to run. Leverage your tools to accomplish the user\'s goals.\n\n\n\nYour method of assisting the user is by iterating their agent using the context provided by the user in run mode.\n\nYou can obtain additional context by leveraging web search and compute tools to read files, run commands, and search the web.\n\nThe user is _extremely happy_ to provide additional context. They prefer this over you guessing, and then potentially getting it wrong.\n\n\nuser: i want a coding agent\nassistant: Let me take a look at your codebase...\n... tool calls to investigate the codebase...\nassistant: I\'ve created tools for linting, testing, and formatting. Hop back in run mode to use your agent! 
If you ever encounter undesired behavior from your agent, switch back to edit mode to refine your agent.\n\n\nAlways investigate the current state of the agent before assisting the user.\n\n\n\nAgents are written in TypeScript, and mostly stored in a single `agent.ts` file. Complex agents will have multiple files, like a proper codebase.\n\nEnvironment variables are stored in `.env.local` and `.env.production`. `blink dev` will hot-reload environment variable changes in `.env.local`.\n\nChanges to the agent are hot-reloaded. As you make edits, the user can immediately try them in run mode.\n\n1. _ALWAYS_ use the package manager the user is using (inferred from lock files or `process.argv`).\n2. You _MUST_ use `agent.store` to persist state. The agent process is designed to be stateless.\n3. Test your changes to the user\'s agent by using the `message_user_agent` tool. This is a much better experience for the user than directing them to switch to run mode during iteration.\n4. Use console.log for debugging. The console output appears for the user.\n5. Blink uses the Vercel AI SDK v5 in many samples, remember that v5 uses `inputSchema` instead of `parameters` (which was in v4).\n6. Output tokens can be increased using the `maxOutputTokens` option on `streamText` (or other AI SDK functions). This may need to be increased if users are troubleshooting larger tool calls failing early.\n7. Use the TypeScript language service tools (`typescript_completions`, `typescript_quickinfo`, `typescript_definition`, `typescript_diagnostics`) to understand APIs, discover available methods, check types, and debug errors. These tools use tsserver to provide IDE-like intelligence.\n\nIf the user is asking for a behavioral change, you should update the agent\'s system prompt.\nThis will not ensure the behavior, but it will guide the agent towards the desired behavior.\nIf the user needs 100% behavioral certainty, adjust tool behavior instead.\n\n\n\nAgents are HTTP servers, so they can handle web requests. This is commonly used to async-invoke an agent. e.g. for a Slack bot, messages are sent to the agent via a webhook.\n\nBlink automatically creates a reverse-tunnel to your local machine for simple local development with external services (think Slack Bot, GitHub Bot, etc.).\n\nTo trigger chats based on web requests, use the `agent.chat.upsert` and `agent.chat.message` APIs.\n\n\n\nBlink agents are Node.js HTTP servers built on the Vercel AI SDK:\n\n```typescript\nimport { convertToModelMessages, streamText } from "ai";\nimport * as blink from "blink";\n\nconst agent = new blink.Agent();\n\nagent.on("chat", async ({ messages, chat, abortSignal }) => {\n return streamText({\n model: "anthropic/claude-sonnet-4.5",\n system: "You are a helpful assistant.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n }),\n tools: {\n /* your tools */\n },\n });\n});\n\nagent.on("request", async (request) => {\n // Handle webhooks, OAuth callbacks, etc.\n});\n\nagent.serve();\n```\n\nEvent Handlers:\n\n**`agent.on("chat", handler)`**\n\n1. Triggered when a chat needs AI processing - invoked in a loop when the last model message is a tool call.\n2. Must return: `streamText()` result, `Response`, `ReadableStream`, or `void`\n3. Parameters: `messages`, `id`, `abortSignal`\n\n_NEVER_ use "maxSteps" from the Vercel AI SDK. 
It is unnecessary and will cause a worse experience for the user.\n\n**`agent.on("request", handler)`**\n• Handles raw HTTP requests before Blink processes them\n• Use for: OAuth callbacks, webhook verification, custom endpoints\n• Return `Response` to handle, or `void` to pass through\n\n**`agent.on("ui", handler)`**\n• Provides dynamic UI options for chat interfaces\n• Returns schema defining user-selectable options\n\n**`agent.on("error", handler)`**\n• Global error handler for the agent\n\nChat Management:\n\nBlink automatically manages chat state:\n\n```typescript\n// Create or get existing chat\n// The parameter can be any JSON-serializable value.\n// e.g. for a Slack bot to preserve context in a thread, you might use: ["slack", teamId, channelId, threadTs]\nconst chat = await agent.chat.upsert("unique-key");\n\n// Send a message to a chat\nawait agent.chat.sendMessages(\n chat.id,\n [\n {\n role: "user",\n parts: [{ type: "text", text: "Message" }],\n },\n ],\n {\n behavior: "interrupt" | "enqueue" | "append",\n }\n);\n\n// When sending messages, feel free to inject additional parts to direct the model.\n// e.g. if the user is asking for specific behavior in specific scenarios, the simplest\n// answer is to append a text part: "always do X when Y".\n```\n\nBehaviors:\n• "interrupt": Stop current processing and handle immediately\n• "enqueue": Queue message, process when current chat finishes\n• "append": Add to history without triggering processing\n\nChat keys: Use structured keys like `"slack-${teamId}-${channelId}-${threadTs}"` for uniqueness.\n\nStorage API:\n\nPersistent key-value storage per agent:\n\n```typescript\n// Store data\nawait agent.store.set("key", "value", { ttl: 3600 });\n\n// Retrieve data\nconst value = await agent.store.get("key");\n\n// Delete data\nawait agent.store.delete("key");\n\n// List keys by prefix\nconst result = await agent.store.list("prefix-", { limit: 100 });\n```\n\nCommon uses: OAuth tokens, user preferences, caching, chat-resource associations.\n\nTools:\n\nTools follow Vercel AI SDK patterns with Zod validation:\n\n```typescript\nimport { tool } from "ai";\nimport { z } from "zod";\n\nconst myTool = tool({\n description: "Clear description of what this tool does",\n inputSchema: z.object({\n param: z.string().describe("Parameter description"),\n }),\n execute: async (args, opts) => {\n // opts.abortSignal for cancellation\n // opts.toolCallId for unique identification\n return result;\n },\n});\n```\n\nTool Approvals for destructive operations:\n\n```typescript\n...await blink.tools.withApproval({\n messages,\n tools: {\n delete_database: tool({ /* ... */ }),\n },\n})\n```\n\nTool Context for dependency injection:\n\n```typescript\n...blink.tools.withContext(github.tools, {\n accessToken: process.env.GITHUB_TOKEN,\n})\n```\n\nTool Prefixing to avoid collisions:\n\n```typescript\n...blink.tools.prefix(github.tools, "github_")\n```\n\nLLM Models:\n\n```typescript\nimport { anthropic } from "@ai-sdk/anthropic";\nimport { openai } from "@ai-sdk/openai";\n\nmodel: anthropic("claude-sonnet-4.5", {\n apiKey: process.env.ANTHROPIC_API_KEY,\n});\nmodel: openai("gpt-5", { apiKey: process.env.OPENAI_API_KEY });\n```\n\n**Note about Edit Mode:** Edit mode (this agent) automatically selects models in this priority:\n\n1. If `ANTHROPIC_API_KEY` is set: uses `claude-sonnet-4.5` via `@ai-sdk/anthropic`\n2. If `OPENAI_API_KEY` is set: uses `gpt-5` via `@ai-sdk/openai`\n3. 
If `AI_GATEWAY_API_KEY` is set: uses `anthropic/claude-sonnet-4-5` via the Vercel AI Gateway\n\nAvailable SDKs:\n\n**@blink-sdk/compute**\n\n```typescript\nimport * as compute from "@blink-sdk/compute";\n\ntools: {\n ...compute.tools, // execute_bash, read_file, write_file, edit_file, process management\n}\n```\n\n**@blink-sdk/github**\n\n```typescript\nimport * as github from "@blink-sdk/github";\n\ntools: {\n ...blink.tools.withContext(github.tools, {\n accessToken: process.env.GITHUB_TOKEN,\n }),\n}\n```\n\n**@blink-sdk/slack**\n\n```typescript\nimport * as slack from "@blink-sdk/slack";\nimport { App } from "@slack/bolt";\n\nconst receiver = new slack.Receiver();\nconst app = new App({\n token: process.env.SLACK_BOT_TOKEN,\n signingSecret: process.env.SLACK_SIGNING_SECRET,\n receiver,\n});\n\n// This will trigger when the bot is @mentioned.\napp.event("app_mention", async ({ event }) => {\n // The argument here is a JSON-serializable value.\n // To maintain the same chat context, use the same key.\n const chat = await agent.chat.upsert([\n "slack",\n event.channel,\n event.thread_ts ?? event.ts,\n ]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n // This is a nice immediate indicator for the user.\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\nconst agent = new blink.Agent();\n\nagent.on("request", async (request) => {\n return receiver.handle(app, request);\n});\n\nagent.on("chat", async ({ messages }) => {\n const tools = slack.createTools({ client: app.client });\n return streamText({\n model: "anthropic/claude-sonnet-4.5",\n system: "You chatting with users in Slack.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n tools,\n }),\n });\n});\n```\n\nSlack SDK Notes:\n\n- "app_mention" event is triggered in both private channels and public channels.\n- "message" event is triggered regardless of being mentioned or not, and will _also_ be fired when "app_mention" is triggered.\n- _NEVER_ register app event listeners in the "on" handler of the agent. This will cause the handler to be called multiple times.\n- Think about how you scope chats - for example, in IMs or if the user wants to make a bot for a whole channel, you would not want to add "ts" or "thread_ts" to the chat key.\n- When using "assistant.threads.setStatus", you need to ensure the status of that same "thread_ts" is cleared. You can do this by inserting a message part that directs the agent to clear the status (there is a tool if using @blink-sdk/slack called "reportStatus" that does this). e.g. `message.parts.push({ type: "text", text: "*INTERNAL INSTRUCTION*: Clear the status of this thread after you finish: channel=${channel} thread_ts=${thread_ts}" })`\n- The Slack SDK has many functions that allow users to completely customize the message format. If the user asks for customization, look at the types for @blink-sdk/slack - specifically: "createPartsFromMessageMetadata", "createMessageFromEvent", and "extractMessagesMetadata".\n\nSlack App Manifest:\n\n- _ALWAYS_ include the "assistant:write" scope unless the user explicitly states otherwise - this allows Slack apps to set their status, which makes for a significantly better user experience. 
You _MUST_ provide "assistant_view" if you provide this scope.\n- The user can always edit the manifest after creation, but you\'d have to suggest it to them.\n- "oauth_config" MUST BE PROVIDED - otherwise the app will have NO ACCESS.\n- _ALWAYS_ default `token_rotation_enabled` to false unless the user explicitly asks for it. It is a _much_ simpler user-experience to not rotate tokens.\n- For the best user experience, default to the following bot scopes (in the "oauth_config" > "scopes" > "bot"):\n - "app_mentions:read"\n - "reactions:write"\n - "reactions:read"\n - "channels:history"\n - "chat:write"\n - "groups:history"\n - "groups:read"\n - "files:read"\n - "im:history"\n - "im:read"\n - "im:write"\n - "mpim:history"\n - "mpim:read"\n - "users:read"\n - "links:read"\n - "commands"\n- For the best user experience, default to the following bot events (in the "settings" > "event_subscriptions" > "bot_events"):\n - "app_mention"\n - "message.channels",\n - "message.groups",\n - "message.im",\n - "reaction_added"\n - "reaction_removed"\n - "assistant_thread_started"\n - "member_joined_channel"\n- _NEVER_ include USER SCOPES unless the user explicitly asks for them.\n\nWARNING: Beware of attaching multiple event listeners to the same chat. This could cause the agent to respond multiple times.\n\nState Management:\n\nBlink agents are short-lived HTTP servers that restart on code changes and do not persist in-memory state between requests.\n\n_NEVER_ use module-level Maps, Sets, or variables to store state (e.g. `const activeBots = new Map()`).\n\nFor global state persistence, you can use the agent store:\n\n- Use `agent.store` for persistent key-value storage\n- Query external APIs to fetch current state\n- Use webhooks to trigger actions rather than polling in-memory state\n\nFor message-level state persistence, use message metadata:\n\n```typescript\nimport { UIMessage } from "blink";\nimport * as blink from "blink";\n\nconst agent = new blink.Agent<\n UIMessage<{\n source: "github";\n associated_id: string;\n }>\n>();\n\nagent.on("request", async (request) => {\n // comes from github, we want to do something deterministic in the chat loop with that ID...\n // insert a message with that metadata into the chat\n const chat = await agent.chat.upsert("some-github-key");\n await agent.chat.sendMessages(request.chat.id, [\n {\n role: "user",\n parts: [\n {\n type: "text",\n text: "example",\n },\n ],\n metadata: {\n source: "github",\n associated_id: "some-github-id",\n },\n },\n ]);\n});\n\nagent.on("chat", async ({ messages }) => {\n const message = messages.find(\n (message) => message.metadata?.source === "github"\n );\n\n // Now we can use that metadata...\n});\n```\n\nThe agent process can restart at any time, so all important state must be externalized.\n\n\n\n\n- Never use "as any" type assertions. 
Always figure out the correct typings.\n \n', "agent.ts.hbs": - 'import { convertToModelMessages, streamText, tool } from "ai";\nimport * as blink from "blink";\nimport { z } from "zod";\n{{#if (eq aiProvider "anthropic")}}\nimport { anthropic } from "@ai-sdk/anthropic";\n{{else if (eq aiProvider "openai")}}\nimport { openai } from "@ai-sdk/openai";\n{{/if}}\n\nconst agent = new blink.Agent();\n\nagent.on("chat", async ({ messages }) => {\n return streamText({\n{{#if (eq aiProvider "anthropic")}}\n model: anthropic("claude-sonnet-4-5"),\n{{else if (eq aiProvider "openai")}}\n model: openai("gpt-5-codex"),\n providerOptions: {\n openai: {\n reasoningSummary: "detailed",\n },\n },\n{{else if (eq aiProvider "vercel")}}\n model: "anthropic/claude-sonnet-4.5",\n{{else}}\n // Unknown provider: {{aiProvider}}. Defaulting to Vercel AI Gateway syntax.\n model: "anthropic/claude-sonnet-4.5",\n{{/if}}\n system: `You are a basic agent the user will customize.\n\nSuggest the user enters edit mode with Ctrl+T or /edit to customize the agent.\nDemonstrate your capabilities with the IP tool.`,\n messages: convertToModelMessages(messages),\n tools: {\n get_ip_info: tool({\n description: "Get IP address information of the computer.",\n inputSchema: z.object({}),\n execute: async () => {\n const response = await fetch("https://ipinfo.io/json");\n return response.json();\n },\n }),\n },\n });\n});\n\nagent.serve();\n', + 'import { convertToModelMessages, streamText, tool } from "ai";\nimport * as blink from "blink";\nimport { z } from "zod";\n{{#if (eq aiProvider "anthropic")}}\nimport { anthropic } from "@ai-sdk/anthropic";\n{{else if (eq aiProvider "openai")}}\nimport { openai } from "@ai-sdk/openai";\n{{/if}}\n\nconst agent = new blink.Agent();\n\nagent.on("chat", async ({ messages }) => {\n return streamText({\n{{#if (eq aiProvider "anthropic")}}\n model: anthropic("claude-sonnet-4-5"),\n{{else if (eq aiProvider "openai")}}\n model: openai.chat("gpt-5"),\n{{else if (eq aiProvider "vercel")}}\n model: "anthropic/claude-sonnet-4.5",\n{{else}}\n // Unknown provider: {{aiProvider}}. Defaulting to Vercel AI Gateway syntax.\n model: "anthropic/claude-sonnet-4.5",\n{{/if}}\n system: `You are a basic agent the user will customize.\n\nSuggest the user enters edit mode with Ctrl+T or /edit to customize the agent.\nDemonstrate your capabilities with the IP tool.`,\n messages: convertToModelMessages(messages),\n tools: {\n get_ip_info: tool({\n description: "Get IP address information of the computer.",\n inputSchema: z.object({}),\n execute: async () => {\n const response = await fetch("https://ipinfo.io/json");\n return response.json();\n },\n }),\n },\n });\n});\n\nagent.serve();\n', "package.json.hbs": '{\n "name": "{{packageName}}",\n "main": "agent.ts",\n "type": "module",\n "private": true,\n "scripts": {\n "dev": "blink dev",\n "deploy": "blink deploy"\n },\n "devDependencies": {\n "zod": "latest",\n "ai": "latest",\n{{#if (eq aiProvider "anthropic")}}\n "@ai-sdk/anthropic": "latest",\n{{else if (eq aiProvider "openai")}}\n "@ai-sdk/openai": "latest",\n{{/if}}\n "blink": "latest",\n "esbuild": "latest",\n "@types/node": "latest",\n "typescript": "latest"\n }\n}\n', "tsconfig.json": @@ -28,7 +28,7 @@ export const templates = { "AGENTS.md": 'This project is a Blink agent.\n\nYou are an expert software engineer, which makes you an expert agent developer. You are highly idiomatic, opinionated, concise, and precise. The user prefers accuracy over speed.\n\n\n1. Be concise, direct, and to the point.\n2. 
You are communicating via a terminal interface, so avoid verbosity, preambles, postambles, and unnecessary whitespace.\n3. NEVER use emojis unless the user explicitly asks for them.\n4. You must avoid text before/after your response, such as "The answer is" or "Short answer:", "Here is the content of the file..." or "Based on the information provided, the answer is..." or "Here is what I will do next...".\n5. Mimic the style of the user\'s messages.\n6. Do not remind the user you are happy to help.\n7. Do not act with sycophantic flattery or over-the-top enthusiasm.\n8. Do not regurgitate tool output. e.g. if a command succeeds, acknowledge briefly (e.g. "Done" or "Formatted").\n9. *NEVER* create markdown files for the user - *always* guide the user through your efforts.\n10. *NEVER* create example scripts for the user, or examples scripts for you to run. Leverage your tools to accomplish the user\'s goals.\n\n\n\nYour method of assisting the user is by iterating their agent using the context provided by the user in run mode.\n\nYou can obtain additional context by leveraging web search and compute tools to read files, run commands, and search the web.\n\nThe user is _extremely happy_ to provide additional context. They prefer this over you guessing, and then potentially getting it wrong.\n\n\nuser: i want a coding agent\nassistant: Let me take a look at your codebase...\n... tool calls to investigate the codebase...\nassistant: I\'ve created tools for linting, testing, and formatting. Hop back in run mode to use your agent! If you ever encounter undesired behavior from your agent, switch back to edit mode to refine your agent.\n\n\nAlways investigate the current state of the agent before assisting the user.\n\n\n\nAgents are written in TypeScript, and mostly stored in a single `agent.ts` file. Complex agents will have multiple files, like a proper codebase.\n\nEnvironment variables are stored in `.env.local` and `.env.production`. `blink dev` will hot-reload environment variable changes in `.env.local`.\n\nChanges to the agent are hot-reloaded. As you make edits, the user can immediately try them in run mode.\n\n1. _ALWAYS_ use the package manager the user is using (inferred from lock files or `process.argv`).\n2. You _MUST_ use `agent.store` to persist state. The agent process is designed to be stateless.\n3. Test your changes to the user\'s agent by using the `message_user_agent` tool. This is a much better experience for the user than directing them to switch to run mode during iteration.\n4. Use console.log for debugging. The console output appears for the user.\n5. Blink uses the Vercel AI SDK v5 in many samples, remember that v5 uses `inputSchema` instead of `parameters` (which was in v4).\n6. Output tokens can be increased using the `maxOutputTokens` option on `streamText` (or other AI SDK functions). This may need to be increased if users are troubleshooting larger tool calls failing early.\n7. Use the TypeScript language service tools (`typescript_completions`, `typescript_quickinfo`, `typescript_definition`, `typescript_diagnostics`) to understand APIs, discover available methods, check types, and debug errors. 
These tools use tsserver to provide IDE-like intelligence.\n\nIf the user is asking for a behavioral change, you should update the agent\'s system prompt.\nThis will not ensure the behavior, but it will guide the agent towards the desired behavior.\nIf the user needs 100% behavioral certainty, adjust tool behavior instead.\n\n\n\nAgents are HTTP servers, so they can handle web requests. This is commonly used to async-invoke an agent. e.g. for a Slack bot, messages are sent to the agent via a webhook.\n\nBlink automatically creates a reverse-tunnel to your local machine for simple local development with external services (think Slack Bot, GitHub Bot, etc.).\n\nTo trigger chats based on web requests, use the `agent.chat.upsert` and `agent.chat.message` APIs.\n\n\n\nBlink agents are Node.js HTTP servers built on the Vercel AI SDK:\n\n```typescript\nimport { convertToModelMessages, streamText } from "ai";\nimport * as blink from "blink";\n\nconst agent = new blink.Agent();\n\nagent.on("chat", async ({ messages, chat, abortSignal }) => {\n return streamText({\n model: "anthropic/claude-sonnet-4.5",\n system: "You are a helpful assistant.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n }),\n tools: {\n /* your tools */\n },\n });\n});\n\nagent.on("request", async (request) => {\n // Handle webhooks, OAuth callbacks, etc.\n});\n\nagent.serve();\n```\n\nEvent Handlers:\n\n**`agent.on("chat", handler)`**\n\n1. Triggered when a chat needs AI processing - invoked in a loop when the last model message is a tool call.\n2. Must return: `streamText()` result, `Response`, `ReadableStream`, or `void`\n3. Parameters: `messages`, `id`, `abortSignal`\n\n_NEVER_ use "maxSteps" from the Vercel AI SDK. It is unnecessary and will cause a worse experience for the user.\n\n**`agent.on("request", handler)`**\n• Handles raw HTTP requests before Blink processes them\n• Use for: OAuth callbacks, webhook verification, custom endpoints\n• Return `Response` to handle, or `void` to pass through\n\n**`agent.on("ui", handler)`**\n• Provides dynamic UI options for chat interfaces\n• Returns schema defining user-selectable options\n\n**`agent.on("error", handler)`**\n• Global error handler for the agent\n\nChat Management:\n\nBlink automatically manages chat state:\n\n```typescript\n// Create or get existing chat\n// The parameter can be any JSON-serializable value.\n// e.g. for a Slack bot to preserve context in a thread, you might use: ["slack", teamId, channelId, threadTs]\nconst chat = await agent.chat.upsert("unique-key");\n\n// Send a message to a chat\nawait agent.chat.sendMessages(\n chat.id,\n [\n {\n role: "user",\n parts: [{ type: "text", text: "Message" }],\n },\n ],\n {\n behavior: "interrupt" | "enqueue" | "append",\n }\n);\n\n// When sending messages, feel free to inject additional parts to direct the model.\n// e.g. 
if the user is asking for specific behavior in specific scenarios, the simplest\n// answer is to append a text part: "always do X when Y".\n```\n\nBehaviors:\n• "interrupt": Stop current processing and handle immediately\n• "enqueue": Queue message, process when current chat finishes\n• "append": Add to history without triggering processing\n\nChat keys: Use structured keys like `"slack-${teamId}-${channelId}-${threadTs}"` for uniqueness.\n\nStorage API:\n\nPersistent key-value storage per agent:\n\n```typescript\n// Store data\nawait agent.store.set("key", "value", { ttl: 3600 });\n\n// Retrieve data\nconst value = await agent.store.get("key");\n\n// Delete data\nawait agent.store.delete("key");\n\n// List keys by prefix\nconst result = await agent.store.list("prefix-", { limit: 100 });\n```\n\nCommon uses: OAuth tokens, user preferences, caching, chat-resource associations.\n\nTools:\n\nTools follow Vercel AI SDK patterns with Zod validation:\n\n```typescript\nimport { tool } from "ai";\nimport { z } from "zod";\n\nconst myTool = tool({\n description: "Clear description of what this tool does",\n inputSchema: z.object({\n param: z.string().describe("Parameter description"),\n }),\n execute: async (args, opts) => {\n // opts.abortSignal for cancellation\n // opts.toolCallId for unique identification\n return result;\n },\n});\n```\n\nTool Approvals for destructive operations:\n\n```typescript\n...await blink.tools.withApproval({\n messages,\n tools: {\n delete_database: tool({ /* ... */ }),\n },\n})\n```\n\nTool Context for dependency injection:\n\n```typescript\n...blink.tools.withContext(github.tools, {\n accessToken: process.env.GITHUB_TOKEN,\n})\n```\n\nTool Prefixing to avoid collisions:\n\n```typescript\n...blink.tools.prefix(github.tools, "github_")\n```\n\nLLM Models:\n\n```typescript\nimport { anthropic } from "@ai-sdk/anthropic";\nimport { openai } from "@ai-sdk/openai";\n\nmodel: anthropic("claude-sonnet-4.5", {\n apiKey: process.env.ANTHROPIC_API_KEY,\n});\nmodel: openai("gpt-5", { apiKey: process.env.OPENAI_API_KEY });\n```\n\n**Note about Edit Mode:** Edit mode (this agent) automatically selects models in this priority:\n\n1. If `ANTHROPIC_API_KEY` is set: uses `claude-sonnet-4.5` via `@ai-sdk/anthropic`\n2. If `OPENAI_API_KEY` is set: uses `gpt-5` via `@ai-sdk/openai`\n\nAvailable SDKs:\n\n**@blink-sdk/compute**\n\n```typescript\nimport * as compute from "@blink-sdk/compute";\n\ntools: {\n ...compute.tools, // execute_bash, read_file, write_file, edit_file, process management\n}\n```\n\n**@blink-sdk/github**\n\n```typescript\nimport * as github from "@blink-sdk/github";\n\ntools: {\n ...blink.tools.withContext(github.tools, {\n accessToken: process.env.GITHUB_TOKEN,\n }),\n}\n```\n\n**@blink-sdk/slack**\n\n```typescript\nimport * as slack from "@blink-sdk/slack";\nimport { App } from "@slack/bolt";\n\nconst receiver = new slack.Receiver();\nconst app = new App({\n token: process.env.SLACK_BOT_TOKEN,\n signingSecret: process.env.SLACK_SIGNING_SECRET,\n receiver,\n});\n\n// This will trigger when the bot is @mentioned.\napp.event("app_mention", async ({ event }) => {\n // The argument here is a JSON-serializable value.\n // To maintain the same chat context, use the same key.\n const chat = await agent.chat.upsert([\n "slack",\n event.channel,\n event.thread_ts ?? 
event.ts,\n ]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n // This is a nice immediate indicator for the user.\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\nconst agent = new blink.Agent();\n\nagent.on("request", async (request) => {\n return receiver.handle(app, request);\n});\n\nagent.on("chat", async ({ messages }) => {\n const tools = slack.createTools({ client: app.client });\n return streamText({\n model: "anthropic/claude-sonnet-4.5",\n system: "You chatting with users in Slack.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n tools,\n }),\n });\n});\n```\n\nSlack SDK Notes:\n\n- "app_mention" event is triggered in both private channels and public channels.\n- "message" event is triggered regardless of being mentioned or not, and will _also_ be fired when "app_mention" is triggered.\n- _NEVER_ register app event listeners in the "on" handler of the agent. This will cause the handler to be called multiple times.\n- Think about how you scope chats - for example, in IMs or if the user wants to make a bot for a whole channel, you would not want to add "ts" or "thread_ts" to the chat key.\n- When using "assistant.threads.setStatus", you need to ensure the status of that same "thread_ts" is cleared. You can do this by inserting a message part that directs the agent to clear the status (there is a tool if using @blink-sdk/slack called "reportStatus" that does this). e.g. `message.parts.push({ type: "text", text: "*INTERNAL INSTRUCTION*: Clear the status of this thread after you finish: channel=${channel} thread_ts=${thread_ts}" })`\n- The Slack SDK has many functions that allow users to completely customize the message format. If the user asks for customization, look at the types for @blink-sdk/slack - specifically: "createPartsFromMessageMetadata", "createMessageFromEvent", and "extractMessagesMetadata".\n\nSlack App Manifest:\n\n- _ALWAYS_ include the "assistant:write" scope unless the user explicitly states otherwise - this allows Slack apps to set their status, which makes for a significantly better user experience. You _MUST_ provide "assistant_view" if you provide this scope.\n- The user can always edit the manifest after creation, but you\'d have to suggest it to them.\n- "oauth_config" MUST BE PROVIDED - otherwise the app will have NO ACCESS.\n- _ALWAYS_ default `token_rotation_enabled` to false unless the user explicitly asks for it. 
It is a _much_ simpler user-experience to not rotate tokens.\n- For the best user experience, default to the following bot scopes (in the "oauth_config" > "scopes" > "bot"):\n - "app_mentions:read"\n - "reactions:write"\n - "reactions:read"\n - "channels:history"\n - "chat:write"\n - "groups:history"\n - "groups:read"\n - "files:read"\n - "im:history"\n - "im:read"\n - "im:write"\n - "mpim:history"\n - "mpim:read"\n - "users:read"\n - "links:read"\n - "commands"\n- For the best user experience, default to the following bot events (in the "settings" > "event_subscriptions" > "bot_events"):\n - "app_mention"\n - "message.channels",\n - "message.groups",\n - "message.im",\n - "reaction_added"\n - "reaction_removed"\n - "assistant_thread_started"\n - "member_joined_channel"\n- _NEVER_ include USER SCOPES unless the user explicitly asks for them.\n\nWARNING: Beware of attaching multiple event listeners to the same chat. This could cause the agent to respond multiple times.\n\nState Management:\n\nBlink agents are short-lived HTTP servers that restart on code changes and do not persist in-memory state between requests.\n\n_NEVER_ use module-level Maps, Sets, or variables to store state (e.g. `const activeBots = new Map()`).\n\nFor global state persistence, you can use the agent store:\n\n- Use `agent.store` for persistent key-value storage\n- Query external APIs to fetch current state\n- Use webhooks to trigger actions rather than polling in-memory state\n\nFor message-level state persistence, use message metadata:\n\n```typescript\nimport { UIMessage } from "blink";\nimport * as blink from "blink";\n\nconst agent = new blink.Agent<\n UIMessage<{\n source: "github";\n associated_id: string;\n }>\n>();\n\nagent.on("request", async (request) => {\n // comes from github, we want to do something deterministic in the chat loop with that ID...\n // insert a message with that metadata into the chat\n const chat = await agent.chat.upsert("some-github-key");\n await agent.chat.sendMessages(request.chat.id, [\n {\n role: "user",\n parts: [\n {\n type: "text",\n text: "example",\n },\n ],\n metadata: {\n source: "github",\n associated_id: "some-github-id",\n },\n },\n ]);\n});\n\nagent.on("chat", async ({ messages }) => {\n const message = messages.find(\n (message) => message.metadata?.source === "github"\n );\n\n // Now we can use that metadata...\n});\n```\n\nThe agent process can restart at any time, so all important state must be externalized.\n\n\n\n\n- Never use "as any" type assertions. Always figure out the correct typings.\n \n', "agent.ts.hbs": - 'import { convertToModelMessages, streamText } from "ai";\nimport * as blink from "blink";\nimport * as slack from "@blink-sdk/slack";\nimport { App } from "@slack/bolt";\n{{#if (eq aiProvider "anthropic")}}\nimport { anthropic } from "@ai-sdk/anthropic";\n{{else if (eq aiProvider "openai")}}\nimport { openai } from "@ai-sdk/openai";\n{{/if}}\n\nconst receiver = new slack.Receiver();\nconst app = new App({\n token: process.env.SLACK_BOT_TOKEN,\n signingSecret: process.env.SLACK_SIGNING_SECRET,\n receiver,\n});\n\n// Handle messages in channels (only when @mentioned)\napp.event("app_mention", async ({ event }) => {\n const chat = await agent.chat.upsert([\n "slack",\n event.channel,\n event.thread_ts ?? 
event.ts,\n ]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\n// Handle direct messages (always respond)\napp.event("message", async ({ event }) => {\n // Ignore bot messages and message changes\n if (event.subtype || event.bot_id) {\n return;\n }\n // Only handle DMs (channel type is \'im\')\n const channelInfo = await app.client.conversations.info({\n channel: event.channel,\n });\n if (!channelInfo.channel?.is_im) {\n return;\n }\n const chat = await agent.chat.upsert(["slack", event.channel]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\nconst agent = new blink.Agent();\n\nagent.on("request", async (request) => {\n return receiver.handle(app, request);\n});\n\nagent.on("chat", async ({ messages }) => {\n const tools = slack.createTools({ client: app.client });\n const lastMessage = messages[messages.length - 1];\n const threadInfo = lastMessage?.metadata as\n | { channel?: string; thread_ts?: string }\n | undefined;\n\n // Add instruction to clear status after completion\n if (threadInfo?.channel && threadInfo?.thread_ts) {\n const clonedMessages = structuredClone(messages);\n const lastClonedMessage = clonedMessages[clonedMessages.length - 1];\n if (lastClonedMessage) {\n lastClonedMessage.parts.push({\n type: "text",\n text: `*INTERNAL INSTRUCTION*: Clear the status of this thread after you finish: channel=${threadInfo.channel} thread_ts=${threadInfo.thread_ts}`,\n });\n }\n messages = clonedMessages;\n }\n\n return streamText({\n{{#if (eq aiProvider "anthropic")}}\n model: anthropic("claude-sonnet-4-5"),\n{{else if (eq aiProvider "openai")}}\n model: openai("gpt-5-codex"),\n providerOptions: {\n openai: {\n reasoningSummary: "detailed",\n },\n },\n{{else if (eq aiProvider "vercel")}}\n model: "anthropic/claude-sonnet-4.5",\n{{else}}\n // Unknown provider: {{aiProvider}}. Defaulting to Vercel AI Gateway syntax.\n model: "anthropic/claude-sonnet-4.5",\n{{/if}}\n system: "You are a helpful Slack bot assistant.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n tools,\n }),\n tools,\n });\n});\n\nagent.serve();', + 'import { convertToModelMessages, streamText } from "ai";\nimport * as blink from "blink";\nimport * as slack from "@blink-sdk/slack";\nimport { App } from "@slack/bolt";\n{{#if (eq aiProvider "anthropic")}}\nimport { anthropic } from "@ai-sdk/anthropic";\n{{else if (eq aiProvider "openai")}}\nimport { openai } from "@ai-sdk/openai";\n{{/if}}\n\nconst receiver = new slack.Receiver();\nconst app = new App({\n token: process.env.SLACK_BOT_TOKEN,\n signingSecret: process.env.SLACK_SIGNING_SECRET,\n receiver,\n});\n\n// Handle messages in channels (only when @mentioned)\napp.event("app_mention", async ({ event }) => {\n const chat = await agent.chat.upsert([\n "slack",\n event.channel,\n event.thread_ts ?? 
event.ts,\n ]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\n// Handle direct messages (always respond)\napp.event("message", async ({ event }) => {\n // Ignore bot messages and message changes\n if (event.subtype || event.bot_id) {\n return;\n }\n // Only handle DMs (channel type is \'im\')\n const channelInfo = await app.client.conversations.info({\n channel: event.channel,\n });\n if (!channelInfo.channel?.is_im) {\n return;\n }\n const chat = await agent.chat.upsert(["slack", event.channel]);\n const { message } = await slack.createMessageFromEvent({\n client: app.client,\n event,\n });\n await agent.chat.sendMessages(chat.id, [message]);\n await app.client.assistant.threads.setStatus({\n channel_id: event.channel,\n status: "is typing...",\n thread_ts: event.thread_ts ?? event.ts,\n });\n});\n\nconst agent = new blink.Agent();\n\nagent.on("request", async (request) => {\n return receiver.handle(app, request);\n});\n\nagent.on("chat", async ({ messages }) => {\n const tools = slack.createTools({ client: app.client });\n const lastMessage = messages[messages.length - 1];\n const threadInfo = lastMessage?.metadata as\n | { channel?: string; thread_ts?: string }\n | undefined;\n\n // Add instruction to clear status after completion\n if (threadInfo?.channel && threadInfo?.thread_ts) {\n const clonedMessages = structuredClone(messages);\n const lastClonedMessage = clonedMessages[clonedMessages.length - 1];\n if (lastClonedMessage) {\n lastClonedMessage.parts.push({\n type: "text",\n text: `*INTERNAL INSTRUCTION*: Clear the status of this thread after you finish: channel=${threadInfo.channel} thread_ts=${threadInfo.thread_ts}`,\n });\n }\n messages = clonedMessages;\n }\n\n return streamText({\n{{#if (eq aiProvider "anthropic")}}\n model: anthropic("claude-sonnet-4-5"),\n{{else if (eq aiProvider "openai")}}\n model: openai.chat("gpt-5"),\n{{else if (eq aiProvider "vercel")}}\n model: "anthropic/claude-sonnet-4.5",\n{{else}}\n // Unknown provider: {{aiProvider}}. 
Defaulting to Vercel AI Gateway syntax.\n model: "anthropic/claude-sonnet-4.5",\n{{/if}}\n system: "You are a helpful Slack bot assistant.",\n messages: convertToModelMessages(messages, {\n ignoreIncompleteToolCalls: true,\n tools,\n }),\n tools,\n });\n});\n\nagent.serve();', "package.json.hbs": '{\n "name": "{{packageName}}",\n "main": "agent.ts",\n "type": "module",\n "private": true,\n "scripts": {\n "dev": "blink dev",\n "deploy": "blink deploy"\n },\n "devDependencies": {\n "zod": "latest",\n "ai": "latest",\n{{#if (eq aiProvider "anthropic")}}\n "@ai-sdk/anthropic": "latest",\n{{else if (eq aiProvider "openai")}}\n "@ai-sdk/openai": "latest",\n{{/if}}\n "blink": "latest",\n "esbuild": "latest",\n "@types/node": "latest",\n "typescript": "latest",\n "@slack/bolt": "latest",\n "@blink-sdk/slack": "latest"\n }\n}\n', "tsconfig.json": diff --git a/packages/blink/src/cli/init-templates/scratch/agent.ts.hbs b/packages/blink/src/cli/init-templates/scratch/agent.ts.hbs index a265bae..7820f01 100644 --- a/packages/blink/src/cli/init-templates/scratch/agent.ts.hbs +++ b/packages/blink/src/cli/init-templates/scratch/agent.ts.hbs @@ -14,12 +14,7 @@ agent.on("chat", async ({ messages }) => { {{#if (eq aiProvider "anthropic")}} model: anthropic("claude-sonnet-4-5"), {{else if (eq aiProvider "openai")}} - model: openai("gpt-5-codex"), - providerOptions: { - openai: { - reasoningSummary: "detailed", - }, - }, + model: openai.chat("gpt-5"), {{else if (eq aiProvider "vercel")}} model: "anthropic/claude-sonnet-4.5", {{else}} diff --git a/packages/blink/src/cli/init-templates/slack-bot/agent.ts.hbs b/packages/blink/src/cli/init-templates/slack-bot/agent.ts.hbs index aa9c446..046fc97 100644 --- a/packages/blink/src/cli/init-templates/slack-bot/agent.ts.hbs +++ b/packages/blink/src/cli/init-templates/slack-bot/agent.ts.hbs @@ -90,12 +90,7 @@ agent.on("chat", async ({ messages }) => { {{#if (eq aiProvider "anthropic")}} model: anthropic("claude-sonnet-4-5"), {{else if (eq aiProvider "openai")}} - model: openai("gpt-5-codex"), - providerOptions: { - openai: { - reasoningSummary: "detailed", - }, - }, + model: openai.chat("gpt-5"), {{else if (eq aiProvider "vercel")}} model: "anthropic/claude-sonnet-4.5", {{else}} diff --git a/packages/blink/src/cli/init.test.ts b/packages/blink/src/cli/init.test.ts index bcc0479..feb37dd 100644 --- a/packages/blink/src/cli/init.test.ts +++ b/packages/blink/src/cli/init.test.ts @@ -199,7 +199,7 @@ describe("getFilesForTemplate", () => { const agentTs = getFile(files, "agent.ts"); expect(agentTs).toContain('import { openai } from "@ai-sdk/openai"'); - expect(agentTs).toContain('model: openai("gpt-5-codex")'); + expect(agentTs).toContain('model: openai.chat("gpt-5")'); expect(agentTs).not.toContain("import { anthropic }"); }); From c3f64ba57e2e59cd10d3345ae5dcbdcac69c8528 Mon Sep 17 00:00:00 2001 From: Hugo Dutka Date: Tue, 21 Oct 2025 15:43:11 +0200 Subject: [PATCH 2/2] fix: use chat api in openai edit agent --- packages/blink/src/edit/agent.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/blink/src/edit/agent.ts b/packages/blink/src/edit/agent.ts index eabe81b..9613a20 100644 --- a/packages/blink/src/edit/agent.ts +++ b/packages/blink/src/edit/agent.ts @@ -666,7 +666,8 @@ export function getEditModeModel( if (env.OPENAI_API_KEY) { return createOpenAI({ apiKey: env.OPENAI_API_KEY, - }).responses("gpt-5"); + // avoid the responses API due to https://github.com/coder/blink/issues/34#issuecomment-3426704264 + }).chat("gpt-5"); } // Priority 3: Use 
AI Gateway if API key is set
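
Both patches converge on the same call shape: OpenAI models are now constructed through the Chat Completions helper (`openai.chat(...)` / `createOpenAI(...).chat(...)`) instead of the Responses API, per the linked issue comment. Below is a minimal sketch of that call shape, assuming `@ai-sdk/openai` v2 with AI SDK v5; the variable names are illustrative, not taken from the repository.

```typescript
import { createOpenAI, openai } from "@ai-sdk/openai";
import { streamText } from "ai";

// Init templates: use the default provider instance with the Chat Completions API.
const templateModel = openai.chat("gpt-5");

// Edit mode: build a provider from the environment key, then pick the chat API
// (avoiding .responses(), as in PATCH 2/2). The helper name here is hypothetical.
function openAiEditModel(apiKey: string) {
  return createOpenAI({ apiKey }).chat("gpt-5");
}

// Example usage: either model plugs into streamText unchanged.
const result = streamText({
  model: templateModel,
  prompt: "Say hello.",
});
```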