From 861224dbf5b525269c7c21b10e566110472a4ac1 Mon Sep 17 00:00:00 2001
From: tkattkat
Date: Fri, 21 Nov 2025 14:24:38 -0800
Subject: [PATCH 01/29] agent streaming

---
 .../core/lib/v3/handlers/v3AgentHandler.ts | 76 +++++++++++++++++++
 packages/core/lib/v3/v3.ts                 | 33 ++++++++
 2 files changed, 109 insertions(+)

diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts
index 503fc78e1..6258dbaad 100644
--- a/packages/core/lib/v3/handlers/v3AgentHandler.ts
+++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts
@@ -186,6 +186,82 @@ export class V3AgentHandler {
     }
   }
 
+  public async stream(
+    instructionOrOptions: string | AgentExecuteOptions,
+  ) {
+    const options =
+      typeof instructionOrOptions === "string"
+        ? { instruction: instructionOrOptions }
+        : instructionOrOptions;
+
+    const maxSteps = options.maxSteps || 10;
+    // We can track actions locally for logging purposes, but they won't be returned in the stream result directly
+    const collectedReasoning: string[] = [];
+
+    let currentPageUrl = (await this.v3.context.awaitActivePage()).url();
+
+    const systemPrompt = this.buildSystemPrompt(
+      options.instruction,
+      this.systemInstructions,
+    );
+    const tools = this.createTools();
+    const allTools: ToolSet = { ...tools, ...this.mcpTools };
+    const messages: ModelMessage[] = [
+      { role: "user", content: options.instruction },
+    ];
+
+    if (!this.llmClient?.getLanguageModel) {
+      throw new MissingLLMConfigurationError();
+    }
+    const baseModel = this.llmClient.getLanguageModel();
+    const wrappedModel = wrapLanguageModel({
+      model: baseModel,
+      middleware: {
+        transformParams: async ({ params }) => {
+          const { processedPrompt } = processMessages(params);
+          return { ...params, prompt: processedPrompt } as typeof params;
+        },
+      },
+    });
+
+    return this.llmClient.streamText({
+      model: wrappedModel,
+      system: systemPrompt,
+      messages,
+      tools: allTools,
+      stopWhen: stepCountIs(maxSteps),
+      temperature: 1,
+      toolChoice: "auto",
+      onStepFinish: async (event) => {
+        this.logger({
+          category: "agent",
+          message: `Step finished: ${event.finishReason}`,
+          level: 2,
+        });
+
+        if (event.toolCalls && event.toolCalls.length > 0) {
+          for (let i = 0; i < event.toolCalls.length; i++) {
+            const toolCall = event.toolCalls[i];
+            // eslint-disable-next-line @typescript-eslint/no-unused-vars
+            const args = toolCall.input as Record;
+
+            if (event.text && event.text.length > 0) {
+              collectedReasoning.push(event.text);
+              this.logger({
+                category: "agent",
+                message: `reasoning: ${event.text}`,
+                level: 1,
+              });
+            }
+
+            // We don't build the actions array here for return, but we update the page URL
+          }
+          currentPageUrl = (await this.v3.context.awaitActivePage()).url();
+        }
+      },
+    });
+  }
+
   private buildSystemPrompt(
     executionInstruction: string,
     systemInstructions?: string,
diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts
index 3c795d8ac..f11e918e5 100644
--- a/packages/core/lib/v3/v3.ts
+++ b/packages/core/lib/v3/v3.ts
@@ -66,6 +66,7 @@ import {
   MissingEnvironmentVariableError,
   StagehandInitError,
 } from "./types/public";
+import { StreamTextResult, ToolSet } from "ai";
 import { V3Context } from "./understudy/context";
 import { Page } from "./understudy/page";
 import { resolveModel } from "../modelUtils";
@@ -1498,6 +1499,9 @@ export class V3 {
     execute: (
       instructionOrOptions: string | AgentExecuteOptions,
     ) => Promise;
+    stream?: (
+      instructionOrOptions: string | AgentExecuteOptions,
+    ) => Promise>;
   } {
     this.logger({
       category: "agent",
@@ -1735,6
+1739,35 @@ export class V3 { } } }), + stream: async (instructionOrOptions: string | AgentExecuteOptions) => + withInstanceLogContext(this.instanceId, async () => { + if ((options?.integrations || options?.tools) && !this.experimental) { + throw new ExperimentalNotConfiguredError( + "MCP integrations and custom tools", + ); + } + + const tools = options?.integrations + ? await resolveTools(options.integrations, options.tools) + : (options?.tools ?? {}); + + const agentLlmClient = options?.model + ? this.resolveLlmClient(options.model) + : this.llmClient; + + const handler = new V3AgentHandler( + this, + this.logger, + agentLlmClient, + typeof options?.executionModel === "string" + ? options.executionModel + : options?.executionModel?.modelName, + options?.systemPrompt, + tools, + ); + + return handler.stream(instructionOrOptions); + }), }; } } From 1fa6093cf65e41b04f542b20c238ea7104b827e6 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 16:47:37 -0800 Subject: [PATCH 02/29] put behind experimental --- .../core/lib/v3/handlers/v3AgentHandler.ts | 376 ++++++++++-------- packages/core/lib/v3/types/public/agent.ts | 24 +- packages/core/lib/v3/v3.ts | 7 +- 3 files changed, 232 insertions(+), 175 deletions(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index 6258dbaad..48d3bc556 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -5,9 +5,11 @@ import { ModelMessage, ToolSet, wrapLanguageModel, stepCountIs } from "ai"; import { processMessages } from "../agent/utils/messageProcessing"; import { LLMClient } from "../llm/LLMClient"; import { - AgentAction, AgentExecuteOptions, AgentResult, + AgentContext, + AgentState, + AgentStreamResult, } from "../types/public/agent"; import { V3FunctionName } from "../types/public/methods"; import { mapToolResultToActions } from "../agent/utils/actionMapping"; @@ -37,139 +39,138 @@ export class V3AgentHandler { this.mcpTools = mcpTools; } - public async execute( + private async prepareAgent( instructionOrOptions: string | AgentExecuteOptions, - ): Promise { - const startTime = Date.now(); + ): Promise { const options = typeof instructionOrOptions === "string" ? 
{ instruction: instructionOrOptions } : instructionOrOptions; const maxSteps = options.maxSteps || 10; - const actions: AgentAction[] = []; - let finalMessage = ""; - let completed = false; - const collectedReasoning: string[] = []; - - let currentPageUrl = (await this.v3.context.awaitActivePage()).url(); - try { - const systemPrompt = this.buildSystemPrompt( - options.instruction, - this.systemInstructions, - ); - const tools = this.createTools(); - const allTools = { ...tools, ...this.mcpTools }; - const messages: ModelMessage[] = [ - { role: "user", content: options.instruction }, - ]; + const systemPrompt = this.buildSystemPrompt( + options.instruction, + this.systemInstructions, + ); + const tools = this.createTools(); + const allTools: ToolSet = { ...tools, ...this.mcpTools }; + const messages: ModelMessage[] = [ + { role: "user", content: options.instruction }, + ]; - if (!this.llmClient?.getLanguageModel) { - throw new MissingLLMConfigurationError(); - } - const baseModel = this.llmClient.getLanguageModel(); - const wrappedModel = wrapLanguageModel({ - model: baseModel, - middleware: { - transformParams: async ({ params }) => { - const { processedPrompt } = processMessages(params); - return { ...params, prompt: processedPrompt } as typeof params; - }, + if (!this.llmClient?.getLanguageModel) { + throw new MissingLLMConfigurationError(); + } + const baseModel = this.llmClient.getLanguageModel(); + const wrappedModel = wrapLanguageModel({ + model: baseModel, + middleware: { + transformParams: async ({ params }) => { + const { processedPrompt } = processMessages(params); + return { ...params, prompt: processedPrompt } as typeof params; }, - }); + }, + }); - const result = await this.llmClient.generateText({ - model: wrappedModel, - system: systemPrompt, - messages, - tools: allTools, - stopWhen: stepCountIs(maxSteps), - temperature: 1, - toolChoice: "auto", - onStepFinish: async (event) => { - this.logger({ - category: "agent", - message: `Step finished: ${event.finishReason}`, - level: 2, - }); + const initialPageUrl = (await this.v3.context.awaitActivePage()).url(); - if (event.toolCalls && event.toolCalls.length > 0) { - for (let i = 0; i < event.toolCalls.length; i++) { - const toolCall = event.toolCalls[i]; - const args = toolCall.input as Record; - const toolResult = event.toolResults?.[i]; + return { + options, + maxSteps, + systemPrompt, + allTools, + messages, + wrappedModel, + initialPageUrl, + }; + } - if (event.text.length > 0) { - collectedReasoning.push(event.text); - this.logger({ - category: "agent", - message: `reasoning: ${event.text}`, - level: 1, - }); - } + private createStepHandler(state: AgentState) { + return async (event: any) => { + this.logger({ + category: "agent", + message: `Step finished: ${event.finishReason}`, + level: 2, + }); + + if (event.toolCalls && event.toolCalls.length > 0) { + for (let i = 0; i < event.toolCalls.length; i++) { + const toolCall = event.toolCalls[i]; + const args = toolCall.input; + const toolResult = event.toolResults?.[i]; - if (toolCall.toolName === "close") { - completed = true; - if (args?.taskComplete) { - const closeReasoning = args.reasoning; - const allReasoning = collectedReasoning.join(" "); - finalMessage = closeReasoning - ? 
`${allReasoning} ${closeReasoning}`.trim() - : allReasoning || "Task completed successfully"; - } - } - const mappedActions = mapToolResultToActions({ - toolCallName: toolCall.toolName, - toolResult, - args, - reasoning: event.text || undefined, - }); + if (event.text && event.text.length > 0) { + state.collectedReasoning.push(event.text); + this.logger({ + category: "agent", + message: `reasoning: ${event.text}`, + level: 1, + }); + } - for (const action of mappedActions) { - action.pageUrl = currentPageUrl; - action.timestamp = Date.now(); - actions.push(action); - } + if (toolCall.toolName === "close") { + state.completed = true; + if (args?.taskComplete) { + const closeReasoning = args.reasoning; + const allReasoning = state.collectedReasoning.join(" "); + state.finalMessage = closeReasoning + ? `${allReasoning} ${closeReasoning}`.trim() + : allReasoning || "Task completed successfully"; } - currentPageUrl = (await this.v3.context.awaitActivePage()).url(); } - }, - }); + const mappedActions = mapToolResultToActions({ + toolCallName: toolCall.toolName, + toolResult, + args, + reasoning: event.text || undefined, + }); - if (!finalMessage) { - const allReasoning = collectedReasoning.join(" ").trim(); - finalMessage = allReasoning || result.text; + for (const action of mappedActions) { + action.pageUrl = state.currentPageUrl; + action.timestamp = Date.now(); + state.actions.push(action); + } + } + state.currentPageUrl = (await this.v3.context.awaitActivePage()).url(); } + }; + } - const endTime = Date.now(); - const inferenceTimeMs = endTime - startTime; - if (result.usage) { - this.v3.updateMetrics( - V3FunctionName.AGENT, - result.usage.inputTokens || 0, - result.usage.outputTokens || 0, - result.usage.reasoningTokens || 0, - result.usage.cachedInputTokens || 0, - inferenceTimeMs, - ); - } + public async execute( + instructionOrOptions: string | AgentExecuteOptions, + ): Promise { + const startTime = Date.now(); + const { + maxSteps, + systemPrompt, + allTools, + messages, + wrappedModel, + initialPageUrl, + } = await this.prepareAgent(instructionOrOptions); - return { - success: completed, - message: finalMessage || "Task execution completed", - actions, - completed, - usage: result.usage - ? { - input_tokens: result.usage.inputTokens || 0, - output_tokens: result.usage.outputTokens || 0, - reasoning_tokens: result.usage.reasoningTokens || 0, - cached_input_tokens: result.usage.cachedInputTokens || 0, - inference_time_ms: inferenceTimeMs, - } - : undefined, - }; + const state: AgentState = { + collectedReasoning: [], + actions: [], + finalMessage: "", + completed: false, + currentPageUrl: initialPageUrl, + }; + + try { + const result = await this.llmClient.generateText({ + model: wrappedModel, + system: systemPrompt, + messages, + tools: allTools, + stopWhen: (result) => this.handleStop(result, maxSteps), + temperature: 1, + toolChoice: "auto", + onStepFinish: this.createStepHandler(state), + }); + + return this.consolidateMetricsAndResult(startTime, state, result); } catch (error) { const errorMessage = error?.message ?? String(error); this.logger({ @@ -179,7 +180,7 @@ export class V3AgentHandler { }); return { success: false, - actions, + actions: state.actions, message: `Failed to execute task: ${errorMessage}`, completed: false, }; @@ -188,78 +189,98 @@ export class V3AgentHandler { public async stream( instructionOrOptions: string | AgentExecuteOptions, - ) { - const options = - typeof instructionOrOptions === "string" - ? 
{ instruction: instructionOrOptions } - : instructionOrOptions; - - const maxSteps = options.maxSteps || 10; - // We can track actions locally for logging purposes, but they won't be returned in the stream result directly - const collectedReasoning: string[] = []; - - let currentPageUrl = (await this.v3.context.awaitActivePage()).url(); + ): Promise { + const { + maxSteps, + systemPrompt, + allTools, + messages, + wrappedModel, + initialPageUrl, + } = await this.prepareAgent(instructionOrOptions); - const systemPrompt = this.buildSystemPrompt( - options.instruction, - this.systemInstructions, - ); - const tools = this.createTools(); - const allTools: ToolSet = { ...tools, ...this.mcpTools }; - const messages: ModelMessage[] = [ - { role: "user", content: options.instruction }, - ]; + const state: AgentState = { + collectedReasoning: [], + actions: [], + finalMessage: "", + completed: false, + currentPageUrl: initialPageUrl, + }; + const startTime = Date.now(); - if (!this.llmClient?.getLanguageModel) { - throw new MissingLLMConfigurationError(); - } - const baseModel = this.llmClient.getLanguageModel(); - const wrappedModel = wrapLanguageModel({ - model: baseModel, - middleware: { - transformParams: async ({ params }) => { - const { processedPrompt } = processMessages(params); - return { ...params, prompt: processedPrompt } as typeof params; - }, - }, + let resolveResult: (value: AgentResult | PromiseLike) => void; + let rejectResult: (reason?: any) => void; + const resultPromise = new Promise((resolve, reject) => { + resolveResult = resolve; + rejectResult = reject; }); - return this.llmClient.streamText({ + const streamResult = this.llmClient.streamText({ model: wrappedModel, system: systemPrompt, messages, tools: allTools, - stopWhen: stepCountIs(maxSteps), + stopWhen: (result) => this.handleStop(result, maxSteps), temperature: 1, toolChoice: "auto", - onStepFinish: async (event) => { - this.logger({ - category: "agent", - message: `Step finished: ${event.finishReason}`, - level: 2, - }); + onStepFinish: this.createStepHandler(state), + onFinish: (event) => { + try { + const result = this.consolidateMetricsAndResult( + startTime, + state, + event, + ); + resolveResult(result); + } catch (error) { + rejectResult(error); + } + }, + }); - if (event.toolCalls && event.toolCalls.length > 0) { - for (let i = 0; i < event.toolCalls.length; i++) { - const toolCall = event.toolCalls[i]; - // eslint-disable-next-line @typescript-eslint/no-unused-vars - const args = toolCall.input as Record; + const agentStreamResult = streamResult as AgentStreamResult; + agentStreamResult.result = resultPromise; + return agentStreamResult; + } - if (event.text && event.text.length > 0) { - collectedReasoning.push(event.text); - this.logger({ - category: "agent", - message: `reasoning: ${event.text}`, - level: 1, - }); - } + private consolidateMetricsAndResult( + startTime: number, + state: AgentState, + result: { text?: string; usage?: any }, + ): AgentResult { + if (!state.finalMessage) { + const allReasoning = state.collectedReasoning.join(" ").trim(); + state.finalMessage = allReasoning || result.text || ""; + } + + const endTime = Date.now(); + const inferenceTimeMs = endTime - startTime; + if (result.usage) { + this.v3.updateMetrics( + V3FunctionName.AGENT, + result.usage.inputTokens || 0, + result.usage.outputTokens || 0, + result.usage.reasoningTokens || 0, + result.usage.cachedInputTokens || 0, + inferenceTimeMs, + ); + } - // We don't build the actions array here for return, but we update the page URL + 
return { + success: state.completed, + message: state.finalMessage || "Task execution completed", + actions: state.actions, + completed: state.completed, + usage: result.usage + ? { + input_tokens: result.usage.inputTokens || 0, + output_tokens: result.usage.outputTokens || 0, + reasoning_tokens: result.usage.reasoningTokens || 0, + cached_input_tokens: result.usage.cachedInputTokens || 0, + inference_time_ms: inferenceTimeMs, } - currentPageUrl = (await this.v3.context.awaitActivePage()).url(); - } - }, - }); + : undefined, + }; } private buildSystemPrompt( @@ -278,4 +299,15 @@ export class V3AgentHandler { logger: this.logger, }); } + + private handleStop( + result: Parameters>[0], + maxSteps: number, + ): boolean | PromiseLike { + const lastStep = result.steps[result.steps.length - 1]; + if (lastStep?.toolCalls?.some((tc) => tc.toolName === "close")) { + return true; + } + return stepCountIs(maxSteps)(result); + } } diff --git a/packages/core/lib/v3/types/public/agent.ts b/packages/core/lib/v3/types/public/agent.ts index 9ce9bcb21..8ed405cc5 100644 --- a/packages/core/lib/v3/types/public/agent.ts +++ b/packages/core/lib/v3/types/public/agent.ts @@ -1,11 +1,29 @@ import type { Client } from "@modelcontextprotocol/sdk/client/index.js"; -import { ToolSet } from "ai"; +import { ToolSet, ModelMessage, wrapLanguageModel, StreamTextResult } from "ai"; import { LogLine } from "./logs"; import { Page as PlaywrightPage } from "playwright-core"; import { Page as PuppeteerPage } from "puppeteer-core"; import { Page as PatchrightPage } from "patchright-core"; import { Page } from "../../understudy/page"; +export interface AgentContext { + options: AgentExecuteOptions; + maxSteps: number; + systemPrompt: string; + allTools: ToolSet; + messages: ModelMessage[]; + wrappedModel: ReturnType; + initialPageUrl: string; +} + +export interface AgentState { + collectedReasoning: string[]; + actions: AgentAction[]; + finalMessage: string; + completed: boolean; + currentPageUrl: string; +} + export interface AgentAction { type: string; reasoning?: string; @@ -34,6 +52,10 @@ export interface AgentResult { }; } +export type AgentStreamResult = StreamTextResult & { + result: Promise; +}; + export interface AgentExecuteOptions { instruction: string; maxSteps?: number; diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index f11e918e5..da25efeed 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -65,8 +65,8 @@ import { StagehandNotInitializedError, MissingEnvironmentVariableError, StagehandInitError, + AgentStreamResult, } from "./types/public"; -import { StreamTextResult, ToolSet } from "ai"; import { V3Context } from "./understudy/context"; import { Page } from "./understudy/page"; import { resolveModel } from "../modelUtils"; @@ -1501,7 +1501,7 @@ export class V3 { ) => Promise; stream?: ( instructionOrOptions: string | AgentExecuteOptions, - ) => Promise>; + ) => Promise; } { this.logger({ category: "agent", @@ -1741,6 +1741,9 @@ export class V3 { }), stream: async (instructionOrOptions: string | AgentExecuteOptions) => withInstanceLogContext(this.instanceId, async () => { + if (!this.experimental) { + throw new ExperimentalNotConfiguredError("Agent streaming"); + } if ((options?.integrations || options?.tools) && !this.experimental) { throw new ExperimentalNotConfiguredError( "MCP integrations and custom tools", From a0c8ddf9a1627279d3f2dad115109c48eaa44093 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 16:53:04 -0800 Subject: [PATCH 03/29] update 
typing --- packages/core/lib/v3/handlers/v3AgentHandler.ts | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index 48d3bc556..e45fa5712 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -1,7 +1,13 @@ import { createAgentTools } from "../agent/tools"; import { LogLine } from "../types/public/logs"; import { V3 } from "../v3"; -import { ModelMessage, ToolSet, wrapLanguageModel, stepCountIs } from "ai"; +import { + ModelMessage, + ToolSet, + wrapLanguageModel, + stepCountIs, + type LanguageModelUsage, +} from "ai"; import { processMessages } from "../agent/utils/messageProcessing"; import { LLMClient } from "../llm/LLMClient"; import { @@ -246,7 +252,7 @@ export class V3AgentHandler { private consolidateMetricsAndResult( startTime: number, state: AgentState, - result: { text?: string; usage?: any }, + result: { text?: string; usage?: LanguageModelUsage }, ): AgentResult { if (!state.finalMessage) { const allReasoning = state.collectedReasoning.join(" ").trim(); From 8d4f29dce72db00c22161e87ef17975290a5d900 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 17:29:04 -0800 Subject: [PATCH 04/29] add streaming example --- .../core/examples/agent_stream_example.ts | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 packages/core/examples/agent_stream_example.ts diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts new file mode 100644 index 000000000..46e43e7da --- /dev/null +++ b/packages/core/examples/agent_stream_example.ts @@ -0,0 +1,53 @@ + +import { Stagehand } from "../lib/v3"; +import dotenv from "dotenv"; +import chalk from "chalk"; + +// Load environment variables +dotenv.config(); +async function main() { + console.log(`\n${chalk.bold("Stagehand 🤘 Agent Streaming Example")}\n`); + // Initialize Stagehand + const stagehand = new Stagehand({ + env: "LOCAL", + verbose: 2, + cacheDir: "stagehand-agent-cache", + logInferenceToFile: false, + experimental: true, + }); + + await stagehand.init(); + + try { + const page = stagehand.context.pages()[0]; + await page.goto( + "https://amazon.com", + ); + const agent = stagehand.agent({ + model: "anthropic/claude-sonnet-4-5-20250929", + executionModel: "google/gemini-2.5-flash", + + }); + + const result = await agent.stream({ + instruction: + "go to amazon, and seach for shampoo, stop after searching", + maxSteps: 20, + }); + // stream the text + for await (const delta of result.textStream) { + process.stdout.write(delta); + } + // stream everything ( toolcalls, messages, etc.) 
+ // for await (const delta of result.fullStream) { + // console.log(delta); + // } + + + const finalResult = await result.result; + console.log("Final Result:", finalResult); + } catch (error) { + console.log(`${chalk.red("✗")} Error: ${error}`); + } +} +main(); From e9c63f6e204611750c84ef9662346045f21c6d13 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 17:34:37 -0800 Subject: [PATCH 05/29] lint --- packages/core/examples/agent_stream_example.ts | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts index 46e43e7da..3b99c1477 100644 --- a/packages/core/examples/agent_stream_example.ts +++ b/packages/core/examples/agent_stream_example.ts @@ -1,4 +1,3 @@ - import { Stagehand } from "../lib/v3"; import dotenv from "dotenv"; import chalk from "chalk"; @@ -20,21 +19,17 @@ async function main() { try { const page = stagehand.context.pages()[0]; - await page.goto( - "https://amazon.com", - ); + await page.goto("https://amazon.com"); const agent = stagehand.agent({ model: "anthropic/claude-sonnet-4-5-20250929", executionModel: "google/gemini-2.5-flash", - }); const result = await agent.stream({ - instruction: - "go to amazon, and seach for shampoo, stop after searching", + instruction: "go to amazon, and seach for shampoo, stop after searching", maxSteps: 20, }); - // stream the text + // stream the text for await (const delta of result.textStream) { process.stdout.write(delta); } @@ -43,7 +38,6 @@ async function main() { // console.log(delta); // } - const finalResult = await result.result; console.log("Final Result:", finalResult); } catch (error) { From f0bfffef8b1e3e1eb4c5791c381ff32f889de4aa Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 18:09:51 -0800 Subject: [PATCH 06/29] update types --- packages/core/lib/v3/handlers/v3AgentHandler.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index e45fa5712..af9b8ea65 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -7,6 +7,7 @@ import { wrapLanguageModel, stepCountIs, type LanguageModelUsage, + type StepResult, } from "ai"; import { processMessages } from "../agent/utils/messageProcessing"; import { LLMClient } from "../llm/LLMClient"; @@ -93,7 +94,7 @@ export class V3AgentHandler { } private createStepHandler(state: AgentState) { - return async (event: any) => { + return async (event: StepResult) => { this.logger({ category: "agent", message: `Step finished: ${event.finishReason}`, @@ -215,7 +216,7 @@ export class V3AgentHandler { const startTime = Date.now(); let resolveResult: (value: AgentResult | PromiseLike) => void; - let rejectResult: (reason?: any) => void; + let rejectResult: (reason?: string) => void; const resultPromise = new Promise((resolve, reject) => { resolveResult = resolve; rejectResult = reject; From b21cd59d1d292628fada831fa4c71b6bcf623308 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Fri, 21 Nov 2025 19:32:42 -0800 Subject: [PATCH 07/29] change default max steps to 20 --- packages/core/lib/v3/handlers/v3AgentHandler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index af9b8ea65..47906ca84 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ 
b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -54,7 +54,7 @@ export class V3AgentHandler { ? { instruction: instructionOrOptions } : instructionOrOptions; - const maxSteps = options.maxSteps || 10; + const maxSteps = options.maxSteps || 20; const systemPrompt = this.buildSystemPrompt( options.instruction, From 57ec8c169e019118d415af007e4d66b9887202eb Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 13:29:25 -0800 Subject: [PATCH 08/29] manually bump version --- packages/core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/package.json b/packages/core/package.json index f6f1b5953..0152611f9 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@browserbasehq/stagehand", - "version": "3.0.4", + "version": "3.0.5", "description": "An AI web browsing framework focused on simplicity and extensibility.", "main": "./dist/index.js", "module": "./dist/index.js", From e31d2df9110d049e21d024242dc8ee438dae9566 Mon Sep 17 00:00:00 2001 From: tkattkat <48974763+tkattkat@users.noreply.github.com> Date: Tue, 25 Nov 2025 13:29:50 -0800 Subject: [PATCH 09/29] Update packages/core/examples/agent_stream_example.ts Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> --- packages/core/examples/agent_stream_example.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts index 3b99c1477..70a6f40a6 100644 --- a/packages/core/examples/agent_stream_example.ts +++ b/packages/core/examples/agent_stream_example.ts @@ -26,7 +26,7 @@ async function main() { }); const result = await agent.stream({ - instruction: "go to amazon, and seach for shampoo, stop after searching", + instruction: "go to amazon, and search for shampoo, stop after searching", maxSteps: 20, }); // stream the text From ef9a78e73d8779564b256a9580b3b32daa23d291 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 13:43:49 -0800 Subject: [PATCH 10/29] changeset --- .changeset/four-knives-ask.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/four-knives-ask.md diff --git a/.changeset/four-knives-ask.md b/.changeset/four-knives-ask.md new file mode 100644 index 000000000..e6eb49ab1 --- /dev/null +++ b/.changeset/four-knives-ask.md @@ -0,0 +1,5 @@ +--- +"@browserbasehq/stagehand": patch +--- + +Add streaming support to agent through agent.stream From 5b5619cbeccb27f16cddd26741b32ec5aef52a6d Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 13:44:46 -0800 Subject: [PATCH 11/29] change example to verbose 0 --- packages/core/examples/agent_stream_example.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts index 70a6f40a6..22dc3099a 100644 --- a/packages/core/examples/agent_stream_example.ts +++ b/packages/core/examples/agent_stream_example.ts @@ -9,7 +9,7 @@ async function main() { // Initialize Stagehand const stagehand = new Stagehand({ env: "LOCAL", - verbose: 2, + verbose: 0, cacheDir: "stagehand-agent-cache", logInferenceToFile: false, experimental: true, From e78267491fedf5b0639880b61a04b9811800b7d1 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 16:33:10 -0800 Subject: [PATCH 12/29] add caching --- packages/core/lib/v3/cache/AgentCache.ts | 79 ++++++++++++++++++++++++ packages/core/lib/v3/v3.ts | 68 +++++++++++++++++++- 2 
files changed, 146 insertions(+), 1 deletion(-) diff --git a/packages/core/lib/v3/cache/AgentCache.ts b/packages/core/lib/v3/cache/AgentCache.ts index 92c0504f9..04b4b4cc8 100644 --- a/packages/core/lib/v3/cache/AgentCache.ts +++ b/packages/core/lib/v3/cache/AgentCache.ts @@ -18,6 +18,7 @@ import type { import type { AvailableModel, AgentResult, + AgentStreamResult, AgentConfig, AgentExecuteOptions, Logger, @@ -185,6 +186,84 @@ export class AgentCache { return await this.replayAgentCacheEntry(entry); } + /** + * Attempts to replay a cached agent execution and returns it as a stream result. + * + * This method exists because the agent API exposes two execution modes: + * - `execute()` - Returns a Promise directly + * - `stream()` - Returns an AgentStreamResult with async iterables for real-time output + * + * When a cache hit occurs, we need to return the appropriate type for each mode: + * - For `execute()`, we use `tryReplay()` which returns AgentResult + * - For `stream()`, we use `tryReplayAsStream()` which wraps the result in a + * stream-compatible interface + * + * This ensures consumers using `stream()` can still iterate over `textStream` + * and await `result` even when the response comes from cache, maintaining + * API consistency regardless of whether the result was cached or live. + */ + async tryReplayAsStream( + context: AgentCacheContext, + ): Promise { + const result = await this.tryReplay(context); + if (!result) return null; + return this.createCachedStreamResult(result); + } + + /** + * Creates a mock AgentStreamResult that wraps a cached AgentResult. + * + * AgentStreamResult (from the AI SDK) is a complex type with multiple async + * iterables and promises. When serving from cache, we don't have an actual + * LLM stream to consume - we just have the final result. This method creates + * a "fake" stream + + * This approach lets cached responses be transparent to the consumer - + * they can use the same iteration patterns whether the result is live or cached. + */ + private createCachedStreamResult(cachedResult: AgentResult): AgentStreamResult { + const message = cachedResult.message ?? 
""; + + async function* textStreamGenerator(): AsyncGenerator { + yield message; + } + + async function* fullStreamGenerator(): AsyncGenerator<{ + type: string; + textDelta?: string; + }> { + yield { type: "text-delta", textDelta: message }; + yield { type: "finish" }; + } + + const mockStreamResult = { + textStream: textStreamGenerator(), + fullStream: fullStreamGenerator(), + result: Promise.resolve(cachedResult), + text: Promise.resolve(message), + usage: Promise.resolve({ + promptTokens: 0, + completionTokens: 0, + totalTokens: 0, + }), + finishReason: Promise.resolve("stop" as const), + experimental_providerMetadata: Promise.resolve(undefined), + response: Promise.resolve({ + id: "cached", + timestamp: new Date(), + modelId: "cached", + }), + rawResponse: Promise.resolve({ headers: {} }), + warnings: Promise.resolve([]), + steps: Promise.resolve([]), + toolCalls: Promise.resolve([]), + toolResults: Promise.resolve([]), + [Symbol.asyncIterator]: () => textStreamGenerator(), + } as unknown as AgentStreamResult; + + return mockStreamResult; + } + async store( context: AgentCacheContext, steps: AgentReplayStep[], diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index da25efeed..b627d604b 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1769,7 +1769,73 @@ export class V3 { tools, ); - return handler.stream(instructionOrOptions); + const resolvedOptions: AgentExecuteOptions = + typeof instructionOrOptions === "string" + ? { instruction: instructionOrOptions } + : instructionOrOptions; + if (resolvedOptions.page) { + const normalizedPage = await this.normalizeToV3Page( + resolvedOptions.page, + ); + this.ctx!.setActivePage(normalizedPage); + } + const instruction = resolvedOptions.instruction.trim(); + const sanitizedOptions = + this.agentCache.sanitizeExecuteOptions(resolvedOptions); + + let cacheContext: AgentCacheContext | null = null; + if (this.agentCache.shouldAttemptCache(instruction)) { + const startPage = await this.ctx!.awaitActivePage(); + cacheContext = await this.agentCache.prepareContext({ + instruction, + options: sanitizedOptions, + configSignature: agentConfigSignature, + page: startPage, + }); + if (cacheContext) { + const replayed = + await this.agentCache.tryReplayAsStream(cacheContext); + if (replayed) { + return replayed; + } + } + } + + let agentSteps: AgentReplayStep[] = []; + const recording = !!cacheContext; + if (recording) { + this.beginAgentReplayRecording(); + } + + try { + const streamResult = await handler.stream(instructionOrOptions); + + // Wrap the result promise to handle caching on completion + const originalResultPromise = streamResult.result; + const wrappedResultPromise = originalResultPromise.then( + async (result) => { + if (recording) { + agentSteps = this.endAgentReplayRecording(); + } + + if (cacheContext && result.success && agentSteps.length > 0) { + await this.agentCache.store(cacheContext, agentSteps, result); + } + + return result; + }, + (error) => { + if (recording) this.discardAgentReplayRecording(); + throw error; + }, + ); + + streamResult.result = wrappedResultPromise; + return streamResult; + } catch (err) { + if (recording) this.discardAgentReplayRecording(); + throw err; + } }), }; } From 1b7d126988764eeaae62c98a61ae6bf1226717c0 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 16:34:46 -0800 Subject: [PATCH 13/29] format --- packages/core/lib/v3/cache/AgentCache.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/core/lib/v3/cache/AgentCache.ts 
b/packages/core/lib/v3/cache/AgentCache.ts index 04b4b4cc8..c324f8c11 100644 --- a/packages/core/lib/v3/cache/AgentCache.ts +++ b/packages/core/lib/v3/cache/AgentCache.ts @@ -221,7 +221,9 @@ export class AgentCache { * This approach lets cached responses be transparent to the consumer - * they can use the same iteration patterns whether the result is live or cached. */ - private createCachedStreamResult(cachedResult: AgentResult): AgentStreamResult { + private createCachedStreamResult( + cachedResult: AgentResult, + ): AgentStreamResult { const message = cachedResult.message ?? ""; async function* textStreamGenerator(): AsyncGenerator { From 51877ddc30481dce28e6c81c318b73c5193ff51e Mon Sep 17 00:00:00 2001 From: tkattkat Date: Tue, 25 Nov 2025 16:46:12 -0800 Subject: [PATCH 14/29] add wrapStreamForCaching --- packages/core/lib/v3/cache/AgentCache.ts | 49 ++++++++++++++++ packages/core/lib/v3/v3.ts | 73 +++++++++--------------- 2 files changed, 75 insertions(+), 47 deletions(-) diff --git a/packages/core/lib/v3/cache/AgentCache.ts b/packages/core/lib/v3/cache/AgentCache.ts index c324f8c11..637374d3e 100644 --- a/packages/core/lib/v3/cache/AgentCache.ts +++ b/packages/core/lib/v3/cache/AgentCache.ts @@ -266,6 +266,55 @@ export class AgentCache { return mockStreamResult; } + /** + * Wraps an AgentStreamResult with caching logic. + * + * This method handles the complexity of caching for streaming responses: + * 1. Begins recording agent replay steps + * 2. Wraps the stream's result promise to capture completion + * 3. On success: ends recording and stores the cache entry + * 4. On error: discards the recording + * + * This keeps the caching orchestration in AgentCache rather than + * spreading it across the V3 class. + * + * @param context - The cache context for this execution + * @param streamResult - The stream result from the agent handler + * @param beginRecording - Callback to start recording (from V3) + * @param endRecording - Callback to end recording and get steps (from V3) + * @param discardRecording - Callback to discard recording on error (from V3) + * @returns The wrapped stream result with caching enabled + */ + wrapStreamForCaching( + context: AgentCacheContext, + streamResult: AgentStreamResult, + beginRecording: () => void, + endRecording: () => AgentReplayStep[], + discardRecording: () => void, + ): AgentStreamResult { + beginRecording(); + + const originalResultPromise = streamResult.result; + const wrappedResultPromise = originalResultPromise.then( + async (result) => { + const agentSteps = endRecording(); + + if (result.success && agentSteps.length > 0) { + await this.store(context, agentSteps, result); + } + + return result; + }, + (error) => { + discardRecording(); + throw error; + }, + ); + + streamResult.result = wrappedResultPromise; + return streamResult; + } + async store( context: AgentCacheContext, steps: AgentReplayStep[], diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index b627d604b..55973be57 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1783,59 +1783,38 @@ export class V3 { const sanitizedOptions = this.agentCache.sanitizeExecuteOptions(resolvedOptions); - let cacheContext: AgentCacheContext | null = null; - if (this.agentCache.shouldAttemptCache(instruction)) { - const startPage = await this.ctx!.awaitActivePage(); - cacheContext = await this.agentCache.prepareContext({ - instruction, - options: sanitizedOptions, - configSignature: agentConfigSignature, - page: startPage, - }); - if (cacheContext) { - 
const replayed = - await this.agentCache.tryReplayAsStream(cacheContext); - if (replayed) { - return replayed; - } + // Try cache replay first + const cacheContext = this.agentCache.shouldAttemptCache(instruction) + ? await this.agentCache.prepareContext({ + instruction, + options: sanitizedOptions, + configSignature: agentConfigSignature, + page: await this.ctx!.awaitActivePage(), + }) + : null; + + if (cacheContext) { + const replayed = + await this.agentCache.tryReplayAsStream(cacheContext); + if (replayed) { + return replayed; } } - let agentSteps: AgentReplayStep[] = []; - const recording = !!cacheContext; - if (recording) { - this.beginAgentReplayRecording(); - } - - try { - const streamResult = await handler.stream(instructionOrOptions); - - // Wrap the result promise to handle caching on completion - const originalResultPromise = streamResult.result; - const wrappedResultPromise = originalResultPromise.then( - async (result) => { - if (recording) { - agentSteps = this.endAgentReplayRecording(); - } - - if (cacheContext && result.success && agentSteps.length > 0) { - await this.agentCache.store(cacheContext, agentSteps, result); - } + // No cache hit - execute and optionally record for caching + const streamResult = await handler.stream(instructionOrOptions); - return result; - }, - (error) => { - if (recording) this.discardAgentReplayRecording(); - throw error; - }, + if (cacheContext) { + return this.agentCache.wrapStreamForCaching( + cacheContext, + streamResult, + () => this.beginAgentReplayRecording(), + () => this.endAgentReplayRecording(), + () => this.discardAgentReplayRecording(), ); - - streamResult.result = wrappedResultPromise; - return streamResult; - } catch (err) { - if (recording) this.discardAgentReplayRecording(); - throw err; } + + return streamResult; }), }; } From ebd814fff71037708dafb294208eac9c184e07bd Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 10:41:54 -0800 Subject: [PATCH 15/29] consolidate shared logic --- packages/core/lib/v3/v3.ts | 176 ++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 98 deletions(-) diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index 55973be57..fc5ff8055 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1491,6 +1491,71 @@ export class V3 { } } + /** + * Prepares shared context for agent execution (both execute and stream). + * Extracts duplicated setup logic into a single helper. + */ + private async prepareAgentExecution( + options: AgentConfig | undefined, + instructionOrOptions: string | AgentExecuteOptions, + agentConfigSignature: string, + ): Promise<{ + handler: V3AgentHandler; + resolvedOptions: AgentExecuteOptions; + instruction: string; + cacheContext: AgentCacheContext | null; + }> { + if ((options?.integrations || options?.tools) && !this.experimental) { + throw new ExperimentalNotConfiguredError( + "MCP integrations and custom tools", + ); + } + + const tools = options?.integrations + ? await resolveTools(options.integrations, options.tools) + : (options?.tools ?? {}); + + const agentLlmClient = options?.model + ? this.resolveLlmClient(options.model) + : this.llmClient; + + const handler = new V3AgentHandler( + this, + this.logger, + agentLlmClient, + typeof options?.executionModel === "string" + ? options.executionModel + : options?.executionModel?.modelName, + options?.systemPrompt, + tools, + ); + + const resolvedOptions: AgentExecuteOptions = + typeof instructionOrOptions === "string" + ? 
{ instruction: instructionOrOptions } + : instructionOrOptions; + + if (resolvedOptions.page) { + const normalizedPage = await this.normalizeToV3Page(resolvedOptions.page); + this.ctx!.setActivePage(normalizedPage); + } + + const instruction = resolvedOptions.instruction.trim(); + const sanitizedOptions = + this.agentCache.sanitizeExecuteOptions(resolvedOptions); + + const cacheContext = this.agentCache.shouldAttemptCache(instruction) + ? await this.agentCache.prepareContext({ + instruction, + options: sanitizedOptions, + configSignature: agentConfigSignature, + page: await this.ctx!.awaitActivePage(), + }) + : null; + + return { handler, resolvedOptions, instruction, cacheContext }; + } + /** * Create a v3 agent instance (AISDK tool-based) with execute(). * Mirrors the v2 Stagehand.agent() tool mode (no CUA provider here). @@ -1645,61 +1710,17 @@ export class V3 { return { execute: async (instructionOrOptions: string | AgentExecuteOptions) => withInstanceLogContext(this.instanceId, async () => { - if ((options?.integrations || options?.tools) && !this.experimental) { - throw new ExperimentalNotConfiguredError( - "MCP integrations and custom tools", + const { handler, resolvedOptions, cacheContext } = + await this.prepareAgentExecution( + options, + instructionOrOptions, + agentConfigSignature, ); - } - - const tools = options?.integrations - ? await resolveTools(options.integrations, options.tools) - : (options?.tools ?? {}); - - // Resolve the LLM client for the agent based on the model parameter - // Use the agent's model if specified, otherwise fall back to the default - const agentLlmClient = options?.model - ? this.resolveLlmClient(options.model) - : this.llmClient; - - const handler = new V3AgentHandler( - this, - this.logger, - agentLlmClient, - typeof options?.executionModel === "string" - ? options.executionModel - : options?.executionModel?.modelName, - options?.systemPrompt, - tools, - ); - const resolvedOptions: AgentExecuteOptions = - typeof instructionOrOptions === "string" - ? { instruction: instructionOrOptions } - : instructionOrOptions; - if (resolvedOptions.page) { - const normalizedPage = await this.normalizeToV3Page( - resolvedOptions.page, - ); - this.ctx!.setActivePage(normalizedPage); - } - const instruction = resolvedOptions.instruction.trim(); - const sanitizedOptions = - this.agentCache.sanitizeExecuteOptions(resolvedOptions); - - let cacheContext: AgentCacheContext | null = null; - if (this.agentCache.shouldAttemptCache(instruction)) { - const startPage = await this.ctx!.awaitActivePage(); - cacheContext = await this.agentCache.prepareContext({ - instruction, - options: sanitizedOptions, - configSignature: agentConfigSignature, - page: startPage, - }); - if (cacheContext) { - const replayed = await this.agentCache.tryReplay(cacheContext); - if (replayed) { - return replayed; - } + if (cacheContext) { + const replayed = await this.agentCache.tryReplay(cacheContext); + if (replayed) { + return replayed; } } @@ -1744,55 +1765,14 @@ export class V3 { if (!this.experimental) { throw new ExperimentalNotConfiguredError("Agent streaming"); } - if ((options?.integrations || options?.tools) && !this.experimental) { - throw new ExperimentalNotConfiguredError( - "MCP integrations and custom tools", - ); - } - - const tools = options?.integrations - ? await resolveTools(options.integrations, options.tools) - : (options?.tools ?? {}); - const agentLlmClient = options?.model - ? 
this.resolveLlmClient(options.model) - : this.llmClient; - - const handler = new V3AgentHandler( - this, - this.logger, - agentLlmClient, - typeof options?.executionModel === "string" - ? options.executionModel - : options?.executionModel?.modelName, - options?.systemPrompt, - tools, + const { handler, cacheContext } = await this.prepareAgentExecution( + options, + instructionOrOptions, + agentConfigSignature, ); - const resolvedOptions: AgentExecuteOptions = - typeof instructionOrOptions === "string" - ? { instruction: instructionOrOptions } - : instructionOrOptions; - if (resolvedOptions.page) { - const normalizedPage = await this.normalizeToV3Page( - resolvedOptions.page, - ); - this.ctx!.setActivePage(normalizedPage); - } - const instruction = resolvedOptions.instruction.trim(); - const sanitizedOptions = - this.agentCache.sanitizeExecuteOptions(resolvedOptions); - // Try cache replay first - const cacheContext = this.agentCache.shouldAttemptCache(instruction) - ? await this.agentCache.prepareContext({ - instruction, - options: sanitizedOptions, - configSignature: agentConfigSignature, - page: await this.ctx!.awaitActivePage(), - }) - : null; - if (cacheContext) { const replayed = await this.agentCache.tryReplayAsStream(cacheContext); From 0d06078322c0730a59c35dbc7a2bea7f794182d8 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 10:43:23 -0800 Subject: [PATCH 16/29] remove version change --- packages/core/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/core/package.json b/packages/core/package.json index 0152611f9..f6f1b5953 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@browserbasehq/stagehand", - "version": "3.0.5", + "version": "3.0.4", "description": "An AI web browsing framework focused on simplicity and extensibility.", "main": "./dist/index.js", "module": "./dist/index.js", From eeee0dd1e63c137c0777ad8abf34f4f092f9a75d Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:01:39 -0800 Subject: [PATCH 17/29] agents streaming --- packages/core/lib/v3/types/public/agent.ts | 26 ++++++ packages/core/lib/v3/v3.ts | 92 ++++++++++++---------- 2 files changed, 77 insertions(+), 41 deletions(-) diff --git a/packages/core/lib/v3/types/public/agent.ts b/packages/core/lib/v3/types/public/agent.ts index 71a45af02..aaf2bd489 100644 --- a/packages/core/lib/v3/types/public/agent.ts +++ b/packages/core/lib/v3/types/public/agent.ts @@ -221,4 +221,30 @@ export type AgentConfig = { * Format: "provider/model" (e.g., "openai/gpt-4o-mini", "google/gemini-2.0-flash-exp") */ executionModel?: string | AgentModelConfig; + /** + * Enable streaming mode for the agent. + * When true, execute() returns AgentStreamResult with textStream for incremental output. + * When false (default), execute() returns AgentResult after completion. + */ + stream?: boolean; }; + +/** + * Agent instance returned when stream: true is set in AgentConfig. + * execute() returns a streaming result that can be consumed incrementally. + */ +export interface StreamingAgentInstance { + execute: ( + instructionOrOptions: string | AgentExecuteOptions, + ) => Promise; +} + +/** + * Agent instance returned when stream is false or not set in AgentConfig. + * execute() returns a result after the agent completes. 
+ */ +export interface NonStreamingAgentInstance { + execute: ( + instructionOrOptions: string | AgentExecuteOptions, + ) => Promise; +} diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index fc5ff8055..898df10b2 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1559,14 +1559,20 @@ export class V3 { /** * Create a v3 agent instance (AISDK tool-based) with execute(). * Mirrors the v2 Stagehand.agent() tool mode (no CUA provider here). + * + * @overload When stream: true, returns a streaming agent where execute() returns AgentStreamResult + * @overload When stream is false/undefined, returns a non-streaming agent where execute() returns AgentResult */ + agent( + options: AgentConfig & { stream: true }, + ): { execute: (instructionOrOptions: string | AgentExecuteOptions) => Promise }; + agent( + options?: AgentConfig & { stream?: false }, + ): { execute: (instructionOrOptions: string | AgentExecuteOptions) => Promise }; agent(options?: AgentConfig): { execute: ( instructionOrOptions: string | AgentExecuteOptions, - ) => Promise; - stream?: ( - instructionOrOptions: string | AgentExecuteOptions, - ) => Promise; + ) => Promise; } { this.logger({ category: "agent", @@ -1706,10 +1712,50 @@ export class V3 { // Default: AISDK tools-based agent const agentConfigSignature = this.agentCache.buildConfigSignature(options); + const isStreaming = options?.stream ?? false; return { - execute: async (instructionOrOptions: string | AgentExecuteOptions) => + execute: async ( + instructionOrOptions: string | AgentExecuteOptions, + ): Promise => withInstanceLogContext(this.instanceId, async () => { + // Streaming mode + if (isStreaming) { + if (!this.experimental) { + throw new ExperimentalNotConfiguredError("Agent streaming"); + } + + const { handler, cacheContext } = await this.prepareAgentExecution( + options, + instructionOrOptions, + agentConfigSignature, + ); + + if (cacheContext) { + const replayed = + await this.agentCache.tryReplayAsStream(cacheContext); + if (replayed) { + return replayed; + } + } + + + const streamResult = await handler.stream(instructionOrOptions); + + if (cacheContext) { + return this.agentCache.wrapStreamForCaching( + cacheContext, + streamResult, + () => this.beginAgentReplayRecording(), + () => this.endAgentReplayRecording(), + () => this.discardAgentReplayRecording(), + ); + } + + return streamResult; + } + + // Non-streaming mode (default) const { handler, resolvedOptions, cacheContext } = await this.prepareAgentExecution( options, @@ -1760,42 +1806,6 @@ export class V3 { } } }), - stream: async (instructionOrOptions: string | AgentExecuteOptions) => - withInstanceLogContext(this.instanceId, async () => { - if (!this.experimental) { - throw new ExperimentalNotConfiguredError("Agent streaming"); - } - - const { handler, cacheContext } = await this.prepareAgentExecution( - options, - instructionOrOptions, - agentConfigSignature, - ); - - // Try cache replay first - if (cacheContext) { - const replayed = - await this.agentCache.tryReplayAsStream(cacheContext); - if (replayed) { - return replayed; - } - } - - // No cache hit - execute and optionally record for caching - const streamResult = await handler.stream(instructionOrOptions); - - if (cacheContext) { - return this.agentCache.wrapStreamForCaching( - cacheContext, - streamResult, - () => this.beginAgentReplayRecording(), - () => this.endAgentReplayRecording(), - () => this.discardAgentReplayRecording(), - ); - } - - return streamResult; - }), }; } } From 
7f88860ec95ce73d699208e77920f2964c92803a Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:03:29 -0800 Subject: [PATCH 18/29] update example --- packages/core/examples/agent_stream_example.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts index 22dc3099a..c14a8ffc4 100644 --- a/packages/core/examples/agent_stream_example.ts +++ b/packages/core/examples/agent_stream_example.ts @@ -20,12 +20,15 @@ async function main() { try { const page = stagehand.context.pages()[0]; await page.goto("https://amazon.com"); + + // Create a streaming agent with stream: true in the config const agent = stagehand.agent({ model: "anthropic/claude-sonnet-4-5-20250929", executionModel: "google/gemini-2.5-flash", + stream: true, // This makes execute() return AgentStreamResult }); - const result = await agent.stream({ + const result = await agent.execute({ instruction: "go to amazon, and search for shampoo, stop after searching", maxSteps: 20, }); From ae014231f7f43c1bd7f479745ce93b06e799e140 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:04:46 -0800 Subject: [PATCH 19/29] update changeset --- .changeset/four-knives-ask.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changeset/four-knives-ask.md b/.changeset/four-knives-ask.md index e6eb49ab1..a4ca00e7a 100644 --- a/.changeset/four-knives-ask.md +++ b/.changeset/four-knives-ask.md @@ -2,4 +2,4 @@ "@browserbasehq/stagehand": patch --- -Add streaming support to agent through agent.stream +Add streaming support to agent through stream:true in the agent config From 269d7fc69e132d87ca035b3a8bccbd44a47f7faf Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:12:30 -0800 Subject: [PATCH 20/29] update example --- packages/core/examples/agent_stream_example.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/core/examples/agent_stream_example.ts b/packages/core/examples/agent_stream_example.ts index c14a8ffc4..66d3257f8 100644 --- a/packages/core/examples/agent_stream_example.ts +++ b/packages/core/examples/agent_stream_example.ts @@ -24,16 +24,15 @@ async function main() { // Create a streaming agent with stream: true in the config const agent = stagehand.agent({ model: "anthropic/claude-sonnet-4-5-20250929", - executionModel: "google/gemini-2.5-flash", stream: true, // This makes execute() return AgentStreamResult }); - const result = await agent.execute({ + const agentRun = await agent.execute({ instruction: "go to amazon, and search for shampoo, stop after searching", maxSteps: 20, }); // stream the text - for await (const delta of result.textStream) { + for await (const delta of agentRun.textStream) { process.stdout.write(delta); } // stream everything ( toolcalls, messages, etc.) 
@@ -41,7 +40,7 @@ async function main() { // console.log(delta); // } - const finalResult = await result.result; + const finalResult = await agentRun.result; console.log("Final Result:", finalResult); } catch (error) { console.log(`${chalk.red("✗")} Error: ${error}`); From 1e7af1570974a204bb63efab6bea67e47916430e Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:19:06 -0800 Subject: [PATCH 21/29] greptile suggestions --- .../core/lib/v3/handlers/v3AgentHandler.ts | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index 47906ca84..24da7e7ac 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -216,12 +216,23 @@ export class V3AgentHandler { const startTime = Date.now(); let resolveResult: (value: AgentResult | PromiseLike) => void; - let rejectResult: (reason?: string) => void; + let rejectResult: (reason: unknown) => void; const resultPromise = new Promise((resolve, reject) => { resolveResult = resolve; rejectResult = reject; }); + const handleError = (error: unknown) => { + const errorMessage = + error instanceof Error ? error.message : String(error); + this.logger({ + category: "agent", + message: `Error during streaming: ${errorMessage}`, + level: 0, + }); + rejectResult(error); + }; + const streamResult = this.llmClient.streamText({ model: wrappedModel, system: systemPrompt, @@ -231,6 +242,9 @@ export class V3AgentHandler { temperature: 1, toolChoice: "auto", onStepFinish: this.createStepHandler(state), + onError: ({ error }) => { + handleError(error); + }, onFinish: (event) => { try { const result = this.consolidateMetricsAndResult( @@ -240,7 +254,7 @@ export class V3AgentHandler { ); resolveResult(result); } catch (error) { - rejectResult(error); + handleError(error); } }, }); From 4ad4efc683caf70a70c026a5e3b3667d5bcc94b0 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:21:33 -0800 Subject: [PATCH 22/29] format --- packages/core/lib/v3/v3.ts | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index 898df10b2..ee6edfca1 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1563,12 +1563,16 @@ export class V3 { * @overload When stream: true, returns a streaming agent where execute() returns AgentStreamResult * @overload When stream is false/undefined, returns a non-streaming agent where execute() returns AgentResult */ - agent( - options: AgentConfig & { stream: true }, - ): { execute: (instructionOrOptions: string | AgentExecuteOptions) => Promise }; - agent( - options?: AgentConfig & { stream?: false }, - ): { execute: (instructionOrOptions: string | AgentExecuteOptions) => Promise }; + agent(options: AgentConfig & { stream: true }): { + execute: ( + instructionOrOptions: string | AgentExecuteOptions, + ) => Promise; + }; + agent(options?: AgentConfig & { stream?: false }): { + execute: ( + instructionOrOptions: string | AgentExecuteOptions, + ) => Promise; + }; agent(options?: AgentConfig): { execute: ( instructionOrOptions: string | AgentExecuteOptions, @@ -1739,7 +1743,6 @@ export class V3 { } } - const streamResult = await handler.stream(instructionOrOptions); if (cacheContext) { From 05bdf3c2a2553cf698927ef069d205432bd41d11 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 11:28:42 -0800 Subject: [PATCH 23/29] throw error when 
stream is used with cua --- packages/core/lib/v3/v3.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/core/lib/v3/v3.ts b/packages/core/lib/v3/v3.ts index ee6edfca1..71d84d17b 100644 --- a/packages/core/lib/v3/v3.ts +++ b/packages/core/lib/v3/v3.ts @@ -1602,6 +1602,12 @@ export class V3 { // If CUA is enabled, use the computer-use agent path if (options?.cua) { + if (options?.stream) { + throw new StagehandInvalidArgumentError( + "Streaming is not supported with CUA (Computer Use Agent) mode. Remove either 'stream: true' or 'cua: true' from your agent config.", + ); + } + if ((options?.integrations || options?.tools) && !this.experimental) { throw new ExperimentalNotConfiguredError( "MCP integrations and custom tools", From 3744a7182c1e18da93e1d467aefef520c3b63a06 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 13:34:48 -0800 Subject: [PATCH 24/29] add test for streaming --- .../core/lib/v3/tests/agent-streaming.spec.ts | 196 ++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 packages/core/lib/v3/tests/agent-streaming.spec.ts diff --git a/packages/core/lib/v3/tests/agent-streaming.spec.ts b/packages/core/lib/v3/tests/agent-streaming.spec.ts new file mode 100644 index 000000000..1987cfb8a --- /dev/null +++ b/packages/core/lib/v3/tests/agent-streaming.spec.ts @@ -0,0 +1,196 @@ +import { test, expect } from "@playwright/test"; +import { V3 } from "../v3"; +import { v3TestConfig } from "./v3.config"; +import type { AgentResult } from "../types/public/agent"; + +test.describe("Stagehand agent streaming behavior", () => { + let v3: V3; + + test.beforeEach(async () => { + v3 = new V3({ + ...v3TestConfig, + experimental: true, // Required for streaming + }); + await v3.init(); + }); + + test.afterEach(async () => { + await v3?.close?.().catch(() => {}); + }); + + test.describe("agent({ stream: true })", () => { + test("returns an agent with execute that returns AgentStreamResult", async () => { + const agent = v3.agent({ + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + // Verify the agent has execute method + expect(agent).toHaveProperty("execute"); + expect(typeof agent.execute).toBe("function"); + }); + + test("AgentStreamResult has textStream as async iterable", async () => { + test.setTimeout(60000); + + const agent = v3.agent({ + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + // Navigate to a simple page first + const page = v3.context.pages()[0]; + await page.goto("https://example.com"); + + const streamResult = await agent.execute({ + instruction: + "What is the title of this page? 
Use the close tool immediately after answering.", + maxSteps: 3, + }); + + // Verify it's an AgentStreamResult with streaming capabilities + expect(streamResult).toHaveProperty("textStream"); + expect(streamResult).toHaveProperty("result"); + + // textStream should be async iterable + expect(typeof streamResult.textStream[Symbol.asyncIterator]).toBe( + "function", + ); + + // result should be a promise + expect(streamResult.result).toBeInstanceOf(Promise); + }); + + test("textStream yields chunks incrementally", async () => { + test.setTimeout(60000); + + const agent = v3.agent({ + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + const page = v3.context.pages()[0]; + await page.goto("https://example.com"); + + const streamResult = await agent.execute({ + instruction: + "Say hello and then use close tool with taskComplete: true", + maxSteps: 3, + }); + + // Collect chunks from the stream + const chunks: string[] = []; + for await (const chunk of streamResult.textStream) { + chunks.push(chunk); + } + + // Should have received at least some chunks (streaming behavior) + // The exact content depends on the LLM response + expect(Array.isArray(chunks)).toBe(true); + }); + + test("result promise resolves to AgentResult after stream completes", async () => { + test.setTimeout(60000); + + const agent = v3.agent({ + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + const page = v3.context.pages()[0]; + await page.goto("https://example.com"); + + const streamResult = await agent.execute({ + instruction: + "What is this page about? Use close tool with taskComplete: true after answering.", + maxSteps: 5, + }); + + // Consume the stream first + for await (const _ of streamResult.textStream) { + // Just consume + } + + // Now get the final result + const finalResult: AgentResult = await streamResult.result; + + // Verify it's a proper AgentResult + expect(finalResult).toHaveProperty("success"); + expect(finalResult).toHaveProperty("message"); + expect(finalResult).toHaveProperty("actions"); + expect(finalResult).toHaveProperty("completed"); + expect(typeof finalResult.success).toBe("boolean"); + expect(typeof finalResult.message).toBe("string"); + expect(Array.isArray(finalResult.actions)).toBe(true); + }); + }); + + test.describe("agent({ stream: false }) or agent()", () => { + test("returns an agent with execute that returns AgentResult directly", async () => { + const agent = v3.agent({ + stream: false, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + expect(agent).toHaveProperty("execute"); + expect(typeof agent.execute).toBe("function"); + }); + + test("execute returns AgentResult without streaming properties", async () => { + test.setTimeout(60000); + + const agent = v3.agent({ + model: "anthropic/claude-haiku-4-5-20251001", + }); + + const page = v3.context.pages()[0]; + await page.goto("https://example.com"); + + const result = await agent.execute({ + instruction: "What is this page? 
Use close tool immediately.", + maxSteps: 3, + }); + + // Should be AgentResult, not AgentStreamResult + expect(result).toHaveProperty("success"); + expect(result).toHaveProperty("message"); + expect(result).toHaveProperty("actions"); + expect(result).toHaveProperty("completed"); + + // Should NOT have streaming properties + expect(result).not.toHaveProperty("textStream"); + }); + }); + + test.describe("CUA disables streaming", () => { + test("throws StagehandInvalidArgumentError when cua: true and stream: true", () => { + expect(() => { + v3.agent({ + cua: true, + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + }).toThrow("Streaming is not supported with CUA"); + }); + + test("allows cua: true without stream", () => { + // Should not throw + const agent = v3.agent({ + cua: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + expect(agent).toHaveProperty("execute"); + }); + + test("allows stream: true without cua", () => { + // Should not throw + const agent = v3.agent({ + stream: true, + model: "anthropic/claude-haiku-4-5-20251001", + }); + + expect(agent).toHaveProperty("execute"); + }); + }); +}); From c14d7365063858345dca583b14a79472eedc17e5 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 14:01:48 -0800 Subject: [PATCH 25/29] update test --- .../core/lib/v3/tests/agent-streaming.spec.ts | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/packages/core/lib/v3/tests/agent-streaming.spec.ts b/packages/core/lib/v3/tests/agent-streaming.spec.ts index 1987cfb8a..85ccb4c23 100644 --- a/packages/core/lib/v3/tests/agent-streaming.spec.ts +++ b/packages/core/lib/v3/tests/agent-streaming.spec.ts @@ -19,17 +19,6 @@ test.describe("Stagehand agent streaming behavior", () => { }); test.describe("agent({ stream: true })", () => { - test("returns an agent with execute that returns AgentStreamResult", async () => { - const agent = v3.agent({ - stream: true, - model: "anthropic/claude-haiku-4-5-20251001", - }); - - // Verify the agent has execute method - expect(agent).toHaveProperty("execute"); - expect(typeof agent.execute).toBe("function"); - }); - test("AgentStreamResult has textStream as async iterable", async () => { test.setTimeout(60000); @@ -87,6 +76,7 @@ test.describe("Stagehand agent streaming behavior", () => { // Should have received at least some chunks (streaming behavior) // The exact content depends on the LLM response expect(Array.isArray(chunks)).toBe(true); + expect(chunks.length).toBeGreaterThan(0); }); test("result promise resolves to AgentResult after stream completes", async () => { @@ -107,6 +97,7 @@ test.describe("Stagehand agent streaming behavior", () => { }); // Consume the stream first + // eslint-disable-next-line @typescript-eslint/no-unused-vars for await (const _ of streamResult.textStream) { // Just consume } @@ -126,15 +117,6 @@ test.describe("Stagehand agent streaming behavior", () => { }); test.describe("agent({ stream: false }) or agent()", () => { - test("returns an agent with execute that returns AgentResult directly", async () => { - const agent = v3.agent({ - stream: false, - model: "anthropic/claude-haiku-4-5-20251001", - }); - - expect(agent).toHaveProperty("execute"); - expect(typeof agent.execute).toBe("function"); - }); test("execute returns AgentResult without streaming properties", async () => { test.setTimeout(60000); From c1e3baec7a9e0ca6e27e7b52ec619f770edcd9a1 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 14:10:44 -0800 Subject: [PATCH 26/29] lint --- 
packages/core/lib/v3/tests/agent-streaming.spec.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/core/lib/v3/tests/agent-streaming.spec.ts b/packages/core/lib/v3/tests/agent-streaming.spec.ts index 85ccb4c23..35020beb3 100644 --- a/packages/core/lib/v3/tests/agent-streaming.spec.ts +++ b/packages/core/lib/v3/tests/agent-streaming.spec.ts @@ -117,7 +117,6 @@ test.describe("Stagehand agent streaming behavior", () => { }); test.describe("agent({ stream: false }) or agent()", () => { - test("execute returns AgentResult without streaming properties", async () => { test.setTimeout(60000); From 62cf2b04264473e0febe5118c69f8d4bb1af549d Mon Sep 17 00:00:00 2001 From: Sean McGuire Date: Wed, 26 Nov 2025 15:07:41 -0800 Subject: [PATCH 27/29] make anthropic key available in e2e local CI step --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bf7d12f87..142208731 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -188,6 +188,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 50 env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} HEADLESS: true steps: - name: Check out repository code From 008bceb279630062d14efd665c400465e0cb0628 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 15:32:05 -0800 Subject: [PATCH 28/29] add try catch on prepare agent --- packages/core/lib/v3/handlers/v3AgentHandler.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index 24da7e7ac..ccb293d3e 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -49,6 +49,7 @@ export class V3AgentHandler { private async prepareAgent( instructionOrOptions: string | AgentExecuteOptions, ): Promise { + try { const options = typeof instructionOrOptions === "string" ? { instruction: instructionOrOptions } @@ -91,6 +92,14 @@ export class V3AgentHandler { wrappedModel, initialPageUrl, }; + } catch (error) { + this.logger({ + category: "agent", + message: `failed to prepare agent: ${error}`, + level: 0, + }); + throw error; + } } private createStepHandler(state: AgentState) { From 276cafb6f957810a4ab79591d89ff2f3ae2f27d0 Mon Sep 17 00:00:00 2001 From: tkattkat Date: Wed, 26 Nov 2025 15:35:08 -0800 Subject: [PATCH 29/29] format --- .../core/lib/v3/handlers/v3AgentHandler.ts | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/core/lib/v3/handlers/v3AgentHandler.ts b/packages/core/lib/v3/handlers/v3AgentHandler.ts index ccb293d3e..72e885f95 100644 --- a/packages/core/lib/v3/handlers/v3AgentHandler.ts +++ b/packages/core/lib/v3/handlers/v3AgentHandler.ts @@ -50,48 +50,48 @@ export class V3AgentHandler { instructionOrOptions: string | AgentExecuteOptions, ): Promise { try { - const options = - typeof instructionOrOptions === "string" - ? { instruction: instructionOrOptions } - : instructionOrOptions; + const options = + typeof instructionOrOptions === "string" + ? 
{ instruction: instructionOrOptions } + : instructionOrOptions; - const maxSteps = options.maxSteps || 20; + const maxSteps = options.maxSteps || 20; - const systemPrompt = this.buildSystemPrompt( - options.instruction, - this.systemInstructions, - ); - const tools = this.createTools(); - const allTools: ToolSet = { ...tools, ...this.mcpTools }; - const messages: ModelMessage[] = [ - { role: "user", content: options.instruction }, - ]; + const systemPrompt = this.buildSystemPrompt( + options.instruction, + this.systemInstructions, + ); + const tools = this.createTools(); + const allTools: ToolSet = { ...tools, ...this.mcpTools }; + const messages: ModelMessage[] = [ + { role: "user", content: options.instruction }, + ]; - if (!this.llmClient?.getLanguageModel) { - throw new MissingLLMConfigurationError(); - } - const baseModel = this.llmClient.getLanguageModel(); - const wrappedModel = wrapLanguageModel({ - model: baseModel, - middleware: { - transformParams: async ({ params }) => { - const { processedPrompt } = processMessages(params); - return { ...params, prompt: processedPrompt } as typeof params; + if (!this.llmClient?.getLanguageModel) { + throw new MissingLLMConfigurationError(); + } + const baseModel = this.llmClient.getLanguageModel(); + const wrappedModel = wrapLanguageModel({ + model: baseModel, + middleware: { + transformParams: async ({ params }) => { + const { processedPrompt } = processMessages(params); + return { ...params, prompt: processedPrompt } as typeof params; + }, }, - }, - }); + }); - const initialPageUrl = (await this.v3.context.awaitActivePage()).url(); + const initialPageUrl = (await this.v3.context.awaitActivePage()).url(); - return { - options, - maxSteps, - systemPrompt, - allTools, - messages, - wrappedModel, - initialPageUrl, - }; + return { + options, + maxSteps, + systemPrompt, + allTools, + messages, + wrappedModel, + initialPageUrl, + }; } catch (error) { this.logger({ category: "agent",