From adea28c6c616f0b467149d435696d07afc7b09c5 Mon Sep 17 00:00:00 2001 From: Tyson Thomas Date: Sat, 6 Sep 2025 08:24:35 -0700 Subject: [PATCH 1/2] Update agent framework structure --- config/gni/devtools_grd_files.gni | 25 +- front_end/BUILD.gn | 2 + front_end/panels/ai_assistance/BUILD.gn | 1 + .../ai_assistance/components/ChatView.ts | 165 +- .../components/ScrollPinHelper.ts | 59 + front_end/panels/ai_chat/BUILD.gn | 59 +- front_end/panels/ai_chat/LLM/GroqProvider.ts | 33 +- .../panels/ai_chat/LLM/MessageSanitizer.ts | 88 + .../LLM/__tests__/MessageSanitizer.test.ts | 70 + .../ai_chat/agent_framework/AgentRunner.ts | 300 ++- .../agent_framework/AgentRunnerEventBus.ts | 34 + .../agent_framework/ConfigurableAgentTool.ts | 15 +- .../AgentRunner.computeToolResultText.test.ts | 31 + .../__tests__/AgentRunner.run.flows.test.ts | 125 + .../AgentRunner.sanitizeToolResult.test.ts | 49 + .../implementation/ConfiguredAgents.ts | 2 +- .../panels/ai_chat/core/AgentErrorHandler.ts | 4 +- front_end/panels/ai_chat/core/AgentNodes.ts | 116 +- front_end/panels/ai_chat/core/AgentService.ts | 139 +- .../panels/ai_chat/core/ConfigurableGraph.ts | 9 +- front_end/panels/ai_chat/core/Graph.ts | 4 +- front_end/panels/ai_chat/core/GraphHelpers.ts | 2 +- front_end/panels/ai_chat/core/State.ts | 2 +- front_end/panels/ai_chat/core/StateGraph.ts | 2 +- .../ai_chat/core/structured_response.ts | 47 + .../evaluation/remote/EvaluationAgent.ts | 3 +- front_end/panels/ai_chat/models/ChatTypes.ts | 77 + .../ai_chat/tools/CombinedExtractionTool.ts | 10 +- .../panels/ai_chat/tools/CritiqueTool.ts | 39 +- front_end/panels/ai_chat/tools/FetcherTool.ts | 18 +- .../ai_chat/tools/FinalizeWithCritiqueTool.ts | 2 +- ...FullPageAccessibilityTreeToMarkdownTool.ts | 10 +- .../ai_chat/tools/HTMLToMarkdownTool.ts | 21 +- .../ai_chat/tools/SchemaBasedExtractorTool.ts | 28 +- .../ai_chat/tools/SequentialThinkingTool.ts | 132 +- .../tools/StreamlinedSchemaExtractorTool.ts | 40 +- .../panels/ai_chat/tools/ThinkingTool.ts | 20 +- front_end/panels/ai_chat/tools/Tools.ts | 128 +- .../panels/ai_chat/ui/AIChatPanel.test.ts | 101 +- front_end/panels/ai_chat/ui/AIChatPanel.ts | 154 +- .../ai_chat/ui/AgentSessionHeaderComponent.ts | 284 ++ front_end/panels/ai_chat/ui/ChatView.ts | 2353 +++-------------- .../ai_chat/ui/LiveAgentSessionComponent.ts | 523 ++++ .../panels/ai_chat/ui/ToolCallComponent.ts | 246 ++ .../ai_chat/ui/ToolDescriptionFormatter.ts | 140 + .../panels/ai_chat/ui/ToolResultComponent.ts | 228 ++ .../__tests__/ChatViewAgentSessions.test.ts | 484 ++++ .../ChatViewAgentSessionsOrder.test.ts | 96 + .../ui/__tests__/ChatViewInputClear.test.ts | 48 + .../ui/__tests__/ChatViewPrune.test.ts | 90 + ...atViewSequentialSessionsTransition.test.ts | 102 + .../LiveAgentSessionComponent.test.ts | 184 ++ front_end/panels/ai_chat/ui/chatView.css | 30 +- .../panels/ai_chat/ui/input/ChatInput.ts | 73 + front_end/panels/ai_chat/ui/input/InputBar.ts | 150 ++ .../ui/input/__tests__/InputBarClear.test.ts | 62 + .../ai_chat/ui/markdown/MarkdownRenderers.ts | 82 + .../ai_chat/ui/message/GlobalActionsRow.ts | 55 + .../ai_chat/ui/message/MessageCombiner.ts | 137 + .../panels/ai_chat/ui/message/MessageList.ts | 87 + .../panels/ai_chat/ui/message/ModelMessage.ts | 42 + .../message/StructuredResponseController.ts | 123 + .../ui/message/StructuredResponseRender.ts | 57 + .../ai_chat/ui/message/ToolResultMessage.ts | 23 + .../panels/ai_chat/ui/message/UserMessage.ts | 21 + .../message/__tests__/MessageCombiner.test.ts | 80 + 
.../ui/message/__tests__/MessageList.test.ts | 76 + .../StructuredResponseController.test.ts | 36 + .../ui/model_selector/ModelSelector.ts | 95 + .../ai_chat/ui/oauth/OAuthConnectPanel.ts | 93 + .../ai_chat/ui/version/VersionBanner.ts | 50 + 71 files changed, 5924 insertions(+), 2392 deletions(-) create mode 100644 front_end/panels/ai_assistance/components/ScrollPinHelper.ts create mode 100644 front_end/panels/ai_chat/LLM/MessageSanitizer.ts create mode 100644 front_end/panels/ai_chat/LLM/__tests__/MessageSanitizer.test.ts create mode 100644 front_end/panels/ai_chat/agent_framework/AgentRunnerEventBus.ts create mode 100644 front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.computeToolResultText.test.ts create mode 100644 front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.run.flows.test.ts create mode 100644 front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.sanitizeToolResult.test.ts create mode 100644 front_end/panels/ai_chat/core/structured_response.ts create mode 100644 front_end/panels/ai_chat/models/ChatTypes.ts create mode 100644 front_end/panels/ai_chat/ui/AgentSessionHeaderComponent.ts create mode 100644 front_end/panels/ai_chat/ui/LiveAgentSessionComponent.ts create mode 100644 front_end/panels/ai_chat/ui/ToolCallComponent.ts create mode 100644 front_end/panels/ai_chat/ui/ToolDescriptionFormatter.ts create mode 100644 front_end/panels/ai_chat/ui/ToolResultComponent.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessions.test.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessionsOrder.test.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/ChatViewInputClear.test.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/ChatViewPrune.test.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/ChatViewSequentialSessionsTransition.test.ts create mode 100644 front_end/panels/ai_chat/ui/__tests__/LiveAgentSessionComponent.test.ts create mode 100644 front_end/panels/ai_chat/ui/input/ChatInput.ts create mode 100644 front_end/panels/ai_chat/ui/input/InputBar.ts create mode 100644 front_end/panels/ai_chat/ui/input/__tests__/InputBarClear.test.ts create mode 100644 front_end/panels/ai_chat/ui/markdown/MarkdownRenderers.ts create mode 100644 front_end/panels/ai_chat/ui/message/GlobalActionsRow.ts create mode 100644 front_end/panels/ai_chat/ui/message/MessageCombiner.ts create mode 100644 front_end/panels/ai_chat/ui/message/MessageList.ts create mode 100644 front_end/panels/ai_chat/ui/message/ModelMessage.ts create mode 100644 front_end/panels/ai_chat/ui/message/StructuredResponseController.ts create mode 100644 front_end/panels/ai_chat/ui/message/StructuredResponseRender.ts create mode 100644 front_end/panels/ai_chat/ui/message/ToolResultMessage.ts create mode 100644 front_end/panels/ai_chat/ui/message/UserMessage.ts create mode 100644 front_end/panels/ai_chat/ui/message/__tests__/MessageCombiner.test.ts create mode 100644 front_end/panels/ai_chat/ui/message/__tests__/MessageList.test.ts create mode 100644 front_end/panels/ai_chat/ui/message/__tests__/StructuredResponseController.test.ts create mode 100644 front_end/panels/ai_chat/ui/model_selector/ModelSelector.ts create mode 100644 front_end/panels/ai_chat/ui/oauth/OAuthConnectPanel.ts create mode 100644 front_end/panels/ai_chat/ui/version/VersionBanner.ts diff --git a/config/gni/devtools_grd_files.gni b/config/gni/devtools_grd_files.gni index 0633f873317..49d3b1a9d29 100644 --- a/config/gni/devtools_grd_files.gni +++ 
b/config/gni/devtools_grd_files.gni @@ -603,6 +603,11 @@ grd_files_bundled_sources = [ "front_end/panels/ai_assistance/ai_assistance.js", "front_end/panels/ai_chat/ui/AIChatPanel.js", "front_end/panels/ai_chat/ui/ChatView.js", + "front_end/panels/ai_chat/ui/LiveAgentSessionComponent.js", + "front_end/panels/ai_chat/ui/ToolCallComponent.js", + "front_end/panels/ai_chat/ui/ToolResultComponent.js", + "front_end/panels/ai_chat/ui/AgentSessionHeaderComponent.js", + "front_end/panels/ai_chat/ui/ToolDescriptionFormatter.js", "front_end/panels/ai_chat/ui/chatView.css.js", "front_end/panels/ai_chat/ui/HelpDialog.js", "front_end/panels/ai_chat/ui/PromptEditDialog.js", @@ -634,7 +639,9 @@ grd_files_bundled_sources = [ "front_end/panels/ai_chat/LLM/GroqProvider.js", "front_end/panels/ai_chat/LLM/OpenRouterProvider.js", "front_end/panels/ai_chat/LLM/LLMClient.js", + "front_end/panels/ai_chat/LLM/MessageSanitizer.js", "front_end/panels/ai_chat/tools/Tools.js", + "front_end/panels/ai_chat/tools/SequentialThinkingTool.js", "front_end/panels/ai_chat/tools/CombinedExtractionTool.js", "front_end/panels/ai_chat/tools/CritiqueTool.js", "front_end/panels/ai_chat/tools/FetcherTool.js", @@ -647,12 +654,27 @@ grd_files_bundled_sources = [ "front_end/panels/ai_chat/tools/VectorDBClient.js", "front_end/panels/ai_chat/tools/BookmarkStoreTool.js", "front_end/panels/ai_chat/tools/DocumentSearchTool.js", - "front_end/panels/ai_chat/tools/SequentialThinkingTool.js", "front_end/panels/ai_chat/tools/ThinkingTool.js", "front_end/panels/ai_chat/common/utils.js", "front_end/panels/ai_chat/common/log.js", "front_end/panels/ai_chat/common/context.js", "front_end/panels/ai_chat/common/page.js", + "front_end/panels/ai_chat/core/structured_response.js", + "front_end/panels/ai_chat/models/ChatTypes.js", + "front_end/panels/ai_chat/ui/input/ChatInput.js", + "front_end/panels/ai_chat/ui/input/InputBar.js", + "front_end/panels/ai_chat/ui/markdown/MarkdownRenderers.js", + "front_end/panels/ai_chat/ui/message/MessageList.js", + "front_end/panels/ai_chat/ui/message/ModelMessage.js", + "front_end/panels/ai_chat/ui/message/MessageCombiner.js", + "front_end/panels/ai_chat/ui/message/StructuredResponseRender.js", + "front_end/panels/ai_chat/ui/message/StructuredResponseController.js", + "front_end/panels/ai_chat/ui/message/GlobalActionsRow.js", + "front_end/panels/ai_chat/ui/message/ToolResultMessage.js", + "front_end/panels/ai_chat/ui/message/UserMessage.js", + "front_end/panels/ai_chat/ui/model_selector/ModelSelector.js", + "front_end/panels/ai_chat/ui/oauth/OAuthConnectPanel.js", + "front_end/panels/ai_chat/ui/version/VersionBanner.js", "front_end/panels/ai_chat/common/WebSocketRPCClient.js", "front_end/panels/ai_chat/common/EvaluationConfig.js", "front_end/panels/ai_chat/evaluation/remote/EvaluationProtocol.js", @@ -666,6 +688,7 @@ grd_files_bundled_sources = [ "front_end/panels/ai_chat/ai_chat.js", "front_end/panels/ai_chat/ai_chat_impl.js", "front_end/panels/ai_chat/agent_framework/AgentRunner.js", + "front_end/panels/ai_chat/agent_framework/AgentRunnerEventBus.js", "front_end/panels/ai_chat/agent_framework/AgentSessionTypes.js", "front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.js", "front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.js", diff --git a/front_end/BUILD.gn b/front_end/BUILD.gn index c71bf62644a..347e80d10ad 100644 --- a/front_end/BUILD.gn +++ b/front_end/BUILD.gn @@ -29,6 +29,8 @@ group("front_end") { "entrypoints/shell", "entrypoints/wasmparser_worker:worker_entrypoint", 
"entrypoints/worker_app:entrypoint", + "panels/ai_chat:ai_chat_release_js_metadata", + "panels/ai_chat:ai_chat_release_css_metadata", "third_party/vscode.web-custom-data:web_custom_data", ] } diff --git a/front_end/panels/ai_assistance/BUILD.gn b/front_end/panels/ai_assistance/BUILD.gn index 7e0f7f92037..e1fc11e4cb7 100644 --- a/front_end/panels/ai_assistance/BUILD.gn +++ b/front_end/panels/ai_assistance/BUILD.gn @@ -24,6 +24,7 @@ devtools_module("ai_assistance") { "PatchWidget.ts", "SelectWorkspaceDialog.ts", "components/ChatView.ts", + "components/ScrollPinHelper.ts", "components/ExploreWidget.ts", "components/MarkdownRendererWithCodeBlock.ts", "components/UserActionRow.ts", diff --git a/front_end/panels/ai_assistance/components/ChatView.ts b/front_end/panels/ai_assistance/components/ChatView.ts index 18963d4672d..a22518b9722 100644 --- a/front_end/panels/ai_assistance/components/ChatView.ts +++ b/front_end/panels/ai_assistance/components/ChatView.ts @@ -15,6 +15,7 @@ import * as Marked from '../../../third_party/marked/marked.js'; import * as Buttons from '../../../ui/components/buttons/buttons.js'; import type * as MarkdownView from '../../../ui/components/markdown_view/markdown_view.js'; import * as UI from '../../../ui/legacy/legacy.js'; +import { ScrollPinHelper } from './ScrollPinHelper.js'; import * as Lit from '../../../ui/lit/lit.js'; import * as VisualLogging from '../../../ui/visual_logging/visual_logging.js'; import {PatchWidget} from '../PatchWidget.js'; @@ -303,29 +304,13 @@ export interface Props { export class ChatView extends HTMLElement { readonly #shadow = this.attachShadow({mode: 'open'}); #markdownRenderer = new MarkdownRendererWithCodeBlock(); - #scrollTop?: number; + // Scroll management helper replaces ad-hoc state/logic + #scrollHelper = new ScrollPinHelper(); #props: Props; #messagesContainerElement?: Element; #mainElementRef?: Lit.Directives.Ref = Lit.Directives.createRef(); #messagesContainerResizeObserver = new ResizeObserver(() => this.#handleMessagesContainerResize()); #popoverHelper: UI.PopoverHelper.PopoverHelper|null = null; - /** - * Indicates whether the chat scroll position should be pinned to the bottom. - * - * This is true when: - * - The scroll is at the very bottom, allowing new messages to push the scroll down automatically. - * - The panel is initially rendered and the user hasn't scrolled yet. - * - * It is set to false when the user scrolls up to view previous messages. - */ - #pinScrollToBottom = true; - /** - * Indicates whether the scroll event originated from code - * or a user action. When set to `true`, `handleScroll` will ignore the event, - * allowing it to only handle user-driven scrolls and correctly decide - * whether to pin the content to the bottom. 
- */ - #isProgrammaticScroll = false; constructor(props: Props) { super(); @@ -350,8 +335,13 @@ export class ChatView extends HTMLElement { this.#messagesContainerResizeObserver.disconnect(); } + // Centralize access to the textarea to avoid repeated querySelector casts + #getTextArea(): HTMLTextAreaElement|null { + return this.#shadow.querySelector('.chat-input') as HTMLTextAreaElement | null; + } + clearTextInput(): void { - const textArea = this.#shadow.querySelector('.chat-input') as HTMLTextAreaElement; + const textArea = this.#getTextArea(); if (!textArea) { return; } @@ -359,7 +349,7 @@ export class ChatView extends HTMLElement { } focusTextInput(): void { - const textArea = this.#shadow.querySelector('.chat-input') as HTMLTextAreaElement; + const textArea = this.#getTextArea(); if (!textArea) { return; } @@ -368,23 +358,18 @@ export class ChatView extends HTMLElement { } restoreScrollPosition(): void { - if (this.#scrollTop === undefined) { - return; - } - - if (!this.#mainElementRef?.value) { - return; + // Ensure helper has latest element + if (this.#mainElementRef?.value) { + this.#scrollHelper.setElement(this.#mainElementRef.value as HTMLElement); } - - this.#setMainElementScrollTop(this.#scrollTop); + this.#scrollHelper.restoreLastPosition(); } scrollToBottom(): void { - if (!this.#mainElementRef?.value) { - return; + if (this.#mainElementRef?.value) { + this.#scrollHelper.setElement(this.#mainElementRef.value as HTMLElement); } - - this.#setMainElementScrollTop(this.#mainElementRef.value.scrollHeight); + this.#scrollHelper.scrollToBottom(); } #handleChatUiRef(el: Element|undefined): void { @@ -445,31 +430,16 @@ export class ChatView extends HTMLElement { } #handleMessagesContainerResize(): void { - if (!this.#pinScrollToBottom) { - return; - } - - if (!this.#mainElementRef?.value) { - return; - } - - if (this.#pinScrollToBottom) { - this.#setMainElementScrollTop(this.#mainElementRef.value.scrollHeight); + if (this.#mainElementRef?.value) { + this.#scrollHelper.setElement(this.#mainElementRef.value as HTMLElement); } + this.#scrollHelper.handleResize(); } - #setMainElementScrollTop(scrollTop: number): void { - if (!this.#mainElementRef?.value) { - return; - } - - this.#scrollTop = scrollTop; - this.#isProgrammaticScroll = true; - this.#mainElementRef.value.scrollTop = scrollTop; - } + // Removed ad-hoc scroll setter in favor of ScrollPinHelper #setInputText(text: string): void { - const textArea = this.#shadow.querySelector('.chat-input') as HTMLTextAreaElement; + const textArea = this.#getTextArea(); if (!textArea) { return; } @@ -484,7 +454,6 @@ export class ChatView extends HTMLElement { if (el) { this.#messagesContainerResizeObserver.observe(el); } else { - this.#pinScrollToBottom = true; this.#messagesContainerResizeObserver.disconnect(); } } @@ -493,18 +462,10 @@ export class ChatView extends HTMLElement { if (!ev.target || !(ev.target instanceof HTMLElement)) { return; } - - // Do not handle scroll events caused by programmatically - // updating the scroll position. We want to know whether user - // did scroll the container from the user interface. 
- if (this.#isProgrammaticScroll) { - this.#isProgrammaticScroll = false; - return; + if (this.#mainElementRef?.value) { + this.#scrollHelper.setElement(this.#mainElementRef.value as HTMLElement); } - - this.#scrollTop = ev.target.scrollTop; - this.#pinScrollToBottom = - ev.target.scrollTop + ev.target.clientHeight + SCROLL_ROUNDING_OFFSET > ev.target.scrollHeight; + this.#scrollHelper.handleScroll(ev.target); }; #handleSubmit = (ev: SubmitEvent): void => { @@ -513,7 +474,7 @@ export class ChatView extends HTMLElement { return; } - const textArea = this.#shadow.querySelector('.chat-input') as HTMLTextAreaElement; + const textArea = this.#getTextArea(); if (!textArea?.value) { return; } @@ -567,42 +528,44 @@ export class ChatView extends HTMLElement { Host.userMetrics.actionTaken(Host.UserMetrics.Action.AiAssistanceDynamicSuggestionClicked); }; + #renderFooter(): Lit.LitTemplate { + const classes = Lit.Directives.classMap({ + 'chat-view-footer': true, + 'has-conversation': !!this.#props.conversationType, + 'is-read-only': this.#props.isReadOnly, + }); + + // clang-format off + const footerContents = this.#props.conversationType + ? renderRelevantDataDisclaimer({ + isLoading: this.#props.isLoading, + blockedByCrossOrigin: this.#props.blockedByCrossOrigin, + }) + : html`

+ ${lockedString(UIStringsNotTranslate.inputDisclaimerForEmptyState)} + +

`; + + return html` +
+ ${footerContents} +
+ `; + // clang-format on + } + #render(): void { - const renderFooter = (): Lit.LitTemplate => { - const classes = Lit.Directives.classMap({ - 'chat-view-footer': true, - 'has-conversation': !!this.#props.conversationType, - 'is-read-only': this.#props.isReadOnly, - }); - - // clang-format off - const footerContents = this.#props.conversationType - ? renderRelevantDataDisclaimer({ - isLoading: this.#props.isLoading, - blockedByCrossOrigin: this.#props.blockedByCrossOrigin, - }) - : html`

- ${lockedString(UIStringsNotTranslate.inputDisclaimerForEmptyState)} - -

`; - - return html` -
- ${footerContents} -
- `; - }; // clang-format off Lit.render(html` @@ -658,7 +621,7 @@ export class ChatView extends HTMLElement { }) } - ${renderFooter()} + ${this.#renderFooter()} `, this.#shadow, {host: this}); // clang-format on diff --git a/front_end/panels/ai_assistance/components/ScrollPinHelper.ts b/front_end/panels/ai_assistance/components/ScrollPinHelper.ts new file mode 100644 index 00000000000..2f34fb5c009 --- /dev/null +++ b/front_end/panels/ai_assistance/components/ScrollPinHelper.ts @@ -0,0 +1,59 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * Utility to manage scroll pin-to-bottom behavior for a scrollable container. + */ +export class ScrollPinHelper { + #el: HTMLElement | null = null; + #scrollTop?: number; + #pinToBottom = true; + #isProgrammatic = false; + static readonly ROUNDING_OFFSET = 1; + + setElement(el: HTMLElement | undefined): void { + if (el) { + this.#el = el; + } else { + this.#el = null; + this.#pinToBottom = true; + } + } + + handleResize(): void { + if (!this.#el) return; + if (this.#pinToBottom) { + this.setScrollTop(this.#el.scrollHeight); + } + } + + handleScroll(target: HTMLElement): void { + if (this.#isProgrammatic) { + this.#isProgrammatic = false; + return; + } + this.#scrollTop = target.scrollTop; + this.#pinToBottom = target.scrollTop + target.clientHeight + ScrollPinHelper.ROUNDING_OFFSET > target.scrollHeight; + } + + setScrollTop(value: number): void { + if (!this.#el) return; + this.#scrollTop = value; + this.#isProgrammatic = true; + this.#el.scrollTop = value; + } + + scrollToBottom(): void { + if (!this.#el) return; + this.setScrollTop(this.#el.scrollHeight); + } + + restoreLastPosition(): void { + if (this.#scrollTop === undefined) { + return; + } + this.setScrollTop(this.#scrollTop); + } +} + diff --git a/front_end/panels/ai_chat/BUILD.gn b/front_end/panels/ai_chat/BUILD.gn index 0ae6cecf55a..b7996b33a02 100644 --- a/front_end/panels/ai_chat/BUILD.gn +++ b/front_end/panels/ai_chat/BUILD.gn @@ -18,16 +18,37 @@ devtools_module("ai_chat") { sources = [ "ui/AIChatPanel.ts", "ui/ChatView.ts", + "ui/message/MessageList.ts", + "ui/message/UserMessage.ts", + "ui/message/ModelMessage.ts", + "ui/message/ToolResultMessage.ts", + "ui/message/MessageCombiner.ts", + "ui/message/StructuredResponseRender.ts", + "ui/message/StructuredResponseController.ts", + "ui/message/GlobalActionsRow.ts", + "ui/markdown/MarkdownRenderers.ts", + "ui/model_selector/ModelSelector.ts", + "ui/input/ChatInput.ts", + "ui/input/InputBar.ts", + "ui/oauth/OAuthConnectPanel.ts", + "ui/version/VersionBanner.ts", + "ui/LiveAgentSessionComponent.ts", + "ui/ToolCallComponent.ts", + "ui/ToolResultComponent.ts", + "ui/AgentSessionHeaderComponent.ts", + "ui/ToolDescriptionFormatter.ts", "ui/HelpDialog.ts", "ui/SettingsDialog.ts", "ui/PromptEditDialog.ts", "ui/EvaluationDialog.ts", "ai_chat_impl.ts", + "models/ChatTypes.ts", "core/Graph.ts", "core/State.ts", "core/Types.ts", "core/AgentService.ts", "core/Constants.ts", + "core/structured_response.ts", "core/GraphConfigs.ts", "core/ConfigurableGraph.ts", "core/BaseOrchestratorAgent.ts", @@ -48,6 +69,7 @@ devtools_module("ai_chat") { "LLM/LiteLLMProvider.ts", "LLM/GroqProvider.ts", "LLM/OpenRouterProvider.ts", + "LLM/MessageSanitizer.ts", "LLM/LLMClient.ts", "tools/Tools.ts", "tools/LLMTracingWrapper.ts", @@ -67,6 +89,7 @@ devtools_module("ai_chat") { "tools/ThinkingTool.ts", "agent_framework/ConfigurableAgentTool.ts", 
"agent_framework/AgentRunner.ts", + "agent_framework/AgentRunnerEventBus.ts", "agent_framework/AgentSessionTypes.ts", "agent_framework/implementation/ConfiguredAgents.ts", "evaluation/framework/types.ts", @@ -123,16 +146,37 @@ devtools_module("ai_chat") { _ai_chat_sources = [ "ui/AIChatPanel.ts", "ui/ChatView.ts", + "ui/message/MessageList.ts", + "ui/message/UserMessage.ts", + "ui/message/ModelMessage.ts", + "ui/message/ToolResultMessage.ts", + "ui/message/MessageCombiner.ts", + "ui/message/StructuredResponseRender.ts", + "ui/message/StructuredResponseController.ts", + "ui/message/GlobalActionsRow.ts", + "ui/markdown/MarkdownRenderers.ts", + "ui/model_selector/ModelSelector.ts", + "ui/input/ChatInput.ts", + "ui/input/InputBar.ts", + "ui/oauth/OAuthConnectPanel.ts", + "ui/version/VersionBanner.ts", + "ui/LiveAgentSessionComponent.ts", + "ui/ToolCallComponent.ts", + "ui/ToolResultComponent.ts", + "ui/AgentSessionHeaderComponent.ts", + "ui/ToolDescriptionFormatter.ts", "ui/HelpDialog.ts", "ui/PromptEditDialog.ts", "ui/SettingsDialog.ts", "ui/EvaluationDialog.ts", "ai_chat_impl.ts", + "models/ChatTypes.ts", "core/Graph.ts", "core/State.ts", "core/Types.ts", "core/AgentService.ts", "core/Constants.ts", + "core/structured_response.ts", "core/GraphConfigs.ts", "core/ConfigurableGraph.ts", "core/BaseOrchestratorAgent.ts", @@ -153,6 +197,7 @@ _ai_chat_sources = [ "LLM/LiteLLMProvider.ts", "LLM/GroqProvider.ts", "LLM/OpenRouterProvider.ts", + "LLM/MessageSanitizer.ts", "LLM/LLMClient.ts", "tools/Tools.ts", "tools/CritiqueTool.ts", @@ -171,6 +216,7 @@ _ai_chat_sources = [ "tools/ThinkingTool.ts", "agent_framework/ConfigurableAgentTool.ts", "agent_framework/AgentRunner.ts", + "agent_framework/AgentRunnerEventBus.ts", "agent_framework/AgentSessionTypes.ts", "agent_framework/implementation/ConfiguredAgents.ts", "evaluation/framework/types.ts", @@ -286,7 +332,18 @@ ts_library("unittests") { testonly = true sources = [ - "common/utils.test.ts", + "ui/__tests__/ChatViewAgentSessions.test.ts", + "ui/__tests__/ChatViewPrune.test.ts", + "ui/__tests__/ChatViewAgentSessionsOrder.test.ts", + "ui/__tests__/ChatViewSequentialSessionsTransition.test.ts", + "ui/__tests__/ChatViewInputClear.test.ts", + "ui/input/__tests__/InputBarClear.test.ts", + "ui/message/__tests__/MessageCombiner.test.ts", + "ui/message/__tests__/StructuredResponseController.test.ts", + "LLM/__tests__/MessageSanitizer.test.ts", + "agent_framework/__tests__/AgentRunner.sanitizeToolResult.test.ts", + "agent_framework/__tests__/AgentRunner.computeToolResultText.test.ts", + "agent_framework/__tests__/AgentRunner.run.flows.test.ts", ] deps = [ diff --git a/front_end/panels/ai_chat/LLM/GroqProvider.ts b/front_end/panels/ai_chat/LLM/GroqProvider.ts index e5e6aaaaf0e..24afb3493d6 100644 --- a/front_end/panels/ai_chat/LLM/GroqProvider.ts +++ b/front_end/panels/ai_chat/LLM/GroqProvider.ts @@ -66,10 +66,22 @@ export class GroqProvider extends LLMBaseProvider { content: msg.content }; - // Add optional fields if present - if (msg.tool_calls) { - baseMessage.tool_calls = msg.tool_calls; + // Ensure tool call arguments are strings per OpenAI/Groq spec + if (msg.tool_calls && Array.isArray(msg.tool_calls)) { + baseMessage.tool_calls = msg.tool_calls.map(tc => { + const args = (tc.function as any).arguments; + const argsString = typeof args === 'string' ? args : JSON.stringify(args ?? 
{}); + return { + ...tc, + function: { + ...tc.function, + arguments: argsString, + }, + }; + }); } + + // Add optional fields if present if (msg.tool_call_id) { baseMessage.tool_call_id = msg.tool_call_id; } @@ -77,6 +89,13 @@ export class GroqProvider extends LLMBaseProvider { baseMessage.name = msg.name; } + // For tool role, content must be a string; stringify objects/arrays + if (msg.role === 'tool') { + if (typeof baseMessage.content !== 'string') { + baseMessage.content = JSON.stringify(baseMessage.content ?? ''); + } + } + return baseMessage; }); } @@ -204,8 +223,10 @@ export class GroqProvider extends LLMBaseProvider { }); } - // Add tool_choice if provided - if (options?.tool_choice) { + // Ensure tool_choice is set to 'auto' when tools are present unless explicitly provided + if (options?.tools && !options?.tool_choice) { + payloadBody.tool_choice = 'auto'; + } else if (options?.tool_choice) { payloadBody.tool_choice = options.tool_choice; } @@ -457,4 +478,4 @@ export class GroqProvider extends LLMBaseProvider { apiKey: 'ai_chat_groq_api_key' }; } -} \ No newline at end of file +} diff --git a/front_end/panels/ai_chat/LLM/MessageSanitizer.ts b/front_end/panels/ai_chat/LLM/MessageSanitizer.ts new file mode 100644 index 00000000000..47d77f1b604 --- /dev/null +++ b/front_end/panels/ai_chat/LLM/MessageSanitizer.ts @@ -0,0 +1,88 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import type { LLMMessage, TextContent, ImageContent, FileContent } from './LLMTypes.js'; + +/** + * Deep clone helper to avoid mutating original messages. + */ +function deepClone(obj: T): T { + return obj == null ? obj : JSON.parse(JSON.stringify(obj)); +} + +/** + * Sanitization options for capability-aware message preparation. + */ +export interface SanitizationOptions { + visionCapable: boolean; + /** + * If true, when a message becomes empty after stripping image/file parts, + * replace it with a concise placeholder string. + */ + placeholderForImageOnly?: boolean; +} + +/** + * Remove image and file parts for models that do not support vision, while preserving + * textual content and message roles. Ensures resulting messages remain valid for providers + * that expect either string content or an array with only text parts. + */ +export function sanitizeMessagesForModel( + messages: LLMMessage[], + options: SanitizationOptions +): LLMMessage[] { + const { visionCapable, placeholderForImageOnly } = options; + + // Fast path: if the model supports vision, return a deep clone to avoid side effects. + if (visionCapable) { + return deepClone(messages); + } + + const sanitized: LLMMessage[] = []; + + for (const msg of messages) { + const cloned: LLMMessage = deepClone(msg); + + // Only sanitize the content field; keep tool_calls, tool_call_id, name, role as-is. + const content = cloned.content; + + if (content === undefined) { + sanitized.push(cloned); + continue; + } + + if (typeof content === 'string') { + // Plain text content is always safe. + sanitized.push(cloned); + continue; + } + + // content is an array of parts; filter out non-text parts for non-vision models. + const parts = content as Array; + const filteredParts = parts.filter(part => { + // Keep only text parts; drop image_url and file parts. + // Also future-proof for any { type: 'image' } style parts. 
+ return (typeof part === 'object' && 'type' in part && part.type === 'text'); + }); + + if (filteredParts.length === 0) { + // Message was image/file-only. Replace with explicit text indicating no image available. + if (placeholderForImageOnly) { + cloned.content = [{ type: 'text', text: 'Image omitted (model lacks vision).' }] as any; + } else { + // If we don't want placeholders, set to empty string to keep message valid. + cloned.content = ''; + } + } else if (filteredParts.length === 1 && (filteredParts[0] as any).type === 'text') { + // Collapse single text part into a plain string for compatibility/simplicity. + cloned.content = (filteredParts[0] as any).text || ''; + } else { + cloned.content = filteredParts as any; + } + + sanitized.push(cloned); + } + + return sanitized; +} diff --git a/front_end/panels/ai_chat/LLM/__tests__/MessageSanitizer.test.ts b/front_end/panels/ai_chat/LLM/__tests__/MessageSanitizer.test.ts new file mode 100644 index 00000000000..701e4d22df0 --- /dev/null +++ b/front_end/panels/ai_chat/LLM/__tests__/MessageSanitizer.test.ts @@ -0,0 +1,70 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import { sanitizeMessagesForModel } from '../MessageSanitizer.js'; +import type { LLMMessage } from '../LLMTypes.js'; + +describe('ai_chat: MessageSanitizer', () => { + it('returns deep-cloned messages when vision is supported', () => { + const input: LLMMessage[] = [ + { role: 'user', content: [ + { type: 'text', text: 'See this' }, + { type: 'image_url', image_url: { url: 'data:image/png;base64,abc' } }, + ] }, + ]; + + const out = sanitizeMessagesForModel(input, { visionCapable: true, placeholderForImageOnly: true }); + assert.deepEqual(out, input, 'content preserved for vision-capable models'); + assert.notStrictEqual(out, input, 'messages are deep-cloned, not same reference'); + }); + + it('strips images for non-vision models and keeps text', () => { + const input: LLMMessage[] = [ + { role: 'user', content: [ + { type: 'text', text: 'Hello' }, + { type: 'image_url', image_url: { url: 'http://example.com/img.png' } }, + ] }, + ]; + + const out = sanitizeMessagesForModel(input, { visionCapable: false, placeholderForImageOnly: true }); + assert.strictEqual(out[0].role, 'user'); + assert.strictEqual(typeof out[0].content, 'string'); + assert.strictEqual(out[0].content, 'Hello', 'text content preserved as plain string'); + }); + + it('replaces image-only content with placeholder text array when requested', () => { + const input: LLMMessage[] = [ + { role: 'user', content: [ + { type: 'image_url', image_url: { url: 'data:image/png;base64,abc' } }, + ] }, + ]; + + const out = sanitizeMessagesForModel(input, { visionCapable: false, placeholderForImageOnly: true }); + assert.isArray(out[0].content, 'content becomes an array with text part'); + const parts = out[0].content as any[]; + assert.lengthOf(parts, 1); + assert.strictEqual(parts[0].type, 'text'); + assert.strictEqual(parts[0].text, 'Image omitted (model lacks vision).'); + }); + + it('collapses single text part to string', () => { + const input: LLMMessage[] = [ + { role: 'user', content: [{ type: 'text', text: 'Only text' }] }, + ]; + const out = sanitizeMessagesForModel(input, { visionCapable: false, placeholderForImageOnly: true }); + assert.strictEqual(out[0].content, 'Only text'); + }); + + it('produces empty string for image-only content if no placeholder requested', () => { + const input: 
LLMMessage[] = [ + { role: 'user', content: [ + { type: 'image_url', image_url: { url: 'data:image/png;base64,abc' } }, + ] }, + ]; + + const out = sanitizeMessagesForModel(input, { visionCapable: false, placeholderForImageOnly: false }); + assert.strictEqual(out[0].content, ''); + }); +}); + diff --git a/front_end/panels/ai_chat/agent_framework/AgentRunner.ts b/front_end/panels/ai_chat/agent_framework/AgentRunner.ts index b9407c9503e..183e2fab378 100644 --- a/front_end/panels/ai_chat/agent_framework/AgentRunner.ts +++ b/front_end/panels/ai_chat/agent_framework/AgentRunner.ts @@ -4,15 +4,16 @@ import { enhancePromptWithPageContext } from '../core/PageInfoManager.js'; import { LLMClient } from '../LLM/LLMClient.js'; -import type { LLMResponse, LLMMessage } from '../LLM/LLMTypes.js'; +import type { LLMResponse, LLMMessage, LLMProvider } from '../LLM/LLMTypes.js'; import type { Tool } from '../tools/Tools.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; -import { ChatMessageEntity, type ChatMessage, type ModelChatMessage, type ToolResultMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ChatMessage, type ModelChatMessage, type ToolResultMessage } from '../models/ChatTypes.js'; import { createLogger } from '../core/Logger.js'; import { createTracingProvider, getCurrentTracingContext } from '../tracing/TracingConfig.js'; import type { AgentSession, AgentMessage } from './AgentSessionTypes.js'; import { AgentErrorHandler } from '../core/AgentErrorHandler.js'; +import { AgentRunnerEventBus } from './AgentRunnerEventBus.js'; import { callLLMWithTracing } from '../tools/LLMTracingWrapper.js'; +import { sanitizeMessagesForModel } from '../LLM/MessageSanitizer.js'; const logger = createLogger('AgentRunner'); @@ -28,6 +29,10 @@ export interface AgentRunnerConfig { tools: Array>; maxIterations: number; temperature?: number; + /** Selected LLM provider for this run (required) */ + provider: LLMProvider; + /** Optional vision capability check. Defaults to false (no vision). */ + getVisionCapability?: (modelName: string) => Promise | boolean; } /** @@ -47,23 +52,15 @@ export interface AgentRunnerHooks { * Runs the core agent execution loop */ export class AgentRunner { - private static currentSession: AgentSession | null = null; - + private static eventBus: AgentRunnerEventBus | null = null; + /** - * Add a message to the current session + * Initialize event bus connection */ - private static addSessionMessage(message: Partial): void { - if (!this.currentSession) { - return; + static initializeEventBus(): void { + if (!AgentRunner.eventBus) { + AgentRunner.eventBus = AgentRunnerEventBus.getInstance(); } - - const fullMessage: AgentMessage = { - id: crypto.randomUUID(), - timestamp: new Date(), - ...message - } as AgentMessage; - - this.currentSession.messages.push(fullMessage); } /** * Helper function to convert ChatMessage[] to LLMMessage[] @@ -72,6 +69,11 @@ export class AgentRunner { const llmMessages: LLMMessage[] = []; for (const msg of messages) { + // Skip AGENT_SESSION messages - they are for UI only + if (msg.entity === ChatMessageEntity.AGENT_SESSION) { + continue; + } + if (msg.entity === ChatMessageEntity.USER) { // User message if ('text' in msg) { @@ -181,6 +183,25 @@ export class AgentRunner { return sanitized; } + /** + * Compute the tool result text shown to the LLM for regular tool outputs (non-ConfigurableAgentResult). + * Applies sanitization and chooses a placeholder if the result only contained an image payload. 
+ */ + static computeToolResultText(toolResultData: any, imageData?: string): string { + // If the tool produced a simple string, return as-is + if (typeof toolResultData === 'string') { + return toolResultData; + } + // Create sanitized data for text representation (exclude large/non-LLM fields) + const sanitizedData = this.sanitizeToolResultForText(toolResultData); + const sanitizedIsEmptyObject = typeof sanitizedData === 'object' && sanitizedData !== null && Object.keys(sanitizedData).length === 0; + const hadOnlyImage = !!imageData && sanitizedIsEmptyObject; + if (hadOnlyImage) { + return 'Image omitted (model lacks vision).'; + } + return JSON.stringify(sanitizedData, null, 2); + } + // Helper function to execute the handoff logic (to avoid duplication) private static async executeHandoff( currentMessages: ChatMessage[], @@ -194,7 +215,10 @@ export class AgentRunner { defaultCreateSuccessResult: AgentRunnerHooks['createSuccessResult'], defaultCreateErrorResult: AgentRunnerHooks['createErrorResult'], llmToolArgs?: ConfigurableAgentArgs, // Specific args if triggered by LLM tool call - parentSession?: AgentSession // For natural nesting + parentSession?: AgentSession, // For natural nesting + defaultProvider?: LLMProvider, + defaultGetVisionCapability?: (modelName: string) => Promise | boolean, + overrides?: { sessionId?: string; parentSessionId?: string; traceId?: string } ): Promise { const targetAgentName = handoffConfig.targetAgentName; const targetAgentTool = ToolRegistry.getRegisteredTool(targetAgentName); @@ -228,6 +252,10 @@ export class AgentRunner { // Filter messages: keep user messages, final answers, and only tool calls/results for specified tools logger.info(`Filtering messages for handoff to ${targetAgentTool.name} based on includeToolResults.`); handoffMessages = currentMessages.filter(message => { + // Always exclude AGENT_SESSION messages - they are for UI only + if (message.entity === ChatMessageEntity.AGENT_SESSION) { + return false; + } if (message.entity === ChatMessageEntity.USER) { return true; // Always include user messages } @@ -247,9 +275,12 @@ export class AgentRunner { return false; // Exclude other message types }); } else { - // No filter specified: pass the entire message history - logger.info(`Passing full message history for handoff to ${targetAgentTool.name}.`); - handoffMessages = [...currentMessages]; + // No filter specified: pass the entire message history (but still exclude AGENT_SESSION messages) + logger.info(`Passing filtered message history for handoff to ${targetAgentTool.name}.`); + handoffMessages = currentMessages.filter(message => { + // Always exclude AGENT_SESSION messages - they are for UI only + return message.entity !== ChatMessageEntity.AGENT_SESSION; + }); } // Enhance the target agent's system prompt with page context @@ -267,6 +298,8 @@ export class AgentRunner { .filter((tool): tool is Tool => tool !== null), maxIterations: targetConfig.maxIterations || defaultMaxIterations, temperature: targetConfig.temperature ?? 
defaultTemperature, + provider: defaultProvider as LLMProvider, + getVisionCapability: defaultGetVisionCapability, }; const targetRunnerHooks: AgentRunnerHooks = { prepareInitialMessages: undefined, // History already formed by transform or passthrough @@ -288,7 +321,8 @@ export class AgentRunner { targetRunnerConfig, // Pass the constructed config targetRunnerHooks, // Pass the constructed hooks targetAgentTool, // Target agent is now the executing agent - parentSession // Pass parent session for natural nesting + parentSession, // Pass parent session for natural nesting + overrides ); // Extract the result and session @@ -337,10 +371,11 @@ export class AgentRunner { config: AgentRunnerConfig, hooks: AgentRunnerHooks, executingAgent: ConfigurableAgentTool | null, - parentSession?: AgentSession // For natural nesting + parentSession?: AgentSession, // For natural nesting + overrides?: { sessionId?: string; parentSessionId?: string; traceId?: string } ): Promise { const agentName = executingAgent?.name || 'Unknown'; - logger.info('Starting execution loop for agent: ${agentName}'); + logger.info(`Starting execution loop for agent: ${agentName}`); const { apiKey, modelName, systemPrompt, tools, maxIterations, temperature } = config; const { prepareInitialMessages, createSuccessResult, createErrorResult } = hooks; @@ -352,8 +387,8 @@ export class AgentRunner { agentReasoning: args.reasoning, agentDisplayName: executingAgent?.config?.ui?.displayName || agentName, agentDescription: executingAgent?.config?.description, - sessionId: crypto.randomUUID(), - parentSessionId: parentSession?.sessionId, + sessionId: overrides?.sessionId || crypto.randomUUID(), + parentSessionId: overrides?.parentSessionId || parentSession?.sessionId, status: 'running', startTime: new Date(), messages: [], @@ -365,7 +400,58 @@ export class AgentRunner { iterationCount: 0 }; - this.currentSession = agentSession; + // Use local session variable instead of static + let currentSession = agentSession; + + // Emit session started event + if (AgentRunner.eventBus) { + AgentRunner.eventBus.emitProgress({ + type: 'session_started', + sessionId: agentSession.sessionId, + parentSessionId: agentSession.parentSessionId, + agentName, + timestamp: new Date(), + data: { session: agentSession } + }); + } + + // Create local function that captures the correct session + const addSessionMessage = (message: Partial): void => { + const fullMessage: AgentMessage = { + id: crypto.randomUUID(), + timestamp: new Date(), + ...message + } as AgentMessage; + + currentSession.messages.push(fullMessage); + + // Emit progress events based on message type + if (AgentRunner.eventBus && fullMessage.type === 'tool_call') { + AgentRunner.eventBus.emitProgress({ + type: 'tool_started', + sessionId: currentSession.sessionId, + parentSessionId: currentSession.parentSessionId, + agentName: currentSession.agentName, + timestamp: new Date(), + data: { + session: currentSession, + toolCall: fullMessage + } + }); + } else if (AgentRunner.eventBus && fullMessage.type === 'tool_result') { + AgentRunner.eventBus.emitProgress({ + type: 'tool_completed', + sessionId: currentSession.sessionId, + parentSessionId: currentSession.parentSessionId, + agentName: currentSession.agentName, + timestamp: new Date(), + data: { + session: currentSession, + toolResult: fullMessage + } + }); + } + }; let messages = [...initialMessages]; @@ -413,7 +499,7 @@ export class AgentRunner { // Add a mapping for the handoff tool 'name' to the actual target tool instance // This allows us to 
find the target agent later when this tool is called. toolMap.set(handoffToolName, targetTool); - logger.info('Added LLM handoff tool schema: ${handoffToolName}'); + logger.info(`Added LLM handoff tool schema: ${handoffToolName}`); } else { logger.warn(`Configured LLM handoff target '${targetAgentName}' not found or is not a ConfigurableAgentTool.`); } @@ -424,7 +510,7 @@ export class AgentRunner { // Capture initial reasoning from args if provided if (args.reasoning) { const reasoningText = Array.isArray(args.reasoning) ? args.reasoning.join(' ') : args.reasoning; - this.addSessionMessage({ + addSessionMessage({ type: 'reasoning', content: { type: 'reasoning', @@ -437,8 +523,8 @@ export class AgentRunner { for (iteration = 0; iteration < maxIterations; iteration++) { // Update session iteration count - if (this.currentSession) { - this.currentSession.iterationCount = iteration + 1; + if (currentSession) { + currentSession.iterationCount = iteration + 1; } logger.info(`${agentName} Iteration ${iteration + 1}/${maxIterations}`); @@ -484,7 +570,7 @@ export class AgentRunner { model: modelName, modelParameters: { temperature: temperature ?? 0, - provider: AIChatPanel.getProviderForModel(modelName) + provider: config.provider }, input: { systemPrompt: currentSystemPrompt.substring(0, 500) + '...', // Truncate for tracing @@ -513,13 +599,28 @@ export class AgentRunner { } const llm = LLMClient.getInstance(); - const provider = AIChatPanel.getProviderForModel(modelName); + const provider = config.provider as LLMProvider; const llmMessages = AgentRunner.convertToLLMMessages(messages); + // Sanitize messages for model capabilities (strip images for non-vision models) + let isVisionForMainCall = false; + if (typeof config.getVisionCapability === 'function') { + try { + const res = await config.getVisionCapability(modelName); + isVisionForMainCall = typeof res === 'boolean' ? res : false; + } catch { + isVisionForMainCall = false; + } + } + const sanitizedForMainCall = sanitizeMessagesForModel(llmMessages, { + visionCapable: isVisionForMainCall, + placeholderForImageOnly: true, + }); + llmResponse = await llm.call({ provider, model: modelName, - messages: llmMessages, + messages: sanitizedForMainCall, systemPrompt: currentSystemPrompt, tools: toolSchemas, temperature: temperature ?? 
0, @@ -592,7 +693,7 @@ export class AgentRunner { messages.push(systemErrorMessage); // Generate summary of error scenario - const errorSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'error'); + const errorSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'error', config.provider, config.getVisionCapability); // Complete session with error agentSession.status = 'error'; @@ -672,7 +773,7 @@ export class AgentRunner { messages.push(newModelMessage); // Add tool call to current session - this.addSessionMessage({ + addSessionMessage({ type: 'tool_call', content: { type: 'tool_call', @@ -693,7 +794,7 @@ export class AgentRunner { if (result.shouldContinue && result.errorMessage) { messages.push(result.errorMessage); if (result.sessionMessage) { - errorHandler.addSessionMessage(result.sessionMessage); + addSessionMessage(result.sessionMessage); } continue; // Continue to next iteration } @@ -721,7 +822,7 @@ export class AgentRunner { // Add handoff message to current session const nestedSessionId = crypto.randomUUID(); - this.addSessionMessage({ + addSessionMessage({ type: 'handoff', content: { type: 'handoff', @@ -741,7 +842,10 @@ export class AgentRunner { apiKey, modelName, maxIterations, temperature ?? 0, createSuccessResult, createErrorResult, toolArgs as ConfigurableAgentArgs, // <= Pass LLM's toolArgs explicitly as llmToolArgs - this.currentSession // Pass current session for natural nesting + currentSession, // Pass current session for natural nesting + config.provider, + config.getVisionCapability, + { sessionId: nestedSessionId, parentSessionId: currentSession.sessionId, traceId: getCurrentTracingContext()?.traceId } ); // LLM tool handoff replaces the current agent's execution entirely @@ -795,9 +899,76 @@ export class AgentRunner { } } - try { + // Special handling for agent-to-agent tool calls + let preallocatedChildId: string | undefined; + if (toolToExecute instanceof ConfigurableAgentTool) { + // This is an agent being called as a tool! 
+ + // Pre-allocate child session ID and add placeholder for real-time UI + preallocatedChildId = crypto.randomUUID(); + const childPlaceholder: AgentSession = { + sessionId: preallocatedChildId, + agentName: toolName, + parentSessionId: currentSession.sessionId, + status: 'running', + startTime: new Date(), + messages: [], + nestedSessions: [], + tools: [] + }; + currentSession.nestedSessions.push(childPlaceholder); + // Add a handoff anchor message to the parent session timeline so the UI can inline the child timeline + addSessionMessage({ + type: 'handoff', + content: { + type: 'handoff', + targetAgent: toolName, + reason: `Handing off to ${toolName}`, + context: toolArgs as Record, + nestedSessionId: preallocatedChildId + } + }); + + // Emit child agent starting + if (AgentRunner.eventBus) { + AgentRunner.eventBus.emitProgress({ + type: 'child_agent_started', + sessionId: currentSession.sessionId, + parentSessionId: currentSession.parentSessionId, + agentName: currentSession.agentName, + timestamp: new Date(), + data: { + parentSession: currentSession, + childAgentName: toolName, + childSessionId: preallocatedChildId + } + }); + } + } + + try { logger.info(`${agentName} Executing tool: ${toolToExecute.name} with args:`, toolArgs); - toolResultData = await toolToExecute.execute(toolArgs as any); + const execTracingContext = getCurrentTracingContext(); + toolResultData = await toolToExecute.execute(toolArgs as any, ({ + provider: config.provider, + model: modelName, + getVisionCapability: config.getVisionCapability, + overrideSessionId: preallocatedChildId, + overrideParentSessionId: currentSession.sessionId, + overrideTraceId: execTracingContext?.traceId, + } as any)); + + // If this was an agent tool, replace placeholder with actual session + if (toolToExecute instanceof ConfigurableAgentTool && toolResultData?.agentSession) { + const index = currentSession.nestedSessions.findIndex( + s => s.sessionId === preallocatedChildId + ); + if (index !== -1) { + // Ensure the child session knows its parent for downstream UI logic + try { (toolResultData.agentSession as any).parentSessionId = currentSession.sessionId; } catch {} + currentSession.nestedSessions[index] = toolResultData.agentSession; + } + } // Extract image data if present (before sanitization) if (typeof toolResultData === 'object' && toolResultData !== null) { @@ -816,7 +987,7 @@ export class AgentRunner { : (toolResultData.error || 'Agent failed'); } else { // Regular tool result - toolResultText = typeof toolResultData === 'string' ? 
toolResultData : JSON.stringify(sanitizedData, null, 2); + toolResultText = AgentRunner.computeToolResultText(toolResultData, imageData); } // Check if the result object indicates an error explicitly @@ -919,7 +1090,7 @@ export class AgentRunner { messages.push(toolResultMessage); // Add tool result to current session - this.addSessionMessage({ + addSessionMessage({ type: 'tool_result', content: { type: 'tool_result', @@ -944,7 +1115,7 @@ export class AgentRunner { messages.push(newModelMessage); // Add final answer to current session - this.addSessionMessage({ + addSessionMessage({ type: 'final_answer', content: { type: 'final_answer', @@ -958,7 +1129,7 @@ export class AgentRunner { logger.info(`${agentName} LLM provided final answer.`); // Generate summary of successful completion - const completionSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'final_answer'); + const completionSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'final_answer', config.provider, config.getVisionCapability); // Complete session naturally agentSession.status = 'completed'; @@ -978,7 +1149,7 @@ export class AgentRunner { if (result.shouldContinue && result.errorMessage) { messages.push(result.errorMessage); if (result.sessionMessage) { - errorHandler.addSessionMessage(result.sessionMessage); + addSessionMessage(result.sessionMessage); } continue; // Continue to next iteration so the LLM can try again } @@ -1001,7 +1172,7 @@ export class AgentRunner { messages.push(systemErrorMessage); // Generate summary of error scenario - const errorSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'error'); + const errorSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'error', config.provider, config.getVisionCapability); // Complete session with error agentSession.status = 'error'; @@ -1037,14 +1208,16 @@ export class AgentRunner { apiKey, modelName, maxIterations, temperature ?? 
0, createSuccessResult, createErrorResult, undefined, // No llmToolArgs for max iterations handoff - this.currentSession // Pass current session for natural nesting + currentSession, // Pass current session for natural nesting + config.provider, + config.getVisionCapability ); // Extract the result and session const { agentSession: childSession, ...actualResult } = handoffResult; // Add child session to current session's nested sessions (natural nesting) - if (this.currentSession) { - this.currentSession.nestedSessions.push(childSession); + if (currentSession) { + currentSession.nestedSessions.push(childSession); } // Complete current session and return result with session @@ -1065,7 +1238,7 @@ export class AgentRunner { agentSession.terminationReason = 'max_iterations'; // Generate summary of agent progress instead of generic error message - const progressSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName); + const progressSummary = await this.summarizeAgentProgress(messages, maxIterations, agentName, modelName, 'max_iterations', config.provider, config.getVisionCapability); const result = createErrorResult('Agent reached maximum iterations', messages, 'max_iterations'); result.summary = { type: 'timeout', @@ -1083,7 +1256,9 @@ export class AgentRunner { maxIterations: number, agentName: string, modelName: string, - completionType: 'final_answer' | 'max_iterations' | 'error' = 'max_iterations' + completionType: 'final_answer' | 'max_iterations' | 'error' = 'max_iterations', + provider: LLMProvider, + getVisionCapability?: (modelName: string) => Promise | boolean ): Promise { logger.info(`Generating summary for agent "${agentName}" with completion type: ${completionType}`); try { @@ -1143,13 +1318,28 @@ Format your response as a clear, informative summary that would help a calling a content: summaryPrompt }); - const provider = AIChatPanel.getProviderForModel(modelName); + const selectedProvider = provider; + + // Centralized, capability-aware sanitization: strip images for non-vision models + let isVision = false; + if (typeof getVisionCapability === 'function') { + try { + const res = await getVisionCapability(modelName); + isVision = typeof res === 'boolean' ? res : false; + } catch { + isVision = false; + } + } + const sanitizedMessages = sanitizeMessagesForModel(llmMessages, { + visionCapable: isVision, + placeholderForImageOnly: true, + }); const response = await callLLMWithTracing( { - provider, + provider: selectedProvider as LLMProvider, model: modelName, - messages: llmMessages, + messages: sanitizedMessages, systemPrompt: '', // Empty string instead of undefined temperature: 0.1, // Omit tools parameter entirely to avoid tool_choice conflicts diff --git a/front_end/panels/ai_chat/agent_framework/AgentRunnerEventBus.ts b/front_end/panels/ai_chat/agent_framework/AgentRunnerEventBus.ts new file mode 100644 index 00000000000..9f6187b9062 --- /dev/null +++ b/front_end/panels/ai_chat/agent_framework/AgentRunnerEventBus.ts @@ -0,0 +1,34 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +import * as Common from '../../../core/common/common.js'; + +export interface AgentRunnerProgressEvent { + type: 'session_started' | 'tool_started' | 'tool_completed' | 'session_updated' | 'child_agent_started'; + sessionId: string; + parentSessionId?: string; + agentName: string; + timestamp: Date; + data: any; +} + +export class AgentRunnerEventBus extends Common.ObjectWrapper.ObjectWrapper<{ + 'agent-progress': AgentRunnerProgressEvent +}> { + private static instance: AgentRunnerEventBus; + + static getInstance(): AgentRunnerEventBus { + if (!this.instance) { + this.instance = new AgentRunnerEventBus(); + } + return this.instance; + } + + emitProgress(event: AgentRunnerProgressEvent): void { + this.dispatchEventToListeners('agent-progress', event); + } +} + +// Alternative: Callback-based approach for static context +export type ProgressCallback = (event: AgentRunnerProgressEvent) => void; \ No newline at end of file diff --git a/front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.ts b/front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.ts index f20b99b8161..5ddf170d813 100644 --- a/front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.ts +++ b/front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.ts @@ -5,7 +5,7 @@ import { AgentService } from '../core/AgentService.js'; import type { Tool } from '../tools/Tools.js'; import { AIChatPanel } from '../ui/AIChatPanel.js'; -import { ChatMessageEntity, type ChatMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ChatMessage } from '../models/ChatTypes.js'; import { createLogger } from '../core/Logger.js'; import { getCurrentTracingContext } from '../tracing/TracingConfig.js'; import type { AgentSession } from './AgentSessionTypes.js'; @@ -383,7 +383,7 @@ export class ConfigurableAgentTool implements Tool { + async execute(args: ConfigurableAgentArgs, _ctx?: unknown): Promise { logger.info(`Executing ${this.name} via AgentRunner with args:`, args); // Get current tracing context for debugging @@ -431,6 +431,8 @@ export class ConfigurableAgentTool implements Tool AIChatPanel.isVisionCapable(m), }; const runnerHooks: AgentRunnerHooks = { @@ -444,12 +446,19 @@ export class ConfigurableAgentTool implements Tool { + it('returns placeholder when sanitized is empty and imageData exists', () => { + const toolResult = { imageData: 'data:image/png;base64,AAA' } as any; + const out = AgentRunner.computeToolResultText(toolResult, toolResult.imageData); + assert.strictEqual(out, 'Image omitted (model lacks vision).'); + }); + + it('returns JSON string of sanitized when other fields exist even if imageData exists', () => { + const toolResult = { imageData: 'data:image/png;base64,BBB', note: 'hello', value: 1 } as any; + const out = AgentRunner.computeToolResultText(toolResult, toolResult.imageData); + assert.strictEqual(out, JSON.stringify({ note: 'hello', value: 1 }, null, 2)); + }); + + it('returns original string when toolResultData is a string', () => { + const out = AgentRunner.computeToolResultText('plain string result'); + assert.strictEqual(out, 'plain string result'); + }); + + it('returns {} JSON for empty sanitized object when no imageData provided', () => { + const toolResult = { imageData: 'data:image/png;base64,CCC' } as any; + const out = AgentRunner.computeToolResultText(toolResult, undefined); + assert.strictEqual(out, '{}'); + }); +}); + diff --git a/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.run.flows.test.ts 
b/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.run.flows.test.ts new file mode 100644 index 00000000000..c6553605940 --- /dev/null +++ b/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.run.flows.test.ts @@ -0,0 +1,125 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import { AgentRunner } from '../AgentRunner.js'; +import type { AgentRunnerConfig, AgentRunnerHooks } from '../AgentRunner.js'; +import { ChatMessageEntity, type ChatMessage } from '../../models/ChatTypes.js'; +import type { Tool } from '../../tools/Tools.js'; +import { AIChatPanel } from '../../ui/AIChatPanel.js'; +import { LLMClient } from '../../LLM/LLMClient.js'; + +// Minimal fake tool for testing +function makeTool(name: string, executeImpl: (args: any) => Promise): Tool { + return { + name, + description: `test tool ${name}`, + schema: { type: 'object', properties: {} }, + execute: executeImpl, + }; +} + +// Stub AIChatPanel static methods used by AgentRunner +function stubAIChatPanel() { + (AIChatPanel as any).getProviderForModel = (_model: string) => 'openai'; + (AIChatPanel as any).isVisionCapable = async (_model: string) => false; +} + +// Create a stub LLM client with deterministic responses +function stubLLMClientWithSequence(sequence: Array<'tool_call'|'final_answer'|'error'>, toolName = 'echo_tool') { + let calls = 0; + const fake = { + call: async () => { + calls++; + return { rawResponse: { seq: calls } }; + }, + parseResponse: (_resp: any) => { + const kind = sequence[Math.min(calls - 1, sequence.length - 1)]; + if (kind === 'tool_call') { + return { type: 'tool_call', name: toolName, args: {}}; + } + if (kind === 'final_answer') { + return { type: 'final_answer', answer: 'done' }; + } + return { type: 'error', error: 'forced' }; + } + }; + (LLMClient as any).getInstance = () => fake; +} + +describe('ai_chat: AgentRunner.run flows', () => { + beforeEach(() => { + stubAIChatPanel(); + }); + + it('runs tool_call -> tool_result (image-only placeholder) -> final_answer', async () => { + stubLLMClientWithSequence(['tool_call', 'final_answer'], 'echo_tool'); + + const initialMessages: ChatMessage[] = [ + { entity: ChatMessageEntity.USER, text: 'go', } as any, + ]; + + const tool = makeTool('echo_tool', async () => ({ imageData: 'data:image/png;base64,AAA' })); + + const config: AgentRunnerConfig = { + apiKey: 'k', + modelName: 'gpt-4.1-2025-04-14', + systemPrompt: 'sys', + tools: [tool], + maxIterations: 3, + temperature: 0, + provider: 'openai', + }; + const hooks: AgentRunnerHooks = { + prepareInitialMessages: undefined, + createSuccessResult: (output, steps, reason) => ({ success: true, output, terminationReason: reason, intermediateSteps: steps }), + createErrorResult: (error, steps, reason) => ({ success: false, error, terminationReason: reason, intermediateSteps: steps }), + }; + + const result = await AgentRunner.run(initialMessages, { query: 'q', reasoning: '' }, config, hooks, null); + + // Asserts + if (!result.success) throw new Error('Expected success'); + if (!result.intermediateSteps) throw new Error('Expected intermediate steps'); + const toolResult = result.intermediateSteps.find(m => m.entity === ChatMessageEntity.TOOL_RESULT) as any; + if (!toolResult) throw new Error('Expected tool result'); + assert.strictEqual(toolResult.resultText, 'Image omitted (model lacks vision).'); + const final = result.intermediateSteps.find(m => m.entity === 
ChatMessageEntity.MODEL && (m as any).action === 'final') as any; + assert.isOk(final, 'Expected final answer message'); + assert.strictEqual(result.terminationReason, 'final_answer'); + }); + + it('handles LLM call error and returns error result', async () => { + // Stub LLM to throw on call + (LLMClient as any).getInstance = () => ({ + call: async () => { throw new Error('boom'); }, + parseResponse: (_: any) => ({ type: 'error', error: 'n/a' }), + }); + + const initialMessages: ChatMessage[] = [ + { entity: ChatMessageEntity.USER, text: 'go', } as any, + ]; + const config: AgentRunnerConfig = { + apiKey: 'k', + modelName: 'gpt-4.1-2025-04-14', + systemPrompt: 'sys', + tools: [], + maxIterations: 1, + temperature: 0, + provider: 'openai', + }; + const hooks: AgentRunnerHooks = { + prepareInitialMessages: undefined, + createSuccessResult: (output, steps, reason) => ({ success: true, output, terminationReason: reason, intermediateSteps: steps }), + createErrorResult: (error, steps, reason) => ({ success: false, error, terminationReason: reason, intermediateSteps: steps }), + }; + + const result = await AgentRunner.run(initialMessages, { query: 'q', reasoning: '' }, config, hooks, null); + assert.isFalse(result.success); + assert.strictEqual(result.terminationReason, 'error'); + // Last message should be a system_error tool result + const last = (result.intermediateSteps || []).slice(-1)[0] as any; + assert.strictEqual(last?.entity, ChatMessageEntity.TOOL_RESULT); + assert.strictEqual(last?.toolName, 'system_error'); + }); +}); diff --git a/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.sanitizeToolResult.test.ts b/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.sanitizeToolResult.test.ts new file mode 100644 index 00000000000..99e093fd60b --- /dev/null +++ b/front_end/panels/ai_chat/agent_framework/__tests__/AgentRunner.sanitizeToolResult.test.ts @@ -0,0 +1,49 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import { AgentRunner } from '../AgentRunner.js'; + +describe('ai_chat: AgentRunner.sanitizeToolResultForText', () => { + it('removes imageData and returns empty object for image-only results', () => { + const toolResult = { imageData: 'data:image/png;base64,AAA' } as any; + const sanitized = (AgentRunner as any).sanitizeToolResultForText(toolResult); + assert.isObject(sanitized); + assert.deepEqual(sanitized, {}, 'image-only payload becomes empty object'); + + // Simulate placeholder decision logic used in AgentRunner + const imageData = toolResult.imageData; + const sanitizedIsEmpty = typeof sanitized === 'object' && sanitized !== null && Object.keys(sanitized).length === 0; + const hadOnlyImage = !!imageData && sanitizedIsEmpty; + const toolResultText = hadOnlyImage ? 'Image omitted (model lacks vision).' 
: JSON.stringify(sanitized, null, 2); + assert.strictEqual(toolResultText, 'Image omitted (model lacks vision).'); + }); + + it('keeps non-image fields and stringifies them', () => { + const toolResult = { imageData: 'data:image/png;base64,AAA', summary: 'ok', value: 42 } as any; + const sanitized = (AgentRunner as any).sanitizeToolResultForText(toolResult); + assert.doesNotHaveAnyKeys(sanitized, ['imageData']); + assert.deepEqual(sanitized, { summary: 'ok', value: 42 }); + }); + + it('does not use placeholder when sanitized has fields even if imageData exists', () => { + const toolResult = { imageData: 'data:image/png;base64,BBB', summary: 'note' } as any; + const sanitized = (AgentRunner as any).sanitizeToolResultForText(toolResult); + const imageData = toolResult.imageData; + const sanitizedIsEmpty = typeof sanitized === 'object' && sanitized !== null && Object.keys(sanitized).length === 0; + const hadOnlyImage = !!imageData && sanitizedIsEmpty; + const toolResultText = hadOnlyImage ? 'Image omitted (model lacks vision).' : JSON.stringify(sanitized, null, 2); + assert.strictEqual(toolResultText, JSON.stringify({ summary: 'note' }, null, 2)); + }); + + it('uses original string when toolResultData is a string', () => { + const toolResultData = 'plain string result'; + // Simulate branch: when not object, we do not sanitize, and code picks the string directly + const sanitizedData = toolResultData as any; + const imageData = undefined; + const sanitizedIsEmptyObject = typeof sanitizedData === 'object' && sanitizedData !== null && Object.keys(sanitizedData).length === 0; + const hadOnlyImage = !!imageData && sanitizedIsEmptyObject; + const toolResultText = hadOnlyImage ? 'Image omitted (model lacks vision).' : (typeof toolResultData === 'string' ? toolResultData : JSON.stringify(sanitizedData, null, 2)); + assert.strictEqual(toolResultText, 'plain string result'); + }); +}); diff --git a/front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts b/front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts index 60d437cbba6..c32ecb65a27 100644 --- a/front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts +++ b/front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts @@ -11,7 +11,7 @@ import { DocumentSearchTool } from '../../tools/DocumentSearchTool.js'; import { NavigateURLTool, PerformActionTool, GetAccessibilityTreeTool, SearchContentTool, NavigateBackTool, NodeIDsToURLsTool, TakeScreenshotTool, ScrollPageTool } from '../../tools/Tools.js'; import { HTMLToMarkdownTool } from '../../tools/HTMLToMarkdownTool.js'; import { AIChatPanel } from '../../ui/AIChatPanel.js'; -import { ChatMessageEntity, type ChatMessage } from '../../ui/ChatView.js'; +import { ChatMessageEntity, type ChatMessage } from '../../models/ChatTypes.js'; import { ConfigurableAgentTool, ToolRegistry, type AgentToolConfig, type ConfigurableAgentArgs diff --git a/front_end/panels/ai_chat/core/AgentErrorHandler.ts b/front_end/panels/ai_chat/core/AgentErrorHandler.ts index 794b3bf367b..1aa2d38c9c1 100644 --- a/front_end/panels/ai_chat/core/AgentErrorHandler.ts +++ b/front_end/panels/ai_chat/core/AgentErrorHandler.ts @@ -3,7 +3,7 @@ // found in the LICENSE file. 
import { createLogger } from './Logger.js'; -import { ChatMessageEntity, type ChatMessage, type ToolResultMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ChatMessage, type ToolResultMessage } from '../models/ChatTypes.js'; import type { AgentSession, AgentMessage } from '../agent_framework/AgentSessionTypes.js'; const logger = createLogger('AgentErrorHandler'); @@ -307,4 +307,4 @@ export class AgentErrorHandler { ) => AgentErrorHandler.executeWithRetry(operation, isValidResult, { ...config, ...retryConfig }) }; } -} \ No newline at end of file +} diff --git a/front_end/panels/ai_chat/core/AgentNodes.ts b/front_end/panels/ai_chat/core/AgentNodes.ts index 560e517f1fe..6a48713755a 100644 --- a/front_end/panels/ai_chat/core/AgentNodes.ts +++ b/front_end/panels/ai_chat/core/AgentNodes.ts @@ -3,12 +3,12 @@ // found in the LICENSE file. import type { getTools } from '../tools/Tools.js'; -import { ChatMessageEntity, type ModelChatMessage, type ToolResultMessage, type ChatMessage, type AgentSessionMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ModelChatMessage, type ToolResultMessage, type ChatMessage, type AgentSessionMessage } from '../models/ChatTypes.js'; import { ConfigurableAgentTool } from '../agent_framework/ConfigurableAgentTool.js'; import { LLMClient } from '../LLM/LLMClient.js'; import type { LLMMessage } from '../LLM/LLMTypes.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; +import type { LLMProvider } from '../LLM/LLMTypes.js'; import { createSystemPromptAsync, getAgentToolsFromState } from './GraphHelpers.js'; import { createLogger } from './Logger.js'; import type { AgentState } from './State.js'; @@ -19,19 +19,16 @@ import type { TracingProvider } from '../tracing/TracingProvider.js'; const logger = createLogger('AgentNodes'); -export function createAgentNode(modelName: string, temperature: number): Runnable { +export function createAgentNode(modelName: string, provider: LLMProvider, temperature: number): Runnable { const agentNode = new class AgentNode implements Runnable { - private modelName: string; - private temperature: number; + private modelName: string = modelName; + private provider: LLMProvider = provider; + private temperature: number = temperature; private callCount = 0; private readonly MAX_CALLS_PER_INTERACTION = 50; private tracingProvider: TracingProvider; - constructor(modelName: string, temperature: number) { - this.modelName = modelName; - this.temperature = temperature; - this.tracingProvider = createTracingProvider(); - } + constructor() { this.tracingProvider = createTracingProvider(); } async invoke(state: AgentState): Promise { console.log('[AGENT NODE DEBUG] AgentNode invoke called, messages count:', state.messages.length); @@ -123,7 +120,7 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl model: this.modelName, modelParameters: { temperature: this.temperature, - provider: AIChatPanel.getProviderForModel(this.modelName) + provider: this.provider }, input: { systemPrompt: systemPrompt.substring(0, 1000) + '...', // Truncate for tracing @@ -143,8 +140,8 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl try { const llm = LLMClient.getInstance(); - // Get provider for the specific model - const provider = AIChatPanel.getProviderForModel(this.modelName); + // Use provider passed at graph initialization + const provider = this.provider as LLMProvider; // Get tools for the current agent type const tools = getAgentToolsFromState(state); @@ 
-246,7 +243,7 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl callCount: this.callCount, toolCallId, phase: 'tool_call_decision', - provider: AIChatPanel.getProviderForModel(this.modelName) + provider: this.provider } }, tracingContext.traceId); @@ -352,7 +349,7 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl */ private convertChatMessagesToLLMMessages(messages: ChatMessage[]): LLMMessage[] { const llmMessages: LLMMessage[] = []; - + logger.info('Converting ChatMessages to LLMMessages. Messages:', messages); for (const msg of messages) { if (msg.entity === ChatMessageEntity.USER) { // User message @@ -387,27 +384,16 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl } else if (msg.entity === ChatMessageEntity.TOOL_RESULT) { // Tool result message if ('toolCallId' in msg && 'resultText' in msg) { - let content = msg.resultText; - - // Try to parse and sanitize if it's JSON (structured tool result) - if (typeof msg.resultText === 'string') { - try { - const parsed = JSON.parse(msg.resultText); - const sanitized = this.sanitizeToolResultForText(parsed); - content = JSON.stringify(sanitized); - } catch { - // Not JSON, use as-is (simple string tool result) - content = msg.resultText; - } - } else if (typeof msg.resultText === 'object' && msg.resultText !== null) { - // Already an object, sanitize directly - const sanitized = this.sanitizeToolResultForText(msg.resultText); - content = JSON.stringify(sanitized); - } - + const toolResultData = msg.resultText || null; // Use resultText if available + // Sanitize object payloads to avoid leaking session data and large fields + const sanitized = typeof toolResultData === 'object' && toolResultData !== null + ? this.sanitizeToolResultForText(toolResultData) + : toolResultData; + llmMessages.push({ role: 'tool', - content: String(content), + // Ensure objects are serialized as JSON instead of "[object Object]" + content: typeof sanitized === 'string' ? 
sanitized : JSON.stringify(sanitized), tool_call_id: msg.toolCallId, }); } @@ -416,11 +402,11 @@ export function createAgentNode(modelName: string, temperature: number): Runnabl return llmMessages; } - }(modelName, temperature); + }(); return agentNode; } -export function createToolExecutorNode(state: AgentState): Runnable { +export function createToolExecutorNode(state: AgentState, provider: LLMProvider, modelName: string): Runnable { const tools = getAgentToolsFromState(state); // Adjusted to use getAgentToolsFromState const toolMap = new Map[number]>(); tools.forEach((tool: ReturnType[number]) => toolMap.set(tool.name, tool)); @@ -428,10 +414,14 @@ export function createToolExecutorNode(state: AgentState): Runnable { private toolMap: Map[number]>; private tracingProvider: TracingProvider; + private provider: LLMProvider; + private modelName: string; - constructor(toolMap: Map[number]>) { + constructor(toolMap: Map[number]>, provider: LLMProvider, modelName: string) { this.toolMap = toolMap; this.tracingProvider = createTracingProvider(); + this.provider = provider; + this.modelName = modelName; } async invoke(state: AgentState): Promise { @@ -549,7 +539,7 @@ export function createToolExecutorNode(state: AgentState): Runnable { console.log(`[TOOL EXECUTION PATH 1] Inside withTracingContext for tool: ${toolName}`); - return await selectedTool.execute(toolArgs as any); + return await selectedTool.execute(toolArgs as any, { provider: this.provider, model: this.modelName }); }); console.log(`[TOOL EXECUTION PATH 1] ToolExecutorNode completed tool: ${toolName}`); @@ -587,26 +577,32 @@ export function createToolExecutorNode(state: AgentState): Runnable !['imageData', 'dataUrl', 'success', 'agentSession'].includes(k)).length === 0; + resultText = likelyOnlyImage + ? 'Image omitted (model lacks vision).' 
+ : JSON.stringify(result, null, 2); } isError = (typeof result === 'object' && result !== null && 'error' in result); @@ -730,7 +732,7 @@ export function createToolExecutorNode(state: AgentState): Runnable { static instance: AgentService; @@ -45,6 +54,7 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{ #runningGraphStatePromise?: AsyncGenerator; #tracingProvider!: TracingProvider; #sessionId: string; + #activeAgentSessions = new Map(); constructor() { super(); @@ -61,6 +71,12 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{ answer: i18nString(UIStrings.welcomeMessage), isFinalAnswer: true, }); + + // Initialize AgentRunner event system + AgentRunner.initializeEventBus(); + + // Subscribe to AgentRunner events + AgentRunnerEventBus.getInstance().addEventListener('agent-progress', this.#handleAgentProgress.bind(this)); } /** @@ -177,8 +193,11 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{ throw new Error(`${providerName} API key is required for this configuration`); } - // Will throw error if OpenAI model is used without API key - this.#graph = createAgentGraph(apiKey, modelName); + // Determine selected provider for primary graph execution + const selectedProvider = (localStorage.getItem('ai_chat_provider') || 'openai') as LLMProvider; + + // Will throw error if model/provider configuration is invalid + this.#graph = createAgentGraph(apiKey, modelName, selectedProvider); this.#isInitialized = true; } catch (error) { @@ -597,6 +616,110 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{ return true; } } + + /** + * Handle progress events from AgentRunner + */ + #handleAgentProgress(event: Common.EventTarget.EventTargetEvent): void { + const progressEvent = event.data; + + switch (progressEvent.type) { + case 'session_started': + this.#activeAgentSessions.set(progressEvent.sessionId, progressEvent.data.session); + this.dispatchEventToListeners(Events.AGENT_SESSION_STARTED, progressEvent.data.session); + // Upsert AGENT_SESSION message for real-time rendering + this.#upsertAgentSessionInMessages(progressEvent.data.session); + break; + case 'tool_started': + this.dispatchEventToListeners(Events.AGENT_TOOL_STARTED, progressEvent.data); + // Stream session update into chat messages (parent or child) + this.#upsertAgentSessionInMessages(progressEvent.data.session); + break; + case 'tool_completed': + this.dispatchEventToListeners(Events.AGENT_TOOL_COMPLETED, progressEvent.data); + // Update session state + const session = this.#activeAgentSessions.get(progressEvent.sessionId); + if (session) { + this.dispatchEventToListeners(Events.AGENT_SESSION_UPDATED, session); + // Stream session update into chat messages + this.#upsertAgentSessionInMessages(session); + } + break; + case 'child_agent_started': + this.dispatchEventToListeners(Events.CHILD_AGENT_STARTED, progressEvent.data); + // Also reflect child placeholder in the parent's message if present + { + const parent = progressEvent.data.parentSession as AgentSession | undefined; + if (parent) { + this.#upsertAgentSessionInMessages(parent); + } + } + break; + } + } + + // Upsert helper: ensures the chat transcript reflects the latest AgentSession state in real-time + #upsertAgentSessionInMessages(session: AgentSession): void { + // If this is a child session, update the parent container too + if (session.parentSessionId) { + // Find parent message and update nestedSessions + const parentIdx = this.#state.messages.findIndex(m => + (m as any).entity === 
ChatMessageEntity.AGENT_SESSION && + ((m as any).agentSession?.sessionId === session.parentSessionId) + ); + if (parentIdx !== -1) { + const parentMsg = this.#state.messages[parentIdx] as any; + const parentSession = parentMsg.agentSession as AgentSession; + const nested = Array.isArray(parentSession.nestedSessions) ? [...parentSession.nestedSessions] : []; + const nIdx = nested.findIndex(s => s.sessionId === session.sessionId); + if (nIdx !== -1) { + nested[nIdx] = session; + } else { + nested.push(session); + } + const updatedParent = { ...parentSession, nestedSessions: nested } as AgentSession; + this.#state.messages[parentIdx] = { ...parentMsg, agentSession: updatedParent }; + this.dispatchEventToListeners(Events.MESSAGES_CHANGED, [...this.#state.messages]); + return; + } + } + + // Otherwise, upsert the session as a top-level AGENT_SESSION message + const idx = this.#state.messages.findIndex(m => + (m as any).entity === ChatMessageEntity.AGENT_SESSION && + ((m as any).agentSession?.sessionId === session.sessionId) + ); + if (idx !== -1) { + const existing = this.#state.messages[idx] as any; + this.#state.messages[idx] = { ...existing, agentSession: session }; + } else { + // Only add as top-level if it has no parent + if (!session.parentSessionId) { + this.#state.messages.push({ entity: ChatMessageEntity.AGENT_SESSION, agentSession: session } as any); + } + } + this.dispatchEventToListeners(Events.MESSAGES_CHANGED, [...this.#state.messages]); + } + + /** + * Get active agent sessions + */ + getActiveAgentSessions(): AgentSession[] { + return Array.from(this.#activeAgentSessions.values()); + } + + /** + * Clean up completed session + */ + #cleanupCompletedSession(sessionId: string): void { + const session = this.#activeAgentSessions.get(sessionId); + if (session && (session.status === 'completed' || session.status === 'error')) { + // Keep for a short time for UI to finish rendering + setTimeout(() => { + this.#activeAgentSessions.delete(sessionId); + }, 5000); + } + } } // Define UI strings object to manage i18n strings diff --git a/front_end/panels/ai_chat/core/ConfigurableGraph.ts b/front_end/panels/ai_chat/core/ConfigurableGraph.ts index 28ef0b0ad5d..6dd48d925c5 100644 --- a/front_end/panels/ai_chat/core/ConfigurableGraph.ts +++ b/front_end/panels/ai_chat/core/ConfigurableGraph.ts @@ -7,6 +7,7 @@ import { createLogger } from './Logger.js'; import type { AgentState } from './State.js'; import { StateGraph } from './StateGraph.js'; import { NodeType, type CompiledGraph, type Runnable } from './Types.js'; +import type { LLMProvider } from '../LLM/LLMTypes.js'; const logger = createLogger('ConfigurableGraph'); @@ -35,6 +36,10 @@ export interface GraphConfig { edges: GraphEdgeConfig[]; modelName?: string; temperature?: number; + /** + * Selected LLM provider for this graph's agent nodes + */ + provider?: LLMProvider; } /** @@ -51,7 +56,7 @@ export function createAgentGraphFromConfig( const graph = new StateGraph({ name: config.name }); const nodeFactories: Record) => Runnable> = { - agent: () => createAgentNode(config.modelName!, config.temperature || 0), + agent: () => createAgentNode(config.modelName!, config.provider!, config.temperature || 0), final: () => createFinalNode(), toolExecutor: (nodeCfg) => { return { @@ -91,7 +96,7 @@ export function createAgentGraphFromConfig( const toolExecutorNodeName = edgeConfig.targetMap[NodeType.TOOL_EXECUTOR.toString()]; if (toolExecutorNodeName && toolExecutorNodeName !== '__end__') { logger.debug(`Dynamically creating/updating tool executor: 
${toolExecutorNodeName}`); - const toolExecutorInstance = createToolExecutorNode(state); + const toolExecutorInstance = createToolExecutorNode(state, config.provider!, config.modelName!); graphInstance.addNode(toolExecutorNodeName, toolExecutorInstance); } else { logger.error('Tool executor node name not found in targetMap or is __end__. Routing to __end__.'); diff --git a/front_end/panels/ai_chat/core/Graph.ts b/front_end/panels/ai_chat/core/Graph.ts index ce01871c188..a0b9950b222 100644 --- a/front_end/panels/ai_chat/core/Graph.ts +++ b/front_end/panels/ai_chat/core/Graph.ts @@ -16,11 +16,12 @@ import { routeNextNode, } from './GraphHelpers.js'; import { type CompiledGraph, NodeType } from './Types.js'; +import type { LLMProvider } from '../LLM/LLMTypes.js'; const logger = createLogger('Graph'); // createAgentGraph now uses the LLM SDK directly -export function createAgentGraph(_apiKey: string | null, modelName: string): CompiledGraph { +export function createAgentGraph(_apiKey: string | null, modelName: string, provider?: LLMProvider): CompiledGraph { if (!modelName) { throw new Error('Model name is required'); } @@ -32,6 +33,7 @@ export function createAgentGraph(_apiKey: string | null, modelName: string): Com ...defaultAgentGraphConfig, modelName: modelName, temperature: 0, + ...(provider ? { provider } : {}), }; return createAgentGraphFromConfig(graphConfigWithModel); diff --git a/front_end/panels/ai_chat/core/GraphHelpers.ts b/front_end/panels/ai_chat/core/GraphHelpers.ts index e4ae5adb671..ff0d729a95b 100644 --- a/front_end/panels/ai_chat/core/GraphHelpers.ts +++ b/front_end/panels/ai_chat/core/GraphHelpers.ts @@ -3,7 +3,7 @@ // found in the LICENSE file. import type { getTools } from '../tools/Tools.js'; -import { ChatMessageEntity, type ChatMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ChatMessage } from '../models/ChatTypes.js'; import * as BaseOrchestratorAgent from './BaseOrchestratorAgent.js'; import { createLogger } from './Logger.js'; diff --git a/front_end/panels/ai_chat/core/State.ts b/front_end/panels/ai_chat/core/State.ts index 2fb830c7602..c60bef44f35 100644 --- a/front_end/panels/ai_chat/core/State.ts +++ b/front_end/panels/ai_chat/core/State.ts @@ -3,7 +3,7 @@ // found in the LICENSE file. 
import * as i18n from '../../../core/i18n/i18n.js'; -import {type ChatMessage, ChatMessageEntity, type ImageInputData} from '../ui/ChatView.js'; +import {type ChatMessage, ChatMessageEntity, type ImageInputData} from '../models/ChatTypes.js'; import type {TracingContext} from '../tracing/TracingProvider.js'; const UIStrings = { diff --git a/front_end/panels/ai_chat/core/StateGraph.ts b/front_end/panels/ai_chat/core/StateGraph.ts index dcffca67d24..ebf8e87fb29 100644 --- a/front_end/panels/ai_chat/core/StateGraph.ts +++ b/front_end/panels/ai_chat/core/StateGraph.ts @@ -6,7 +6,7 @@ import { createLogger } from './Logger.js'; import type { Runnable } from './Types.js'; import { createTracingProvider } from '../tracing/TracingConfig.js'; import type { TracingProvider } from '../tracing/TracingProvider.js'; -import { ChatMessageEntity, type ModelChatMessage } from '../ui/ChatView.js'; +import { ChatMessageEntity, type ModelChatMessage } from '../models/ChatTypes.js'; const logger = createLogger('StateGraph'); diff --git a/front_end/panels/ai_chat/core/structured_response.ts b/front_end/panels/ai_chat/core/structured_response.ts new file mode 100644 index 00000000000..52951650bad --- /dev/null +++ b/front_end/panels/ai_chat/core/structured_response.ts @@ -0,0 +1,47 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import { CONTENT_THRESHOLDS, REGEX_PATTERNS } from '../core/Constants.js'; +import { createLogger } from '../core/Logger.js'; + +const logger = createLogger('structured_response'); + +export interface StructuredResponse { + reasoning: string; + markdownReport: string; +} + +// Parse and wrapped content from a model answer +export function parseStructuredResponse(text: string): StructuredResponse | null { + try { + const reasoningMatch = text.match(REGEX_PATTERNS.REASONING_TAG); + const reportMatch = text.match(REGEX_PATTERNS.MARKDOWN_REPORT_TAG); + if (reasoningMatch && reportMatch) { + const reasoning = reasoningMatch[1]?.trim() ?? ''; + const markdownReport = reportMatch[1]?.trim() ?? 
''; + if (reasoning && markdownReport && markdownReport.length >= CONTENT_THRESHOLDS.MARKDOWN_REPORT_MIN_LENGTH) { + return { reasoning, markdownReport }; + } + } + } catch (error) { + logger.error('Failed to parse structured response', error); + } + return null; +} + +// Create a stable key for a structured response +export function getMessageStateKey(structuredResponse: StructuredResponse): string { + const content = structuredResponse.reasoning + structuredResponse.markdownReport; + const encoder = new TextEncoder(); + const bytes = encoder.encode(content); + let hash = 0; + for (let i = 0; i < bytes.length; i++) { + // eslint-disable-next-line no-bitwise + hash = ((hash << 5) - hash) + bytes[i]; + // eslint-disable-next-line no-bitwise + hash = hash & hash; + } + return Math.abs(hash).toString(16).padStart(8, '0'); +} + diff --git a/front_end/panels/ai_chat/evaluation/remote/EvaluationAgent.ts b/front_end/panels/ai_chat/evaluation/remote/EvaluationAgent.ts index efafedfeca8..e739fd36e50 100644 --- a/front_end/panels/ai_chat/evaluation/remote/EvaluationAgent.ts +++ b/front_end/panels/ai_chat/evaluation/remote/EvaluationAgent.ts @@ -9,7 +9,7 @@ import { AgentService } from '../../core/AgentService.js'; import { createLogger } from '../../core/Logger.js'; import { createTracingProvider, withTracingContext, isTracingEnabled, getTracingConfig } from '../../tracing/TracingConfig.js'; import type { TracingProvider, TracingContext } from '../../tracing/TracingProvider.js'; -import type { ChatMessage } from '../../ui/ChatView.js'; +import type { ChatMessage } from '../../models/ChatTypes.js'; import { AIChatPanel } from '../../ui/AIChatPanel.js'; import { RegisterMessage, @@ -805,4 +805,3 @@ export class EvaluationAgent { }); } } - diff --git a/front_end/panels/ai_chat/models/ChatTypes.ts b/front_end/panels/ai_chat/models/ChatTypes.ts new file mode 100644 index 00000000000..fae1fe2a17e --- /dev/null +++ b/front_end/panels/ai_chat/models/ChatTypes.ts @@ -0,0 +1,77 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Shared chat types extracted from ChatView. Keep UI-agnostic. 
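Alongside the shared types defined below, a hedged sketch of how a renderer might narrow the ChatMessage union by its `entity` discriminant; the describeMessage helper is hypothetical and not part of this patch.

import { ChatMessageEntity, type ChatMessage } from './ChatTypes.js';

// Hypothetical helper: switch on the discriminant to produce a short label per message kind.
function describeMessage(msg: ChatMessage): string {
  switch (msg.entity) {
    case ChatMessageEntity.USER:
      return `user: ${msg.text}`;
    case ChatMessageEntity.MODEL:
      return msg.isFinalAnswer ? `final answer: ${msg.answer ?? ''}` : `tool call: ${msg.toolName ?? ''}`;
    case ChatMessageEntity.TOOL_RESULT:
      return `${msg.toolName} result (${msg.isError ? 'error' : 'ok'})`;
    case ChatMessageEntity.AGENT_SESSION:
      return `agent session ${String(msg.agentSession?.sessionId ?? '')}`;
  }
}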
+ +// Define possible entities for chat messages +export enum ChatMessageEntity { + USER = 'user', + MODEL = 'model', + TOOL_RESULT = 'tool_result', + AGENT_SESSION = 'agent_session', +} + +// Base structure for all chat messages +export interface BaseChatMessage { + entity: ChatMessageEntity; + error?: string; +} + +// Image input used by user messages +export interface ImageInputData { + url: string; + bytesBase64: string; +} + +// User message +export interface UserChatMessage extends BaseChatMessage { + entity: ChatMessageEntity.USER; + text: string; + imageInput?: ImageInputData; +} + +// Model message +export interface ModelChatMessage extends BaseChatMessage { + entity: ChatMessageEntity.MODEL; + action: 'tool' | 'final'; + toolName?: string; + toolArgs?: Record; + answer?: string; + isFinalAnswer: boolean; + reasoning?: string[] | null; + toolCallId?: string; +} + +// Tool result message +export interface ToolResultMessage extends BaseChatMessage { + entity: ChatMessageEntity.TOOL_RESULT; + toolName: string; + resultText: string; + isError: boolean; + resultData?: unknown; + toolCallId?: string; + isFromConfigurableAgent?: boolean; + imageData?: string; + summary?: string; +} + +// Agent session message (lightweight reference; AgentSession type lives in agent_framework) +export interface AgentSessionMessage extends BaseChatMessage { + entity: ChatMessageEntity.AGENT_SESSION; + // Use `any` to avoid tight coupling here; UI components import the precise type. + agentSession: any; + triggerMessageId?: string; + summary?: string; +} + +export type ChatMessage = + UserChatMessage|ModelChatMessage|ToolResultMessage|AgentSessionMessage; + +// View state for the chat container +export enum State { + IDLE = 'idle', + LOADING = 'loading', + ERROR = 'error', +} + diff --git a/front_end/panels/ai_chat/tools/CombinedExtractionTool.ts b/front_end/panels/ai_chat/tools/CombinedExtractionTool.ts index 6be48654545..dee45efd7b8 100644 --- a/front_end/panels/ai_chat/tools/CombinedExtractionTool.ts +++ b/front_end/panels/ai_chat/tools/CombinedExtractionTool.ts @@ -13,7 +13,7 @@ import { SchemaBasedExtractorTool, type SchemaDefinition } from './SchemaBasedExtractorTool.js'; import { - NavigateURLTool, type Tool, type ErrorResult + NavigateURLTool, type Tool, type ErrorResult, type LLMContext } from './Tools.js'; const logger = createLogger('Tool:CombinedExtraction'); @@ -78,7 +78,7 @@ export class CombinedExtractionTool implements Tool { + async execute(args: CombinedExtractionArgs, ctx?: LLMContext): Promise { logger.info('Executing with args', { args }); const { url, schema, markdownResponse, reasoning, extractionInstruction } = args; const agentService = AgentService.getInstance(); @@ -96,7 +96,7 @@ export class CombinedExtractionTool implements Tool /** * Execute the critique agent */ - async execute(args: CritiqueToolArgs): Promise { + async execute(args: CritiqueToolArgs, ctx?: LLMContext): Promise { logger.debug('Executing with args', args); const { userInput, finalResponse, reasoning } = args; const agentService = AgentService.getInstance(); @@ -102,7 +100,7 @@ export class CritiqueTool implements Tool logger.info('Evaluating planning response against user requirements'); // First, extract requirements from user input - const requirementsResult = await this.extractRequirements(userInput, apiKey); + const requirementsResult = await this.extractRequirements(userInput, apiKey, ctx); if (!requirementsResult.success) { throw new Error('Failed to extract requirements from user input.'); } @@ -112,7 
+110,8 @@ export class CritiqueTool implements Tool userInput, finalResponse, requirementsResult.requirements, - apiKey + apiKey, + ctx ); if (!evaluationResult.success || !evaluationResult.criteria) { @@ -124,7 +123,7 @@ export class CritiqueTool implements Tool // Generate feedback only if criteria not satisfied let feedback = undefined; if (!criteria.satisfiesCriteria) { - feedback = await this.generateFeedback(criteria, userInput, finalResponse, apiKey); + feedback = await this.generateFeedback(criteria, userInput, finalResponse, apiKey, ctx); } logger.info('Evaluation complete', { @@ -151,7 +150,7 @@ export class CritiqueTool implements Tool /** * Extract structured requirements from user input */ - private async extractRequirements(userInput: string, apiKey: string): Promise<{success: boolean, requirements: string[], error?: string}> { + private async extractRequirements(userInput: string, apiKey: string, ctx?: LLMContext): Promise<{success: boolean, requirements: string[], error?: string}> { const systemPrompt = `You are an expert requirements analyst. Your task is to extract clear, specific requirements from the user's input. Focus on functional requirements, constraints, and expected outcomes. @@ -166,7 +165,11 @@ Return a JSON array of requirement statements. Example format: ["Requirement 1", "Requirement 2", ...]`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!ctx?.provider || !(ctx.nanoModel || ctx.model)) { + throw new Error('Missing LLM context (provider/model) for requirements extraction'); + } + const provider = ctx.provider; + const model = ctx.nanoModel || ctx.model; const response = await callLLMWithTracing( { @@ -213,7 +216,8 @@ Return a JSON array of requirement statements. Example format: userInput: string, finalResponse: string, requirements: string[], - apiKey: string + apiKey: string, + ctx?: LLMContext ): Promise<{success: boolean, criteria?: EvaluationCriteria, error?: string}> { const systemPrompt = `You are an expert plan evaluator. Your task is to determine if a planning response satisfies the user's requirements. @@ -268,7 +272,11 @@ Return a JSON object evaluating the plan against the requirements using this sch ${JSON.stringify(evaluationSchema, null, 2)}`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!ctx?.provider || !(ctx.nanoModel || ctx.model)) { + throw new Error('Missing LLM context (provider/model) for evaluation'); + } + const provider = ctx.provider; + const model = ctx.nanoModel || ctx.model; const response = await callLLMWithTracing( { @@ -316,7 +324,8 @@ ${JSON.stringify(evaluationSchema, null, 2)}`; criteria: EvaluationCriteria, userInput: string, finalResponse: string, - apiKey: string + apiKey: string, + ctx?: LLMContext ): Promise { const systemPrompt = `You are an expert feedback provider. Your task is to generate clear, constructive feedback for a planning response. 
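Between these hunks, for readability: the same context-resolution guard recurs across CritiqueTool, SchemaBasedExtractorTool, and StreamlinedSchemaExtractorTool. A minimal sketch of that pattern, assuming the LLMContext interface this patch adds to Tools.ts; the resolveNanoModel helper name is hypothetical.

import type { LLMContext } from './Tools.js';
import type { LLMProvider } from '../LLM/LLMTypes.js';

// Hypothetical helper mirroring the repeated guard: prefer the nano model,
// fall back to the primary model, and fail loudly when no context was passed in.
function resolveNanoModel(ctx: LLMContext | undefined, purpose: string): { provider: LLMProvider, model: string } {
  if (!ctx?.provider || !(ctx.nanoModel || ctx.model)) {
    throw new Error(`Missing LLM context (provider/model) for ${purpose}`);
  }
  return { provider: ctx.provider, model: ctx.nanoModel || ctx.model };
}

// Usage inside a tool's execute(): const { provider, model } = resolveNanoModel(ctx, 'requirements extraction');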
@@ -338,7 +347,11 @@ Provide clear, actionable feedback focused on helping improve the final response Be concise, specific, and constructive.`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!ctx?.provider || !(ctx.nanoModel || ctx.model)) { + throw new Error('Missing LLM context (provider/model) for feedback generation'); + } + const provider = ctx.provider; + const model = ctx.nanoModel || ctx.model; const response = await callLLMWithTracing( { diff --git a/front_end/panels/ai_chat/tools/FetcherTool.ts b/front_end/panels/ai_chat/tools/FetcherTool.ts index cdc75166ec0..466c159bc17 100644 --- a/front_end/panels/ai_chat/tools/FetcherTool.ts +++ b/front_end/panels/ai_chat/tools/FetcherTool.ts @@ -4,7 +4,7 @@ import { createLogger } from '../core/Logger.js'; import { HTMLToMarkdownTool, type HTMLToMarkdownResult } from './HTMLToMarkdownTool.js'; -import { NavigateURLTool, type Tool } from './Tools.js'; +import { NavigateURLTool, type Tool, type LLMContext } from './Tools.js'; const logger = createLogger('Tool:Fetcher'); @@ -72,7 +72,7 @@ export class FetcherTool implements Tool { /** * Execute the fetcher agent to process multiple URLs */ - async execute(args: FetcherToolArgs): Promise { + async execute(args: FetcherToolArgs, ctx?: LLMContext): Promise { logger.info('Executing with args', { args }); const { urls, reasoning } = args; @@ -93,7 +93,7 @@ export class FetcherTool implements Tool { for (const url of urlsToProcess) { try { logger.info('Processing URL', { url }); - const fetchedContent = await this.fetchContentFromUrl(url, reasoning); + const fetchedContent = await this.fetchContentFromUrl(url, reasoning, ctx); results.push(fetchedContent); } catch (error: any) { logger.error('Error processing URL', { url, error: error.message, stack: error.stack }); @@ -116,15 +116,15 @@ export class FetcherTool implements Tool { /** * Fetch and extract content from a single URL */ - private async fetchContentFromUrl(url: string, reasoning: string): Promise { + private async fetchContentFromUrl(url: string, reasoning: string, ctx?: LLMContext): Promise { try { // Step 1: Navigate to the URL logger.info('Navigating to URL', { url }); // Note: NavigateURLTool requires both url and reasoning parameters - const navigationResult = await this.navigateURLTool.execute({ - url, - reasoning: `Navigating to ${url} to extract content for research` - } as { url: string, reasoning: string }); + const navigationResult = await this.navigateURLTool.execute({ + url, + reasoning: `Navigating to ${url} to extract content for research` + } as { url: string, reasoning: string }, ctx); // Check for navigation errors if ('error' in navigationResult) { @@ -148,7 +148,7 @@ export class FetcherTool implements Tool { const extractionResult = await this.htmlToMarkdownTool.execute({ instruction: 'Extract the main content focusing on article text, headings, and important information. Remove ads, navigation, and distracting elements.', reasoning - }); + }, ctx); // Check for extraction errors if (!extractionResult.success || !extractionResult.markdownContent) { diff --git a/front_end/panels/ai_chat/tools/FinalizeWithCritiqueTool.ts b/front_end/panels/ai_chat/tools/FinalizeWithCritiqueTool.ts index 7c4a6d1aacc..268ca55dce9 100644 --- a/front_end/panels/ai_chat/tools/FinalizeWithCritiqueTool.ts +++ b/front_end/panels/ai_chat/tools/FinalizeWithCritiqueTool.ts @@ -3,7 +3,7 @@ // found in the LICENSE file. 
import { AgentService } from '../core/AgentService.js'; -import { ChatMessageEntity } from '../ui/ChatView.js'; +import { ChatMessageEntity } from '../models/ChatTypes.js'; import { createLogger } from '../core/Logger.js'; const logger = createLogger('FinalizeWithCritiqueTool'); diff --git a/front_end/panels/ai_chat/tools/FullPageAccessibilityTreeToMarkdownTool.ts b/front_end/panels/ai_chat/tools/FullPageAccessibilityTreeToMarkdownTool.ts index 8d4e02d7b69..08e55e68909 100644 --- a/front_end/panels/ai_chat/tools/FullPageAccessibilityTreeToMarkdownTool.ts +++ b/front_end/panels/ai_chat/tools/FullPageAccessibilityTreeToMarkdownTool.ts @@ -3,7 +3,7 @@ // found in the LICENSE file. import { AgentService } from '../core/AgentService.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; +import type { LLMContext } from './Tools.js'; import { callLLMWithTracing } from './LLMTracingWrapper.js'; import { GetAccessibilityTreeTool, type Tool, type ErrorResult } from './Tools.js'; @@ -34,7 +34,7 @@ export class FullPageAccessibilityTreeToMarkdownTool implements Tool): Promise { + async execute(_args: Record, ctx?: LLMContext): Promise { const getAccTreeTool = new GetAccessibilityTreeTool(); const treeResult = await getAccTreeTool.execute({ reasoning: 'Get full accessibility tree for Markdown conversion' }); if ('error' in treeResult) { @@ -50,7 +50,11 @@ export class FullPageAccessibilityTreeToMarkdownTool implements Tool { + async execute(args: HTMLToMarkdownArgs, ctx?: LLMContext): Promise { logger.info('Executing with args', { args }); const { instruction } = args; const agentService = AgentService.getInstance(); @@ -107,10 +106,19 @@ export class HTMLToMarkdownTool implements Tool { // Call LLM using the unified client with tracing - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + const provider = params.provider; + const model = params.model; const llmResponse = await callLLMWithTracing( { provider, diff --git a/front_end/panels/ai_chat/tools/SchemaBasedExtractorTool.ts b/front_end/panels/ai_chat/tools/SchemaBasedExtractorTool.ts index 18c236fa16d..a7b663a1648 100644 --- a/front_end/panels/ai_chat/tools/SchemaBasedExtractorTool.ts +++ b/front_end/panels/ai_chat/tools/SchemaBasedExtractorTool.ts @@ -7,7 +7,7 @@ import * as Protocol from '../../../generated/protocol.js'; import * as Utils from '../common/utils.js'; import { AgentService } from '../core/AgentService.js'; import { createLogger } from '../core/Logger.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; +import type { LLMContext } from './Tools.js'; import { callLLMWithTracing } from './LLMTracingWrapper.js'; import { NodeIDsToURLsTool, type Tool } from './Tools.js'; @@ -74,7 +74,7 @@ Schema Examples: * Execute the schema-based extraction */ - async execute(args: SchemaExtractionArgs): Promise { + async execute(args: SchemaExtractionArgs, ctx?: LLMContext): Promise { logger.debug('Executing with args', args); const { schema, instruction, reasoning } = args; @@ -173,6 +173,7 @@ Schema Examples: domContent: treeText, schema: transformedSchema, apiKey, + ctx, }); logger.debug('Initial extraction result:', initialExtraction); @@ -190,6 +191,7 @@ Schema Examples: schema: transformedSchema, // Use the same transformed schema initialData: initialExtraction, apiKey, + ctx, }); logger.debug('Refinement result:', refinedData); @@ -227,6 +229,7 @@ Schema Examples: domContent: treeText, // Pass the DOM content for context schema, // Pass the schema to understand what was requested apiKey, + ctx, }); 
logger.debug('Metadata result:', metadata); @@ -399,6 +402,7 @@ Schema Examples: domContent: string, schema: SchemaDefinition, apiKey: string, + ctx?: LLMContext, }): Promise { const { instruction, domContent, schema, apiKey } = options; logger.debug('Calling Extraction LLM...'); @@ -447,7 +451,11 @@ CRITICAL: Only output the JSON object with real data from the accessibility tree.`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!options.ctx?.provider || !(options.ctx.nanoModel || options.ctx.model)) { + throw new Error('Missing LLM context (provider/model) for extraction'); + } + const provider = options.ctx.provider; + const model = options.ctx.nanoModel || options.ctx.model; const llmResponse = await callLLMWithTracing( { provider, @@ -488,6 +496,7 @@ Only output the JSON object with real data from the accessibility tree.`; schema: SchemaDefinition, initialData: any, apiKey: string, + ctx?: LLMContext, }): Promise { const { instruction, schema, initialData, apiKey } = options; logger.debug('Calling Refinement LLM...'); @@ -527,7 +536,11 @@ Return only the refined JSON object. Do not add any conversational text or explanations or thinking tags.`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!options.ctx?.provider || !(options.ctx.nanoModel || options.ctx.model)) { + throw new Error('Missing LLM context (provider/model) for refinement'); + } + const provider = options.ctx.provider; + const model = options.ctx.nanoModel || options.ctx.model; const llmResponse = await callLLMWithTracing( { provider, @@ -568,6 +581,7 @@ Do not add any conversational text or explanations or thinking tags.`; domContent: string, schema: SchemaDefinition, apiKey: string, + ctx?: LLMContext, }): Promise { const { instruction, extractedData, domContent, schema, apiKey } = options; logger.debug('Calling Metadata LLM...'); @@ -634,7 +648,11 @@ Describe the type of page/content that was analyzed. Return ONLY a valid JSON object conforming to the required metadata schema.`; try { - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!options.ctx?.provider || !(options.ctx.nanoModel || options.ctx.model)) { + throw new Error('Missing LLM context (provider/model) for metadata'); + } + const provider = options.ctx.provider; + const model = options.ctx.nanoModel || options.ctx.model; const llmResponse = await callLLMWithTracing( { provider, diff --git a/front_end/panels/ai_chat/tools/SequentialThinkingTool.ts b/front_end/panels/ai_chat/tools/SequentialThinkingTool.ts index 63e787fc34c..047848d25e3 100644 --- a/front_end/panels/ai_chat/tools/SequentialThinkingTool.ts +++ b/front_end/panels/ai_chat/tools/SequentialThinkingTool.ts @@ -3,12 +3,13 @@ // found in the LICENSE file. 
import * as SDK from '../../../core/sdk/sdk.js'; -import type { Tool } from './Tools.js'; +import type { Tool, LLMContext } from './Tools.js'; import { TakeScreenshotTool } from './Tools.js'; import { GetAccessibilityTreeTool } from './Tools.js'; import { createLogger } from '../core/Logger.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; -import { callLLMWithTracing } from './LLMTracingWrapper.js'; +import { LLMClient } from '../LLM/LLMClient.js'; +import { LLMResponseParser } from '../LLM/LLMResponseParser.js'; +import { LLMRetryManager } from '../LLM/LLMErrorHandler.js'; const logger = createLogger('SequentialThinkingTool'); @@ -31,6 +32,8 @@ export interface ThinkingStep { thought: string; action: string; targetDescription?: string; + target_id?: number; + value?: string; expectedOutcome: string; risks?: string[]; } @@ -69,13 +72,13 @@ export class SequentialThinkingTool implements Tool { + async execute(args: SequentialThinkingArgs, ctx?: LLMContext): Promise { try { logger.info('Sequential thinking initiated', { userRequest: args.userRequest }); // Check if current model supports vision - const currentModel = AIChatPanel.instance().getSelectedModel(); - const isVisionCapable = await AIChatPanel.isVisionCapable(currentModel); + const currentModel = ctx?.model || ''; + const isVisionCapable = ctx?.getVisionCapability ? await ctx.getVisionCapability(currentModel) : false; logger.info(`Model ${currentModel} vision capable: ${isVisionCapable}`); @@ -94,7 +97,7 @@ export class SequentialThinkingTool implements Tool 0 ? ` PAST STEPS ATTEMPTED: @@ -219,65 +229,73 @@ Based on the screenshot and current state, create a grounded sequential plan for }; } - private async getGroundedAnalysis(prompt: { systemPrompt: string; userPrompt: string; images: Array<{ type: string; data: string }> }, isVisionCapable: boolean = true): Promise { + private async getGroundedAnalysis(prompt: { systemPrompt: string; userPrompt: string; images: Array<{ type: string; data: string }> }, ctx?: LLMContext): Promise { + const retryManager = new LLMRetryManager({ + enableLogging: true, + defaultConfig: { + maxRetries: 2, // Allow 2 retries for JSON parsing failures + baseDelayMs: 1000, + maxDelayMs: 5000, + backoffMultiplier: 1.5, + jitterMs: 500, + } + }); + try { - // Get the selected model and its provider - const model = AIChatPanel.instance().getSelectedModel(); - const provider = AIChatPanel.getProviderForModel(model); - - // Prepare message based on vision capability - const messages = [{ - role: 'user' as const, - content: isVisionCapable ? 
[ - { type: 'text' as const, text: prompt.userPrompt }, - ...prompt.images.filter(img => img.data !== 'no-screenshot-available').map(img => ({ - type: 'image_url' as const, - image_url: { url: img.data } - })) - ] : prompt.userPrompt // Text-only for non-vision models - }]; - - const response = await callLLMWithTracing( - { + const result = await retryManager.executeWithRetry(async () => { + // Get the selected model and its provider + if (!ctx?.provider || !ctx.model) { + throw new Error('Missing LLM context (provider/model) for SequentialThinkingTool'); + } + const provider = ctx.provider; + const model = ctx.model; + const llm = LLMClient.getInstance(); + + // Prepare multimodal message + const validImages = prompt.images.filter(img => !!img?.data && img.data !== 'no-screenshot-available'); + const messages = [{ + role: 'user' as const, + content: [ + { type: 'text' as const, text: prompt.userPrompt }, + ...validImages.map(img => ({ + type: 'image_url' as const, + image_url: { url: img.data } + })) + ] + }]; + + const response = await llm.call({ provider, model, messages, systemPrompt: prompt.systemPrompt, - temperature: 0.2, - options: { retryConfig: { maxRetries: 3 } } - }, - { - toolName: this.name, - operationName: 'sequential_analysis', - context: 'grounded_planning', - additionalMetadata: { - isVisionCapable, - imageCount: prompt.images.length, - hasValidImages: prompt.images.some(img => img.data !== 'no-screenshot-available') - } - } - ); + temperature: 0.2 + }); - if (!response.text) { - return { error: 'No response from LLM' }; - } + if (!response.text) { + throw new Error('No response from LLM'); + } - try { - const result = JSON.parse(response.text) as SequentialThinkingResult; + // This will throw if JSON parsing fails, triggering a retry + const parsedResult = LLMResponseParser.parseStrictJSON(response.text) as SequentialThinkingResult; // Validate result structure - if (!result.currentState || !result.nextSteps || !Array.isArray(result.nextSteps)) { - return { error: 'Invalid response structure from LLM' }; + if (!parsedResult.currentState || !parsedResult.nextSteps || !Array.isArray(parsedResult.nextSteps)) { + throw new Error('Invalid response structure from LLM - missing required fields'); } - return result; - } catch (parseError) { - logger.error('Failed to parse LLM response:', parseError); - return { error: `Failed to parse response: ${String(parseError)}` }; - } + return parsedResult; + }, { + context: 'sequential_thinking_analysis', + customRetryConfig: { + maxRetries: 2, // Specific retry count for this operation + } + }); + + return result; } catch (error) { - logger.error('LLM call failed:', error); - return { error: `LLM analysis failed: ${String(error)}` }; + logger.error('Sequential thinking analysis failed after retries:', error); + return { error: `Analysis failed: ${String(error)}` }; } } @@ -310,4 +328,4 @@ Based on the screenshot and current state, create a grounded sequential plan for }, required: ['userRequest'] }; -} \ No newline at end of file +} diff --git a/front_end/panels/ai_chat/tools/StreamlinedSchemaExtractorTool.ts b/front_end/panels/ai_chat/tools/StreamlinedSchemaExtractorTool.ts index 6837f02d5e7..416a4b5fbb8 100644 --- a/front_end/panels/ai_chat/tools/StreamlinedSchemaExtractorTool.ts +++ b/front_end/panels/ai_chat/tools/StreamlinedSchemaExtractorTool.ts @@ -8,10 +8,8 @@ import * as Utils from '../common/utils.js'; import type { AccessibilityNode } from '../common/context.js'; import { AgentService } from '../core/AgentService.js'; import 
{ createLogger } from '../core/Logger.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; import { callLLMWithTracing } from './LLMTracingWrapper.js'; - -import type { Tool } from './Tools.js'; +import type { Tool, LLMContext } from './Tools.js'; const logger = createLogger('Tool:StreamlinedSchemaExtractor'); @@ -64,15 +62,15 @@ export class StreamlinedSchemaExtractorTool implements Tool { + async execute(args: StreamlinedSchemaExtractionArgs, ctx?: LLMContext): Promise { try { const context = await this.setupExecution(args); if (context.success !== true) { return context as StreamlinedExtractionResult; } - const extractionResult = await this.performExtraction(context as ExecutionContext); - const finalData = await this.resolveUrlsWithRetry(extractionResult, context as ExecutionContext); + const extractionResult = await this.performExtraction(context as ExecutionContext, ctx); + const finalData = await this.resolveUrlsWithRetry(extractionResult, context as ExecutionContext, ctx); return { success: true, @@ -139,17 +137,18 @@ export class StreamlinedSchemaExtractorTool implements Tool { + private async performExtraction(context: ExecutionContext, ctx?: LLMContext): Promise { return await this.extractWithJsonRetry( context.schema, context.treeText, context.instruction, context.apiKey, - this.MAX_JSON_RETRIES + this.MAX_JSON_RETRIES, + ctx ); } - private async resolveUrlsWithRetry(extractionResult: any, context: ExecutionContext): Promise { + private async resolveUrlsWithRetry(extractionResult: any, context: ExecutionContext, ctx?: LLMContext): Promise { const urlFields = this.findUrlFields(context.schema); let finalData = this.resolveUrlsDirectly(extractionResult, context.urlMappings, urlFields); @@ -175,7 +174,8 @@ export class StreamlinedSchemaExtractorTool implements Tool { const systemPrompt = `You are a data extraction agent. Extract structured data from the accessibility tree according to the provided schema. @@ -238,7 +239,11 @@ IMPORTANT: Only extract data that you can see in the accessibility tree above. D extractionPrompt += `\n\nIMPORTANT: Previous attempt ${attempt - 1} failed due to invalid JSON. Please ensure you return ONLY valid JSON that can be parsed. Do not hallucinate any data - only extract what actually exists in the tree.`; } - const { model, provider } = AIChatPanel.getMiniModelWithProvider(); + if (!ctx?.provider || !(ctx.miniModel || ctx.model)) { + throw new Error('Missing LLM context (provider/model) for streamlined extraction'); + } + const provider = ctx.provider; + const model = ctx.miniModel || ctx.model; const llmResponse = await callLLMWithTracing( { provider, @@ -330,7 +335,8 @@ IMPORTANT: Only extract data that you can see in the accessibility tree above. D originalResult: any, unresolvedNodeIds: string[], apiKey: string, - attemptNumber: number + attemptNumber: number, + ctx?: LLMContext ): Promise { const systemPrompt = `You are a data extraction agent. A previous extraction attempt was made but some nodeIDs could not be resolved to URLs. @@ -377,7 +383,11 @@ Extract data according to the schema. For URL fields, return different nodeId nu CRITICAL: Only use nodeIds that you can actually see in the accessibility tree above. 
Do not invent, guess, or make up any nodeIds.`; try { - const { model, provider } = AIChatPanel.getMiniModelWithProvider(); + if (!ctx?.provider || !(ctx.miniModel || ctx.model)) { + throw new Error('Missing LLM context (provider/model) for URL retry extraction'); + } + const provider = ctx.provider; + const model = ctx.miniModel || ctx.model; const llmResponse = await callLLMWithTracing( { provider, @@ -501,4 +511,4 @@ export interface StreamlinedSchemaExtractionArgs { schema: any; instruction: string; reasoning?: string; -} \ No newline at end of file +} diff --git a/front_end/panels/ai_chat/tools/ThinkingTool.ts b/front_end/panels/ai_chat/tools/ThinkingTool.ts index 09461d3dd6e..7c00e88060a 100644 --- a/front_end/panels/ai_chat/tools/ThinkingTool.ts +++ b/front_end/panels/ai_chat/tools/ThinkingTool.ts @@ -3,10 +3,9 @@ // found in the LICENSE file. import * as SDK from '../../../core/sdk/sdk.js'; -import type { Tool } from './Tools.js'; +import type { Tool, LLMContext } from './Tools.js'; import { TakeScreenshotTool, GetAccessibilityTreeTool } from './Tools.js'; import { createLogger } from '../core/Logger.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; import { callLLMWithTracing } from './LLMTracingWrapper.js'; const logger = createLogger('ThinkingTool'); @@ -39,13 +38,13 @@ export class ThinkingTool implements Tool { + async execute(args: ThinkingArgs, ctx?: LLMContext): Promise { try { logger.info('Thinking tool initiated', { userRequest: args.userRequest }); // 1. Check if current model supports vision - const currentModel = AIChatPanel.instance().getSelectedModel(); - const isVisionModel = await AIChatPanel.isVisionCapable(currentModel); + const currentModel = ctx?.model || ''; + const isVisionModel = ctx?.getVisionCapability ? await ctx.getVisionCapability(currentModel) : false; logger.info(`Model ${currentModel} vision capable: ${isVisionModel}`); @@ -71,7 +70,7 @@ export class ThinkingTool implements Tool }, isVisionModel: boolean): Promise { + private async getThinkingAnalysis(prompt: { systemPrompt: string; userPrompt: string; images: Array<{ type: string; data: string }> }, isVisionModel: boolean, ctx?: LLMContext): Promise { try { // Get the selected model and its provider - const model = AIChatPanel.instance().getSelectedModel(); - const provider = AIChatPanel.getProviderForModel(model); + if (!ctx?.provider || !ctx.model) { + return { error: 'Missing LLM context (provider/model) for ThinkingTool' }; + } + const provider = ctx.provider; + const model = ctx.model; // Prepare message based on model type const messages = [{ diff --git a/front_end/panels/ai_chat/tools/Tools.ts b/front_end/panels/ai_chat/tools/Tools.ts index 42a2da64b38..8469fb2fb5e 100644 --- a/front_end/panels/ai_chat/tools/Tools.ts +++ b/front_end/panels/ai_chat/tools/Tools.ts @@ -20,8 +20,8 @@ import { getXPathByBackendNodeId } from '../common/utils.js'; import { AgentService } from '../core/AgentService.js'; import type { DevToolsContext } from '../core/State.js'; import { LLMClient } from '../LLM/LLMClient.js'; -import { AIChatPanel } from '../ui/AIChatPanel.js'; -import { ChatMessageEntity } from '../ui/ChatView.js'; +import type { LLMProvider } from '../LLM/LLMTypes.js'; +import { ChatMessageEntity } from '../models/ChatTypes.js'; // Type imports @@ -40,7 +40,7 @@ import { SequentialThinkingTool, type SequentialThinkingResult, type SequentialT export interface Tool, TResult = unknown> { name: string; description: string; - execute: (args: TArgs) => Promise; + execute: (args: TArgs, ctx?: 
LLMContext) => Promise; schema: { type: string, properties: Record, @@ -48,6 +48,17 @@ export interface Tool, TResult = unknown> { }; } +/** + * Context passed into tools for LLM-related choices without relying on UI. + */ +export interface LLMContext { + provider: LLMProvider; + model: string; + getVisionCapability?: (model: string) => Promise | boolean; + miniModel?: string; + nanoModel?: string; +} + /** * Type for element inspection result */ @@ -624,7 +635,7 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, constructor() { } - async execute(args: { url: string, reasoning: string /* Add reasoning to signature */ }): Promise { + async execute(args: { url: string, reasoning: string /* Add reasoning to signature */ }, ctx?: LLMContext): Promise { logger.info('navigate_url', args); const url = args.url; const LOAD_TIMEOUT_MS = 30000; // 30 seconds timeout for page load @@ -688,7 +699,7 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, logger.info('Metadata fetched:', metadata); // *** Add 404 detection heuristic *** - const is404Result = await this.check404Status(target, metadata); + const is404Result = await this.check404Status(target, metadata, ctx); if (is404Result.is404) { return { error: `Page not found (404): ${is404Result.reason}`, @@ -751,7 +762,7 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, } } - private async check404Status(target: SDK.Target.Target, metadata: { url: string, title: string }): Promise<{ is404: boolean, reason?: string }> { + private async check404Status(target: SDK.Target.Target, metadata: { url: string, title: string }, ctx?: LLMContext): Promise<{ is404: boolean, reason?: string }> { try { // Basic heuristic checks first const title = metadata.title.toLowerCase(); @@ -773,7 +784,7 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, // Get accessibility tree for better semantic analysis const treeResult = await Utils.getAccessibilityTree(target); const pageContent = treeResult.simplified; - const is404Confirmed = await this.confirmWith404LLM(metadata.url, metadata.title, pageContent); + const is404Confirmed = await this.confirmWith404LLM(metadata.url, metadata.title, pageContent, ctx); if (is404Confirmed) { return { @@ -790,7 +801,7 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, } } - private async confirmWith404LLM(url: string, title: string, content: string): Promise { + private async confirmWith404LLM(url: string, title: string, content: string, ctx?: LLMContext): Promise { try { const agentService = AgentService.getInstance(); const apiKey = agentService.getApiKey(); @@ -800,7 +811,12 @@ export class NavigateURLTool implements Tool<{ url: string, reasoning: string }, return false; } - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!ctx?.provider || !(ctx.nanoModel || ctx.model)) { + logger.warn('Missing LLM context for 404 confirmation'); + return false; + } + const provider = ctx.provider; + const model = ctx.nanoModel || ctx.model; const llm = LLMClient.getInstance(); const systemPrompt = `You are analyzing web page content to determine if it represents a 404 "Page Not Found" error page. @@ -1406,7 +1422,7 @@ export class WaitTool implements Tool<{ seconds?: number, duration?: number, rea name = 'wait_for_page_load'; description = 'Waits for a specified number of seconds to allow page content to load, animations to complete, or dynamic content to appear. 
After waiting, returns a summary of what is currently visible in the viewport to help determine if additional waiting is needed. Provide the number of seconds to wait and an optional reasoning for waiting.'; - async execute(args: { seconds?: number, duration?: number, reason?: string, reasoning?: string }): Promise { + async execute(args: { seconds?: number, duration?: number, reason?: string, reasoning?: string }, ctx?: LLMContext): Promise { // Handle both 'seconds' and 'duration' parameter names for flexibility const waitTime = args.seconds ?? args.duration; const waitReason = args.reason ?? args.reasoning; @@ -1438,9 +1454,11 @@ export class WaitTool implements Tool<{ seconds?: number, duration?: number, rea // Get visible accessibility tree const treeResult = await Utils.getVisibleAccessibilityTree(target); - // Generate summary using LLM - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); - const llm = LLMClient.getInstance(); + // Generate summary using LLM if ctx is available + if (ctx?.provider && (ctx.nanoModel || ctx.model)) { + const provider = ctx.provider; + const model = ctx.nanoModel || ctx.model; + const llm = LLMClient.getInstance(); const reasonContext = waitReason ? `The wait was specifically for: ${waitReason}` : 'No specific reason was provided for the wait.'; @@ -1461,15 +1479,16 @@ Keep the summary to 2-3 sentences maximum.`; const userPrompt = `Analyze this viewport content and provide a brief summary${waitReason ? `, focusing on elements related to: ${waitReason}` : ''}: ${treeResult.simplified}`; - const response = await llm.call({ - provider, - model, - messages: [{ role: 'user', content: userPrompt }], - systemPrompt, - temperature: 0.1, - }); + const response = await llm.call({ + provider, + model, + messages: [{ role: 'user', content: userPrompt }], + systemPrompt, + temperature: 0.1, + }); - viewportSummary = response.text?.trim(); + viewportSummary = response.text?.trim(); + } } } catch (error) { // Non-critical error - just log and continue @@ -1679,7 +1698,7 @@ export class PerformActionTool implements Tool<{ method: string, nodeId: number name = 'perform_action'; description = 'Performs an action on a DOM element identified by NodeID'; - async execute(args: { method: string, nodeId: number | string, reasoning: string, args?: Record | unknown[] }): Promise { + async execute(args: { method: string, nodeId: number | string, reasoning: string, args?: Record | unknown[] }, ctx?: LLMContext): Promise { logger.info('Executing with args:', JSON.stringify(args)); const method = args.method; const nodeId = args.nodeId; @@ -2042,9 +2061,9 @@ export class PerformActionTool implements Tool<{ method: string, nodeId: number // Visual verification using before/after screenshots and LLM let visualCheck: string | undefined; - // Check if current model supports vision - const currentModel = AIChatPanel.instance().getSelectedModel(); - const isVisionCapable = await AIChatPanel.isVisionCapable(currentModel); + // Check if current model supports vision via provided context + const currentModel = (ctx as any)?.model; + const isVisionCapable = (ctx as any)?.getVisionCapability ? 
await (ctx as any).getVisionCapability(currentModel) : false; if (!isVisionCapable) { logger.info(`Model ${currentModel} does not support vision - using DOM-based verification`); @@ -2063,7 +2082,11 @@ export class PerformActionTool implements Tool<{ method: string, nodeId: number // Use LLM to analyze DOM changes const llmClient = LLMClient.getInstance(); - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!(ctx as any)?.provider || !((ctx as any)?.nanoModel || (ctx as any)?.model)) { + visualCheck = 'Skipping DOM verification (missing LLM context)'; + } else { + const provider = (ctx as any).provider; + const model = (ctx as any).nanoModel || (ctx as any).model; const response = await llmClient.call({ provider, model, @@ -2096,7 +2119,8 @@ Provide a clear, concise response about what happened.` temperature: 0 }); - visualCheck = response.text || 'No DOM verification response'; + visualCheck = response.text || 'No DOM verification response'; + } logger.info('DOM-based verification result:', visualCheck); } catch (error) { logger.warn('DOM-based verification failed:', error); @@ -2126,7 +2150,11 @@ Provide a clear, concise response about what happened.` // Ask LLM to verify using nano model for efficiency const llmClient = LLMClient.getInstance(); - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); + if (!(ctx as any)?.provider || !((ctx as any)?.nanoModel || (ctx as any)?.model)) { + visualCheck = 'Skipping visual verification (missing LLM context)'; + } else { + const provider = (ctx as any).provider; + const model = (ctx as any).nanoModel || (ctx as any).model; const response = await llmClient.call({ provider, model, @@ -2178,7 +2206,8 @@ Provide a clear, descriptive response about what happened and whether the action temperature: 0 }); - visualCheck = response.text || 'No response'; + visualCheck = response.text || 'No response'; + } logger.info('Visual verification result:', visualCheck); } else if (afterScreenshotResult.data && !beforeScreenshotData) { // Fallback to single after screenshot if before screenshot failed @@ -2195,15 +2224,19 @@ Provide a clear, descriptive response about what happened and whether the action } const llmClient = LLMClient.getInstance(); - const { model, provider } = AIChatPanel.getNanoModelWithProvider(); - const response = await llmClient.call({ - provider, - model, - systemPrompt: 'You are a visual verification assistant. Analyze screenshots and page context to determine if actions succeeded.', - messages: [ - { - role: 'user', - content: [ + if (!(ctx as any)?.provider || !((ctx as any)?.nanoModel || (ctx as any)?.model)) { + visualCheck = 'Skipping visual verification (missing LLM context)'; + } else { + const provider = (ctx as any).provider; + const model = (ctx as any).nanoModel || (ctx as any).model; + const response = await llmClient.call({ + provider, + model, + systemPrompt: 'You are a visual verification assistant. Analyze screenshots and page context to determine if actions succeeded.', + messages: [ + { + role: 'user', + content: [ { type: 'text', text: `Analyze this screenshot to determine if the ${method} action succeeded and describe what you observe. 
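The pattern repeated across these Tools.ts hunks is that every tool now accepts an optional `LLMContext` and fails (or degrades) gracefully when it is missing, instead of importing `AIChatPanel` to pick a model. A minimal sketch of a tool written against that convention — the `echo_summary` tool itself is hypothetical; only the `Tool`/`LLMContext` shapes and the `LLMClient.call` usage mirror this patch:

```typescript
import { LLMClient } from '../LLM/LLMClient.js';
import type { Tool, LLMContext } from './Tools.js';

// Hypothetical tool illustrating the ctx-first convention used throughout this patch.
export class EchoSummaryTool implements Tool<{ text: string }, { summary?: string, error?: string }> {
  name = 'echo_summary';
  description = 'Summarizes the given text using the nano model when available.';
  schema = {
    type: 'object',
    properties: { text: { type: 'string' } },
  };

  async execute(args: { text: string }, ctx?: LLMContext): Promise<{ summary?: string, error?: string }> {
    // Prefer the cheap nano model, fall back to the main model supplied in the context.
    const provider = ctx?.provider;
    const model = ctx?.nanoModel || ctx?.model;
    if (!provider || !model) {
      // Same failure mode as the tools above: report the missing context, never reach into UI singletons.
      return { error: 'Missing LLM context (provider/model) for echo_summary' };
    }

    const llm = LLMClient.getInstance();
    const response = await llm.call({
      provider,
      model,
      systemPrompt: 'Summarize the user text in one sentence.',
      messages: [{ role: 'user', content: args.text }],
      temperature: 0,
    });
    return { summary: response.text?.trim() };
  }
}
```

The point of the convention, as the hunks above show, is that the provider/model decision is injected by the caller rather than read from the panel singleton.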
@@ -2241,7 +2274,8 @@ Provide a clear, descriptive response about what you observe and whether the act temperature: 0 }); - visualCheck = response.text || 'No response'; + visualCheck = response.text || 'No response'; + } logger.info('Visual verification result (after only):', visualCheck); } else { logger.error('Screenshot data is empty or undefined'); @@ -2824,14 +2858,18 @@ Important guidelines: } - async execute(args: { objective: string, offset?: number, chunkSize?: number, maxRetries?: number }): Promise { + async execute(args: { objective: string, offset?: number, chunkSize?: number, maxRetries?: number }, ctx?: LLMContext): Promise { const { objective, offset = 0, chunkSize = 60000, maxRetries = 1 } = args; // Default offset 0, chunkSize 60000, maxRetries 1 let currentTry = 0; let lastError: string | null = null; const agentService = AgentService.getInstance(); const apiKey = agentService.getApiKey(); - const { model: modelNameForAction, provider: providerForAction } = AIChatPanel.getMiniModelWithProvider(); + const providerForAction = ctx?.provider; + const modelNameForAction = ctx?.miniModel || ctx?.model; + if (!providerForAction || !modelNameForAction) { + return { error: 'Missing LLM context (provider/model) for ObjectiveDrivenActionTool' }; + } if (!apiKey) {return { error: 'API key not configured.' };} if (typeof objective !== 'string' || objective.trim() === '') { @@ -3760,14 +3798,18 @@ CRITICAL: } - async execute(args: { objective: string, schema: Record, offset?: number, chunkSize?: number, maxRetries?: number }): Promise { + async execute(args: { objective: string, schema: Record, offset?: number, chunkSize?: number, maxRetries?: number }, ctx?: LLMContext): Promise { const { objective, schema, offset = 0, chunkSize = 60000, maxRetries = 1 } = args; // Default offset 0, chunkSize 60000, maxRetries 1 let currentTry = 0; let lastError: string | null = null; const agentService = AgentService.getInstance(); const apiKey = agentService.getApiKey(); - const { model: modelNameForExtraction, provider: providerForExtraction } = AIChatPanel.getMiniModelWithProvider(); + const providerForExtraction = ctx?.provider; + const modelNameForExtraction = ctx?.miniModel || ctx?.model; + if (!providerForExtraction || !modelNameForExtraction) { + return { error: 'Missing LLM context (provider/model) for SchemaBasedDataExtractionTool' }; + } if (!apiKey) { return { error: 'API key not configured.' 
}; diff --git a/front_end/panels/ai_chat/ui/AIChatPanel.test.ts b/front_end/panels/ai_chat/ui/AIChatPanel.test.ts index 9d32b5edd82..58a5a54defc 100644 --- a/front_end/panels/ai_chat/ui/AIChatPanel.test.ts +++ b/front_end/panels/ai_chat/ui/AIChatPanel.test.ts @@ -5,7 +5,9 @@ import {AIChatPanel, DEFAULT_PROVIDER_MODELS, type ModelOption, resetAIChatPanelInstanceForTesting} from './AIChatPanel.js'; import {LLMProviderRegistry} from '../LLM/LLMProviderRegistry.js'; import {AgentService} from '../core/AgentService.js'; -import {ChatMessageEntity} from './ChatView.js'; +import {Events as AgentEvents} from '../core/AgentService.js'; +import {ChatMessageEntity} from '../models/ChatTypes.js'; +import {raf} from '../../testing/DOMHelpers.js'; declare global { function describe(name: string, fn: () => void): void; @@ -69,6 +71,101 @@ describe('AIChatPanel Model Validation', () => { mockLocalStorage.clear(); }); + describe('Processing state on final error', () => { + it('clears loader and keeps inputs enabled when final error message arrives', async () => { + // Ensure we have a panel and attach to DOM so ChatView renders + const root = document.createElement('div'); + document.body.appendChild(root); + root.appendChild(panel.contentElement); + + // Simulate messages changed with a final error message + const svc = AgentService.getInstance(); + const messages: any[] = [ + { entity: ChatMessageEntity.USER, text: 'Go' }, + { entity: ChatMessageEntity.MODEL, action: 'final', isFinalAnswer: true, error: 'Failed after 5 attempts' }, + ]; + svc.dispatchEventToListeners(AgentEvents.MESSAGES_CHANGED, messages as any); + await raf(); + + // Query ChatView shadow for generic loader and assert it is not present + const chatView = panel.contentElement.querySelector('devtools-chat-view') as HTMLElement; + const shadow = chatView.shadowRoot!; + const loaders = shadow.querySelectorAll('.message.model-message.loading'); + assert.strictEqual(loaders.length, 0); + + // Model selector should not be disabled (processing cleared) + const inputBar = shadow.querySelector('ai-input-bar') as any; + assert.strictEqual(Boolean(inputBar?.modelSelectorDisabled), false); + + document.body.removeChild(root); + }); + }); + + describe('#updateProcessingState edge cases', () => { + it('does not clear processing on non-final (tool) message, then clears on final', async () => { + const root = document.createElement('div'); + document.body.appendChild(root); + root.appendChild(panel.contentElement); + + // Force processing state on + panel.setProcessingForTesting(true); + await raf(); + + // Dispatch non-final model tool message + const svc = AgentService.getInstance(); + const toolMsg = { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', isFinalAnswer: false } as any; + svc.dispatchEventToListeners(AgentEvents.MESSAGES_CHANGED, [ { entity: ChatMessageEntity.USER, text: 'x' } as any, toolMsg ] as any); + await raf(); + + // Still processing + assert.strictEqual(panel.getIsProcessingForTesting(), true); + const chatView = panel.contentElement.querySelector('devtools-chat-view') as HTMLElement; + const shadow = chatView.shadowRoot!; + const inputBar = shadow.querySelector('ai-input-bar') as any; + assert.strictEqual(Boolean(inputBar?.modelSelectorDisabled), true); + + // Now dispatch final without error but with isFinalAnswer true + const finalMsg = { entity: ChatMessageEntity.MODEL, action: 'final', isFinalAnswer: true, answer: 'done' } as any; + svc.dispatchEventToListeners(AgentEvents.MESSAGES_CHANGED, [ { entity: 
ChatMessageEntity.USER, text: 'x' } as any, finalMsg ] as any); + await raf(); + + // Processing cleared + assert.strictEqual(panel.getIsProcessingForTesting(), false); + const inputBarAfter = shadow.querySelector('ai-input-bar') as any; + assert.strictEqual(Boolean(inputBarAfter?.modelSelectorDisabled), false); + + document.body.removeChild(root); + }); + }); + + describe('Groq JSON parse error surfaces and clears loader', () => { + it('shows error in ChatView and hides loader for Groq tool-args parse error', async () => { + const root = document.createElement('div'); + document.body.appendChild(root); + root.appendChild(panel.contentElement); + + const svc = AgentService.getInstance(); + const groqError = 'Failed after 5 attempts: Groq API error: - Failed to parse tool call arguments as JSON'; + const messages: any[] = [ + { entity: ChatMessageEntity.USER, text: 'Do research' }, + { entity: ChatMessageEntity.MODEL, action: 'final', isFinalAnswer: true, error: groqError }, + ]; + svc.dispatchEventToListeners(AgentEvents.MESSAGES_CHANGED, messages as any); + await raf(); + + const chatView = panel.contentElement.querySelector('devtools-chat-view') as HTMLElement; + const shadow = chatView.shadowRoot!; + // Error is rendered + const errorNode = shadow.querySelector('.message-error') as HTMLElement; + assert.strictEqual(Boolean(errorNode && errorNode.textContent?.includes('Groq API error')), true); + assert.strictEqual(Boolean(errorNode && errorNode.textContent?.includes('Failed to parse tool call arguments as JSON')), true); + // Loader not present + const loaders = shadow.querySelectorAll('.message.model-message.loading'); + assert.strictEqual(loaders.length, 0); + document.body.removeChild(root); + }); + }); + describe('#validateAndFixModelSelections', () => { it('should return true when all models are valid', () => { // Set up valid models @@ -275,4 +372,4 @@ describe('AIChatPanel Model Validation', () => { LLMProviderRegistry.hasProvider = originalHasProvider; }); }); -}); \ No newline at end of file +}); diff --git a/front_end/panels/ai_chat/ui/AIChatPanel.ts b/front_end/panels/ai_chat/ui/AIChatPanel.ts index 18a999ede36..0d3f674d883 100644 --- a/front_end/panels/ai_chat/ui/AIChatPanel.ts +++ b/front_end/panels/ai_chat/ui/AIChatPanel.ts @@ -20,6 +20,8 @@ import { OpenRouterProvider } from '../LLM/OpenRouterProvider.js'; import { createLogger } from '../core/Logger.js'; import { isEvaluationEnabled, getEvaluationConfig } from '../common/EvaluationConfig.js'; import { EvaluationAgent } from '../evaluation/remote/EvaluationAgent.js'; +// Import of LiveAgentSessionComponent is not required here; the element is +// registered by ChatView where it is used. 
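The new AIChatPanel tests above all pin down the same contract: the loader clears and inputs re-enable exactly when the last message is a final model message, whether it carries an answer or only an error string (the Groq parse-failure case). Distilled into a standalone predicate — a sketch of the rule the tests assert, not the panel's actual method:

```typescript
import { ChatMessageEntity, type ChatMessage } from '../models/ChatTypes.js';

// True when the agent turn has ended and the "processing" UI state should clear.
export function isTurnComplete(messages: ChatMessage[]): boolean {
  const last = messages[messages.length - 1];
  if (!last || last.entity !== ChatMessageEntity.MODEL) {
    return false;
  }
  // A final answer ends the turn; so does a terminal error reported on the final message.
  return last.action === 'final' && (Boolean(last.isFinalAnswer) || 'error' in last);
}
```

A non-final tool message deliberately fails this check, which is what the "does not clear processing on non-final (tool) message" test relies on.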
const logger = createLogger('AIChatPanel'); @@ -76,14 +78,8 @@ localStorage.removeItem = (key: string) => { } import chatViewStyles from './chatView.css.js'; -import { - type ChatMessage, - ChatMessageEntity, - ChatView, - type ImageInputData, - type ModelChatMessage, - State as ChatViewState, -} from './ChatView.js'; +import { ChatView } from './ChatView.js'; +import { type ChatMessage, ChatMessageEntity, type ImageInputData, type ModelChatMessage, State as ChatViewState } from '../models/ChatTypes.js'; import { HelpDialog } from './HelpDialog.js'; import { SettingsDialog, isVectorDBEnabled } from './SettingsDialog.js'; import { EvaluationDialog } from './EvaluationDialog.js'; @@ -719,12 +715,28 @@ export class AIChatPanel extends UI.Panel.Panel { #apiKey: string | null = null; // Regular API key #evaluationAgent: EvaluationAgent | null = null; // Evaluation agent for this tab + // Store bound event listeners to properly add/remove without duplications + #boundOnMessagesChanged?: (e: Common.EventTarget.EventTargetEvent) => void; + #boundOnAgentSessionStarted?: (e: Common.EventTarget.EventTargetEvent) => void; + #boundOnAgentToolStarted?: (e: Common.EventTarget.EventTargetEvent<{ session: import('../agent_framework/AgentSessionTypes.js').AgentSession, toolCall: import('../agent_framework/AgentSessionTypes.js').AgentMessage }>) => void; + #boundOnAgentToolCompleted?: (e: Common.EventTarget.EventTargetEvent<{ session: import('../agent_framework/AgentSessionTypes.js').AgentSession, toolResult: import('../agent_framework/AgentSessionTypes.js').AgentMessage }>) => void; + #boundOnAgentSessionUpdated?: (e: Common.EventTarget.EventTargetEvent) => void; + #boundOnChildAgentStarted?: (e: Common.EventTarget.EventTargetEvent<{ parentSession: import('../agent_framework/AgentSessionTypes.js').AgentSession, childAgentName: string, childSessionId: string }>) => void; + constructor() { super(AIChatPanel.panelName); // Initialize storage monitoring for debugging StorageMonitor.getInstance(); + // Prepare bound handlers once so removeEventListener works correctly + this.#boundOnMessagesChanged = this.#handleMessagesChanged.bind(this); + this.#boundOnAgentSessionStarted = this.#handleAgentSessionStarted.bind(this); + this.#boundOnAgentToolStarted = this.#handleAgentToolStarted.bind(this); + this.#boundOnAgentToolCompleted = this.#handleAgentToolCompleted.bind(this); + this.#boundOnAgentSessionUpdated = this.#handleAgentSessionUpdated.bind(this); + this.#boundOnChildAgentStarted = this.#handleChildAgentStarted.bind(this); + this.#setupUI(); this.#setupInitialState(); this.#setupOAuthEventListeners(); @@ -1310,10 +1322,20 @@ export class AIChatPanel extends UI.Panel.Panel { logger.info('✅ Credentials valid, proceeding with agent service initialization'); // Remove any existing listeners to prevent duplicates - this.#agentService.removeEventListener(AgentEvents.MESSAGES_CHANGED, this.#handleMessagesChanged.bind(this)); + if (this.#boundOnMessagesChanged) this.#agentService.removeEventListener(AgentEvents.MESSAGES_CHANGED, this.#boundOnMessagesChanged); + if (this.#boundOnAgentSessionStarted) this.#agentService.removeEventListener(AgentEvents.AGENT_SESSION_STARTED, this.#boundOnAgentSessionStarted); + if (this.#boundOnAgentToolStarted) this.#agentService.removeEventListener(AgentEvents.AGENT_TOOL_STARTED, this.#boundOnAgentToolStarted); + if (this.#boundOnAgentToolCompleted) this.#agentService.removeEventListener(AgentEvents.AGENT_TOOL_COMPLETED, this.#boundOnAgentToolCompleted); + if 
(this.#boundOnAgentSessionUpdated) this.#agentService.removeEventListener(AgentEvents.AGENT_SESSION_UPDATED, this.#boundOnAgentSessionUpdated); + if (this.#boundOnChildAgentStarted) this.#agentService.removeEventListener(AgentEvents.CHILD_AGENT_STARTED, this.#boundOnChildAgentStarted); // Register for messages changed events - this.#agentService.addEventListener(AgentEvents.MESSAGES_CHANGED, this.#handleMessagesChanged.bind(this)); + if (this.#boundOnMessagesChanged) this.#agentService.addEventListener(AgentEvents.MESSAGES_CHANGED, this.#boundOnMessagesChanged); + if (this.#boundOnAgentSessionStarted) this.#agentService.addEventListener(AgentEvents.AGENT_SESSION_STARTED, this.#boundOnAgentSessionStarted); + if (this.#boundOnAgentToolStarted) this.#agentService.addEventListener(AgentEvents.AGENT_TOOL_STARTED, this.#boundOnAgentToolStarted); + if (this.#boundOnAgentToolCompleted) this.#agentService.addEventListener(AgentEvents.AGENT_TOOL_COMPLETED, this.#boundOnAgentToolCompleted); + if (this.#boundOnAgentSessionUpdated) this.#agentService.addEventListener(AgentEvents.AGENT_SESSION_UPDATED, this.#boundOnAgentSessionUpdated); + if (this.#boundOnChildAgentStarted) this.#agentService.addEventListener(AgentEvents.CHILD_AGENT_STARTED, this.#boundOnChildAgentStarted); // Initialize the agent service logger.info('Calling agentService.initialize()...'); @@ -1564,17 +1586,99 @@ export class AIChatPanel extends UI.Panel.Panel { this.performUpdate(); } + /** + * Handle agent session started event + */ + #handleAgentSessionStarted(event: Common.EventTarget.EventTargetEvent): void { + const session = event.data; + this.#upsertAgentSessionMessage(session); + this.performUpdate(); + } + + /** + * Handle agent tool started event + */ + #handleAgentToolStarted(event: Common.EventTarget.EventTargetEvent<{ session: import('../agent_framework/AgentSessionTypes.js').AgentSession, toolCall: import('../agent_framework/AgentSessionTypes.js').AgentMessage }>): void { + const { session } = event.data; + this.#upsertAgentSessionMessage(session); + this.performUpdate(); + } + + /** + * Handle agent tool completed event + */ + #handleAgentToolCompleted(event: Common.EventTarget.EventTargetEvent<{ session: import('../agent_framework/AgentSessionTypes.js').AgentSession, toolResult: import('../agent_framework/AgentSessionTypes.js').AgentMessage }>): void { + const { session } = event.data; + this.#upsertAgentSessionMessage(session); + this.performUpdate(); + } + + /** + * Handle agent session updated event + */ + #handleAgentSessionUpdated(event: Common.EventTarget.EventTargetEvent): void { + const session = event.data; + this.#upsertAgentSessionMessage(session); + this.performUpdate(); + } + + /** + * Handle child agent started event + */ + #handleChildAgentStarted(event: Common.EventTarget.EventTargetEvent<{ parentSession: import('../agent_framework/AgentSessionTypes.js').AgentSession, childAgentName: string, childSessionId: string }>): void { + const { parentSession } = event.data; + this.#upsertAgentSessionMessage(parentSession); + this.performUpdate(); + } + + /** + * Upsert an AGENT_SESSION message into the messages array by sessionId + */ + #upsertAgentSessionMessage(session: import('../agent_framework/AgentSessionTypes.js').AgentSession): void { + const idx = this.#messages.findIndex(m => m.entity === ChatMessageEntity.AGENT_SESSION && + (m as any).agentSession?.sessionId === session.sessionId); + if (idx >= 0) { + const updated = { ...(this.#messages[idx] as any), agentSession: session }; + const next = 
[...this.#messages]; + next[idx] = updated; + this.#messages = next; + } else { + const agentSessionMessage: ChatMessage = { + entity: ChatMessageEntity.AGENT_SESSION, + agentSession: session, + summary: `${session.agentName} is executing...` + } as any; + this.#messages = [...this.#messages, agentSessionMessage]; + } + } + /** * Updates processing state based on the latest messages */ #updateProcessingState(messages: ChatMessage[]): void { // Only set isProcessing to false if the last message is a final answer from the model const lastMessage = messages[messages.length - 1]; - if (lastMessage && - lastMessage.entity === ChatMessageEntity.MODEL && - lastMessage.action === 'final' && - lastMessage.isFinalAnswer) { - this.#isProcessing = false; + + // DEBUG: Log processing state check + logger.info('updateProcessingState: Current isProcessing =', this.#isProcessing); + if (lastMessage) { + const checks = { + hasMessage: !!lastMessage, + isModelEntity: lastMessage.entity === ChatMessageEntity.MODEL, + isFinalAction: 'action' in lastMessage && lastMessage.action === 'final', + isFinalAnswer: 'isFinalAnswer' in lastMessage && lastMessage.isFinalAnswer + }; + logger.info('Processing state checks:', checks); + + if (lastMessage && + lastMessage.entity === ChatMessageEntity.MODEL && + lastMessage.action === 'final' && + (lastMessage.isFinalAnswer || 'error' in lastMessage)) { + logger.info('Setting isProcessing to false'); + this.#isProcessing = false; + } else { + logger.info('Not setting isProcessing to false - conditions not met'); + } } } @@ -1757,7 +1861,23 @@ export class AIChatPanel extends UI.Panel.Panel { */ override willHide(): void { // Explicitly remove any event listeners to prevent memory leaks - this.#agentService.removeEventListener(AgentEvents.MESSAGES_CHANGED, this.#handleMessagesChanged.bind(this)); + if (this.#boundOnMessagesChanged) { + this.#agentService.removeEventListener(AgentEvents.MESSAGES_CHANGED, this.#boundOnMessagesChanged); + } + if (this.#boundOnAgentSessionStarted) this.#agentService.removeEventListener(AgentEvents.AGENT_SESSION_STARTED, this.#boundOnAgentSessionStarted); + if (this.#boundOnAgentToolStarted) this.#agentService.removeEventListener(AgentEvents.AGENT_TOOL_STARTED, this.#boundOnAgentToolStarted); + if (this.#boundOnAgentToolCompleted) this.#agentService.removeEventListener(AgentEvents.AGENT_TOOL_COMPLETED, this.#boundOnAgentToolCompleted); + if (this.#boundOnAgentSessionUpdated) this.#agentService.removeEventListener(AgentEvents.AGENT_SESSION_UPDATED, this.#boundOnAgentSessionUpdated); + if (this.#boundOnChildAgentStarted) this.#agentService.removeEventListener(AgentEvents.CHILD_AGENT_STARTED, this.#boundOnChildAgentStarted); + } + + // Test-only helpers + getIsProcessingForTesting(): boolean { + return this.#isProcessing; + } + + setProcessingForTesting(flag: boolean): void { + this.#setProcessingState(flag); } /** @@ -2203,4 +2323,4 @@ export class ActionDelegate implements UI.ActionRegistration.ActionDelegate { } return false; } -} \ No newline at end of file +} diff --git a/front_end/panels/ai_chat/ui/AgentSessionHeaderComponent.ts b/front_end/panels/ai_chat/ui/AgentSessionHeaderComponent.ts new file mode 100644 index 00000000000..a07150ddbb6 --- /dev/null +++ b/front_end/panels/ai_chat/ui/AgentSessionHeaderComponent.ts @@ -0,0 +1,284 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
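A second change worth calling out from the AIChatPanel.ts hunks above: handlers are now bound once in the constructor and the same references are passed to both addEventListener and removeEventListener. The earlier code passed `this.#handleMessagesChanged.bind(this)` to removeEventListener, which creates a fresh function each time and therefore never removes anything. A minimal, self-contained illustration of the principle (using a plain DOM EventTarget here, not the AgentService event API):

```typescript
// Why the panel stores pre-bound handlers: bind(this) returns a new function
// on every call, so add/remove pairs built from separate bind() calls never match.
class Subscriber {
  // Bind once (arrow field), keep the reference, reuse it for both add and remove.
  private readonly onMessagesChanged = (event: Event): void => {
    console.log('messages changed', event.type);
  };

  attach(target: EventTarget): void {
    target.addEventListener('messages-changed', this.onMessagesChanged);
  }

  detach(target: EventTarget): void {
    // Same function identity as the one registered above, so removal actually works.
    target.removeEventListener('messages-changed', this.onMessagesChanged);
  }
}
```

The patch applies the same idea to all six agent-session events, guarding each add/remove pair on the stored bound reference.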
+ +import * as Lit from '../../../ui/lit/lit.js'; +import type { AgentSession } from '../agent_framework/AgentSessionTypes.js'; +import { getAgentUIConfig } from '../agent_framework/AgentSessionTypes.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators; + +export type SessionStatus = 'running' | 'completed' | 'error'; + +@customElement('agent-session-header') +export class AgentSessionHeaderComponent extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`agent-session-header`; + private readonly shadow = this.attachShadow({mode: 'open'}); + + private session: AgentSession | null = null; + private isExpanded = true; + private startTime: Date | null = null; + private endTime: Date | null = null; + private durationTimer: number | null = null; + + connectedCallback(): void { + this.startDurationTimer(); + this.render(); + } + + disconnectedCallback(): void { + this.stopDurationTimer(); + } + + setSession(session: AgentSession): void { + this.session = session; + this.startTime = session.startTime || new Date(); + this.endTime = session.endTime || null; + + if (session.status !== 'running' && !this.endTime) { + this.endTime = new Date(); + } + + this.render(); + } + + toggleExpanded(): void { + this.isExpanded = !this.isExpanded; + this.render(); + + // Dispatch event to notify parent + this.dispatchEvent(new CustomEvent('toggle-expanded', { + detail: { isExpanded: this.isExpanded }, + bubbles: true + })); + } + + private startDurationTimer(): void { + this.durationTimer = window.setInterval(() => { + if (this.session?.status === 'running') { + this.render(); + } + }, 1000); + } + + private stopDurationTimer(): void { + if (this.durationTimer) { + window.clearInterval(this.durationTimer); + this.durationTimer = null; + } + } + + private render(): void { + if (!this.session) return; + + const uiConfig = getAgentUIConfig(this.session.agentName, this.session.config); + const status = this.getSessionStatus(); + const statusClass = this.getStatusClass(status); + const statusIcon = this.getStatusIcon(status); + const duration = this.formatDuration(); + + Lit.render(html` + + +
this.toggleExpanded()}> +
${uiConfig.displayName}
+ + ${status === 'running' ? html` +
+
+ LIVE +
+ ` : html` +
+ ${statusIcon} + ${status.toUpperCase()} +
+ `} + +
+ ${this.session.parentSessionId ? 'Nested' : 'Top Level'} +
+ +
${duration}
+ + +
+ `, this.shadow); + } + + private getSessionStatus(): SessionStatus { + if (!this.session) return 'running'; + + // Map session status to our component status + switch (this.session.status) { + case 'running': return 'running'; + case 'completed': return 'completed'; + case 'error': return 'error'; + default: return 'running'; + } + } + + private getStatusClass(status: SessionStatus): string { + return status; + } + + private getStatusIcon(status: SessionStatus): string { + switch (status) { + case 'running': return '⏳'; + case 'completed': return '✓'; + case 'error': return '❌'; + default: return '●'; + } + } + + private formatDuration(): string { + if (!this.startTime) return '0s'; + + const endTime = this.endTime || new Date(); + const durationMs = endTime.getTime() - this.startTime.getTime(); + const seconds = Math.floor(durationMs / 1000); + + if (seconds < 60) { + return `${seconds}s`; + } else if (seconds < 3600) { + const minutes = Math.floor(seconds / 60); + const remainingSeconds = seconds % 60; + return `${minutes}m ${remainingSeconds}s`; + } else { + const hours = Math.floor(seconds / 3600); + const minutes = Math.floor((seconds % 3600) / 60); + return `${hours}h ${minutes}m`; + } + } +} \ No newline at end of file diff --git a/front_end/panels/ai_chat/ui/ChatView.ts b/front_end/panels/ai_chat/ui/ChatView.ts index ecdf34aa95f..aaaeaa10a9b 100644 --- a/front_end/panels/ai_chat/ui/ChatView.ts +++ b/front_end/panels/ai_chat/ui/ChatView.ts @@ -2,20 +2,37 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -import * as Marked from '../../../third_party/marked/marked.js'; import * as ComponentHelpers from '../../../ui/components/helpers/helpers.js'; -import * as MarkdownView from '../../../ui/components/markdown_view/markdown_view.js'; import * as Lit from '../../../ui/lit/lit.js'; import * as BaseOrchestratorAgent from '../core/BaseOrchestratorAgent.js'; -import { TIMING_CONSTANTS, CONTENT_THRESHOLDS, ERROR_MESSAGES, REGEX_PATTERNS } from '../core/Constants.js'; +import { TIMING_CONSTANTS } from '../core/Constants.js'; import { PromptEditDialog } from './PromptEditDialog.js'; -import * as SDK from '../../../core/sdk/sdk.js'; -import * as Host from '../../../core/host/host.js'; -import * as Platform from '../../../core/platform/platform.js'; +import { MarkdownViewerUtil } from '../common/MarkdownViewerUtil.js'; import { createLogger } from '../core/Logger.js'; -import type { AgentSession, AgentMessage, ToolCallMessage as AgentToolCallMessage, ToolResultMessage as AgentToolResultMessage } from '../agent_framework/AgentSessionTypes.js'; +import type { AgentSession, ToolCallMessage as AgentToolCallMessage, ToolResultMessage as AgentToolResultMessage } from '../agent_framework/AgentSessionTypes.js'; import { getAgentUIConfig } from '../agent_framework/AgentSessionTypes.js'; import { VersionChecker, type VersionInfo } from '../core/VersionChecker.js'; +import { LiveAgentSessionComponent } from './LiveAgentSessionComponent.js'; +import { MarkdownRenderer, renderMarkdown } from './markdown/MarkdownRenderers.js'; +import { parseStructuredResponse } from '../core/structured_response.js'; +import { ToolDescriptionFormatter } from './ToolDescriptionFormatter.js'; +import './message/MessageList.js'; +import { renderUserMessage } from './message/UserMessage.js'; +import { renderModelMessage } from './message/ModelMessage.js'; +import { renderToolResultMessage } from './message/ToolResultMessage.js'; +import './version/VersionBanner.js'; 
+import { renderGlobalActionsRow } from './message/GlobalActionsRow.js'; +import { renderStructuredResponse as renderStructuredResponseUI } from './message/StructuredResponseRender.js'; +import './oauth/OAuthConnectPanel.js'; +import './input/ChatInput.js'; +import './input/InputBar.js'; +import './model_selector/ModelSelector.js'; +import { combineMessages } from './message/MessageCombiner.js'; +import { StructuredResponseController } from './message/StructuredResponseController.js'; + +// Shared chat types +import type { ChatMessage, ModelChatMessage, ToolResultMessage, AgentSessionMessage, ImageInputData } from '../models/ChatTypes.js'; +import { ChatMessageEntity, State } from '../models/ChatTypes.js'; const logger = createLogger('ChatView'); @@ -24,200 +41,7 @@ import chatViewStyles from './chatView.css.js'; const {html, Decorators} = Lit; const {customElement} = Decorators; -// A simplified version of the MarkdownRenderer with code block support -class MarkdownRenderer extends MarkdownView.MarkdownView.MarkdownInsightRenderer { - override templateForToken(token: Marked.Marked.MarkedToken): Lit.TemplateResult|null { - if (token.type === 'code') { - const lines = (token.text).split('\n'); - if (lines[0]?.trim() === 'css') { - token.lang = 'css'; - token.text = lines.slice(1).join('\n'); - } - } - - return super.templateForToken(token); - } -} - -// Enhanced MarkdownRenderer for deep-research responses with table of contents -class DeepResearchMarkdownRenderer extends MarkdownView.MarkdownView.MarkdownInsightRenderer { - private tocItems: Array<{level: number, text: string, id: string}> = []; - - override templateForToken(token: Marked.Marked.MarkedToken): Lit.TemplateResult|null { - if (token.type === 'heading') { - // Generate ID for heading - const headingText = this.extractTextFromTokens((token.tokens || []) as Marked.Marked.MarkedToken[]); - const id = this.generateHeadingId(headingText); - - // Add to TOC - this.tocItems.push({ - level: (token as any).depth, - text: headingText, - id: id - }); - - // Create heading with ID - const headingTag = `h${(token as any).depth}`; - // Use renderToken to render the content - const content = super.renderToken(token); - return html`
${content}
`; - } - - if (token.type === 'code') { - const lines = (token.text).split('\n'); - if (lines[0]?.trim() === 'css') { - token.lang = 'css'; - token.text = lines.slice(1).join('\n'); - } - } - - return super.templateForToken(token); - } - - private extractTextFromTokens(tokens: Marked.Marked.MarkedToken[]): string { - return tokens.map(token => { - if (token.type === 'text') { - return token.text; - } - return token.raw || ''; - }).join(''); - } - - private generateHeadingId(text: string): string { - return text.toLowerCase() - .replace(/[^\w\s-]/g, '') - .replace(/\s+/g, '-') - .trim(); - } - - getTocItems(): Array<{level: number, text: string, id: string}> { - return this.tocItems; - } - - clearToc(): void { - this.tocItems = []; - } -} - -// Function to render text as markdown -function renderMarkdown(text: string, markdownRenderer: MarkdownRenderer, onOpenTableInViewer?: (markdownContent: string) => void): Lit.TemplateResult { - let tokens: Marked.Marked.MarkedToken[] = []; - try { - tokens = Marked.Marked.lexer(text) as Marked.Marked.MarkedToken[]; - for (const token of tokens) { - // Try to render all the tokens to make sure that - // they all have a template defined for them. If there - // isn't any template defined for a token, we'll fallback - // to rendering the text as plain text instead of markdown. - markdownRenderer.renderToken(token); - } - } catch { - // The tokens were not parsed correctly or - // one of the tokens are not supported, so we - // continue to render this as text. - return html`${text}`; - } - - return html` - `; -} - -// Types for the ChatView component - -// Define possible entities for chat messages -export enum ChatMessageEntity { - USER = 'user', - MODEL = 'model', - TOOL_RESULT = 'tool_result', - AGENT_SESSION = 'agent_session', -} - -// Base structure for all chat messages -export interface BaseChatMessage { - entity: ChatMessageEntity; - error?: string; -} - -// Represents a message sent by the user -export interface UserChatMessage extends BaseChatMessage { - entity: ChatMessageEntity.USER; - text: string; - imageInput?: ImageInputData; -} - -// Represents a message generated by the AI model -// This now directly contains the agent's action details -export interface ModelChatMessage extends BaseChatMessage { - entity: ChatMessageEntity.MODEL; - // Type of action the model decided on - action: 'tool' | 'final'; - // Tool details (only relevant if action is 'tool') - toolName?: string; - toolArgs?: Record; - // Final answer (only relevant if action is 'final') - answer?: string; - // Indicates if this message concludes the agent's turn (set if action is 'final') - isFinalAnswer: boolean; - // Reasoning summary from the model - reasoning?: string[] | null; - // Tool call ID for linking with tool responses (OpenAI format) - toolCallId?: string; - // REMOVED steps?: Step[]; -} - -// Represents the result of a tool execution -export interface ToolResultMessage extends BaseChatMessage { - entity: ChatMessageEntity.TOOL_RESULT; - toolName: string; - resultText: string; - isError: boolean; - // Add optional structured data field - resultData?: any; - // Tool call ID for linking to assistant tool call (OpenAI format) - toolCallId?: string; - // Mark if this is from a ConfigurableAgentTool - isFromConfigurableAgent?: boolean; - // Base64 image data URL for multimodal LLM responses - imageData?: string; - // Optional summary for agent tool completions - summary?: string; -} - -// Represents an agent session execution -export interface AgentSessionMessage extends 
BaseChatMessage { - entity: ChatMessageEntity.AGENT_SESSION; - agentSession: AgentSession; - // Link to the user message that triggered this execution - triggerMessageId?: string; - // Summary for quick display - summary?: string; -} - -// Union type representing any possible chat message -export type ChatMessage = - UserChatMessage|ModelChatMessage|ToolResultMessage|AgentSessionMessage; - -// Defines the structure of an image input -export interface ImageInputData { - url: string; - bytesBase64: string; -} - -// Structure for enhanced responses (e.g., markdown, code blocks) -// This might be less relevant now if answers are plain text -export interface EnhancedResponse { - type: 'markdown' | 'code'; - content: string; -} - -// REMOVED Step interface entirely - -export enum State { - IDLE = 'idle', - LOADING = 'loading', - ERROR = 'error', -} +// Markdown rendering moved to ui/markdown/MarkdownRenderers.ts export interface Props { messages: ChatMessage[]; @@ -257,10 +81,12 @@ export class ChatView extends HTMLElement { #onSendMessage?: (text: string, imageInput?: ImageInputData) => void; #onImageInputClear?: () => void; #onPromptSelected?: (promptType: string | null) => void; - #textInputElement?: HTMLTextAreaElement; + // input is handled by #markdownRenderer = new MarkdownRenderer(); #isFirstMessageView = true; // Track if we're in the centered first-message view #selectedPromptType?: string | null; // Track the currently selected prompt type + // Lightweight instance cache to preserve per-session element state across renders + #liveSessionComponents = new Map(); #handlePromptButtonClickBound: (event: Event) => void = () => {}; // Initialize with empty function, will be properly set in connectedCallback // Add model selection properties #modelOptions?: Array<{value: string, label: string}>; @@ -270,10 +96,7 @@ export class ChatView extends HTMLElement { #selectedAgentType?: string | null; #isModelSelectorDisabled = false; - // Add scroll-related properties - #messagesContainerElement?: HTMLElement; - #messagesContainerResizeObserver = new ResizeObserver(() => this.#handleMessagesContainerResize()); - #pinScrollToBottom = true; + // Scroll behavior delegated to // Add properties for input disabled state and placeholder #isInputDisabled = false; @@ -283,15 +106,17 @@ export class ChatView extends HTMLElement { #showOAuthLogin = false; #onOAuthLogin?: () => void; - // Add state tracking for AI Assistant operations - #aiAssistantStates = new Map(); - #lastProcessedMessageKey: string | null = null; - - // Add model selector state for searchable dropdown - #isModelDropdownOpen = false; - #modelSearchQuery = ''; - #highlightedOptionIndex = 0; - #dropdownPosition: 'above' | 'below' = 'below'; + // Structured response auto-open controller + #structuredController = new StructuredResponseController(() => { + void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); + }); + // Combined messages cache for this render pass + #combinedMessagesCache: CombinedMessage[] = []; + // Track agent session IDs that are nested inside other sessions to avoid duplicate top-level rendering + #nestedChildSessionIds: Set = new Set(); + // Track pending handoff target agent names to suppress interim top-level renders + #pendingHandoffTargets: Set = new Set(); + // Model selector is rendered via // Add version info state #versionInfo: VersionInfo | null = null; @@ -305,11 +130,6 @@ export class ChatView extends HTMLElement { // Initialize the prompt button click handler 
this.#updatePromptButtonClickHandler(); - // Observe the messages container for size changes if it exists - if (this.#messagesContainerElement) { - this.#messagesContainerResizeObserver.observe(this.#messagesContainerElement); - } - // Check for updates when component is connected this.#checkForUpdates(); @@ -317,11 +137,14 @@ export class ChatView extends HTMLElement { } disconnectedCallback(): void { - // Cleanup resize observer - this.#messagesContainerResizeObserver.disconnect(); - - // Clear state maps to prevent memory leaks - this.#aiAssistantStates.clear(); + // Nothing to cleanup currently + } + + // Test-only helper to introspect cached live agent sessions + // This is used by unit tests to verify pruning behavior and is not used in production code. + getLiveAgentSessionCountForTesting(): number { + // Count AGENT_SESSION messages present; used as a proxy for visible sessions + return this.#messages.filter(m => m.entity === ChatMessageEntity.AGENT_SESSION).length; } @@ -331,7 +154,7 @@ export class ChatView extends HTMLElement { setAgentViewMode(mode: 'simplified' | 'enhanced'): void { this.#agentViewMode = mode; void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - this.#lastProcessedMessageKey = null; + this.#structuredController.resetLastProcessed(); } /** @@ -340,7 +163,6 @@ export class ChatView extends HTMLElement { #isPartOfAgentSession(message: ChatMessage): boolean { // Check if there's an AgentSessionMessage in the current messages const hasAgentSession = this.#messages.some(msg => msg.entity === ChatMessageEntity.AGENT_SESSION); - console.log('[DEBUG] hasAgentSession:', hasAgentSession); if (!hasAgentSession) { return false; @@ -350,18 +172,14 @@ export class ChatView extends HTMLElement { if (message.entity === ChatMessageEntity.MODEL) { const modelMsg = message as ModelChatMessage; if (modelMsg.action === 'tool' && modelMsg.toolName) { - console.log('[DEBUG] Checking tool:', modelMsg.toolName, 'callId:', modelMsg.toolCallId); // Check if there's a corresponding tool result that's from ConfigurableAgentTool const toolResultIndex = this.#messages.findIndex((msg) => msg.entity === ChatMessageEntity.TOOL_RESULT && (msg as ToolResultMessage).toolName === modelMsg.toolName && (msg as ToolResultMessage).toolCallId === modelMsg.toolCallId ); - - console.log('[DEBUG] Found tool result index:', toolResultIndex); if (toolResultIndex !== -1) { const toolResult = this.#messages[toolResultIndex] as ToolResultMessage; - console.log('[DEBUG] Tool result isFromConfigurableAgent:', toolResult.isFromConfigurableAgent); return toolResult.isFromConfigurableAgent === true; } } @@ -370,149 +188,21 @@ export class ChatView extends HTMLElement { return false; } - // Add method to scroll to bottom - #scrollToBottom(): void { - if (!this.#messagesContainerElement) { - return; - } - - this.#messagesContainerElement.scrollTop = this.#messagesContainerElement.scrollHeight; - } - - // Add method to handle resizing of messages container - #handleMessagesContainerResize(): void { - if (!this.#pinScrollToBottom) { - return; - } - - if (!this.#messagesContainerElement) { - return; - } - - this.#scrollToBottom(); - } - - // Add method to handle scroll events - #handleScroll = (event: Event): void => { - if (!event.target || !(event.target instanceof HTMLElement)) { - return; - } - - const container = event.target as HTMLElement; - const SCROLL_ROUNDING_OFFSET = 1; // Add small offset to handle rounding errors - - // Consider "scrolled to bottom" if within 1px of the bottom - 
this.#pinScrollToBottom = - container.scrollTop + container.clientHeight + SCROLL_ROUNDING_OFFSET >= container.scrollHeight; - }; - - // Add method to handle message container reference - #handleMessagesContainerRef = (el: Element | undefined): void => { - // Remove old observer if it exists - if (this.#messagesContainerElement) { - this.#messagesContainerResizeObserver.unobserve(this.#messagesContainerElement); - } - - this.#messagesContainerElement = el as HTMLElement | undefined; - - if (el) { - this.#messagesContainerResizeObserver.observe(el); - // Initially scroll to bottom when container is first created - this.#scrollToBottom(); - } else { - this.#pinScrollToBottom = true; - } - }; - - // Helper methods for AI Assistant state management - #getMessageStateKey(structuredResponse: {reasoning: string, markdownReport: string}): string { - // Create stable hash from content - Unicode safe - const content = structuredResponse.reasoning + structuredResponse.markdownReport; - - // Unicode-safe hash function using TextEncoder - const encoder = new TextEncoder(); - const bytes = encoder.encode(content); - - let hash = 0; - for (let i = 0; i < bytes.length; i++) { - hash = ((hash << 5) - hash) + bytes[i]; - hash = hash & hash; // Convert to 32-bit integer - } - - // Convert to hex for consistent 8-character length - const key = Math.abs(hash).toString(16).padStart(8, '0'); - - return key; - } - - #getMessageAIAssistantState(messageKey: string): 'pending' | 'opened' | 'failed' | 'not-attempted' { - return this.#aiAssistantStates.get(messageKey) || 'not-attempted'; - } - - #setMessageAIAssistantState(messageKey: string, state: 'pending' | 'opened' | 'failed'): void { - this.#aiAssistantStates.set(messageKey, state); - } - + // Scroll behavior handled by #isLastStructuredMessage(currentCombinedIndex: number): boolean { - // We need to work with the combined messages logic to properly identify the last structured message - // The currentCombinedIndex is from the combined array, but we need to check against the original array - - // Recreate the combined messages logic to understand the mapping - let combinedIndex = 0; - let lastStructuredCombinedIndex = -1; - - for (let originalIndex = 0; originalIndex < this.#messages.length; originalIndex++) { - const message = this.#messages[originalIndex]; - - // Keep User messages and Final Model answers - if (message.entity === ChatMessageEntity.USER || - (message.entity === ChatMessageEntity.MODEL && message.action === 'final')) { - - // Check if this is a structured final answer - if (message.entity === ChatMessageEntity.MODEL && message.action === 'final') { - const structuredResponse = this.#parseStructuredResponse((message as any).answer || ''); - if (structuredResponse) { - lastStructuredCombinedIndex = combinedIndex; - } - } - - combinedIndex++; - continue; - } - - // Handle Model Tool Call message - if (message.entity === ChatMessageEntity.MODEL && message.action === 'tool') { - const nextMessage = this.#messages[originalIndex + 1]; - - // Check if the next message is the corresponding result - if (nextMessage && nextMessage.entity === ChatMessageEntity.TOOL_RESULT && nextMessage.toolName === (message as any).toolName) { - // Combined representation: tool call + result = 1 entry in combined array - combinedIndex++; - } else { - // Tool call is still running (no result yet) - combinedIndex++; - } - continue; - } - - // Handle Tool Result message - skip if it was combined previously - if (message.entity === ChatMessageEntity.TOOL_RESULT) { - const 
prevMessage = this.#messages[originalIndex - 1]; - // Check if the previous message was the corresponding model call - if (!(prevMessage && prevMessage.entity === ChatMessageEntity.MODEL && prevMessage.action === 'tool' && prevMessage.toolName === (message as any).toolName)) { - // Orphaned tool result - add it directly - combinedIndex++; + const combined = this.#combinedMessagesCache.length ? this.#combinedMessagesCache : combineMessages(this.#messages); + let lastStructuredIndex = -1; + for (let i = 0; i < combined.length; i++) { + const m = combined[i]; + if (m.entity === ChatMessageEntity.MODEL && (m as any).action === 'final') { + const sr = parseStructuredResponse(((m as any).answer || '') as string); + if (sr) { + lastStructuredIndex = i; } - // Otherwise, it was handled by the MODEL case above, so we skip this result message - continue; } - - // Fallback for any unexpected message types - combinedIndex++; } - - return lastStructuredCombinedIndex === currentCombinedIndex; + return lastStructuredIndex === currentCombinedIndex; } @@ -520,7 +210,7 @@ export class ChatView extends HTMLElement { #updatePromptButtonClickHandler(): void { this.#handlePromptButtonClickBound = BaseOrchestratorAgent.createAgentTypeSelectionHandler( this, - this.#textInputElement, + undefined, this.#onPromptSelected, (type: string | null) => { this.#selectedPromptType = type; @@ -588,32 +278,9 @@ export class ChatView extends HTMLElement { const willHaveMoreMessages = data.messages?.length > previousMessageCount; const wasInputDisabled = this.#isInputDisabled; - // Handle AI Assistant state cleanup for last-message-only approach - if (willHaveMoreMessages && this.#messages) { - // When new messages are added, reset states for previous final messages - // so that only the last message can attempt to open AI Assistant - const previousLastFinalIndex = this.#messages.findLastIndex(msg => - msg.entity === ChatMessageEntity.MODEL && - (msg as ModelChatMessage).action === 'final' - ); - - if (previousLastFinalIndex >= 0) { - const previousLastMessage = this.#messages[previousLastFinalIndex] as ModelChatMessage; - if (previousLastMessage.answer) { - const structuredResponse = this.#parseStructuredResponse(previousLastMessage.answer); - if (structuredResponse) { - const messageKey = this.#getMessageStateKey(structuredResponse); - const currentState = this.#getMessageAIAssistantState(messageKey); - - // If the previous last message was pending, mark it as failed - // But keep 'opened' state to preserve successfully opened reports - if (currentState === 'pending') { - this.#setMessageAIAssistantState(messageKey, 'failed'); - } - // If it was 'opened', keep it that way to show button only - } - } - } + // Inform structured response controller of new messages + if (willHaveMoreMessages) { + this.#structuredController.handleNewMessages(this.#messages, data.messages); } this.#messages = data.messages; @@ -643,12 +310,6 @@ export class ChatView extends HTMLElement { // Log the input state changes if (wasInputDisabled !== this.#isInputDisabled) { logger.info(`Input disabled state changed: ${wasInputDisabled} -> ${this.#isInputDisabled}`); - - // If we have a text input element, update its disabled state directly - if (this.#textInputElement) { - this.#textInputElement.disabled = this.#isInputDisabled; - logger.info(`Directly updated textarea disabled state to: ${this.#isInputDisabled}`); - } } // Update the selectedPromptType from the passed selectedAgentType if it exists @@ -662,58 +323,69 @@ export class ChatView extends 
HTMLElement { data.messages.some(msg => msg && msg.entity === ChatMessageEntity.USER) : false; this.#isFirstMessageView = !hasUserMessages; + // Controller owns session message upserts; no UI sync required + // Update the prompt button handler with new props this.#updatePromptButtonClickHandler(); void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - // After rendering, scroll to bottom if we have new messages and auto-scroll is enabled - if (this.#pinScrollToBottom && willHaveMoreMessages) { - // Give the DOM time to update before scrolling - setTimeout(() => this.#scrollToBottom(), 0); + // Message list handles pin-to-bottom; no explicit scroll needed here + } + + // Ensure that for each cached live agent session there is a corresponding + // AgentSessionMessage in the messages list. This protects against timing + // gaps where a session has started but upstream state has not yet emitted + // the AgentSessionMessage, which would otherwise prevent rendering. + // (Removed) #syncLiveSessionsIntoMessages + + // Upsert an AGENT_SESSION message by sessionId + #upsertAgentSessionMessage(session: AgentSession): void { + const idx = this.#messages.findIndex(m => m.entity === ChatMessageEntity.AGENT_SESSION && + (m as AgentSessionMessage).agentSession.sessionId === session.sessionId); + if (idx >= 0) { + (this.#messages[idx] as AgentSessionMessage).agentSession = session; + } else { + const agentSessionMessage: AgentSessionMessage = { + entity: ChatMessageEntity.AGENT_SESSION, + agentSession: session, + summary: `${session.agentName} is executing...` + }; + this.#messages.push(agentSessionMessage); } } + // Event handlers removed: controller owns session updates - #handleSendMessage(): void { - // Check if textInputElement, onSendMessage callback, or input is disabled - if (!this.#textInputElement || !this.#onSendMessage || this.#isInputDisabled) { + #handleSendMessage(text?: string): void { + if (!this.#onSendMessage || this.#isInputDisabled) { return; } - - const text = this.#textInputElement.value.trim(); - if (!text) { + const value = (text ?? 
'').trim(); + if (!value) { return; } - // Exit the first message view mode when sending a message this.#isFirstMessageView = false; - // Always scroll to bottom after sending message - this.#pinScrollToBottom = true; - - this.#onSendMessage(text, this.#imageInput); - this.#textInputElement.value = ''; - this.#textInputElement.style.height = 'auto'; + this.#onSendMessage(value, this.#imageInput); this.#isTextInputEmpty = true; void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); } - #handleKeyDown(event: KeyboardEvent): void { - if (event.key === 'Enter' && !event.shiftKey) { - event.preventDefault(); - this.#handleSendMessage(); + #handleChatInputSend(event: Event): void { + const e = event as CustomEvent<{text: string}>; + this.#handleSendMessage(e.detail?.text); + // Proactively clear the input bar's field to avoid any stale content + const bar = this.#shadow.querySelector('ai-input-bar') as any; + if (bar && typeof bar.clearInput === 'function') { + bar.clearInput(); } } - #handleTextInput(event: Event): void { - const textarea = event.target as HTMLTextAreaElement; - textarea.style.height = 'auto'; // Reset height to shrink if needed - textarea.style.height = `${textarea.scrollHeight}px`; - - const newIsEmpty = textarea.value.trim().length === 0; - - // Only trigger re-render if empty state actually changed + #handleChatInputChange(event: Event): void { + const e = event as CustomEvent<{value: string}>; + const newIsEmpty = (e.detail?.value || '').trim().length === 0; if (this.#isTextInputEmpty !== newIsEmpty) { this.#isTextInputEmpty = newIsEmpty; void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); @@ -722,56 +394,58 @@ export class ChatView extends HTMLElement { } } + // input key handling and autosize handled inside + // Render messages based on the combined structure #renderMessage(message: ChatMessage | (ModelChatMessage & { resultText?: string, isError?: boolean, resultError?: string, combined?: boolean }) | (ToolResultMessage & { orphaned?: boolean }), combinedIndex?: number ): Lit.TemplateResult { try { switch (message.entity) { case ChatMessageEntity.USER: - // Render User Message - return html` -
-
-
${renderMarkdown(message.text || '', this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))}
- ${message.error ? html`
${message.error}
` : Lit.nothing} -
-
- `; + // Render User Message via dedicated renderer + return renderUserMessage(message as any, this.#markdownRenderer); case ChatMessageEntity.AGENT_SESSION: - // Render agent session using existing logic + // Render live session declaratively; Lit preserves element instance by key { const agentSessionMessage = message as AgentSessionMessage; - console.log('[AGENT SESSION RENDER] Rendering AgentSessionMessage:', agentSessionMessage); - return this.#renderTaskCompletion(agentSessionMessage.agentSession); + const sid = agentSessionMessage.agentSession.sessionId; + // If this session is a nested child of another visible session, or a pending handoff target, hide the top-level duplicate + if (this.#nestedChildSessionIds.has(sid) || this.#pendingHandoffTargets.has(agentSessionMessage.agentSession.agentName)) { + logger.info('ChatView: suppressing top-level nested agent session', { sid }); + return html``; + } + let comp = this.#liveSessionComponents.get(sid); + if (!comp) { + comp = new LiveAgentSessionComponent(); + this.#liveSessionComponents.set(sid, comp); + } + // Update data on the persistent element instance + (comp as any).session = agentSessionMessage.agentSession; + // Ensure top-level sessions render in full variant + (comp as any).setVariant?.('full'); + // Provide top-level session IDs to suppress inline duplication of nested children + const topLevelIds = new Set( + this.#messages + .filter(m => (m as any).entity === ChatMessageEntity.AGENT_SESSION) + .map(m => (m as AgentSessionMessage).agentSession.sessionId) + ); + (comp as any).setSuppressInlineChildIds?.(topLevelIds); + logger.info('ChatView: rendering top-level agent session', { + sid, + topLevelCount: topLevelIds.size, + nestedChildCount: this.#nestedChildSessionIds.size, + }); + return html`${comp}`; } case ChatMessageEntity.TOOL_RESULT: - // Should only render if orphaned { - const toolResultMessage = message as (ToolResultMessage & { orphaned?: boolean }); - - // If this is from a ConfigurableAgentTool, don't render individual cards - // Let the agent session UI handle it - if (toolResultMessage.isFromConfigurableAgent) { - console.log('[UI FILTER] Hiding ConfigurableAgentTool result:', toolResultMessage.toolName); - return html``; - } - - if (toolResultMessage.orphaned) { - return html` -
-
-
-
- Orphaned Result from: ${toolResultMessage.toolName} ${toolResultMessage.isError ? '(Error)' : ''} -
-
${toolResultMessage.resultText}
- ${toolResultMessage.error ? html`
${toolResultMessage.error}
` : Lit.nothing} -
-
-
- `; - } - // If not orphaned, it should have been combined, so render nothing. - return html``; + const toolResultMessage = message as (ToolResultMessage & { orphaned?: boolean }); + if (toolResultMessage.isFromConfigurableAgent) { + return html``; + } + if (toolResultMessage.orphaned) { + return renderToolResultMessage(toolResultMessage); + } + return html``; } case ChatMessageEntity.MODEL: { @@ -781,9 +455,7 @@ export class ChatView extends HTMLElement { // Hide tool calls that are part of agent sessions if (modelMessage.action === 'tool') { const isPartOfSession = this.#isPartOfAgentSession(modelMessage); - console.log('[UI DEBUG] Tool call:', modelMessage.toolName, 'isPartOfAgentSession:', isPartOfSession); if (isPartOfSession) { - console.log('[UI FILTER] Hiding ModelChatMessage tool call from agent session:', modelMessage.toolName); return html``; } } @@ -796,42 +468,13 @@ export class ChatView extends HTMLElement { // --- Render Final Answer --- if (isFinal) { // Check if this is a structured response with REASONING and MARKDOWN_REPORT sections - const structuredResponse = this.#parseStructuredResponse(modelMessage.answer || ''); + const structuredResponse = parseStructuredResponse(modelMessage.answer || ''); if (structuredResponse) { return this.#renderStructuredResponse(structuredResponse, combinedIndex); } else { - // Regular response - use the old logic - - return html` -
-
- ${modelMessage.answer ? - html` -
${renderMarkdown(modelMessage.answer, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))}
- ${Lit.nothing} - ` : - Lit.nothing - } - ${modelMessage.reasoning?.length ? html` -
-
- - 💡 - Model Reasoning - -
- ${modelMessage.reasoning.map(item => html` -
${renderMarkdown(item, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))}
- `)} -
-
-
- ` : Lit.nothing} - ${modelMessage.error ? html`
${modelMessage.error}
` : Lit.nothing} -
-
- `; + // Regular final answer -> delegate to renderer + return renderModelMessage(modelMessage as any, this.#markdownRenderer); } } @@ -851,81 +494,82 @@ export class ChatView extends HTMLElement { } const toolName = modelMessage.toolName || 'unknown_tool'; - const icon = this.#getToolIcon(toolName); - const descriptionData = this.#getToolDescription(toolName, toolArgs); - - return html` - - ${toolReasoning ? html` -
- ${renderMarkdown(toolReasoning, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))} -
- ` : Lit.nothing} - - -
- -
-
-
${descriptionData.action}
-
- -
+ const icon = ToolDescriptionFormatter.getToolIcon(toolName); + const descriptionData = ToolDescriptionFormatter.getToolDescription(toolName, toolArgs); + + return html`` + // return html` + // + // ${toolReasoning ? html` + //
+ // ${renderMarkdown(toolReasoning, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))} + //
+ // ` : Lit.nothing} + + // + //
+ // + //
+ //
+ //
${descriptionData.action}
+ //
+ // + //
- - - ${status === 'running' ? html` -
- - - - - -
- ` : Lit.nothing} - - - ${modelMessage.error ? html`
Model Error: ${modelMessage.error}
` : Lit.nothing} -
- `; + // + // ${status === 'running' ? html` + //
+ // + // + // + // + // + //
+ // ` : Lit.nothing} + + // + // ${modelMessage.error ? html`
Model Error: ${modelMessage.error}
` : Lit.nothing} + //
+ // `; } default: // Should not happen, but render a fallback @@ -948,69 +592,76 @@ export class ChatView extends HTMLElement { // Check if the last message is a MODEL message indicating a tool is running const lastMessage = this.#messages[this.#messages.length - 1]; const isModelRunningTool = lastMessage?.entity === ChatMessageEntity.MODEL && !lastMessage.isFinalAnswer && lastMessage.toolName; + const lastIsFinal = lastMessage?.entity === ChatMessageEntity.MODEL && (lastMessage as any).action === 'final'; + // Session-aware loading: keep spinner while any agent session is running, + // or (for non-agent flows) until we see a final model message. + const anyAgentRunning = this.#messages.some(m => + (m as any).entity === ChatMessageEntity.AGENT_SESSION && + ((m as any as AgentSessionMessage).agentSession?.status === 'running') + ); // All messages are rendered directly now, including AgentSessionMessage let messagesToRender = this.#messages; - // Combine the tool calling and tool result messages into a single logical unit for rendering - const combinedMessages = messagesToRender.reduce((acc, message, index, allMessages) => { - // Keep User messages and Final Model answers - if (message.entity === ChatMessageEntity.USER || - (message.entity === ChatMessageEntity.MODEL && message.action === 'final')) { - acc.push(message); - return acc; - } - - // Handle Model Tool Call message - if (message.entity === ChatMessageEntity.MODEL && message.action === 'tool') { - const modelMessage = message as ModelChatMessage; - const nextMessage = allMessages[index + 1]; - - // Check if the next message is the corresponding result - if (nextMessage && nextMessage.entity === ChatMessageEntity.TOOL_RESULT && nextMessage.toolName === modelMessage.toolName) { - // Create a combined representation: add result to model message - // IMPORTANT: Create a new object, don't mutate the original state - const combinedRepresentation = { - ...modelMessage, // Copy model call details - // Add result details directly to this combined object - resultText: (nextMessage as ToolResultMessage).resultText, - isError: (nextMessage as ToolResultMessage).isError, - resultError: (nextMessage as ToolResultMessage).error, // Keep original model error separate if needed - combined: true, // Add a flag to identify this combined message - }; - acc.push(combinedRepresentation); - } else { - // Tool call is still running (no result yet) or result is missing - // Add the model message as is (it will render the "running" state) - acc.push(modelMessage); + // Build a set of nested child session IDs present in the current message set. + // Include both nestedSessions[].sessionId and any handoff anchors in messages that + // have a concrete nestedSessionId (ignore pending-* placeholders). Also build + // a set of pending handoff target agent names to suppress interim top-level renders. 
+ this.#nestedChildSessionIds = new Set(); + this.#pendingHandoffTargets = new Set(); + const collectNested = (s: AgentSession | any) => { + if (!s) return; + // Record child sessions from nestedSessions + if (Array.isArray(s.nestedSessions)) { + for (const child of s.nestedSessions) { + if (child?.sessionId) { + this.#nestedChildSessionIds.add(child.sessionId); + } + collectNested(child); } - return acc; } - - // Handle Tool Result message - skip if it was combined previously - if (message.entity === ChatMessageEntity.TOOL_RESULT) { - const prevMessage = allMessages[index - 1]; - // Check if the previous message was the corresponding model call - if (!(prevMessage && prevMessage.entity === ChatMessageEntity.MODEL && prevMessage.action === 'tool' && prevMessage.toolName === message.toolName)) { - // Orphaned tool result - add it directly - logger.warn('Orphaned tool result found:', message); - acc.push({...message, orphaned: true }); // Add marker for rendering + // Record concrete anchors from handoff messages in the timeline (if available) + if (Array.isArray(s.messages)) { + for (const msg of s.messages) { + if (msg?.type === 'handoff') { + const nestedId = (msg.content as any)?.nestedSessionId; + if (typeof nestedId === 'string' && !nestedId.startsWith('pending-')) { + this.#nestedChildSessionIds.add(nestedId); + } else if (typeof nestedId === 'string' && nestedId.startsWith('pending-')) { + const targetAgent = (msg.content as any)?.targetAgent as string | undefined; + if (targetAgent) { + this.#pendingHandoffTargets.add(targetAgent); + } + } + } } - // Otherwise, it was handled by the MODEL case above, so we skip this result message - return acc; } + }; + for (const m of this.#messages) { + if ((m as any).entity === ChatMessageEntity.AGENT_SESSION) { + const sess = (m as any as AgentSessionMessage).agentSession; + collectNested(sess); + } + } + try { + const topLevelIds = this.#messages + .filter(m => (m as any).entity === ChatMessageEntity.AGENT_SESSION) + .map(m => (m as any as AgentSessionMessage).agentSession.sessionId); + logger.info('ChatView: agent sessions overview', { + topLevelSessionIds: topLevelIds, + nestedChildSessionIds: Array.from(this.#nestedChildSessionIds), + pendingHandoffTargets: Array.from(this.#pendingHandoffTargets), + }); + } catch {} - // Fallback for any unexpected message types (shouldn't happen) - acc.push(message); - return acc; - - // Define the type for the accumulator array more accurately - // Allow ToolResultMessage to potentially have an 'orphaned' flag - }, [] as Array); + // Combine tool calls and results using helper + const combinedMessages = combineMessages(messagesToRender) as CombinedMessage[]; + this.#combinedMessagesCache = combinedMessages; - // General loading state (before any model response or after tool result) - const showGeneralLoading = this.#state === State.LOADING && !isModelRunningTool; + // General loading state: show while processing unless we have a final model message + // or (for agent flows) no sessions are running anymore. + const showGeneralLoading = this.#state === State.LOADING && (anyAgentRunning || !lastIsFinal); // Find the last model message with an answer to use for the copy action let lastModelAnswer: string | null = null; @@ -1043,125 +694,13 @@ export class ChatView extends HTMLElement { ${welcomeMessage ? this.#renderMessage(welcomeMessage, 0) : Lit.nothing} ${this.#showOAuthLogin ? html` - - - ` : html` - -
- ${this.#imageInput ? html` -
- Image input - -
- ` : Lit.nothing} -
- -
- -
- ${BaseOrchestratorAgent.renderAgentTypeButtons(this.#selectedPromptType, this.#handlePromptButtonClickBound, true)} - -
- ${this.#renderModelSelector()} - -
-
-
- `} + + ` : this.#renderInputBar(true)} `, this.#shadow, {host: this}); @@ -1170,10 +709,12 @@ export class ChatView extends HTMLElement { Lit.render(html`
${this.#renderVersionBanner()} -
- ${combinedMessages?.map((message, combinedIndex) => this.#renderMessage(message, combinedIndex)) || Lit.nothing} + + ${Lit.Directives.repeat( + combinedMessages || [], + (m, i) => this.#messageKey(m, i), + (m, i) => this.#renderMessage(m, i) + )} ${showGeneralLoading ? html`
@@ -1197,102 +738,16 @@ export class ChatView extends HTMLElement { ` : Lit.nothing} - ${showActionsRow ? html` -
-
- - - - -
-
- ` : Lit.nothing} -
-
- ${this.#imageInput ? html` -
- Image input - -
- ` : Lit.nothing} -
- -
- -
- ${BaseOrchestratorAgent.renderAgentTypeButtons(this.#selectedPromptType, this.#handlePromptButtonClickBound)} -
- ${this.#renderModelSelector()} - -
-
-
-
+ ${showActionsRow ? renderGlobalActionsRow({ + textToCopy: lastModelAnswer || '', + onCopy: () => this.#copyToClipboard(lastModelAnswer || ''), + onThumbsUp: () => this.dispatchEvent(new CustomEvent('feedback', { bubbles: true, detail: { value: 'up' } })), + onThumbsDown: () => this.dispatchEvent(new CustomEvent('feedback', { bubbles: true, detail: { value: 'down' } })), + onRetry: () => this.dispatchEvent(new CustomEvent('retry', { bubbles: true })) + }) : Lit.nothing} + + ${this.#renderInputBar(false)} +
`, this.#shadow, {host: this}); } @@ -1314,7 +769,7 @@ export class ChatView extends HTMLElement { } // Use the YAML formatter for better readability - const yamlFormatted = this.#formatValueForDisplay(parsed); + const yamlFormatted = ToolDescriptionFormatter.formatValueForDisplay(parsed); return html`
             ${yamlFormatted}
@@ -1329,102 +784,58 @@ export class ChatView extends HTMLElement {
     }
   }
 
-  // Add helper to render model selector
-  #renderModelSelector() {
+  // Render model selector via dedicated component
+  #renderModelSelectorInline() {
     if (!this.#modelOptions || !this.#modelOptions.length || !this.#selectedModel || !this.#onModelChanged) {
       return '';
     }
-
-    // Check if we need searchable dropdown (20+ options)
-    const needsSearch = this.#modelOptions.length > 20;
-    
-    if (needsSearch) {
-      return this.#renderSearchableModelSelector();
-    } else {
-      return this.#renderSimpleModelSelector();
-    }
-  }
-
-  #renderSimpleModelSelector() {
     return html`
-      
- -
+ { + const value = (e.detail && (e.detail as any).value) as string | undefined; + if (!value) return; + if (this.#onModelChanged) { + this.#onModelChanged(value); + } + }} + @model-selector-focus=${() => { + if (this.#onModelSelectorFocus) { + this.#onModelSelectorFocus(); + } + }} + > `; } - #renderSearchableModelSelector() { + // Render the input bar (DRY across centered and expanded views) + #renderInputBar(centered: boolean): Lit.TemplateResult { return html` -
- - - ${this.#isModelDropdownOpen ? html` -
e.stopPropagation()}> - { - if (el) (el as HTMLInputElement).focus(); - })} - /> -
- ${this.#getFilteredModelOptions().map((option, index) => html` -
this.#selectModel(option.value)} - @mouseenter=${() => this.#highlightedOptionIndex = index} - > - ${option.label} -
- `)} - ${this.#getFilteredModelOptions().length === 0 ? html` -
No matching models found
- ` : ''} -
-
- ` : ''} -
+ this.#onImageInputClear && this.#onImageInputClear()} + @model-changed=${(e: Event) => { + const val = (e as CustomEvent).detail?.value as string | undefined; + if (val && this.#onModelChanged) this.#onModelChanged(val); + }} + @model-selector-focus=${() => this.#onModelSelectorFocus && this.#onModelSelectorFocus()} + > `; } - #handleModelChange(event: Event): void { - if (this.#isModelSelectorDisabled) { - return; - } - const selectElement = event.target as HTMLSelectElement; - const selectedValue = selectElement.value; - if (this.#onModelChanged) { - this.#onModelChanged(selectedValue); - } - } - - #handleModelSelectorFocus(): void { - if (this.#onModelSelectorFocus) { - this.#onModelSelectorFocus(); - } - } - // OAuth login handlers #handleOAuthLogin(): void { if (this.#onOAuthLogin) { @@ -1461,123 +872,7 @@ export class ChatView extends HTMLElement { })); } - // Helper methods for searchable model selector - #getSelectedModelLabel(): string { - const selectedOption = this.#modelOptions?.find(option => option.value === this.#selectedModel); - return selectedOption?.label || this.#selectedModel || 'Select Model'; - } - - #getFilteredModelOptions(): Array<{value: string, label: string}> { - if (!this.#modelOptions) return []; - if (!this.#modelSearchQuery) return this.#modelOptions; - - const query = this.#modelSearchQuery.toLowerCase(); - return this.#modelOptions.filter(option => - option.label.toLowerCase().includes(query) || - option.value.toLowerCase().includes(query) - ); - } - - #toggleModelDropdown(event: Event): void { - event.preventDefault(); - event.stopPropagation(); - - if (this.#isModelSelectorDisabled) return; - - this.#isModelDropdownOpen = !this.#isModelDropdownOpen; - - if (this.#isModelDropdownOpen) { - this.#modelSearchQuery = ''; - this.#highlightedOptionIndex = 0; - - // Calculate dropdown position - this.#calculateDropdownPosition(event.currentTarget as HTMLElement); - - // Add click outside handler with slight delay to avoid immediate trigger - setTimeout(() => { - document.addEventListener('click', this.#handleClickOutside.bind(this), { once: true }); - }, 100); - } - - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - } - - #calculateDropdownPosition(triggerElement: HTMLElement): void { - const rect = triggerElement.getBoundingClientRect(); - const viewportHeight = window.innerHeight; - const dropdownHeight = 300; // Max height from CSS - const spaceBelow = viewportHeight - rect.bottom; - const spaceAbove = rect.top; - - // If not enough space below and more space above, show dropdown above - if (spaceBelow < dropdownHeight && spaceAbove > spaceBelow) { - this.#dropdownPosition = 'above'; - } else { - this.#dropdownPosition = 'below'; - } - } - - #handleClickOutside(event: Event): void { - const target = event.target as Element; - // Check if click is within the model selector or dropdown - const modelSelector = target.closest('.model-selector.searchable'); - const modelDropdown = target.closest('.model-dropdown'); - - if (!modelSelector && !modelDropdown) { - this.#isModelDropdownOpen = false; - this.#dropdownPosition = 'below'; // Reset position - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - } - } - - #handleModelSearch(event: Event): void { - const input = event.target as HTMLInputElement; - this.#modelSearchQuery = input.value; - this.#highlightedOptionIndex = 0; // Reset highlight to first item - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - } - - 
#handleModelSearchKeydown(event: KeyboardEvent): void { - const filteredOptions = this.#getFilteredModelOptions(); - - switch (event.key) { - case 'ArrowDown': - event.preventDefault(); - this.#highlightedOptionIndex = Math.min(this.#highlightedOptionIndex + 1, filteredOptions.length - 1); - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - break; - - case 'ArrowUp': - event.preventDefault(); - this.#highlightedOptionIndex = Math.max(this.#highlightedOptionIndex - 1, 0); - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - break; - - case 'Enter': - event.preventDefault(); - if (filteredOptions[this.#highlightedOptionIndex]) { - this.#selectModel(filteredOptions[this.#highlightedOptionIndex].value); - } - break; - - case 'Escape': - event.preventDefault(); - this.#isModelDropdownOpen = false; - this.#dropdownPosition = 'below'; // Reset position - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - break; - } - } - - #selectModel(modelValue: string): void { - if (this.#onModelChanged) { - this.#onModelChanged(modelValue); - } - this.#isModelDropdownOpen = false; - this.#modelSearchQuery = ''; - this.#dropdownPosition = 'below'; // Reset position - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - } + // Model selector behaviors delegated to // Add this new method for copying text to clipboard #copyToClipboard(text: string): void { @@ -1646,379 +941,33 @@ export class ChatView extends HTMLElement { void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); } - // Method to render version banner + // Method to render version banner (delegates to component) #renderVersionBanner(): Lit.TemplateResult { - logger.info('Rendering version banner:', { - versionInfo: this.#versionInfo, - isUpdateAvailable: this.#versionInfo?.isUpdateAvailable, - isVersionBannerDismissed: this.#isVersionBannerDismissed, - messageCount: this.#messages.length - }); - - // Hide banner after first message or if dismissed - if (!this.#versionInfo || !this.#versionInfo.isUpdateAvailable || - this.#isVersionBannerDismissed || this.#messages.length > 1) { - logger.info('Not rendering version banner - conditions not met'); + if (!this.#versionInfo || !this.#versionInfo.isUpdateAvailable || this.#isVersionBannerDismissed || this.#messages.length > 1) { return html``; } - - logger.info('Rendering version banner for version:', this.#versionInfo.latestVersion); - - return html` -
-
- 🎉 - - New version ${this.#versionInfo.latestVersion} is available! - - - View Release - -
- -
- `; + return html``; } // Method to parse structured response with reasoning and markdown_report XML tags - #parseStructuredResponse(text: string): {reasoning: string, markdownReport: string} | null { - try { - // Look for the XML format with and tags - const reasoningMatch = text.match(REGEX_PATTERNS.REASONING_TAG); - const reportMatch = text.match(REGEX_PATTERNS.MARKDOWN_REPORT_TAG); - - if (reasoningMatch && reportMatch) { - const reasoning = reasoningMatch[1].trim(); - const markdownReport = reportMatch[1].trim(); - - // Validate extracted content - if (reasoning && markdownReport && markdownReport.length >= CONTENT_THRESHOLDS.MARKDOWN_REPORT_MIN_LENGTH) { - return { reasoning, markdownReport }; - } - } - } catch (error) { - logger.error('Failed to parse structured response:', error); - } - - return null; - } + // parseStructuredResponse moved to core/structured_response.ts // Render structured response with last-message-only auto-processing #renderStructuredResponse(structuredResponse: {reasoning: string, markdownReport: string}, combinedIndex?: number): Lit.TemplateResult { - logger.info('Starting renderStructuredResponse:', { - combinedIndex, - hasMessages: Boolean(this.#messages), - messagesLength: this.#messages?.length, - lastProcessedKey: this.#lastProcessedMessageKey, - reasoningPreview: structuredResponse.reasoning.slice(0, 50) + '...' - }); - - const messageKey = this.#getMessageStateKey(structuredResponse); - const isLastMessage = this.#isLastStructuredMessage(combinedIndex || 0); - - logger.info('Rendering structured response decision:', { - messageKey, - combinedIndex, - isLastMessage, - lastProcessedKey: this.#lastProcessedMessageKey, - shouldAutoProcess: isLastMessage && messageKey !== this.#lastProcessedMessageKey - }); - - // Auto-process only last message - if (isLastMessage && messageKey !== this.#lastProcessedMessageKey) { - const aiState = this.#getMessageAIAssistantState(messageKey); - if (aiState === 'not-attempted') { - // Set to pending immediately for loading state - logger.info('Setting state to pending and starting AI Assistant for LAST message key:', messageKey); - this.#setMessageAIAssistantState(messageKey, 'pending'); - this.#attemptAIAssistantOpen(structuredResponse.markdownReport, messageKey); - this.#lastProcessedMessageKey = messageKey; - } - } - - const aiState = this.#getMessageAIAssistantState(messageKey); - return this.#renderStructuredMessage(structuredResponse, messageKey, aiState, isLastMessage); - } - - // Unified render method for structured response messages - #renderStructuredMessage(structuredResponse: {reasoning: string, markdownReport: string}, messageKey: string, aiState: 'pending' | 'opened' | 'failed' | 'not-attempted', isLastMessage: boolean): Lit.TemplateResult { - logger.info('Rendering structured message:', { messageKey, aiState, isLastMessage }); - - return html` -
-
-
${renderMarkdown(structuredResponse.reasoning, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))}
- - ${aiState === 'pending' ? html` - -
- - - - - -
- ` : aiState === 'opened' ? html` - -
- -
- ` : html` - -
-
-

Full Research Report

-
-
- ${renderMarkdown(structuredResponse.markdownReport, this.#markdownRenderer, this.#openInAIAssistantViewer.bind(this))} -
-
-
- -
- `} -
-
- `; - } - - // Attempt to open AI Assistant for a specific message - async #attemptAIAssistantOpen(markdownContent: string, messageKey: string): Promise { - logger.info('ATTEMPTING AI ASSISTANT OPEN:', { - messageKey, - contentLength: markdownContent.length, - contentPreview: markdownContent.slice(0, 200) + '...' - }); - - try { - logger.info('Calling #openInAIAssistantViewer for key:', messageKey); - await this.#openInAIAssistantViewer(markdownContent); - - logger.info('AI Assistant opened successfully, setting state to opened for key:', messageKey); - this.#setMessageAIAssistantState(messageKey, 'opened'); - } catch (error) { - logger.warn('AI Assistant navigation failed for key:', { messageKey, error }); - this.#setMessageAIAssistantState(messageKey, 'failed'); - } - - // Trigger single re-render after state change - logger.info('Triggering re-render after AI Assistant state change for key:', messageKey); - void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); - } - - - // Method to open markdown content in AI Assistant viewer in the same tab - async #openInAIAssistantViewer(markdownContent: string): Promise { - // Get the primary page target to navigate the inspected page - const target = SDK.TargetManager.TargetManager.instance().primaryPageTarget(); - if (!target) { - throw new Error(ERROR_MESSAGES.NO_PRIMARY_TARGET); - } - - // Get the ResourceTreeModel to navigate the page - const resourceTreeModel = target.model(SDK.ResourceTreeModel.ResourceTreeModel); - if (!resourceTreeModel) { - throw new Error('No ResourceTreeModel found'); - } - - // Navigate to browser-operator://assistant - const url = 'browser-operator://assistant' as Platform.DevToolsPath.UrlString; - const navigationResult = await resourceTreeModel.navigate(url); - - if (navigationResult.errorText) { - throw new Error(`Navigation failed: ${navigationResult.errorText}`); - } - - // Wait for the page to load, then inject the markdown content - // Use event-based detection or timeout as fallback - const injectContent = async (): Promise => { - const runtimeModel = target.model(SDK.RuntimeModel.RuntimeModel); - if (!runtimeModel) { - logger.error('No RuntimeModel found'); - throw new Error('No RuntimeModel found'); - } - - // Escape the markdown content for JavaScript injection - const escapedContent = JSON.stringify(markdownContent); - - // JavaScript to inject - calls the global function we added to AI Assistant - const injectionScript = ` - (function() { - console.log('DevTools injecting markdown content...', 'Content length:', ${JSON.stringify(markdownContent.length)}); - console.log('Available global functions:', Object.keys(window).filter(k => k.includes('setDevTools') || k.includes('aiAssistant'))); - - if (typeof window.setDevToolsMarkdown === 'function') { - try { - window.setDevToolsMarkdown(${escapedContent}); - console.log('Successfully called setDevToolsMarkdown function'); - return 'SUCCESS: Content injected via setDevToolsMarkdown function'; - } catch (error) { - console.error('Error calling setDevToolsMarkdown:', error); - return 'ERROR: Failed to call setDevToolsMarkdown: ' + error.message; - } - } else { - console.warn('setDevToolsMarkdown function not found, using fallback methods'); - console.log('Available window properties:', Object.keys(window).filter(k => k.includes('DevTools') || k.includes('assistant') || k.includes('ai'))); - - // Store in sessionStorage - sessionStorage.setItem('devtools-markdown-content', ${escapedContent}); - console.log('Stored content in 
sessionStorage'); - - // Try to trigger app reload - if (window.aiAssistantApp && typeof window.aiAssistantApp.loadFromSessionStorage === 'function') { - try { - window.aiAssistantApp.loadFromSessionStorage(); - console.log('Successfully called aiAssistantApp.loadFromSessionStorage'); - return 'SUCCESS: Content stored and app reloaded'; - } catch (error) { - console.error('Error calling loadFromSessionStorage:', error); - return 'ERROR: Content stored but failed to reload app: ' + error.message; - } - } else { - console.log('aiAssistantApp not available or loadFromSessionStorage not a function'); - console.log('aiAssistantApp type:', typeof window.aiAssistantApp); - if (window.aiAssistantApp) { - console.log('aiAssistantApp methods:', Object.getOwnPropertyNames(Object.getPrototypeOf(window.aiAssistantApp))); - } - - // Try to force a page reload as last resort - try { - location.reload(); - return 'SUCCESS: Content stored, forcing page reload'; - } catch (error) { - return 'SUCCESS: Content stored in sessionStorage, but manual refresh may be needed'; - } - } - } - })(); - `; - - try { - // Get the default execution context and evaluate the script - const executionContext = runtimeModel.defaultExecutionContext(); - if (!executionContext) { - logger.error('No execution context available'); - throw new Error('No execution context available'); - } - - const result = await executionContext.evaluate({ - expression: injectionScript, - objectGroup: 'console', - includeCommandLineAPI: false, - silent: false, - returnByValue: true, - generatePreview: false - }, false, false); - - if ('error' in result) { - logger.error('Evaluation failed:', result.error); - throw new Error(`Evaluation failed: ${result.error}`); - } - - if (result.object.value) { - logger.info('Content injection result:', result.object.value); - // Check if injection was successful - if (typeof result.object.value === 'string' && result.object.value.startsWith('ERROR:')) { - throw new Error(result.object.value); - } - } else if (result.exceptionDetails) { - logger.error('Content injection failed:', result.exceptionDetails.text); - throw new Error(`Content injection failed: ${result.exceptionDetails.text || 'Unknown error'}`); - } - } catch (error) { - logger.error('Failed to inject content:', error); - throw error; // Re-throw to propagate to caller - } - }; - - // Try to detect when AI Assistant is ready - let retries = 0; - const maxRetries = TIMING_CONSTANTS.AI_ASSISTANT_MAX_RETRIES; - - // Return a promise that resolves/rejects based on injection success - return new Promise((resolve, reject) => { - const attemptInjection = () => { - setTimeout(async () => { - try { - const runtimeModel = target.model(SDK.RuntimeModel.RuntimeModel); - if (!runtimeModel) { - reject(new Error('No RuntimeModel found')); - return; - } - - const executionContext = runtimeModel.defaultExecutionContext(); - if (!executionContext) { - reject(new Error('No execution context available')); - return; - } - - // Check if AI Assistant is ready - const checkResult = await executionContext.evaluate({ - expression: 'typeof window.setDevToolsMarkdown === "function" || (window.aiAssistantApp && typeof window.aiAssistantApp.loadFromSessionStorage === "function")', - objectGroup: 'console', - includeCommandLineAPI: false, - silent: true, - returnByValue: true, - generatePreview: false - }, false, false); - - if (!('error' in checkResult) && checkResult.object.value === true) { - // AI Assistant is ready - await injectContent(); - resolve(); - } else if (retries < 
maxRetries) { - // Retry with exponential backoff - retries++; - attemptInjection(); - } else { - logger.error('AI Assistant did not load in time'); - // Try to inject anyway as a last resort - try { - await injectContent(); - resolve(); - } catch (error) { - reject(error); - } - } - } catch (error) { - reject(error); - } - }, TIMING_CONSTANTS.AI_ASSISTANT_RETRY_DELAY * Math.pow(2, retries)); - }; - - attemptInjection(); - }); + const { aiState, isLastMessage } = this.#structuredController.computeStateAndMaybeOpen( + structuredResponse, + combinedIndex || 0, + this.#combinedMessagesCache as any + ); + return renderStructuredResponseUI(structuredResponse, { aiState, isLastMessage }, this.#markdownRenderer); } + // Presentational structured response handled by StructuredResponseRender + // Auto-open behavior delegated to StructuredResponseController /** * Toggle between simplified and enhanced agent view */ @@ -2027,565 +976,37 @@ export class ChatView extends HTMLElement { void ComponentHelpers.ScheduledRender.scheduleRender(this, this.#boundRender); } - /** - * Toggle tool details visibility - */ - #toggleToolDetails(event: Event): void { - const clickTarget = event.target as HTMLElement; - const button = clickTarget.closest('.tool-toggle') as HTMLButtonElement; - if (!button) return; - - const container = button.closest('.agent-execution-timeline'); - if (!container) return; - - const summary = container.querySelector('.tool-summary') as HTMLElement; - const details = container.querySelector('.tool-details') as HTMLElement; - const toggleIcon = button.querySelector('.toggle-icon') as HTMLElement; - - if (!details || !toggleIcon) return; - - if (details.style.display === 'none') { - // Show details - summary.style.display = 'none'; - details.style.display = 'flex'; - toggleIcon.textContent = '▲'; - } else { - // Hide details - summary.style.display = 'flex'; - details.style.display = 'none'; - toggleIcon.textContent = '▼'; - } - } - - /** - * Toggle tool result visibility - */ - #toggleToolResult(event: Event): void { - const clickTarget = event.target as HTMLElement; - const button = clickTarget.closest('.tool-toggle') as HTMLButtonElement; - if (!button) return; - - const container = button.closest('.agent-execution-timeline'); - if (!container) return; - - const result = container.querySelector('.timeline-items') as HTMLElement; - const toggleIcon = button.querySelector('.toggle-icon') as HTMLElement; - - if (!result || !toggleIcon) return; - - if (result.style.display === 'none') { - // Show result - result.style.display = 'block'; - toggleIcon.textContent = '▲'; - } else { - // Hide result - result.style.display = 'none'; - toggleIcon.textContent = '▼'; + // Stable key for message list rendering to avoid node reuse glitches + #messageKey(m: CombinedMessage, index: number): string { + // Agent sessions keyed by sessionId to ensure distinct component instances render + if ((m as any).entity === ChatMessageEntity.AGENT_SESSION) { + const sessionId = (m as any).agentSession?.sessionId; + return sessionId ? 
`agent:${sessionId}` : `agent:index:${index}`; } - } - - /** - * Toggle agent session details visibility - */ - #toggleAgentSessionDetails(event: Event): void { - const clickTarget = event.target as HTMLElement; - const button = clickTarget.closest('.tool-toggle') as HTMLButtonElement; - if (!button) return; - - const container = button.closest('.agent-session-container'); - if (!container) return; - - const timelineItems = container.querySelector('.timeline-items') as HTMLElement; - const nestedSessions = container.querySelector('.nested-sessions') as HTMLElement; - const toggleIcon = button.querySelector('.toggle-icon') as HTMLElement; - - if (!toggleIcon) return; - - if (timelineItems.style.display === 'none') { - // Show details - timelineItems.style.display = 'block'; - if (nestedSessions) { - nestedSessions.style.display = 'block'; - } - toggleIcon.textContent = '▲'; - } else { - // Hide details - timelineItems.style.display = 'none'; - if (nestedSessions) { - nestedSessions.style.display = 'none'; + // Model tool calls keyed by toolCallId if present, final answers by index + if ((m as any).entity === ChatMessageEntity.MODEL) { + const action = (m as any).action; + if (action === 'tool') { + const tId = (m as any).toolCallId; + return tId ? `model-tool:${tId}` : `model-tool:${(m as any).toolName || 'unknown'}:${index}`; } - toggleIcon.textContent = '▼'; + return `model:${action}:${index}`; } - } - - /** - * Render task completion with agent sessions using seamless timeline design - */ - #renderTaskCompletion(agentSession: AgentSession): Lit.TemplateResult { - if (!agentSession) { - return html``; + // Tool results keyed by toolCallId/name when orphaned + if ((m as any).entity === ChatMessageEntity.TOOL_RESULT) { + const tId = (m as any).toolCallId; + return tId ? `tool-result:${tId}` : `tool-result:${(m as any).toolName || 'unknown'}:${index}`; } - - return html` -
- ${this.#renderAgentSessionTimeline(agentSession)} -
- `; + // User and others + return `msg:${(m as any).entity}:${index}`; } - /** - * Render agent session with timeline design - */ - #renderAgentSessionTimeline(session: AgentSession, depth: number = 0, visitedSessions: Set = new Set()): Lit.TemplateResult { - // Prevent infinite recursion with depth limit and visited tracking - if (depth > 10 || visitedSessions.has(session.sessionId)) { - return html`
Maximum nesting depth reached
`; - } - - visitedSessions.add(session.sessionId); - const uiConfig = getAgentUIConfig(session.agentName, session.config); - const toolMessages = session.messages.filter(msg => msg.type === 'tool_call'); - const toolResults = session.messages.filter(msg => msg.type === 'tool_result'); - - return html` - ${session.agentReasoning ? html`
${session.agentReasoning}
` : ''} -
-
-
-
${uiConfig.displayName}
-
- -
+ // (Removed unused #toggleToolDetails; replaced by stateful rendering patterns) - - - -
- `; - } + // Removed direct DOM toggling; use state-driven rendering instead - /** - * Render individual timeline item for tool execution - */ - #renderTimelineItem(toolMessage: AgentMessage, toolResult: AgentMessage | undefined): Lit.TemplateResult { - const toolContent = toolMessage.content as AgentToolCallMessage; - const resultContent = toolResult?.content as AgentToolResultMessage; - const toolName = toolContent.toolName; - const toolArgs = toolContent.toolArgs || {}; - - // Determine status based on tool result - let status = 'running'; - if (toolResult && resultContent) { - status = resultContent.success ? 'completed' : 'error'; - } - - const icon = this.#getToolIcon(toolName); - const toolNameDisplay = toolName.replace(/_/g, ' '); - const descriptionData = this.#getToolDescription(toolName, toolArgs); - const resultText = resultContent?.result ? JSON.stringify(resultContent.result, null, 2) : ''; - - if (descriptionData.isMultiLine) { - // Multi-line format - just the timeline item, no wrapper - return html` -
-
-
- - ${icon} ${toolNameDisplay}: - ${(descriptionData.content as Array<{key: string, value: string}>).map(arg => html` -
- ${arg.key}: - ${arg.value} -
- `)} -
- -
-
- `; - } else { - // Single-line format - return html` -
-
-
- - ${icon} ${descriptionData.content} -
- -
-
- `; - } - } - - /** - * Generate task title from agent session - */ - #generateTaskTitle(agentSession: AgentSession): string { - if (!agentSession) { - return 'AI Task Execution'; - } - - const totalTools = this.#countTotalTools(agentSession); - return `Completed task using ${totalTools} tools`; - } - - /** - * Count total tools used across all sessions - */ - #countTotalTools(session: AgentSession): number { - const sessionTools = session.messages.filter(msg => msg.type === 'tool_call').length; - const nestedTools = session.nestedSessions.reduce((total, nested) => { - return total + this.#countTotalTools(nested); - }, 0); - return sessionTools + nestedTools; - } - - /** - * Render simplified content (current tool list style) - */ - #renderSimplifiedContent(agentSession: AgentSession): Lit.TemplateResult { - if (!agentSession) { - return html``; - } - - const allToolCalls = this.#flattenToolCalls(agentSession); - - return html` -
- ${allToolCalls.map(tool => this.#renderSimpleToolItem(tool))} - -
- `; - } - - /** - * Flatten tool calls from all sessions - */ - #flattenToolCalls(session: AgentSession): Array<{toolName: string, args: any}> { - const sessionToolCalls = session.messages - .filter(msg => msg.type === 'tool_call') - .map(msg => ({ - toolName: (msg.content as any).toolName, - args: (msg.content as any).toolArgs - })); - - const nestedToolCalls = session.nestedSessions.flatMap(nested => - this.#flattenToolCalls(nested) - ); - - return [...sessionToolCalls, ...nestedToolCalls]; - } - - /** - * Render simple tool item (current style) - */ - #renderSimpleToolItem(tool: {toolName: string, args: any}): Lit.TemplateResult { - const icon = this.#getToolIcon(tool.toolName); - const description = this.#getToolDescription(tool.toolName, tool.args); - - return html` -
-
${icon}
-
${description}
-
- `; - } - - /** - * Get tool icon based on tool name - */ - #getToolIcon(toolName: string): string { - if (toolName.includes('search')) return '🔍'; - if (toolName.includes('browse') || toolName.includes('navigate')) return '🌐'; - if (toolName.includes('create') || toolName.includes('write')) return '📝'; - if (toolName.includes('extract') || toolName.includes('analyze')) return '🔬'; - if (toolName.includes('click') || toolName.includes('action')) return '👆'; - return '🔧'; - } - - /** - * Format value for display - convert objects to YAML-like format - */ - #formatValueForDisplay(value: any, depth: number = 0): string { - // Prevent infinite recursion - if (depth > 10) { - return '[Max depth reached]'; - } - - if (value === null || value === undefined) { - return String(value); - } - - if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { - return String(value); - } - - if (Array.isArray(value)) { - if (value.length === 0) return '[]'; - if (value.length === 1) return this.#formatValueForDisplay(value[0], depth + 1); - return value.map(item => `- ${this.#formatValueForDisplay(item, depth + 1)}`).join('\n'); - } - - if (typeof value === 'object') { - // Handle circular references by using try-catch - try { - const entries = Object.entries(value); - if (entries.length === 0) return '{}'; - if (entries.length === 1) { - const [k, v] = entries[0]; - return `${k}: ${this.#formatValueForDisplay(v, depth + 1)}`; - } - return entries.map(([k, v]) => `${k}: ${this.#formatValueForDisplay(v, depth + 1)}`).join('\n'); - } catch (error) { - return '[Circular reference detected]'; - } - } - - return String(value); - } - - /** - * Get tool description from name and args - */ - #getToolDescription(toolName: string, args: any): { isMultiLine: boolean, content: string | Array<{key: string, value: string}>, action: string } { - const action = toolName.replace(/_/g, ' ').toLowerCase(); - - // Filter out common metadata fields - const filteredArgs = Object.fromEntries( - Object.entries(args).filter(([key]) => - key !== 'reasoning' && key !== 'toolCallId' && key !== 'timestamp' - ) - ); - - const argKeys = Object.keys(filteredArgs); - - if (argKeys.length === 0) { - return { isMultiLine: false, content: action, action }; - } - - if (argKeys.length === 1) { - // Single argument - inline format - const [key, value] = Object.entries(filteredArgs)[0]; - const formattedValue = this.#formatValueForDisplay(value); - const needsNewline = formattedValue.length > 80; - return { isMultiLine: false, content: `${action}:${needsNewline ? '\n' : ''}${formattedValue}`, action }; - } - - // Multiple arguments - return structured data for multi-line rendering - // Sort to put 'query' first if it exists - const sortedKeys = argKeys.sort((a, b) => { - if (a === 'query') return -1; - if (b === 'query') return 1; - return 0; - }); - - const argsArray = sortedKeys.map(key => ({ - key, - value: this.#formatValueForDisplay(filteredArgs[key]) - })); - - return { isMultiLine: true, content: argsArray, action }; - } - - /** - * Render enhanced content (agent-centric view) - */ - #renderEnhancedContent(agentSession: AgentSession): Lit.TemplateResult { - if (!agentSession) { - return html``; - } - - return html` -
- ${this.#renderAgentSession(agentSession, 0)} - -
- `; - } - - /** - * Render agent session recursively - */ - #renderAgentSession(session: AgentSession, depth: number): Lit.TemplateResult { - const uiConfig = getAgentUIConfig(session.agentName, session.config); - - return html` -
- ${this.#renderAgentHeader(session, uiConfig)} -
- ${session.reasoning ? this.#renderReasoningBubble(session.reasoning) : Lit.nothing} -
- ${session.messages.map(msg => this.#renderAgentMessage(msg))} -
- ${session.nestedSessions.map((nested, index) => html` - ${index === 0 ? this.#renderHandoffIndicator(session.agentName, nested.agentName) : Lit.nothing} - ${this.#renderAgentSession(nested, depth + 1)} - `)} -
-
- `; - } - - /** - * Render agent header - */ - #renderAgentHeader(session: AgentSession, uiConfig: ReturnType): Lit.TemplateResult { - return html` -
-
- ${uiConfig.avatar} -
-
-
${uiConfig.displayName}
-
- ${this.#getStatusText(session)} - ${this.#getStatusDescription(session)} -
-
-
- `; - } - - /** - * Get status text from session - */ - #getStatusText(session: AgentSession): string { - switch (session.status) { - case 'completed': return 'Completed'; - case 'running': return 'Running'; - case 'error': return 'Error'; - default: return 'Unknown'; - } - } - - /** - * Get status description from session - */ - #getStatusDescription(session: AgentSession): string { - const toolCount = session.messages.filter(msg => msg.type === 'tool_call').length; - if (toolCount === 0) { - return 'No tools executed'; - } - return `Executed ${toolCount} tool${toolCount === 1 ? '' : 's'}`; - } - - /** - * Render reasoning bubble - */ - #renderReasoningBubble(reasoning: string): Lit.TemplateResult { - return html` -
- ${reasoning} -
- `; - } - - /** - * Render handoff indicator - */ - #renderHandoffIndicator(fromAgent: string, toAgent: string): Lit.TemplateResult { - return html` -
-
-
→ Handoff to ${toAgent}
-
- `; - } - - /** - * Render agent message - */ - #renderAgentMessage(message: AgentMessage): Lit.TemplateResult { - switch (message.type) { - case 'reasoning': - return html`
💭 ${(message.content as any).text}
`; - case 'tool_call': - return this.#renderToolCall(message); - case 'tool_result': - return this.#renderToolResult(message); - case 'handoff': - return html``; // Don't render handoff messages as they're handled by the handoff indicator - case 'final_answer': - return html`
🎯 ${(message.content as any).answer}
`; - default: - return html``; - } - } - - /** - * Render tool call - */ - #renderToolCall(message: AgentMessage): Lit.TemplateResult { - const content = message.content as any; - return html` -
-
-
${content.toolName}
-
✓ Success
-
-
${this.#getToolDescription(content.toolName, content.toolArgs)}
-
${JSON.stringify(content.toolArgs, null, 2)}
-
- `; - } - - /** - * Render tool result - */ - #renderToolResult(message: AgentMessage): Lit.TemplateResult { - const content = message.content as any; - const statusClass = content.success ? 'success' : 'error'; - const statusIcon = content.success ? '✓' : '❌'; - - return html` -
-
- ${statusIcon} ${content.toolName} result -
- ${content.result ? html` -
- ${typeof content.result === 'string' ? content.result : JSON.stringify(content.result, null, 2)} -
- ` : Lit.nothing} - ${content.error ? html`
${content.error}
` : Lit.nothing} -
- `; - } + // Agent timeline and enhanced views are handled by LiveAgentSessionComponent. + // The legacy, unused render helpers have been removed from ChatView to reduce duplication. } @@ -2594,3 +1015,9 @@ declare global { 'devtools-chat-view': ChatView; } } + +// Local type alias for combined messages to improve readability +type CombinedMessage = + | ChatMessage + | (ModelChatMessage & { resultText?: string; isError?: boolean; resultError?: string; combined?: boolean }) + | (ToolResultMessage & { orphaned?: boolean }); diff --git a/front_end/panels/ai_chat/ui/LiveAgentSessionComponent.ts b/front_end/panels/ai_chat/ui/LiveAgentSessionComponent.ts new file mode 100644 index 00000000000..92b5ecc1d5e --- /dev/null +++ b/front_end/panels/ai_chat/ui/LiveAgentSessionComponent.ts @@ -0,0 +1,523 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../ui/lit/lit.js'; +import { createLogger } from '../core/Logger.js'; +import type { AgentSession, AgentMessage } from '../agent_framework/AgentSessionTypes.js'; +import { getAgentUIConfig } from '../agent_framework/AgentSessionTypes.js'; +import { ToolCallComponent } from './ToolCallComponent.js'; +import { AgentSessionHeaderComponent } from './AgentSessionHeaderComponent.js'; +import { ToolDescriptionFormatter } from './ToolDescriptionFormatter.js'; + +const {Decorators} = Lit; +const {customElement} = Decorators; + +@customElement('live-agent-session') +export class LiveAgentSessionComponent extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`live-agent-session`; + private readonly shadow = this.attachShadow({mode: 'open'}); + + private _session: AgentSession | null = null; + private _variant: 'full'|'nested' = 'full'; + private headerComponent: AgentSessionHeaderComponent | null = null; + private toolComponents = new Map(); + private childComponents = new Map(); + private isExpanded = true; + private suppressInlineChildIds: Set = new Set(); + readonly #log = createLogger('LiveAgentSession'); + + connectedCallback(): void { + this.render(); + } + + // Allow declarative property binding from templates + set session(session: AgentSession) { this.setSession(session); } + get session(): AgentSession | null { return this._session; } + + // Control whether to render with header (full) or compact nested variant + setVariant(variant: 'full'|'nested'): void { + this._variant = variant; + if (variant === 'nested') { + this.isExpanded = true; // nested timelines are always expanded + } + this.render(); + } + + setSuppressInlineChildIds(ids: Set): void { + this.suppressInlineChildIds = ids; + try { this.#log.info('setSuppressInlineChildIds', { ids: Array.from(ids) }); } catch {} + this.render(); + } + + setSession(session: AgentSession): void { + this._session = session; + try { + this.#log.info('setSession', { + sessionId: session.sessionId, + agentName: session.agentName, + status: session.status, + nestedCount: session.nestedSessions?.length || 0, + messageCount: session.messages?.length || 0, + }); + } catch {} + + // Update header component + if (!this.headerComponent) { + this.headerComponent = new AgentSessionHeaderComponent(); + this.headerComponent.addEventListener('toggle-expanded', (e: Event) => { + const customEvent = e as CustomEvent; + this.isExpanded = customEvent.detail.isExpanded; + this.render(); + }); + } + + this.headerComponent.setSession(session); + this.render(); + } 
+
+  addToolCall(toolCall: AgentMessage): void {
+    if (!this._session) return;
+
+    // Store the tool call (no longer using separate components)
+    this.toolComponents.set(toolCall.id, null as any);
+
+    // Re-render to show the updated timeline
+    this.render();
+  }
+
+  updateToolResult(toolResult: AgentMessage): void {
+    if (!this._session) return;
+
+    // Re-render to show the updated status
+    this.render();
+  }
+
+  addChildSession(sessionId: string, childComponent: LiveAgentSessionComponent): void {
+    this.childComponents.set(sessionId, childComponent);
+
+    // Re-render to show nested sessions
+    this.render();
+  }
+
+  private render(): void {
+    if (!this._session) return;
+
+    // Get agent UI configuration for proper display name
+    const uiConfig = getAgentUIConfig(this._session.agentName, this._session.config);
+
+    // Generate timeline items HTML
+    const timelineItemsHtml = this.generateTimelineItemsHtml();
+
+    // Determine if this is a single tool execution
+    const toolMessages = this._session.messages.filter(msg => msg.type === 'tool_call');
+    const isSingleTool = toolMessages.length === 1;
+
+    const timelineId = `timeline-${this._session.sessionId}`;
+
+    const isFull = this._variant === 'full';
+    const reasoningHtml = isFull && this._session.agentReasoning ? `
${this._session.agentReasoning}
` : ''; + const timelineDisplay = isFull ? (this.isExpanded ? 'block' : 'none') : 'block'; + + this.shadow.innerHTML = ` + +
+ ${reasoningHtml} +
+ ${isFull ? ` +
+
+
${uiConfig.displayName}
+
+ +
` : ''} +
+ ${timelineItemsHtml} +
+
+
+
+    `;
+
+    // Add event listener for the toggle button (full variant only)
+    const toggleButton = isFull ? this.shadow.querySelector('.tool-toggle') : null;
+    if (toggleButton) {
+      toggleButton.addEventListener('click', () => {
+        this.isExpanded = !this.isExpanded;
+        this.render();
+      });
+    }
+
+    // No expand/collapse listeners for reasoning (simplified)
+
+    // Render nested sessions as real elements with live interactivity
+    const nestedContainer = this.shadow.querySelector('.nested-sessions');
+    const nestedSessions = this._session?.nestedSessions || [];
+    if (nestedSessions.length) {
+      // First, try to inline children at handoff anchors
+      const anchors = Array.from(this.shadow.querySelectorAll('.handoff-anchor'));
+      const inlined = new Set();
+      for (const anchor of anchors) {
+        const nid = anchor.getAttribute('data-nested-id');
+        if (!nid) continue;
+        const nested = nestedSessions.find(s => s.sessionId === nid);
+        if (!nested) continue;
+        const nestedEl = new LiveAgentSessionComponent();
+        try { this.#log.info('inlining nested session at anchor', { childSessionId: nested.sessionId, agentName: nested.agentName }); } catch {}
+        nestedEl.setVariant('nested');
+        nestedEl.setSession(nested);
+        nestedEl.setSuppressInlineChildIds(this.suppressInlineChildIds);
+        anchor.replaceWith(nestedEl);
+        inlined.add(nid);
+      }
+      // Fallback: append any remaining nested sessions (without anchors) at the bottom container
+      if (nestedContainer) {
+        (nestedContainer as HTMLElement).innerHTML = '';
+        for (const nested of nestedSessions) {
+          if (inlined.has(nested.sessionId)) continue;
+          // Always show fallback nested sessions; ChatView suppresses top-level duplicates
+          const nestedEl = new LiveAgentSessionComponent();
+          try { this.#log.info('appending nested session (fallback)', { childSessionId: nested.sessionId, agentName: nested.agentName }); } catch {}
+          nestedEl.setVariant('nested');
+          nestedEl.setSession(nested);
+          nestedEl.setSuppressInlineChildIds(this.suppressInlineChildIds);
+          nestedContainer.appendChild(nestedEl);
+        }
+      }
+    }
+  }
+
+  private generateTimelineItemsHtml(): string {
+    if (!this.session) return '';
+
+    const toolMessages = this._session?.messages.filter(msg => msg.type === 'tool_call') || [];
+    const toolResults = this._session?.messages.filter(msg => msg.type === 'tool_result') || [];
+
+    let html = '';
+
+    // Add agent query if present (matching ChatView)
+    if (this._session && this._session.agentQuery) {
+      html += `
+
+
+ ${this.session.agentQuery} +
+
+
+ `; + } + + // Build result map for quick lookup + const resultMap = new Map(); + for (const r of toolResults) { + const rc = (r.content as any); + if (rc?.toolCallId) resultMap.set(rc.toolCallId, rc); + } + + // Walk messages in order and render tool calls and inline handoff anchors + for (const m of (this._session?.messages || [])) { + if (m.type === 'tool_call') { + const toolContent = m.content as any; + const toolName = toolContent.toolName; + const toolArgs = toolContent.toolArgs || {}; + const reasonFromArgs = toolArgs?.reasoning ?? toolArgs?.reason ?? toolArgs?.why; + const toolReasoning: string | undefined = (toolContent.reasoning ?? (reasonFromArgs !== undefined ? String(reasonFromArgs) : undefined)); + const toolId: string = toolContent.toolCallId || m.id; + const resultContent = resultMap.get(toolContent.toolCallId); + const status = resultContent ? (resultContent.success ? 'completed' : 'error') : 'running'; + const statusText = status === 'running' ? 'Running' : (status === 'completed' ? 'Success' : 'Error'); + const icon = ToolDescriptionFormatter.getToolIcon(toolName); + const toolNameDisplay = ToolDescriptionFormatter.formatToolName(toolName); + const aria = toolReasoning ? `${toolReasoning} — ${toolNameDisplay} — ${statusText}` : `${toolNameDisplay} — ${statusText}`; + html += ` +
+
+
+ ${toolReasoning ? `${toolReasoning}` : ``} + ${icon} ${toolNameDisplay} +
+ +
+
+ `; + } else if (m.type === 'handoff') { + const c = (m.content as any) || {}; + const nestedId = c.nestedSessionId as string | undefined; + if (nestedId && !nestedId.startsWith('pending-')) { + html += ` +
+ `; + } + } + } + + return html; + } + // No additional helpers needed for the simplified view +} diff --git a/front_end/panels/ai_chat/ui/ToolCallComponent.ts b/front_end/panels/ai_chat/ui/ToolCallComponent.ts new file mode 100644 index 00000000000..f4cfa25f54f --- /dev/null +++ b/front_end/panels/ai_chat/ui/ToolCallComponent.ts @@ -0,0 +1,246 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../ui/lit/lit.js'; +import type { AgentMessage } from '../agent_framework/AgentSessionTypes.js'; +import { ToolDescriptionFormatter } from './ToolDescriptionFormatter.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators; + +export type ToolStatus = 'running' | 'completed' | 'error'; + +@customElement('tool-call') +export class ToolCallComponent extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`tool-call`; + private readonly shadow = this.attachShadow({mode: 'open'}); + + private toolCall: AgentMessage | null = null; + private status: ToolStatus = 'running'; + private isExpanded = false; + + connectedCallback(): void { + this.render(); + } + + setToolCall(toolCall: AgentMessage): void { + this.toolCall = toolCall; + this.status = 'running'; + this.render(); + } + + updateStatus(status: ToolStatus): void { + this.status = status; + this.render(); + } + + private toggleExpanded(): void { + this.isExpanded = !this.isExpanded; + this.render(); + } + + private render(): void { + if (!this.toolCall) return; + + const content = this.toolCall.content as any; + const toolName = content.toolName || 'unknown_tool'; + const toolArgs = content.toolArgs || content.arguments || {}; + + const icon = ToolDescriptionFormatter.getToolIcon(toolName); + const description = ToolDescriptionFormatter.getToolDescription(toolName, toolArgs); + const statusIcon = this.getStatusIcon(); + const statusClass = this.getStatusClass(); + + Lit.render(html` + + +
+
this.toggleExpanded()}> +
+ ${icon} ${ToolDescriptionFormatter.formatToolName(toolName)} +
+
+ ${statusIcon} ${this.getStatusText()} + +
+
+ +
+ ${description.isMultiLine ? + html`${(description.content as Array<{key: string; value: string}>).map(arg => html` +
+ ${arg.key}: + ${arg.value} +
+ `)}` : + html`${description.content}` + } +
+ + ${this.isExpanded ? html` +
+ ${JSON.stringify(ToolDescriptionFormatter.filterMetadataFields(toolArgs), null, 2)} +
+ ` : Lit.nothing} +
+ `, this.shadow); + } + + private getStatusIcon(): string { + switch (this.status) { + case 'running': return '⏳'; + case 'completed': return '✓'; + case 'error': return '❌'; + default: return '●'; + } + } + + private getStatusClass(): string { + return this.status; + } + + private getStatusText(): string { + switch (this.status) { + case 'running': return 'Running'; + case 'completed': return 'Success'; + case 'error': return 'Error'; + default: return 'Unknown'; + } + } +} \ No newline at end of file diff --git a/front_end/panels/ai_chat/ui/ToolDescriptionFormatter.ts b/front_end/panels/ai_chat/ui/ToolDescriptionFormatter.ts new file mode 100644 index 00000000000..a0630bb352b --- /dev/null +++ b/front_end/panels/ai_chat/ui/ToolDescriptionFormatter.ts @@ -0,0 +1,140 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/** + * Interface for tool description formatting result + */ +export interface ToolDescriptionData { + isMultiLine: boolean; + content: string | Array<{key: string; value: string}>; + action: string; +} + +/** + * Utility class for formatting tool descriptions and icons + * Extracted from ChatView to be reusable across components + */ +export class ToolDescriptionFormatter { + /** + * Get tool icon based on tool name + */ + static getToolIcon(toolName: string): string { + if (toolName.includes('search')) return '🔍'; + if (toolName.includes('browse') || toolName.includes('navigate')) return '🌐'; + if (toolName.includes('create') || toolName.includes('write')) return '📝'; + if (toolName.includes('extract') || toolName.includes('analyze')) return '🔬'; + if (toolName.includes('click') || toolName.includes('action')) return '👆'; + if (toolName.includes('screenshot')) return '📸'; + if (toolName.includes('accessibility') || toolName.includes('tree')) return '🌳'; + if (toolName.includes('thinking') || toolName.includes('sequential')) return '🧠'; + if (toolName.includes('fetch') || toolName.includes('download')) return '📥'; + if (toolName.includes('scroll')) return '📜'; + if (toolName.includes('type') || toolName.includes('input')) return '⌨️'; + return '🔧'; + } + + /** + * Format value for display - convert objects to YAML-like format + */ + static formatValueForDisplay(value: any, depth: number = 0): string { + // Prevent infinite recursion + if (depth > 10) { + return '[Max depth reached]'; + } + + if (value === null || value === undefined) { + return String(value); + } + + if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { + return String(value); + } + + if (Array.isArray(value)) { + if (value.length === 0) return '[]'; + if (value.length === 1) return this.formatValueForDisplay(value[0], depth + 1); + return value.map(item => `- ${this.formatValueForDisplay(item, depth + 1)}`).join('\n'); + } + + if (typeof value === 'object') { + const keys = Object.keys(value); + if (keys.length === 0) return '{}'; + + return keys.map(key => { + const childValue = this.formatValueForDisplay(value[key], depth + 1); + return `${key}: ${childValue}`; + }).join('\n'); + } + + return String(value); + } + + /** + * Get tool description from name and args + */ + static getToolDescription(toolName: string, args: any): ToolDescriptionData { + const action = toolName.replace(/_/g, ' ').toLowerCase(); + + // Filter out common metadata fields + const filteredArgs = Object.fromEntries( + Object.entries(args).filter(([key]) => + key !== 
'reasoning' && key !== 'toolCallId' && key !== 'timestamp' + ) + ); + + const argKeys = Object.keys(filteredArgs); + + if (argKeys.length === 0) { + return { isMultiLine: false, content: action, action }; + } + + if (argKeys.length === 1) { + // Single argument - inline format + const [key, value] = Object.entries(filteredArgs)[0]; + const formattedValue = this.formatValueForDisplay(value); + const needsNewline = formattedValue.length > 80; + return { isMultiLine: false, content: `${action}:${needsNewline ? '\n' : ' '}${formattedValue}`, action }; + } + + // Multiple arguments - return structured data for multi-line rendering + // Sort to put 'query' first if it exists + const sortedKeys = argKeys.sort((a, b) => { + if (a === 'query') return -1; + if (b === 'query') return 1; + return 0; + }); + + const structuredContent = sortedKeys.map(key => ({ + key, + value: this.formatValueForDisplay(filteredArgs[key]) + })); + + return { isMultiLine: true, content: structuredContent, action }; + } + + /** + * Filter metadata fields from tool arguments + */ + static filterMetadataFields(args: any): any { + return Object.fromEntries( + Object.entries(args).filter(([key]) => + key !== 'reasoning' && key !== 'toolCallId' && key !== 'timestamp' + ) + ); + } + + /** + * Format tool name for display (replace underscores with spaces) + */ + static formatToolName(toolName: string): string { + return toolName.replace(/_/g, ' '); + } + + /** + * Get readable action name from tool name + */ + static getActionName(toolName: string): string { + return toolName.replace(/_/g, ' ').toLowerCase(); + } +} \ No newline at end of file diff --git a/front_end/panels/ai_chat/ui/ToolResultComponent.ts b/front_end/panels/ai_chat/ui/ToolResultComponent.ts new file mode 100644 index 00000000000..9f1117f81a0 --- /dev/null +++ b/front_end/panels/ai_chat/ui/ToolResultComponent.ts @@ -0,0 +1,228 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../ui/lit/lit.js'; +import type { AgentMessage } from '../agent_framework/AgentSessionTypes.js'; +import { ToolDescriptionFormatter } from './ToolDescriptionFormatter.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators; + +@customElement('tool-result') +export class ToolResultComponent extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`tool-result`; + private readonly shadow = this.attachShadow({mode: 'open'}); + + private toolResult: AgentMessage | null = null; + private isExpanded = false; + + connectedCallback(): void { + this.render(); + } + + setResult(toolResult: AgentMessage): void { + this.toolResult = toolResult; + this.render(); + } + + private toggleExpanded(): void { + this.isExpanded = !this.isExpanded; + this.render(); + } + + private render(): void { + if (!this.toolResult) return; + + const content = this.toolResult.content as any; + const toolName = content.toolName || 'unknown_tool'; + const success = content.success !== false; // Default to true if not specified + const statusClass = success ? 'success' : 'error'; + const statusIcon = success ? '✓' : '❌'; + const result = content.result; + const error = content.error; + + const resultText = this.formatResult(result); + const isLongResult = resultText && resultText.length > 200; + + Lit.render(html` + + +
+
this.toggleExpanded() : null}> +
+ ${statusIcon} ${ToolDescriptionFormatter.formatToolName(toolName)} result +
+ ${isLongResult ? html` + + ` : Lit.nothing} +
+ + ${result ? html` + ${isLongResult && !this.isExpanded ? html` +
${this.getPreview(resultText)}
+ ` : html` +
+ ${this.renderFormattedResult(result)} +
+ `} + ` : Lit.nothing} + + ${error ? html` +
${error}
+ ` : Lit.nothing} +
+ `, this.shadow); + } + + private formatResult(result: any): string { + if (result === null || result === undefined) { + return ''; + } + + if (typeof result === 'string') { + return result; + } + + return JSON.stringify(result, null, 2); + } + + private getPreview(text: string): string { + if (!text) return ''; + + // Get first line or first 100 characters, whichever is shorter + const firstLine = text.split('\n')[0]; + return firstLine.length > 100 ? firstLine.substring(0, 97) + '...' : firstLine; + } + + private renderFormattedResult(result: any): Lit.TemplateResult { + if (typeof result === 'string') { + return html`${result}`; + } + + // For objects, render as formatted JSON + const jsonString = JSON.stringify(result, null, 2); + return html`${jsonString}`; + } +} \ No newline at end of file diff --git a/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessions.test.ts b/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessions.test.ts new file mode 100644 index 00000000000..ba02e34a58a --- /dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessions.test.ts @@ -0,0 +1,484 @@ +// Copyright 2025 The Chromium Authors. + +import '../ChatView.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +// Local enums/types to avoid TS enum imports in strip mode +const ChatMessageEntity = { + USER: 'user', + AGENT_SESSION: 'agent_session', + MODEL: 'model', + TOOL_RESULT: 'tool_result', +} as const; + +type AgentMsg = { + id: string; + timestamp: Date; + type: 'reasoning'|'tool_call'|'tool_result'|'handoff'|'final_answer'; + content: any; +}; + +function makeToolCall(id: string, toolName: string, toolArgs: Record = {}): AgentMsg { + return {id, timestamp: new Date(), type: 'tool_call', content: {type: 'tool_call', toolName, toolArgs, toolCallId: id}}; +} + +function makeToolResult(id: string, toolName: string, success = true, result: any = {ok: true}): AgentMsg { + return {id: `${id}-result`, timestamp: new Date(), type: 'tool_result', content: {type: 'tool_result', toolCallId: id, toolName, success, result}}; +} + +function makeSession(sessionId: string, opts: Partial = {}): any { + return { + agentName: opts.agentName || 'test_agent', + sessionId, + status: opts.status || 'running', + startTime: new Date(), + messages: opts.messages || [], + nestedSessions: opts.nestedSessions || [], + agentQuery: opts.agentQuery, + agentReasoning: opts.agentReasoning, + config: opts.config || {}, + tools: [], + }; +} + +function makeAgentSessionMessage(session: any): any { + return { entity: ChatMessageEntity.AGENT_SESSION, agentSession: session } as any; +} + +function makeUser(text: string): any { + return { entity: ChatMessageEntity.USER, text } as any; +} + +function queryLive(view: HTMLElement): HTMLElement[] { + const shadow = (view.shadowRoot!); + return Array.from(shadow.querySelectorAll('live-agent-session')) as HTMLElement[]; +} + +describe('ChatView Agent Sessions: nesting & handoffs', () => { + it('renders nested child session inside parent timeline', async () => { + const parent = makeSession('p1', {nestedSessions: [makeSession('c1')]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const lives = queryLive(view); + assert.strictEqual(lives.length, 1); + const shadow = lives[0].shadowRoot!; + const nestedContainer = 
shadow.querySelector('.nested-sessions'); + assert.isNotNull(nestedContainer); + assert.isAtLeast((nestedContainer as HTMLElement).querySelectorAll('live-agent-session').length, 1); + document.body.removeChild(view); + }); + + it('renders two-level nested sessions (parent→child→grandchild)', async () => { + const grandchild = makeSession('g1'); + const child = makeSession('c1', {nestedSessions: [grandchild]}); + const parent = makeSession('p1', {nestedSessions: [child]}); + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const shadow = queryLive(view)[0].shadowRoot!; + const nestedContainer = shadow.querySelector('.nested-sessions') as HTMLElement; + // There should be a nested live-agent-session for child; and inside it, another for grandchild + const firstLevel = nestedContainer.querySelector('live-agent-session') as HTMLElement; + assert.isNotNull(firstLevel); + const secondShadow = firstLevel.shadowRoot!; + const secondLevel = secondShadow.querySelector('.nested-sessions') as HTMLElement; + assert.isNotNull(secondLevel); + document.body.removeChild(view); + }); + + it('promotes child to top-level; top-level child is suppressed (shown inline under parent)', async () => { + const child = makeSession('c1'); + const parent = makeSession('p1', {nestedSessions: [child]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent), makeAgentSessionMessage(child)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + // New behavior: suppress duplicate top-level child when also nested + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + document.body.removeChild(view); + }); + + it('suppresses inline nested child when child also appears as top-level session', async () => { + const child = makeSession('c-suppress'); + const parent = makeSession('p-suppress', {nestedSessions: [child]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + // Render both parent and child as top-level messages + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent), makeAgentSessionMessage(child)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + // The parent component should not render inline child timeline + const lives = queryLive(view); + assert.strictEqual(lives.length, 2); + const parentShadow = lives[0].shadowRoot!; + const nestedContainer = parentShadow.querySelector('.nested-sessions') as HTMLElement; + if (nestedContainer) { + assert.strictEqual(nestedContainer.querySelectorAll('live-agent-session').length, 0); + } + document.body.removeChild(view); + }); + + it('parent removed, child persists', async () => { + const child = makeSession('c1'); + const parent = makeSession('p1', {nestedSessions: [child]}); + const view = 
document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent), makeAgentSessionMessage(child)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 2); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(child)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + document.body.removeChild(view); + }); + + it('suppresses top-level child when handoff anchor is pending for the target agent', async () => { + // Create a parent session with a pending handoff anchor to child agent by name + const childAgentName = 'child_agent'; + const parent = makeSession('parent-pending', { agentName: 'parent_agent', messages: [ + { + id: 'handoff-1', + timestamp: new Date(), + type: 'handoff', + content: { + type: 'handoff', + targetAgent: childAgentName, + reason: 'Handing off to child temporarily', + context: { note: 'pending handoff anchor' }, + nestedSessionId: 'pending-abc', + } + } + ]}); + const child = makeSession('child-real', { agentName: childAgentName }); + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + // Render both parent and child as top-level messages + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent), makeAgentSessionMessage(child)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + // The top-level child should be suppressed due to the pending handoff anchor + const lives = queryLive(view); + assert.strictEqual(lives.length, 1, 'only parent session should render as top-level'); + document.body.removeChild(view); + }); + + it('inlines child agent timeline at handoff and updates in real time', async () => { + // Create child session with stable id and no messages yet + const childId = 'child-stable-1'; + const child = makeSession(childId, { agentName: 'child_agent', messages: [] }); + // Parent session includes a handoff anchor pointing to the child id + const parent = makeSession('parent-rt', { + agentName: 'parent_agent', + messages: [ + { + id: 'handoff-rt', + timestamp: new Date(), + type: 'handoff', + content: { + type: 'handoff', + targetAgent: 'child_agent', + reason: 'Handing off to child', + context: { note: 'rt' }, + nestedSessionId: childId, + } + } + ], + nestedSessions: [child], + }); + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + // Verify nested child is inlined inside the parent's timeline (headerless nested component) + const parentEl = queryLive(view)[0]; + const parentShadow = parentEl.shadowRoot!; + // The nested child live-agent-session should be present via the inlined anchor replacement + const nestedChildEl = parentShadow.querySelector('live-agent-session') as HTMLElement; + assert.isNotNull(nestedChildEl, 'expected nested child session element inlined at anchor'); + + // Now simulate real-time update: append a tool_call to the child session and re-render parent + const updatedChild = 
makeSession(childId, { + agentName: 'child_agent', + messages: [ + { id: 'tc1', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'fetch', toolArgs: { url: 'https://example.com' }, toolCallId: 'tc1' } }, + ], + }); + const updatedParent = makeSession('parent-rt', { + agentName: 'parent_agent', + messages: parent.messages, + nestedSessions: [updatedChild], + }); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(updatedParent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + // Check the nested child timeline shows the new tool call inline without waiting for completion + const parentShadow2 = queryLive(view)[0].shadowRoot!; + const nestedChild2 = parentShadow2.querySelector('live-agent-session') as HTMLElement; + assert.isNotNull(nestedChild2, 'nested child should remain present after update'); + const childShadow = (nestedChild2.shadowRoot!) as ShadowRoot; + assert.include((childShadow.innerHTML || '').toLowerCase(), 'fetch', 'child timeline shows running tool call'); + + document.body.removeChild(view); + }); + + it('agent tool call inside agent tool call via nested session shows both timelines', async () => { + // Parent has a tool call; child nested session also has a tool call + const parent = makeSession('p-tool', {messages: [makeToolCall('pa', 'analyze', {target: 'doc'})]}); + const child = makeSession('c-tool', {messages: [makeToolCall('cb', 'fetch', {url: 'https://example.com'})]}); + parent.nestedSessions = [child]; + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(parent)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const live = queryLive(view)[0]; + const sroot = live.shadowRoot!; + // Parent timeline item present + const parentItems = sroot.querySelectorAll('.timeline-item'); + assert.isAtLeast(parentItems.length, 1); + // Nested child timeline HTML is inlined; ensure child tool name appears + assert.include(sroot.innerHTML, 'fetch'); + document.body.removeChild(view); + }); + + it('concurrent sessions with handoff A→B (B top-level), removing A prunes only A', async () => { + const b = makeSession('b'); + const a = makeSession('a', {nestedSessions: [b]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(a), makeAgentSessionMessage(b)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 2); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(b)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + document.body.removeChild(view); + }); +}); + +describe('ChatView Agent Sessions: pruning and resilience', () => { + it('reorder does not recreate or prune', async () => { + const s1 = makeSession('s1'); + const s2 = makeSession('s2'); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(s1), makeAgentSessionMessage(s2)], state: 'idle', 
isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 2); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(s2), makeAgentSessionMessage(s1)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 2); + document.body.removeChild(view); + }); + + it('minimal agent session (agentName + sessionId) renders without error', async () => { + const minimal = {entity: ChatMessageEntity.AGENT_SESSION, agentSession: {agentName: 'TestAgent', sessionId: 's-min', status: 'running', startTime: new Date(), messages: [], nestedSessions: []}} as any; + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), minimal], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + document.body.removeChild(view); + }); +}); + +describe('ChatView visibility rules: agent-managed tool calls/results are hidden', () => { + it('hides model tool call + result for configurable agent; live timeline shows instead', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + const toolCallId = 'id-1'; + const agent = makeSession('s1'); + // Include model tool + agent-managed tool result + view.data = {messages: [ + makeUser('start'), + { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', toolCallId, isFinalAnswer: false } as any, + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'fetch', toolCallId, resultText: 'ok', isError: false, isFromConfigurableAgent: true } as any, + makeAgentSessionMessage(agent), + ], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const shadow = view.shadowRoot!; + // No standalone tool result message should be rendered + assert.strictEqual(shadow.querySelectorAll('.tool-result-message').length, 0); + // Live session should be present + assert.isAtLeast(queryLive(view).length, 1); + document.body.removeChild(view); + }); + + it('shows non-agent tool result as standalone message', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [ + makeUser('start'), + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'scan', resultText: 'x', isError: false } as any, + ], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + const shadow = view.shadowRoot!; + assert.strictEqual(shadow.querySelectorAll('.tool-result-message').length, 1); + document.body.removeChild(view); + }); + + it('mixed: agent-managed tool hidden; regular tool visible', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + const toolCallId = 't1'; + view.data = {messages: [ + makeUser('start'), + { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', toolCallId, isFinalAnswer: false } as any, + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'fetch', toolCallId, resultText: 'ok', isError: false, isFromConfigurableAgent: true } as any, + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'other', 
resultText: 'y', isError: false } as any, + makeAgentSessionMessage(makeSession('s1')), + ], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + const shadow = view.shadowRoot!; + assert.strictEqual(shadow.querySelectorAll('.tool-result-message').length, 1); + document.body.removeChild(view); + }); +}); + +describe('LiveAgentSessionComponent timeline rendering and interactions', () => { + it('single tool session shows single-tool mode and hides spine', async () => { + const session = makeSession('s1', {messages: [makeToolCall('tc1', 'fetch', {url: 'x'}), makeToolResult('tc1', 'fetch', true, {ok: true})]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const live = queryLive(view)[0]; + const sroot = live.shadowRoot!; + const container = sroot.querySelector('.agent-execution-timeline') as HTMLElement; + assert.isTrue(container.classList.contains('single-tool')); + // In single-tool mode, vertical spine is suppressed via CSS; we ensure the class exists + document.body.removeChild(view); + }); + + it('multiple tools show two items with status markers', async () => { + const session = makeSession('s1', {messages: [ + makeToolCall('a', 'fetch', {url: 'x'}), + makeToolResult('a', 'fetch', true, {ok: 1}), + makeToolCall('b', 'scan', {q: 'q'}), + // leave b running (no result) + ]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const live = queryLive(view)[0]; + const sroot = live.shadowRoot!; + const items = sroot.querySelectorAll('.timeline-item'); + assert.isAtLeast(items.length, 2); + const markers = sroot.querySelectorAll('.tool-status-marker'); + assert.isAtLeast(markers.length, 2); + document.body.removeChild(view); + }); + + it('expansion toggle persists across re-render', async () => { + const session = makeSession('s1', {messages: [makeToolCall('a', 'fetch')]}); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const live = queryLive(view)[0]; + const sroot = live.shadowRoot!; + const toggle = sroot.querySelector('.tool-toggle') as HTMLButtonElement; + toggle?.click(); + await raf(); + // Trigger re-render by adding a no-op user message + view.data = {messages: [makeUser('start'), makeUser('again'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + const sroot2 = queryLive(view)[0].shadowRoot!; + const timeline = sroot2.querySelector('.timeline-items') as HTMLElement; + assert.strictEqual(timeline.style.display, 'block'); + document.body.removeChild(view); + }); + + it('renders agent query and reasoning once', async () => { + const session = makeSession('s1', {agentQuery: 'Do X', agentReasoning: 'Because Y', messages: []}); + const view = 
document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + const sroot = queryLive(view)[0].shadowRoot!; + const msgs = sroot.querySelectorAll('.message'); + // One for reasoning; query appears inside timeline items area as a specific block + assert.isAtLeast(msgs.length, 1); + assert.include(sroot.innerHTML, 'Do X'); + assert.include(sroot.innerHTML, 'Because Y'); + document.body.removeChild(view); + }); + + it('renders Research Agent session with multiple tool calls and results', async () => { + const researchConfig = { ui: { displayName: 'Research Agent' } }; + const session = makeSession('research-1', { + agentName: 'research_agent', + config: researchConfig, + agentQuery: 'Find latest web performance news', + agentReasoning: 'Gather and summarize credible sources', + messages: [ + makeToolCall('w1', 'web_search', {query: 'web performance news'}), + makeToolResult('w1', 'web_search', true, {hits: 5}), + makeToolCall('s1', 'summarize', {text: '...'}), + {id: 's1-result', timestamp: new Date(), type: 'tool_result', content: {type: 'tool_result', toolCallId: 's1', toolName: 'summarize', success: false, error: 'too long'}}, + ], + }); + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + view.data = {messages: [makeUser('start'), makeAgentSessionMessage(session)], state: 'idle', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + + const live = queryLive(view)[0]; + const sroot = live.shadowRoot!; + // Header shows display name + const headerTitle = sroot.querySelector('.agent-title') as HTMLElement; + assert.include(headerTitle.textContent || '', 'Research Agent'); + // Two tool items present + const items = sroot.querySelectorAll('.timeline-item'); + assert.isAtLeast(items.length, 2); + // Has completed and error markers + const completed = sroot.querySelectorAll('.tool-status-marker.completed'); + const errored = sroot.querySelectorAll('.tool-status-marker.error'); + assert.isAtLeast(completed.length, 1); + assert.isAtLeast(errored.length, 1); + // Contains tool names after formatting (underscores replaced with spaces) + assert.include(sroot.innerHTML.toLowerCase(), 'web search'); + assert.include(sroot.innerHTML.toLowerCase(), 'summarize'); + + document.body.removeChild(view); + }); + + it('hides general loader when a final error message is present', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + // Provide LOADING state but include a final error message; ChatView should not show generic loader + const errorFinal = { entity: 'model', action: 'final', isFinalAnswer: true, error: 'Something went wrong' } as any; + view.data = {messages: [makeUser('start'), errorFinal], state: 'loading', isTextInputEmpty: true, onSendMessage: () => {}, onPromptSelected: () => {}} as any; + await raf(); + const shadow = view.shadowRoot!; + const loaders = shadow.querySelectorAll('.message.model-message.loading'); + assert.strictEqual(loaders.length, 0); + document.body.removeChild(view); + }); +}); diff --git a/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessionsOrder.test.ts b/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessionsOrder.test.ts new file mode 100644 index 00000000000..c1b8b3eb7a2 --- 
/dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/ChatViewAgentSessionsOrder.test.ts @@ -0,0 +1,96 @@ +// Copyright 2025 The Chromium Authors. + +import '../ChatView.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +// Minimal local constants to avoid importing enums in strip mode +const ChatMessageEntity = { + USER: 'user', + AGENT_SESSION: 'agent_session', +} as const; + +function makeUser(text: string): any { + return { entity: ChatMessageEntity.USER, text } as any; +} + +function makeSession(sessionId: string, opts: Partial = {}): any { + return { + agentName: opts.agentName || 'test_agent', + sessionId, + status: opts.status || 'running', + startTime: new Date(), + messages: opts.messages || [], + nestedSessions: opts.nestedSessions || [], + agentQuery: opts.agentQuery, + agentReasoning: opts.agentReasoning, + config: opts.config || {}, + tools: [], + }; +} + +function makeAgentSessionMessage(session: any): any { + return { entity: ChatMessageEntity.AGENT_SESSION, agentSession: session } as any; +} + +function queryLive(view: HTMLElement): HTMLElement[] { + const shadow = view.shadowRoot!; + return Array.from(shadow.querySelectorAll('live-agent-session')) as HTMLElement[]; +} + +describe('ChatView Agent Sessions: sequential top-level sessions', () => { + it('renders two top-level agent sessions in order with first completed and second running', async () => { + // First session has a completed tool (call + result) + const s1 = makeSession('s1', { + agentReasoning: 'First agent session', + status: 'completed', + messages: [ + { id: 'tc1', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'fetch', toolArgs: { url: 'x' }, toolCallId: 'tc1' } }, + { id: 'tc1-result', timestamp: new Date(), type: 'tool_result', content: { type: 'tool_result', toolName: 'fetch', toolCallId: 'tc1', success: true, result: { ok: true } } }, + ], + }); + // Second session is still running (only tool call) + const s2 = makeSession('s2', { + agentReasoning: 'Second agent session', + status: 'running', + messages: [ + { id: 'tc2', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'scan', toolArgs: { sel: '#id' }, toolCallId: 'tc2' } }, + ], + }); + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + view.data = { + messages: [ + makeUser('start'), + makeAgentSessionMessage(s1), + makeAgentSessionMessage(s2), + ], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + + await raf(); + + const lives = queryLive(view); + assert.strictEqual(lives.length, 2); + + // Verify ordering by checking the agentReasoning text in each live component + const firstShadow = lives[0].shadowRoot!; + const secondShadow = lives[1].shadowRoot!; + const firstReason = firstShadow.querySelector('.message')?.textContent || ''; + const secondReason = secondShadow.querySelector('.message')?.textContent || ''; + assert.include(firstReason, 'First agent session'); + assert.include(secondReason, 'Second agent session'); + + // Verify status markers in timelines: first has a completed marker, second has running + const firstCompleted = firstShadow.querySelector('.tool-status-marker.completed'); + const secondRunning = secondShadow.querySelector('.tool-status-marker.running'); + assert.isNotNull(firstCompleted, 'first session should show completed tool status'); + assert.isNotNull(secondRunning, 'second session should show running tool status'); + + 
document.body.removeChild(view); + }); +}); diff --git a/front_end/panels/ai_chat/ui/__tests__/ChatViewInputClear.test.ts b/front_end/panels/ai_chat/ui/__tests__/ChatViewInputClear.test.ts new file mode 100644 index 00000000000..cc32f1ff8fc --- /dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/ChatViewInputClear.test.ts @@ -0,0 +1,48 @@ +// Copyright 2025 The Chromium Authors. + +import '../ChatView.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +// Minimal enums +const ChatMessageEntity = { USER: 'user' } as const; + +function makeUser(text: string): any { return { entity: ChatMessageEntity.USER, text } as any; } + +describe('ChatView input clearing (expanded view)', () => { + function getTextarea(view: HTMLElement): HTMLTextAreaElement { + const shadow = view.shadowRoot!; + const bar = shadow.querySelector('ai-input-bar') as HTMLElement; + // InputBar renders light-DOM; target the nested ai-chat-input textarea + const chat = bar?.querySelector('ai-chat-input') as HTMLElement; + return chat?.querySelector('textarea') as HTMLTextAreaElement; + } + + it('clears text after Enter send', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + // Expanded view requires at least one user message + view.data = { + messages: [makeUser('hello')], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); await raf(); + + const ta = getTextarea(view); + ta.value = 'New message'; + ta.dispatchEvent(new Event('input', {bubbles: true})); + await raf(); await raf(); + + const ev = new KeyboardEvent('keydown', {key: 'Enter', shiftKey: false, bubbles: true}); + ta.dispatchEvent(ev); + await raf(); + + const taAfter = getTextarea(view); + assert.strictEqual(taAfter.value, ''); + + document.body.removeChild(view); + }); +}); diff --git a/front_end/panels/ai_chat/ui/__tests__/ChatViewPrune.test.ts b/front_end/panels/ai_chat/ui/__tests__/ChatViewPrune.test.ts new file mode 100644 index 00000000000..14267927838 --- /dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/ChatViewPrune.test.ts @@ -0,0 +1,90 @@ +// Copyright 2025 The Chromium Authors. 
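+// Describes the tests below: ChatView should prune its cached live-agent-session
+// components once their agent_session messages disappear from data.messages, while
+// keeping components for sessions that are still present.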
+ +import '../ChatView.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +// Minimal local enum constants to avoid importing TS enums in tests +const ChatMessageEntity = { + USER: 'user', + AGENT_SESSION: 'agent_session', +} as const; + +function makeAgentSessionMessage(sessionId: string): any { + return { + entity: ChatMessageEntity.AGENT_SESSION, + agentSession: { + sessionId, + agentName: 'TestAgent', + config: {}, + messages: [], + }, + } as any; +} + +describe('ChatView pruneLiveAgentSessions', () => { + it('prunes cached sessions when they disappear from messages', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + // Seed with a single session + view.data = { + messages: [makeAgentSessionMessage('s1')], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + // Expect one cached session component + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + + // Now remove all agent sessions from messages + view.data = { + messages: [ + {entity: ChatMessageEntity.USER, text: 'hi'} as any, + ], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + // Expect cached sessions to be pruned + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 0); + + document.body.removeChild(view); + }); + + it('prunes only stale sessions and keeps active ones', async () => { + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + // Two sessions present (ensure expanded view by including a user message) + view.data = { + messages: [ + {entity: ChatMessageEntity.USER, text: 'start'} as any, + makeAgentSessionMessage('s1'), + makeAgentSessionMessage('s2'), + ], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 2); + + // Remove s1, keep s2 + view.data = { + messages: [makeAgentSessionMessage('s2')], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + assert.strictEqual(view.getLiveAgentSessionCountForTesting(), 1); + + document.body.removeChild(view); + }); +}); diff --git a/front_end/panels/ai_chat/ui/__tests__/ChatViewSequentialSessionsTransition.test.ts b/front_end/panels/ai_chat/ui/__tests__/ChatViewSequentialSessionsTransition.test.ts new file mode 100644 index 00000000000..a1bd67b2a4d --- /dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/ChatViewSequentialSessionsTransition.test.ts @@ -0,0 +1,102 @@ +// Copyright 2025 The Chromium Authors. 
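+// Describes the test below: when a new running agent session is appended after a
+// completed one, ChatView should keep rendering the finished session and show both
+// sessions in message order.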
+ +import '../ChatView.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +// Minimal local constants to avoid importing enums in strip mode +const ChatMessageEntity = { + USER: 'user', + AGENT_SESSION: 'agent_session', +} as const; + +function makeUser(text: string): any { + return { entity: ChatMessageEntity.USER, text } as any; +} + +function makeSession(sessionId: string, opts: Partial = {}): any { + return { + agentName: opts.agentName || 'test_agent', + sessionId, + status: opts.status || 'running', + startTime: new Date(), + messages: opts.messages || [], + nestedSessions: opts.nestedSessions || [], + agentQuery: opts.agentQuery, + agentReasoning: opts.agentReasoning, + config: opts.config || {}, + tools: [], + }; +} + +function makeAgentSessionMessage(session: any): any { + return { entity: ChatMessageEntity.AGENT_SESSION, agentSession: session } as any; +} + +function queryLive(view: HTMLElement): HTMLElement[] { + const shadow = view.shadowRoot!; + return Array.from(shadow.querySelectorAll('live-agent-session')) as HTMLElement[]; +} + +describe('ChatView Agent Sessions: transition from completed to new running session', () => { + it('renders first completed session then second running session when added to messages', async () => { + const s1 = makeSession('s1', { + agentReasoning: 'First agent session', + status: 'completed', + messages: [ + { id: 'tc1', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'fetch', toolArgs: { url: 'x' }, toolCallId: 'tc1' } }, + { id: 'tc1-result', timestamp: new Date(), type: 'tool_result', content: { type: 'tool_result', toolName: 'fetch', toolCallId: 'tc1', success: true, result: { ok: true } } }, + ], + }); + + const view = document.createElement('devtools-chat-view') as any; + document.body.appendChild(view); + + // Initial data only contains the completed first session + view.data = { + messages: [ + makeUser('start'), + makeAgentSessionMessage(s1), + ], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + + // Now a new session starts; upstream adds it to messages + const s2 = makeSession('s2', { + agentReasoning: 'Second agent session', + status: 'running', + messages: [ + { id: 'tc2', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'scan', toolArgs: { sel: '#id' }, toolCallId: 'tc2' } }, + ], + }); + view.data = { + messages: [ + makeUser('start'), + makeAgentSessionMessage(s1), + makeAgentSessionMessage(s2), + ], + state: 'idle', + isTextInputEmpty: true, + onSendMessage: () => {}, + onPromptSelected: () => {}, + } as any; + await raf(); + + // Both sessions should be visible in order + let lives = queryLive(view); + assert.strictEqual(lives.length, 2); + const firstReason = lives[0].shadowRoot!.querySelector('.message')?.textContent || ''; + const secondReason = lives[1].shadowRoot!.querySelector('.message')?.textContent || ''; + assert.include(firstReason, 'First agent session'); + assert.include(secondReason, 'Second agent session'); + + // Still 2 sessions visible + lives = queryLive(view); + assert.strictEqual(lives.length, 2); + + document.body.removeChild(view); + }); +}); diff --git a/front_end/panels/ai_chat/ui/__tests__/LiveAgentSessionComponent.test.ts b/front_end/panels/ai_chat/ui/__tests__/LiveAgentSessionComponent.test.ts new file mode 100644 index 00000000000..50e78f4b338 --- /dev/null +++ b/front_end/panels/ai_chat/ui/__tests__/LiveAgentSessionComponent.test.ts @@ -0,0 
+1,184 @@ +// Copyright 2025 The Chromium Authors. + +import '../LiveAgentSessionComponent.js'; +import {raf} from '../../../../testing/DOMHelpers.js'; + +type AgentMsg = { + id: string; + timestamp: Date; + type: 'reasoning'|'tool_call'|'tool_result'|'handoff'|'final_answer'; + content: any; +}; + +type AgentSession = { + agentName: string; + sessionId: string; + status: 'running'|'completed'|'error'; + startTime: Date; + endTime?: Date; + messages: AgentMsg[]; + nestedSessions: AgentSession[]; + agentQuery?: string; + agentReasoning?: string; + parentSessionId?: string; + config?: any; +}; + +function makeToolCall(id: string, toolName: string, toolArgs: Record = {}): AgentMsg { + return {id, timestamp: new Date(), type: 'tool_call', content: {type: 'tool_call', toolName, toolArgs, toolCallId: id}}; +} + +function makeToolResult(id: string, toolName: string, success = true, result: any = {ok: true}): AgentMsg { + return {id: `${id}-result`, timestamp: new Date(), type: 'tool_result', content: {type: 'tool_result', toolCallId: id, toolName, success, result}}; +} + +function makeSession(sessionId: string, opts: Partial = {}): AgentSession { + return { + agentName: opts.agentName || 'test_agent', + sessionId, + status: opts.status || 'running', + startTime: opts.startTime || new Date(), + endTime: opts.endTime, + messages: opts.messages || [], + nestedSessions: opts.nestedSessions || [], + agentQuery: opts.agentQuery, + agentReasoning: opts.agentReasoning, + parentSessionId: opts.parentSessionId, + config: opts.config || { ui: { displayName: 'Test Agent' } }, + }; +} + +describe('LiveAgentSessionComponent UI elements', () => { + it('renders header with display name and toggle', async () => { + const session = makeSession('s1', {config: {ui: {displayName: 'Research Agent'}}}); + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(session as any); + await raf(); + + const sroot = el.shadowRoot!; + const header = sroot.querySelector('.agent-header') as HTMLElement; + const title = sroot.querySelector('.agent-title') as HTMLElement; + const toggle = sroot.querySelector('.tool-toggle') as HTMLButtonElement; + assert.isNotNull(header); + assert.include(title?.textContent || '', 'Research Agent'); + assert.isNotNull(toggle); + + document.body.removeChild(el); + }); + + it('expands/collapses timeline via header toggle', async () => { + const session = makeSession('s1', {messages: [makeToolCall('a', 'fetch', {url: 'x'})]}); + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(session as any); + await raf(); + + const sroot = el.shadowRoot!; + const timeline = sroot.querySelector('.timeline-items') as HTMLElement; + // Default collapsed + assert.strictEqual(timeline.style.display, 'none'); + // Click toggle + (sroot.querySelector('.tool-toggle') as HTMLButtonElement).click(); + await raf(); + assert.strictEqual((el.shadowRoot!.querySelector('.timeline-items') as HTMLElement).style.display, 'block'); + // Collapse again + (el.shadowRoot!.querySelector('.tool-toggle') as HTMLButtonElement).click(); + await raf(); + assert.strictEqual((el.shadowRoot!.querySelector('.timeline-items') as HTMLElement).style.display, 'none'); + + document.body.removeChild(el); + }); + + it('renders tool items with running/completed/error markers', async () => { + const base = makeSession('s1', {messages: [ + makeToolCall('a', 'fetch', {url: 'x'}), + makeToolResult('a', 'fetch', true, {ok: 1}), + 
makeToolCall('b', 'scan', {q: 'q'}), + ]}); + + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(base as any); + await raf(); + + let sroot = el.shadowRoot!; + assert.isAtLeast(sroot.querySelectorAll('.tool-status-marker').length, 2); + assert.isAtLeast(sroot.querySelectorAll('.tool-status-marker.completed').length, 1); + assert.isAtLeast(sroot.querySelectorAll('.tool-status-marker.running').length, 1); + + // Now add an error result for the second call and re-render + const updated = makeSession('s1', { + messages: [ + makeToolCall('a', 'fetch', {url: 'x'}), + makeToolResult('a', 'fetch', true, {ok: 1}), + makeToolCall('b', 'scan', {q: 'q'}), + {id: 'b-result', timestamp: new Date(), type: 'tool_result', content: {type: 'tool_result', toolCallId: 'b', toolName: 'scan', success: false, error: 'bad'}}, + ], + }); + el.setSession(updated as any); + await raf(); + + sroot = el.shadowRoot!; + assert.isAtLeast(sroot.querySelectorAll('.tool-status-marker.error').length, 1); + + document.body.removeChild(el); + }); + + it('renders nested session inside timeline area', async () => { + const child = makeSession('c1', {config: {ui: {displayName: 'Child Agent'}}}); + const parent = makeSession('p1', {nestedSessions: [child], config: {ui: {displayName: 'Parent Agent'}}}); + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(parent as any); + await raf(); + + const sroot = el.shadowRoot!; + const nestedContainer = sroot.querySelector('.nested-sessions') as HTMLElement; + assert.isNotNull(nestedContainer); + // A nested live-agent-session element should be present + assert.isAtLeast(nestedContainer.querySelectorAll('live-agent-session').length, 1); + + document.body.removeChild(el); + }); + + it('shows per-tool reasoning inline with the tool call when provided', async () => { + const session = makeSession('sR', {messages: [ + { id: 't1', timestamp: new Date(), type: 'tool_call', content: {type: 'tool_call', toolName: 'fetch', toolArgs: {url: 'x'}, toolCallId: 't1', reasoning: 'Need to fetch the resource first.'}}, + ]}); + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(session as any); + await raf(); + + const sroot = el.shadowRoot!; + const items = Array.from(sroot.querySelectorAll('.timeline-item')) as HTMLElement[]; + assert.isAtLeast(items.length, 1); + const first = items[0]; + const inline = first.querySelector('.tool-reasoning-inline') as HTMLElement; + assert.isNotNull(inline); + assert.include(inline.textContent || '', 'Need to fetch the resource first.'); + + document.body.removeChild(el); + }); + + it('uses reasoning from tool args when missing on content (inline) and does not duplicate in args list', async () => { + const session = makeSession('sArgs', {messages: [ + { id: 't2', timestamp: new Date(), type: 'tool_call', content: {type: 'tool_call', toolName: 'scan', toolArgs: {q: 'abc', reasoning: 'Scanning is needed to locate items.'}, toolCallId: 't2'}}, + ]}); + const el = document.createElement('live-agent-session') as any; + document.body.appendChild(el); + el.setSession(session as any); + await raf(); + + const sroot = el.shadowRoot!; + const item = sroot.querySelector('.timeline-item') as HTMLElement; + const inline = item.querySelector('.tool-reasoning-inline') as HTMLElement; + assert.isNotNull(inline); + assert.include(inline.textContent || '', 'Scanning is needed to locate items.'); + // 
Ensure we did not also render an arg row for the reasoning key + assert.notInclude(item.innerHTML.toLowerCase(), 'reasoning:'); + + document.body.removeChild(el); + }); +}); diff --git a/front_end/panels/ai_chat/ui/chatView.css b/front_end/panels/ai_chat/ui/chatView.css index 438650b0104..a6849ad37e5 100644 --- a/front_end/panels/ai_chat/ui/chatView.css +++ b/front_end/panels/ai_chat/ui/chatView.css @@ -35,6 +35,12 @@ color-scheme: light dark; } +/* Ensure custom input bar expands with container width */ +ai-input-bar { + display: block; + width: 100%; +} + .chat-view-container { display: flex; flex-direction: column; @@ -89,7 +95,7 @@ align-items: center; justify-content: center; width: 100%; - max-width: 600px; + max-width: none; margin: 0 auto; padding: 5px; padding-top: 0; @@ -648,6 +654,20 @@ box-sizing: border-box; } +/* Ensure custom input element fills available width inside the row */ +.input-row ai-chat-input { + flex: 1 1 auto; + display: block; + width: 100%; +} + +/* Ensure inner textarea fills its host component width */ +.input-row ai-chat-input .text-input { + width: 100%; + box-sizing: border-box; + display: block; +} + .input-container:focus-within { border-color: var(--color-primary-container-border); box-shadow: 0 4px 15px var(--color-shadow-subtle); @@ -670,6 +690,11 @@ letter-spacing: 0.01em; } +/* Allow larger autosize range in centered first-message view */ +.centered-view .input-container.centered .input-row ai-chat-input .text-input { + max-height: 200px; +} + .text-input::placeholder { font-weight: 400; } @@ -2856,7 +2881,8 @@ devtools-snackbar.bookmark-notification .container { flex-shrink: 0; /* Prevent banner from shrinking */ width: 100%; box-sizing: border-box; - z-index: 1000; + z-index: 9999; /* Ensure always on top */ + position: relative; } @keyframes slideDown { diff --git a/front_end/panels/ai_chat/ui/input/ChatInput.ts b/front_end/panels/ai_chat/ui/input/ChatInput.ts new file mode 100644 index 00000000000..b5240c1e07b --- /dev/null +++ b/front_end/panels/ai_chat/ui/input/ChatInput.ts @@ -0,0 +1,73 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +@customElement('ai-chat-input') +export class ChatInput extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-chat-input`; + + #disabled = false; + #placeholder = ''; + #value = ''; + + get disabled(): boolean { return this.#disabled; } + set disabled(v: boolean) { this.#disabled = v; this.#render(); } + get placeholder(): string { return this.#placeholder; } + set placeholder(v: string) { this.#placeholder = v ?? ''; this.#render(); } + get value(): string { return this.#value; } + set value(v: string) { this.#value = v ?? 
''; this.#render(); } + + connectedCallback(): void { this.#render(); } + focusInput(): void { (this.querySelector('textarea') as HTMLTextAreaElement | null)?.focus(); } + clear(): void { this.#value = ''; this.#render(); this.#syncDomValue(); } + + // Ensure DOM reflects the internal value immediately + #syncDomValue(): void { + const ta = this.querySelector('textarea') as HTMLTextAreaElement | null; + if (ta) { ta.value = this.#value; this.#autosize(ta); } + } + + #onInput = (e: Event) => { + const el = e.target as HTMLTextAreaElement; + this.#value = el.value; + this.dispatchEvent(new CustomEvent('inputchange', {bubbles: true, detail: { value: this.#value }})); + this.#autosize(el); + }; + #onKeyDown = (e: KeyboardEvent) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + const text = this.#value.trim(); + if (!text || this.#disabled) return; + this.dispatchEvent(new CustomEvent('send', {bubbles: true, detail: { text }})); + this.#value = ''; + this.#render(); + this.#syncDomValue(); + } + }; + + #autosize(el: HTMLTextAreaElement): void { el.style.height = 'auto'; el.style.height = `${el.scrollHeight}px`; } + + #render(): void { + Lit.render(html` + + + `, this, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-chat-input': ChatInput; } +} diff --git a/front_end/panels/ai_chat/ui/input/InputBar.ts b/front_end/panels/ai_chat/ui/input/InputBar.ts new file mode 100644 index 00000000000..7a9ced36c13 --- /dev/null +++ b/front_end/panels/ai_chat/ui/input/InputBar.ts @@ -0,0 +1,150 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; +import type { ImageInputData } from '../../models/ChatTypes.js'; +import * as BaseOrchestratorAgent from '../../core/BaseOrchestratorAgent.js'; + +import '../model_selector/ModelSelector.js'; +import './ChatInput.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +@customElement('ai-input-bar') +export class InputBar extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-input-bar`; + + // Props + #placeholder = ''; + #disabled = false; + #sendDisabled = true; + #imageInput?: ImageInputData; + #modelOptions?: Array<{value: string, label: string}>; + #selectedModel?: string; + #modelSelectorDisabled = false; + #selectedPromptType?: string|null; + #agentButtonsHandler: (event: Event) => void = () => {}; + #centered = false; + + set placeholder(v: string) { this.#placeholder = v || ''; this.#render(); } + set disabled(v: boolean) { this.#disabled = !!v; this.#render(); } + set sendDisabled(v: boolean) { this.#sendDisabled = !!v; this.#render(); } + set imageInput(v: ImageInputData|undefined) { this.#imageInput = v; this.#render(); } + set modelOptions(v: Array<{value: string, label: string}>|undefined) { this.#modelOptions = v; this.#render(); } + set selectedModel(v: string|undefined) { this.#selectedModel = v; this.#render(); } + set modelSelectorDisabled(v: boolean) { this.#modelSelectorDisabled = !!v; this.#render(); } + set selectedPromptType(v: string|null|undefined) { this.#selectedPromptType = v ?? 
null; this.#render(); } + set agentButtonsHandler(fn: (event: Event) => void) { this.#agentButtonsHandler = fn || (() => {}); this.#render(); } + set centered(v: boolean) { this.#centered = !!v; this.#render(); } + + connectedCallback(): void { this.#render(); } + + #emitSendAndClear(detail: any): void { + // Re-emit send upward + this.dispatchEvent(new CustomEvent('send', { bubbles: true, detail })); + // Proactively clear the child input to avoid any stale content + const inputEl = this.querySelector('ai-chat-input') as (HTMLElement & { clear?: () => void }) | null; + if (inputEl) { + // Prefer component clear() if available + if (typeof (inputEl as any).clear === 'function') { + (inputEl as any).clear(); + } else if ('value' in (inputEl as any)) { + // Fall back to resetting value via setter + (inputEl as any).value = ''; + } + } + } + + // Public API for parent to explicitly clear the input field + clearInput(): void { + const inputEl = this.querySelector('ai-chat-input') as (HTMLElement & { clear?: () => void, value?: string }) | null; + if (typeof inputEl?.clear === 'function') { + inputEl.clear(); + } else if (inputEl && 'value' in inputEl) { + (inputEl as any).value = ''; + } + } + + #sendFromInput(): void { + const inputEl = this.querySelector('ai-chat-input') as (HTMLElement & { value?: string, clear?: () => void }) | null; + const text = (inputEl?.value ?? '').trim(); + if (!text) { + return; + } + this.dispatchEvent(new CustomEvent('send', { bubbles: true, detail: { text }})); + if (typeof inputEl?.clear === 'function') { + inputEl.clear(); + } + } + + #render(): void { + const imagePreview = this.#imageInput ? html` +
+ Image input + +
+ ` : Lit.nothing; + + const agentButtons = BaseOrchestratorAgent.renderAgentTypeButtons(this.#selectedPromptType ?? null, this.#agentButtonsHandler, this.#centered); + + const modelSelector = (this.#modelOptions && this.#modelOptions.length && this.#selectedModel) ? html` + { + const value = (e.detail as any)?.value as string | undefined; + if (value) { + this.dispatchEvent(new CustomEvent('model-changed', { bubbles: true, detail: { value }})); + } + }} + @model-selector-focus=${() => this.dispatchEvent(new CustomEvent('model-selector-focus', { bubbles: true }))} + > + ` : Lit.nothing; + + Lit.render(html` +
+ ${imagePreview} +
+ this.#emitSendAndClear((e as CustomEvent).detail)} + @inputchange=${(e: Event) => this.dispatchEvent(new CustomEvent('inputchange', { bubbles: true, detail: (e as CustomEvent).detail }))} + > +
+
+ ${agentButtons} +
+ ${modelSelector} + +
+
+
+ `, this, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-input-bar': InputBar; } +} diff --git a/front_end/panels/ai_chat/ui/input/__tests__/InputBarClear.test.ts b/front_end/panels/ai_chat/ui/input/__tests__/InputBarClear.test.ts new file mode 100644 index 00000000000..e1755e2ab17 --- /dev/null +++ b/front_end/panels/ai_chat/ui/input/__tests__/InputBarClear.test.ts @@ -0,0 +1,62 @@ +// Copyright 2025 The Chromium Authors. + +import '../InputBar.js'; +import '../../input/ChatInput.js'; +import {raf} from '../../../../../testing/DOMHelpers.js'; + +describe('InputBar clearing behavior', () => { + function getTextarea(el: HTMLElement): HTMLTextAreaElement { + const chat = el.querySelector('ai-chat-input') as HTMLElement; + return chat?.querySelector('textarea') as HTMLTextAreaElement; + } + + it('clears input after Enter send from ai-chat-input', async () => { + const bar = document.createElement('ai-input-bar') as any; + document.body.appendChild(bar); + + // Ensure enabled + bar.disabled = false; + bar.sendDisabled = false; + await raf(); + + const ta = getTextarea(bar); + ta.value = 'Hello world'; + ta.dispatchEvent(new Event('input', {bubbles: true})); + await raf(); + + // Simulate Enter key (without Shift) + const ev = new KeyboardEvent('keydown', {key: 'Enter', shiftKey: false, bubbles: true}); + ta.dispatchEvent(ev); + await raf(); + + const taAfter = getTextarea(bar); + assert.strictEqual(taAfter.value, ''); + + document.body.removeChild(bar); + }); + + it('clears input after clicking send button', async () => { + const bar = document.createElement('ai-input-bar') as any; + document.body.appendChild(bar); + + // Ensure send button is enabled + bar.disabled = false; + bar.sendDisabled = false; + await raf(); + + const ta = getTextarea(bar); + ta.value = 'Second message'; + ta.dispatchEvent(new Event('input', {bubbles: true})); + await raf(); + + const btn = bar.querySelector('.send-button') as HTMLButtonElement; + btn.click(); + await raf(); + + const taAfter = getTextarea(bar); + assert.strictEqual(taAfter.value, ''); + + document.body.removeChild(bar); + }); +}); + diff --git a/front_end/panels/ai_chat/ui/markdown/MarkdownRenderers.ts b/front_end/panels/ai_chat/ui/markdown/MarkdownRenderers.ts new file mode 100644 index 00000000000..12033a0581d --- /dev/null +++ b/front_end/panels/ai_chat/ui/markdown/MarkdownRenderers.ts @@ -0,0 +1,82 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
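// ---------------------------------------------------------------------------
// Usage sketch for <ai-input-bar> (InputBar.ts above). Illustrative only:
// `handleUserMessage` is a hypothetical host callback, not part of this change.
// The bar re-emits the child input's `send` event and also exposes clearInput()
// for hosts that re-render before the clear happens, as the tests above exercise.
//
//   import '../input/InputBar.js';  // registers <ai-input-bar>
//
//   const bar = document.createElement('ai-input-bar');
//   bar.placeholder = 'Ask a question';
//   bar.addEventListener('send', e => {
//     const {text} = (e as CustomEvent<{text: string}>).detail;
//     void handleUserMessage(text);  // hypothetical host handler
//     bar.clearInput();              // explicit clear via the public API
//   });
//   document.body.appendChild(bar);
// ---------------------------------------------------------------------------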
+ +import * as Marked from '../../../../third_party/marked/marked.js'; +import * as MarkdownView from '../../../../ui/components/markdown_view/markdown_view.js'; +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html} = Lit; + +// Markdown renderer with simple code block support (e.g., css sentinel line) +export class MarkdownRenderer extends MarkdownView.MarkdownView.MarkdownInsightRenderer { + override templateForToken(token: Marked.Marked.MarkedToken): Lit.TemplateResult|null { + if (token.type === 'code') { + const lines = (token.text).split('\n'); + if (lines[0]?.trim() === 'css') { + token.lang = 'css'; + token.text = lines.slice(1).join('\n'); + } + } + + return super.templateForToken(token); + } +} + +// Extended renderer that collects a table of contents from headings +export class DeepResearchMarkdownRenderer extends MarkdownView.MarkdownView.MarkdownInsightRenderer { + #tocItems: Array<{level: number, text: string, id: string}> = []; + + override templateForToken(token: Marked.Marked.MarkedToken): Lit.TemplateResult|null { + if (token.type === 'heading') { + const headingText = this.#extractTextFromTokens((token.tokens || []) as Marked.Marked.MarkedToken[]); + const id = this.#generateHeadingId(headingText); + this.#tocItems.push({ level: (token as any).depth, text: headingText, id }); + const content = super.renderToken(token); + return html`
${content}
`; + } + + if (token.type === 'code') { + const lines = (token.text).split('\n'); + if (lines[0]?.trim() === 'css') { + token.lang = 'css'; + token.text = lines.slice(1).join('\n'); + } + } + + return super.templateForToken(token); + } + + #extractTextFromTokens(tokens: Marked.Marked.MarkedToken[]): string { + return tokens.map(token => token.type === 'text' ? (token as any).text : (token as any).raw || '').join(''); + } + + #generateHeadingId(text: string): string { + return text.toLowerCase().replace(/[^\w\s-]/g, '').replace(/\s+/g, '-').trim(); + } + + getTocItems(): Array<{level: number, text: string, id: string}> { return this.#tocItems; } + clearToc(): void { this.#tocItems = []; } +} + +// Helper to render text as markdown using devtools-markdown-view +export function renderMarkdown( + text: string, + markdownRenderer: MarkdownRenderer, + onOpenTableInViewer?: (markdownContent: string) => void, +): Lit.TemplateResult { + let tokens: Marked.Marked.MarkedToken[] = []; + try { + tokens = Marked.Marked.lexer(text) as Marked.Marked.MarkedToken[]; + for (const token of tokens) { + markdownRenderer.renderToken(token); + } + } catch { + return html`${text}`; + } + + return html` + `; +} + diff --git a/front_end/panels/ai_chat/ui/message/GlobalActionsRow.ts b/front_end/panels/ai_chat/ui/message/GlobalActionsRow.ts new file mode 100644 index 00000000000..c4fa4678644 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/GlobalActionsRow.ts @@ -0,0 +1,55 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html} = Lit; + +interface Options { + textToCopy: string; + onCopy?: () => void; + onThumbsUp?: () => void; + onThumbsDown?: () => void; + onRetry?: () => void; +} + +export function renderGlobalActionsRow({ + textToCopy, + onCopy, + onThumbsUp, + onThumbsDown, + onRetry, +}: Options): Lit.TemplateResult { + return html` +
+      <!-- actions row: copy / thumbs-up / thumbs-down / retry buttons, wired to the Options callbacks above -->
+ `; +} + diff --git a/front_end/panels/ai_chat/ui/message/MessageCombiner.ts b/front_end/panels/ai_chat/ui/message/MessageCombiner.ts new file mode 100644 index 00000000000..02c12e0ca51 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/MessageCombiner.ts @@ -0,0 +1,137 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import type { ChatMessage, ModelChatMessage, ToolResultMessage, AgentSessionMessage } from '../../models/ChatTypes.js'; + +export type CombinedModelMessage = ModelChatMessage & { + combined?: true; + resultText?: string; + isError?: boolean; + resultError?: string; +}; + +export type OrphanedToolResultMessage = ToolResultMessage & { orphaned?: true }; + +export type CombinedMessage = ChatMessage|CombinedModelMessage|OrphanedToolResultMessage; + +/** + * Combine adjacent model tool-call messages with their following tool-result messages + * into a single logical item for rendering. + * + * Matching strategy (in priority order): + * - toolCallId (when present) + * - toolName equality with immediate adjacency (fallback) + */ +export function combineMessages(messages: ChatMessage[]): CombinedMessage[] { + // Build a set of toolCallIds that are managed by agent sessions, so we can + // hide both the model tool-call and tool-result duplicates in the main feed. + const agentManagedToolCallIds = new Set(); + + for (const msg of messages) { + if ((msg as any).entity === 'agent_session') { + const sess = (msg as AgentSessionMessage).agentSession; + if (sess && Array.isArray(sess.messages)) { + for (const am of sess.messages) { + if (am && am.content && (am.content as any).toolCallId) { + agentManagedToolCallIds.add((am.content as any).toolCallId); + } + } + } + } else if ((msg as any).entity === 'tool_result') { + const tr = msg as ToolResultMessage; + if (tr.isFromConfigurableAgent && tr.toolCallId) { + agentManagedToolCallIds.add(tr.toolCallId); + } + } + } + + const result: CombinedMessage[] = []; + + for (let i = 0; i < messages.length; i++) { + const msg = messages[i]; + + // Keep User messages and Final Model answers as-is + if (msg.entity === 'user' || (msg.entity === 'model' && (msg as ModelChatMessage).action === 'final')) { + result.push(msg); + continue; + } + + // Handle Model tool-call messages + if (msg.entity === 'model' && (msg as ModelChatMessage).action === 'tool') { + const modelMsg = msg as ModelChatMessage; + // Hide model tool-call if it's managed by an agent session + if (modelMsg.toolCallId && agentManagedToolCallIds.has(modelMsg.toolCallId)) { + // If the immediate next is the paired tool_result, skip it as well + const next = messages[i + 1]; + if (next && next.entity === 'tool_result' && (next as ToolResultMessage).toolCallId === modelMsg.toolCallId) { + i++; // skip the result too + } + continue; // do not include in result + } + const next = messages[i + 1]; + + const nextIsMatchingToolResult = Boolean( + next && next.entity === 'tool_result' && + ( + // Prefer toolCallId match when available + (!!(next as ToolResultMessage).toolCallId && (next as ToolResultMessage).toolCallId === modelMsg.toolCallId) || + // Fallback to name match for immediate adjacency + (!(next as ToolResultMessage).toolCallId && (next as ToolResultMessage).toolName === modelMsg.toolName) + ), + ); + + if (nextIsMatchingToolResult) { + const tr = next as ToolResultMessage; + // If the result is agent-managed, drop both + if (tr.isFromConfigurableAgent || 
(tr.toolCallId && agentManagedToolCallIds.has(tr.toolCallId))) { + i++; // skip the tool-result + continue; + } + const combined: CombinedModelMessage = { + ...modelMsg, + resultText: tr.resultText, + isError: tr.isError, + resultError: tr.error, + combined: true, + }; + result.push(combined); + i++; // Skip the tool-result; it has been combined + } else { + // Tool call still running or result missing + result.push(modelMsg); + } + continue; + } + + // Handle orphaned tool-result messages + if (msg.entity === 'tool_result') { + const tr = msg as ToolResultMessage; + // Hide agent-managed tool results in all cases + if (tr.isFromConfigurableAgent || (tr.toolCallId && agentManagedToolCallIds.has(tr.toolCallId))) { + continue; + } + const prev = messages[i - 1]; + const isPrevMatchingModelCall = Boolean( + prev && prev.entity === 'model' && (prev as ModelChatMessage).action === 'tool' && + ( + // Prefer matching by toolCallId when both present + ((prev as ModelChatMessage).toolCallId && (msg as ToolResultMessage).toolCallId && + (prev as ModelChatMessage).toolCallId === (msg as ToolResultMessage).toolCallId) || + // Fallback to name equality for adjacency + (!(msg as ToolResultMessage).toolCallId && (prev as ModelChatMessage).toolName === (msg as ToolResultMessage).toolName) + ), + ); + + if (!isPrevMatchingModelCall) { + result.push({ ...(msg as ToolResultMessage), orphaned: true }); + } + continue; + } + + // Fallback: push anything else as-is + result.push(msg); + } + + return result; +} diff --git a/front_end/panels/ai_chat/ui/message/MessageList.ts b/front_end/panels/ai_chat/ui/message/MessageList.ts new file mode 100644 index 00000000000..1e865c2292b --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/MessageList.ts @@ -0,0 +1,87 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
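// ---------------------------------------------------------------------------
// Usage sketch for combineMessages() (MessageCombiner.ts above). Illustrative
// only; the literal message shapes mirror the MessageCombiner tests later in
// this change.
//
//   import {combineMessages} from './MessageCombiner.js';
//
//   const combined = combineMessages([
//     {entity: 'user', text: 'Go'} as any,
//     {entity: 'model', action: 'tool', toolName: 'fetch', toolCallId: 'id-1', isFinalAnswer: false} as any,
//     {entity: 'tool_result', toolName: 'fetch', toolCallId: 'id-1', resultText: '{"ok":true}', isError: false} as any,
//   ]);
//   // => the user message, then one combined model message with
//   //    combined === true and resultText === '{"ok":true}'
// ---------------------------------------------------------------------------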
+ +import * as Lit from '../../../../ui/lit/lit.js'; +import type { ChatMessage } from '../../models/ChatTypes.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +@customElement('ai-message-list') +export class MessageList extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-message-list`; + readonly #shadow = this.attachShadow({mode: 'open'}); + + // Public API properties (no decorators; manual setters trigger render) + #messages: ChatMessage[] = []; + #state: 'idle'|'loading'|'error' = 'idle'; + #agentViewMode: 'simplified'|'enhanced' = 'simplified'; + + set messages(value: ChatMessage[]) { this.#messages = value; this.#render(); } + get messages(): ChatMessage[] { return this.#messages; } + set state(value: 'idle'|'loading'|'error') { this.#state = value; this.#render(); } + get state(): 'idle'|'loading'|'error' { return this.#state; } + set agentViewMode(value: 'simplified'|'enhanced') { this.#agentViewMode = value; this.#render(); } + get agentViewMode(): 'simplified'|'enhanced' { return this.#agentViewMode; } + + // Internal state + #pinToBottom = true; + #container?: HTMLElement; + #resizeObserver = new ResizeObserver(() => { if (this.#pinToBottom) this.#scrollToBottom(); }); + + connectedCallback(): void { this.#render(); } + disconnectedCallback(): void { this.#resizeObserver.disconnect(); } + + #onScroll = (e: Event) => { + const el = e.target as HTMLElement; + const SCROLL_ROUNDING_OFFSET = 1; + this.#pinToBottom = el.scrollTop + el.clientHeight + SCROLL_ROUNDING_OFFSET >= el.scrollHeight; + }; + + #scrollToBottom(): void { if (this.#container) this.#container.scrollTop = this.#container.scrollHeight; } + + #render(): void { + const refFn = (el?: Element) => { + if (this.#container) { this.#resizeObserver.unobserve(this.#container); } + this.#container = el as HTMLElement | undefined; + if (this.#container) { + this.#resizeObserver.observe(this.#container); + this.#scrollToBottom(); + } else { + this.#pinToBottom = true; + } + }; + + // Container mode: project messages via slot from parent. + Lit.render(html` + +
+ +
+ `, this.#shadow, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-message-list': MessageList; } +} diff --git a/front_end/panels/ai_chat/ui/message/ModelMessage.ts b/front_end/panels/ai_chat/ui/message/ModelMessage.ts new file mode 100644 index 00000000000..e9ad849a944 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/ModelMessage.ts @@ -0,0 +1,42 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; +import type { ModelChatMessage } from '../../models/ChatTypes.js'; +import { MarkdownRenderer, renderMarkdown } from '../markdown/MarkdownRenderers.js'; + +const {html} = Lit; + +// Renders a model message final answer including optional reasoning block. +export function renderModelMessage(msg: ModelChatMessage, renderer: MarkdownRenderer): Lit.TemplateResult { + if (msg.action === 'final') { + return html` +
+
+ ${msg.answer ? html` +
${renderMarkdown(msg.answer, renderer)}
+ ` : Lit.nothing} + ${msg.reasoning?.length ? html` +
+
+ + 💡 + Model Reasoning + +
+ ${msg.reasoning.map(item => html` +
${renderMarkdown(item, renderer)}
+ `)} +
+
+
+ ` : Lit.nothing} + ${msg.error ? html`
${msg.error}
` : Lit.nothing} +
+
+ `; + } + // Tool-call messages are handled elsewhere. + return html``; +} diff --git a/front_end/panels/ai_chat/ui/message/StructuredResponseController.ts b/front_end/panels/ai_chat/ui/message/StructuredResponseController.ts new file mode 100644 index 00000000000..c057a5c7d59 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/StructuredResponseController.ts @@ -0,0 +1,123 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import type { ChatMessage } from '../../models/ChatTypes.js'; +import type { CombinedMessage } from './MessageCombiner.js'; +import { getMessageStateKey } from '../../core/structured_response.js'; +import { MarkdownViewerUtil } from '../../common/MarkdownViewerUtil.js'; + +export type AIState = 'pending' | 'opened' | 'failed' | 'not-attempted'; + +export class StructuredResponseController { + #aiAssistantStates = new Map(); + #lastProcessedMessageKey: string | null = null; + #onStateChanged: () => void; + + constructor(onStateChanged: () => void) { + this.#onStateChanged = onStateChanged; + } + + resetLastProcessed(): void { + this.#lastProcessedMessageKey = null; + } + + handleNewMessages(previousMessages: ChatMessage[]|undefined, newMessages: ChatMessage[]|undefined): void { + if (!previousMessages || !newMessages) { + return; + } + const willHaveMoreMessages = (newMessages?.length || 0) > (previousMessages?.length || 0); + if (!willHaveMoreMessages) { + return; + } + // When new messages are added, reset states for previous final structured messages + const previousLastFinalIndex = previousMessages.findLastIndex(msg => + (msg as any).entity === 'model' && (msg as any).action === 'final' + ); + if (previousLastFinalIndex >= 0) { + const previousLast = previousMessages[previousLastFinalIndex] as any; + const answer = previousLast?.answer as string | undefined; + if (answer) { + const structured = this.#tryParseStructured(answer); + if (structured) { + const key = getMessageStateKey(structured); + const cur = this.getState(key); + if (cur === 'pending') { + this.#aiAssistantStates.set(key, 'failed'); + } + } + } + } + } + + computeStateAndMaybeOpen(structuredResponse: { reasoning: string, markdownReport: string }, + combinedIndex: number, + combinedMessages: CombinedMessage[]): { aiState: AIState, isLastMessage: boolean } { + const messageKey = getMessageStateKey(structuredResponse); + const isLast = this.#isLastStructuredMessage(combinedMessages, combinedIndex); + + if (isLast && messageKey !== this.#lastProcessedMessageKey) { + const state = this.getState(messageKey); + if (state === 'not-attempted') { + this.#aiAssistantStates.set(messageKey, 'pending'); + this.#open(markdownContent(structuredResponse), messageKey); + this.#lastProcessedMessageKey = messageKey; + } + } + + const aiState = this.getState(messageKey); + return { aiState, isLastMessage: isLast }; + } + + getState(messageKey: string): AIState { + return this.#aiAssistantStates.get(messageKey) || 'not-attempted'; + } + + // Determine if the current combined index is the last structured final answer + #isLastStructuredMessage(combined: CombinedMessage[], currentIndex: number): boolean { + let last = -1; + for (let i = 0; i < combined.length; i++) { + const m: any = combined[i]; + if (m?.entity === 'model' && m?.action === 'final') { + const sr = this.#tryParseStructured(m?.answer || ''); + if (sr) { + last = i; + } + } + } + return last === currentIndex; + } + + async 
#open(markdown: string, key: string): Promise { + try { + await MarkdownViewerUtil.openInAIAssistantViewer(markdown); + this.#aiAssistantStates.set(key, 'opened'); + } catch (e) { + this.#aiAssistantStates.set(key, 'failed'); + } + this.#onStateChanged(); + } + + #tryParseStructured(answer: string): { reasoning: string, markdownReport: string } | null { + try { + // Lightweight probe: the caller (ChatView) uses authoritative parser already. + // Here we only need stable key; ChatView provides full rendering. + const matchReasoning = answer.includes('') && answer.includes(''); + const matchReport = answer.includes('') && answer.includes(''); + if (!matchReasoning || !matchReport) { + return null; + } + // Extract minimal payload for key generation. + const reasoning = answer.substring(answer.indexOf('') + 11, answer.indexOf('')).trim(); + const markdownReport = answer.substring(answer.indexOf('') + 17, answer.indexOf('')).trim(); + return { reasoning, markdownReport }; + } catch { + return null; + } + } +} + +function markdownContent(sr: { reasoning: string, markdownReport: string }): string { + return sr.markdownReport; +} + diff --git a/front_end/panels/ai_chat/ui/message/StructuredResponseRender.ts b/front_end/panels/ai_chat/ui/message/StructuredResponseRender.ts new file mode 100644 index 00000000000..2127ab2bcde --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/StructuredResponseRender.ts @@ -0,0 +1,57 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; +import { MarkdownViewerUtil } from '../../common/MarkdownViewerUtil.js'; +import { MarkdownRenderer, renderMarkdown } from '../markdown/MarkdownRenderers.js'; + +const {html} = Lit; + +export interface StructuredResponseState { + aiState: 'pending' | 'opened' | 'failed' | 'not-attempted'; + isLastMessage: boolean; +} + +export interface StructuredResponseData { + reasoning: string; + markdownReport: string; +} + +// Presentational renderer for a structured response. Does not manage state. +export function renderStructuredResponse( + data: StructuredResponseData, + state: StructuredResponseState, + markdownRenderer: MarkdownRenderer, +): Lit.TemplateResult { + const open = () => { void MarkdownViewerUtil.openInAIAssistantViewer(data.markdownReport); }; + return html` +
+
+
${renderMarkdown(data.reasoning, markdownRenderer, open)}
+ ${state.aiState === 'pending' ? html` +
+ + + + + +
+ ` : state.aiState === 'opened' ? html` +
+ +
+ ` : html` +
+

Full Research Report

+
${renderMarkdown(data.markdownReport, markdownRenderer, open)}
+
+
+ +
+ `} +
+
+ `; +} + diff --git a/front_end/panels/ai_chat/ui/message/ToolResultMessage.ts b/front_end/panels/ai_chat/ui/message/ToolResultMessage.ts new file mode 100644 index 00000000000..b86b2073460 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/ToolResultMessage.ts @@ -0,0 +1,23 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; +import type { ToolResultMessage } from '../../models/ChatTypes.js'; + +const {html} = Lit; + +export function renderToolResultMessage(msg: ToolResultMessage): Lit.TemplateResult { + return html` +
+
+
+
Result from: ${msg.toolName} ${msg.isError ? '(Error)' : ''}
+
${msg.resultText}
+ ${msg.error ? html`
${msg.error}
` : Lit.nothing} +
+
+
+ `; +} + diff --git a/front_end/panels/ai_chat/ui/message/UserMessage.ts b/front_end/panels/ai_chat/ui/message/UserMessage.ts new file mode 100644 index 00000000000..1380e0814e3 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/UserMessage.ts @@ -0,0 +1,21 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; +import type { UserChatMessage } from '../../models/ChatTypes.js'; +import { MarkdownRenderer, renderMarkdown } from '../markdown/MarkdownRenderers.js'; + +const {html} = Lit; + +export function renderUserMessage(msg: UserChatMessage, renderer: MarkdownRenderer): Lit.TemplateResult { + return html` +
+
+
${renderMarkdown(msg.text || '', renderer)}
+ ${msg.error ? html`
${msg.error}
` : Lit.nothing} +
+
+ `; +} + diff --git a/front_end/panels/ai_chat/ui/message/__tests__/MessageCombiner.test.ts b/front_end/panels/ai_chat/ui/message/__tests__/MessageCombiner.test.ts new file mode 100644 index 00000000000..fc3a592a003 --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/__tests__/MessageCombiner.test.ts @@ -0,0 +1,80 @@ +// Copyright 2025 The Chromium Authors. + +import {combineMessages} from '../MessageCombiner.js'; +// Local minimal enum to avoid importing TypeScript enums from ChatTypes in strip-only mode +const ChatMessageEntity = { + USER: 'user', + MODEL: 'model', + TOOL_RESULT: 'tool_result', + AGENT_SESSION: 'agent_session', +} as const; +type ChatMessage = any; + +describe('MessageCombiner', () => { + it('combines adjacent tool call and result by toolCallId', () => { + const messages: ChatMessage[] = [ + { entity: ChatMessageEntity.USER, text: 'Go' } as any, + { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', toolCallId: 'id-1', isFinalAnswer: false } as any, + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'fetch', toolCallId: 'id-1', resultText: '{"ok":true}', isError: false } as any, + { entity: ChatMessageEntity.MODEL, action: 'final', answer: 'Done', isFinalAnswer: true } as any, + ]; + + const combined = combineMessages(messages); + assert.lengthOf(combined, 3); + assert.strictEqual((combined[1] as any).combined, true); + assert.strictEqual((combined[1] as any).resultText, '{"ok":true}'); + }); + + it('marks orphaned tool results', () => { + const messages: ChatMessage[] = [ + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'scan', resultText: 'x', isError: false } as any, + ]; + const combined = combineMessages(messages); + assert.lengthOf(combined, 1); + assert.isTrue((combined[0] as any).orphaned); + }); + + it('hides model tool-call and its result when agent session manages same toolCallId', () => { + const toolCallId = 'tc-123'; + const messages: ChatMessage[] = [ + { entity: ChatMessageEntity.USER, text: 'run agent' } as any, + // Model tool call that will be managed by the agent session + { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', toolCallId, isFinalAnswer: false } as any, + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'fetch', toolCallId, resultText: '{"ok":true}', isError: false } as any, + // Agent session includes the same tool call/result in its timeline + { entity: ChatMessageEntity.AGENT_SESSION, agentSession: { + sessionId: 's1', + agentName: 'agent', + status: 'running', + startTime: new Date(), + messages: [ + { id: 'tc', timestamp: new Date(), type: 'tool_call', content: { type: 'tool_call', toolName: 'fetch', toolArgs: { url: 'x' }, toolCallId } }, + { id: 'tr', timestamp: new Date(), type: 'tool_result', content: { type: 'tool_result', toolName: 'fetch', toolCallId, success: true, result: { ok: true } } }, + ], + nestedSessions: [], + }} as any, + ]; + + const combined = combineMessages(messages); + // Expect: user + agent_session only (model tool+result removed) + assert.lengthOf(combined, 2); + assert.strictEqual((combined[0] as any).entity, 'user'); + assert.strictEqual((combined[1] as any).entity, 'agent_session'); + }); + + it('hides agent-managed tool_result even if it arrives before model tool-call', () => { + const toolCallId = 'tc-outoforder'; + const messages: ChatMessage[] = [ + { entity: ChatMessageEntity.USER, text: 'go' } as any, + // Agent-managed tool result first + { entity: ChatMessageEntity.TOOL_RESULT, toolName: 'fetch', toolCallId, resultText: '{"ok":1}', isError: false, 
isFromConfigurableAgent: true } as any, + // Model tool call later + { entity: ChatMessageEntity.MODEL, action: 'tool', toolName: 'fetch', toolCallId, isFinalAnswer: false } as any, + ]; + + const combined = combineMessages(messages); + // Expect: user only (both the agent-managed result and matching model call removed) + assert.lengthOf(combined, 1); + assert.strictEqual((combined[0] as any).entity, 'user'); + }); +}); diff --git a/front_end/panels/ai_chat/ui/message/__tests__/MessageList.test.ts b/front_end/panels/ai_chat/ui/message/__tests__/MessageList.test.ts new file mode 100644 index 00000000000..1a09bb13a4e --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/__tests__/MessageList.test.ts @@ -0,0 +1,76 @@ +// Copyright 2025 The Chromium Authors. + +import '../MessageList.js'; +import {raf} from '../../../../../testing/DOMHelpers.js'; + +describe('MessageList UI', () => { + function makeMessage(text: string, height = 0): HTMLElement { + const msg = document.createElement('div'); + msg.textContent = text; + if (height > 0) { + msg.setAttribute('style', `display:block; height:${height}px;`); + } + return msg; + } + + it('projects slotted messages into container', async () => { + const list = document.createElement('ai-message-list'); + // Keep size small; contents will overflow to create scrollbar in later tests + (list as HTMLElement).style.cssText = 'display:block; height:120px; width:200px;'; + const m1 = makeMessage('Hello'); + const m2 = makeMessage('World'); + list.appendChild(m1); + list.appendChild(m2); + document.body.appendChild(list); + await raf(); + + const sroot = list.shadowRoot!; + const slot = sroot.querySelector('slot') as HTMLSlotElement; + const assigned = slot.assignedNodes({flatten: true}); + // Both light DOM children are projected via the slot + assert.strictEqual(assigned.length, 2); + assert.strictEqual((assigned[0] as HTMLElement).textContent, 'Hello'); + assert.strictEqual((assigned[1] as HTMLElement).textContent, 'World'); + + document.body.removeChild(list); + }); + + it('pins to bottom by default and preserves scroll position when user scrolls up', async () => { + const list = document.createElement('ai-message-list'); + (list as HTMLElement).style.cssText = 'display:block; height:120px; width:200px;'; + // Add enough tall messages to overflow + for (let i = 0; i < 5; i++) { + list.appendChild(makeMessage(`Msg ${i}`, 80)); + } + document.body.appendChild(list); + await raf(); + + const sroot = list.shadowRoot!; + const container = sroot.querySelector('.container') as HTMLElement; + + // Initially pinned to bottom; after initial render it should end up at bottom + await raf(); + const atBottomInitial = container.scrollTop + container.clientHeight >= container.scrollHeight - 1; + assert.isTrue(atBottomInitial, 'should pin to bottom initially'); + + // Simulate user scroll up -> pin disabled + container.scrollTop = 0; + container.dispatchEvent(new Event('scroll')); + + // Append a new tall message; should NOT auto-scroll now + list.appendChild(makeMessage('New message', 120)); + await raf(); + assert.isBelow(container.scrollTop, container.scrollHeight - container.clientHeight); + + // Scroll to bottom and append again; should auto-pin + container.scrollTop = container.scrollHeight; + container.dispatchEvent(new Event('scroll')); + list.appendChild(makeMessage('Another new message', 120)); + await raf(); + const atBottomFinal = container.scrollTop + container.clientHeight >= container.scrollHeight - 1; + assert.isTrue(atBottomFinal, 'should repin to 
bottom when user scrolled to end'); + + document.body.removeChild(list); + }); +}); + diff --git a/front_end/panels/ai_chat/ui/message/__tests__/StructuredResponseController.test.ts b/front_end/panels/ai_chat/ui/message/__tests__/StructuredResponseController.test.ts new file mode 100644 index 00000000000..612be766f8d --- /dev/null +++ b/front_end/panels/ai_chat/ui/message/__tests__/StructuredResponseController.test.ts @@ -0,0 +1,36 @@ +// Copyright 2025 The Chromium Authors. + +import {StructuredResponseController} from '../StructuredResponseController.js'; +import {getMessageStateKey} from '../../../core/structured_response.js'; + +describe('StructuredResponseController', () => { + it('sets pending for last structured message and marks failed on new messages', async () => { + const controller = new StructuredResponseController(() => {}); + + // Stub global MarkdownViewerUtil to avoid navigation + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (await import('../../../common/MarkdownViewerUtil.js') as any).MarkdownViewerUtil.openInAIAssistantViewer = async () => {}; + + const sr = { reasoning: 'why', markdownReport: '# report' }; + const key = getMessageStateKey(sr); + + const combinedMessages: any[] = [ + { entity: 'model', action: 'final', answer: `${sr.reasoning}${sr.markdownReport}` } + ]; + + const {aiState, isLastMessage} = controller.computeStateAndMaybeOpen(sr, 0, combinedMessages); + assert.isTrue(isLastMessage); + assert.strictEqual(aiState, 'pending'); + + // Simulate new message arrival; previous pending should become failed + const prevFinal = { entity: 'model', action: 'final', answer: `${sr.reasoning}${sr.markdownReport}` } as any; + controller.handleNewMessages([ + prevFinal + ], [ + prevFinal, + { entity: 'model', action: 'final', answer: 'new' } as any + ]); + + assert.strictEqual(controller.getState(key), 'failed'); + }); +}); diff --git a/front_end/panels/ai_chat/ui/model_selector/ModelSelector.ts b/front_end/panels/ai_chat/ui/model_selector/ModelSelector.ts new file mode 100644 index 00000000000..0474066e93a --- /dev/null +++ b/front_end/panels/ai_chat/ui/model_selector/ModelSelector.ts @@ -0,0 +1,95 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
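// ---------------------------------------------------------------------------
// Wiring sketch for StructuredResponseController (tested above). Illustrative
// only: `scheduleRender`, `previousMessages`, `nextMessages`, `structured`,
// `index` and `combinedMessages` are placeholders for host-view state.
//
//   import {StructuredResponseController} from '../message/StructuredResponseController.js';
//
//   const controller = new StructuredResponseController(() => scheduleRender());
//   // Whenever the message list changes:
//   controller.handleNewMessages(previousMessages, nextMessages);
//   // While rendering a structured final answer found at `index` of the
//   // combined list:
//   const {aiState, isLastMessage} =
//       controller.computeStateAndMaybeOpen(structured, index, combinedMessages);
// ---------------------------------------------------------------------------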
+ +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +export interface ModelOption { value: string; label: string; } + +@customElement('ai-model-selector') +export class ModelSelector extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-model-selector`; + + #options: ModelOption[] = []; + #selected: string | undefined; + #disabled = false; + #open = false; + #query = ''; + #highlighted = 0; + + get options(): ModelOption[] { return this.#options; } + set options(v: ModelOption[]) { this.#options = v || []; this.#render(); } + get selected(): string | undefined { return this.#selected; } + set selected(v: string | undefined) { this.#selected = v; this.#render(); } + get disabled(): boolean { return this.#disabled; } + set disabled(v: boolean) { this.#disabled = !!v; this.#render(); } + + connectedCallback(): void { this.#render(); } + + #emitChange(value: string): void { + this.dispatchEvent(new CustomEvent('change', { bubbles: true, detail: { value }})); + } + + #toggle = (e: Event) => { e.preventDefault(); if (!this.#disabled) { this.#open = !this.#open; this.#render(); } }; + #onSearch = (e: Event) => { this.#query = (e.target as HTMLInputElement).value; this.#highlighted = 0; this.#render(); }; + #onKeydown = (e: KeyboardEvent) => { + const filtered = this.#filtered(); + if (e.key === 'ArrowDown') { e.preventDefault(); this.#highlighted = Math.min(this.#highlighted + 1, filtered.length - 1); this.#render(); } + if (e.key === 'ArrowUp') { e.preventDefault(); this.#highlighted = Math.max(this.#highlighted - 1, 0); this.#render(); } + if (e.key === 'Enter') { e.preventDefault(); const opt = filtered[this.#highlighted]; if (opt) { this.#selected = opt.value; this.#open = false; this.#emitChange(opt.value); this.#render(); } } + if (e.key === 'Escape') { e.preventDefault(); this.#open = false; this.#render(); } + }; + + #filtered(): ModelOption[] { + if (!this.#query) return this.#options; + const q = this.#query.toLowerCase(); + return this.#options.filter(o => o.label.toLowerCase().includes(q) || o.value.toLowerCase().includes(q)); + } + + #isSearchable(): boolean { return (this.#options?.length || 0) >= 20; } + + #render(): void { + const selectedLabel = this.#options.find(o => o.value === this.#selected)?.label || this.#selected || 'Select Model'; + if (!this.#isSearchable()) { + Lit.render(html` +
+ +
+ `, this, {host: this}); + return; + } + + const filtered = this.#filtered(); + Lit.render(html` +
+ + ${this.#open ? html` +
e.stopPropagation()}> + +
+ ${filtered.map((o, i) => html` +
{ this.#selected = o.value; this.#open = false; this.#emitChange(o.value); this.#render(); }} + @mouseenter=${() => this.#highlighted = i} + >${o.label}
+ `)} + ${filtered.length === 0 ? html`
No matching models found
` : ''} +
+
+ ` : ''} +
+ `, this, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-model-selector': ModelSelector; } +} diff --git a/front_end/panels/ai_chat/ui/oauth/OAuthConnectPanel.ts b/front_end/panels/ai_chat/ui/oauth/OAuthConnectPanel.ts new file mode 100644 index 00000000000..4db1c825f35 --- /dev/null +++ b/front_end/panels/ai_chat/ui/oauth/OAuthConnectPanel.ts @@ -0,0 +1,93 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +@customElement('ai-oauth-connect') +export class OAuthConnectPanel extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-oauth-connect`; + + #visible = false; + get visible(): boolean { return this.#visible; } + set visible(v: boolean) { this.#visible = v; this.#render(); } + + connectedCallback(): void { this.#render(); } + setVisible(visible: boolean): void { this.visible = visible; } + + #emit(name: string, detail?: unknown): void { + this.dispatchEvent(new CustomEvent(name, {bubbles: true, detail})); + } + + #onOpenRouter = () => { this.#emit('oauth-login', { provider: 'openrouter' }); }; + #onOpenAI = () => { this.#emit('openai-setup'); }; + #onManual = (e: Event) => { e.preventDefault(); this.#emit('manual-setup'); }; + + #render(): void { + // Light DOM render so host page CSS (chatView.css) styles apply + if (!this.#visible) { + this.innerHTML = ''; + return; + } + Lit.render(html` + + + `, this, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-oauth-connect': OAuthConnectPanel; } +} diff --git a/front_end/panels/ai_chat/ui/version/VersionBanner.ts b/front_end/panels/ai_chat/ui/version/VersionBanner.ts new file mode 100644 index 00000000000..52bf18a2d1d --- /dev/null +++ b/front_end/panels/ai_chat/ui/version/VersionBanner.ts @@ -0,0 +1,50 @@ +// Copyright 2025 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
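// ---------------------------------------------------------------------------
// Usage sketch for <ai-oauth-connect> (OAuthConnectPanel.ts above). Illustrative
// only; the event handlers stand in for host-specific logic.
//
//   import '../oauth/OAuthConnectPanel.js';  // registers <ai-oauth-connect>
//
//   const panel = document.createElement('ai-oauth-connect');
//   panel.addEventListener('oauth-login', e => {
//     const provider = (e as CustomEvent<{provider: string}>).detail?.provider;
//     // provider === 'openrouter' -> start the OAuth flow (host-specific)
//   });
//   panel.addEventListener('openai-setup', () => {/* open OpenAI key setup */});
//   panel.addEventListener('manual-setup', () => {/* open manual configuration */});
//   panel.setVisible(true);
//   document.body.appendChild(panel);
// ---------------------------------------------------------------------------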
+ +import * as Lit from '../../../../ui/lit/lit.js'; + +const {html, Decorators} = Lit; +const {customElement} = Decorators as any; + +export interface VersionInfo { latestVersion: string; releaseUrl: string; isUpdateAvailable: boolean; } + +@customElement('ai-version-banner') +export class VersionBanner extends HTMLElement { + static readonly litTagName = Lit.StaticHtml.literal`ai-version-banner`; + readonly #shadow = this.attachShadow({mode: 'open'}); + + // Manual properties + #info: VersionInfo | null = null; + #dismissed = false; + + get info(): VersionInfo | null { return this.#info; } + set info(value: VersionInfo | null) { this.#info = value; this.#render(); } + get dismissed(): boolean { return this.#dismissed; } + set dismissed(value: boolean) { this.#dismissed = value; this.#render(); } + + connectedCallback(): void { this.#render(); } + + #dismiss = () => { this.dispatchEvent(new CustomEvent('dismiss', {bubbles: true})); }; + + #render(): void { + if (!this.#info || !this.#info.isUpdateAvailable || this.#dismissed) { this.#shadow.innerHTML = ''; return; } + const info = this.#info; + Lit.render(html` + + + `, this.#shadow, {host: this}); + } +} + +declare global { + interface HTMLElementTagNameMap { 'ai-version-banner': VersionBanner; } +} From d0119c79fa9d7370675e6f0a0fd067ce2dcd4d49 Mon Sep 17 00:00:00 2001 From: Tyson Thomas Date: Sat, 6 Sep 2025 12:34:13 -0700 Subject: [PATCH 2/2] fix ux issue --- front_end/panels/ai_chat/ui/chatView.css | 5 +---- front_end/panels/ai_chat/ui/message/MessageList.ts | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/front_end/panels/ai_chat/ui/chatView.css b/front_end/panels/ai_chat/ui/chatView.css index a6849ad37e5..2763ab7d5c9 100644 --- a/front_end/panels/ai_chat/ui/chatView.css +++ b/front_end/panels/ai_chat/ui/chatView.css @@ -439,7 +439,7 @@ ai-input-bar { /* Ensure last message has enough space below it when scrolled into view */ .message:last-child { - margin-bottom: 90px; /* Increased space for input container */ + margin-bottom: 16px; /* Avoid excess gap now that input is not sticky */ } @keyframes fadeIn { @@ -633,9 +633,6 @@ ai-input-bar { padding: 10px 16px; background-color: var(--color-background); backdrop-filter: blur(10px); - position: sticky; - bottom: 0; - z-index: 10; box-shadow: 0 -4px 16px var(--color-shadow-subtle); border-radius: 28px; border: 1.5px solid var(--color-background-elevation-1); diff --git a/front_end/panels/ai_chat/ui/message/MessageList.ts b/front_end/panels/ai_chat/ui/message/MessageList.ts index 1e865c2292b..de2327f49b0 100644 --- a/front_end/panels/ai_chat/ui/message/MessageList.ts +++ b/front_end/panels/ai_chat/ui/message/MessageList.ts @@ -65,8 +65,8 @@ export class MessageList extends HTMLElement { scroll-behavior: smooth; padding: 12px 16px; background-color: var(--color-background); - /* Ensure input container area is not overlapped by the scroller */ - padding-bottom: 140px; + /* Reduced bottom padding since input bar is no longer sticky */ + padding-bottom: 16px; min-height: 100px; position: relative; z-index: 0;
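// ---------------------------------------------------------------------------
// Usage sketch for <ai-version-banner> (VersionBanner.ts above). Illustrative
// only; the VersionInfo literal is placeholder data, not a real release.
//
//   import './VersionBanner.js';  // registers <ai-version-banner>
//
//   const banner = document.createElement('ai-version-banner');
//   banner.info = {
//     latestVersion: '0.0.0',
//     releaseUrl: 'https://example.com/releases/latest',
//     isUpdateAvailable: true,
//   };
//   banner.addEventListener('dismiss', () => { banner.dismissed = true; });
//   document.body.prepend(banner);
// ---------------------------------------------------------------------------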