From 43772061cea301bc8eceed3808b5c89769b30a75 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 10:49:35 -0700 Subject: [PATCH 001/187] feat: animated arch diagram, copy buttons, tab labels, deploy docs (#8) * feat(website): add animated architecture flow diagram to introduction * feat(website): redesign arch diagram, fix tabs/table/deploy/copy button - Redesign ArchFlowDiagram as chat+console simulation showing full AI turn - Fix Tab labels using label prop instead of items array - Replace Key Concepts table with CardGroup (better MDX compat) - Expand Deploy section with 4 steps + env config + Angular build - Add copy button to all code blocks via Pre component - Use gpt-5-mini in code examples --- .../docs-v2/getting-started/introduction.mdx | 108 +++++++---- .../src/components/docs/ArchFlowDiagram.tsx | 169 ++++++++++++++++++ .../src/components/docs/MdxRenderer.tsx | 4 + .../src/components/docs/mdx/CodeBlock.tsx | 42 +++++ apps/website/src/components/docs/mdx/Tabs.tsx | 18 +- 5 files changed, 300 insertions(+), 41 deletions(-) create mode 100644 apps/website/src/components/docs/ArchFlowDiagram.tsx create mode 100644 apps/website/src/components/docs/mdx/CodeBlock.tsx diff --git a/apps/website/content/docs-v2/getting-started/introduction.mdx b/apps/website/content/docs-v2/getting-started/introduction.mdx index 3aeae6115..eddc6d57d 100644 --- a/apps/website/content/docs-v2/getting-started/introduction.mdx +++ b/apps/website/content/docs-v2/getting-started/introduction.mdx @@ -27,29 +27,16 @@ No RxJS. No manual subscriptions. No async pipes. Just Signals that work with An ## The Architecture -StreamResource sits between your Angular app and LangGraph Platform: +Watch a full conversation turn flow through the stack — from user input to rendered response: - - -Creates a reactive resource bound to a specific agent. All state is exposed as Signals. - - -Sends HTTP POST to LangGraph Platform, receives Server-Sent Events with state updates. 
- - -Executes nodes, calls tools, manages checkpoints. Streams results back in real-time. - - -As tokens arrive, `messages()` updates. Angular re-renders only the affected components. - - + ## Build Your Agent LangGraph agents are Python programs defined as directed graphs. Here's a minimal chat agent using the example from this repository: - - + + ```python # examples/chat-agent/src/chat_agent/agent.py @@ -58,7 +45,7 @@ from langchain_core.runnables import RunnableConfig from langgraph.graph import END, START, MessagesState, StateGraph from langchain_openai import ChatOpenAI -llm = ChatOpenAI(model="gpt-4o-mini") +llm = ChatOpenAI(model="gpt-5-mini") def call_model(state: MessagesState, config: RunnableConfig) -> dict: """Invoke the LLM with the current message history.""" @@ -79,7 +66,7 @@ graph = builder.compile() ``` - + ```json { @@ -162,8 +149,8 @@ export const appConfig: ApplicationConfig = { - - + + ```typescript // chat.component.ts @@ -202,7 +189,7 @@ export class ChatComponent { ``` - + ```html @@ -257,46 +244,91 @@ Open `http://localhost:4200` and start chatting with your agent. 
Messages stream Here's what streamResource() gives you out of the box: -| Feature | Signal | Description | -|---------|--------|-------------| -| **Messages** | `chat.messages()` | Live message list, updates as tokens arrive | -| **Status** | `chat.status()` | Current state: idle, loading, resolved, error | -| **Thread persistence** | `threadId` option | Conversations survive page refreshes | -| **Interrupts** | `chat.interrupt()` | Agent pauses for human input | -| **History** | `chat.history()` | Full checkpoint timeline for time-travel | -| **Subagents** | `chat.subagents()` | Track delegated agent work | -| **Tool calls** | `chat.toolCalls()` | See what tools the agent is using | + + + `chat.messages()` — live message list that updates as each token arrives from the agent + + + `chat.status()` — current state: idle, loading, resolved, or error + + + `threadId` option — conversations survive page refreshes via localStorage or backend + + + `chat.interrupt()` — agent pauses for human approval, your UI handles the decision + + + `chat.history()` — full checkpoint timeline for debugging and branching + + + `chat.subagents()` — track delegated agent work across multiple graphs + + + `chat.toolCalls()` — see what tools the agent is invoking in real-time + + + `MockStreamTransport` — deterministic testing without a running server + + ## Deploy to Production -When you're ready to go live, deploy your agent to LangGraph Cloud. +When you're ready to go live, deploy your agent to LangGraph Cloud and point your Angular app to the deployment URL. -Your agent code (the Python project with `langgraph.json`) needs to be in a GitHub repository. +Your agent code (the Python project with `langgraph.json`) needs to be in a GitHub repository. Make sure your `langgraph.json` references the correct graph entry point. + +```bash +git init && git add . && git commit -m "initial agent" +gh repo create my-agent --public --source=. 
--push +``` -Go to [LangSmith Deployments](https://smith.langchain.com) and click **+ New Deployment**. Connect your GitHub repo and deploy. This takes about 15 minutes. +Go to [LangSmith Deployments](https://smith.langchain.com) and click **+ New Deployment**. Connect your GitHub account, select your repository, and deploy. The first deployment takes about 15 minutes. + +You'll receive a deployment URL like `https://my-agent-abc123.langsmith.dev`. -Point `apiUrl` to your deployment URL: +Point `apiUrl` to your deployment URL and set up environment-based configuration: ```typescript +// environment.ts +export const environment = { + langgraphUrl: 'http://localhost:2024', // dev +}; + +// environment.prod.ts +export const environment = { + langgraphUrl: 'https://my-agent-abc123.langsmith.dev', // prod +}; + +// app.config.ts provideStreamResource({ - apiUrl: 'https://your-deployment.langsmith.dev', + apiUrl: environment.langgraphUrl, }) ``` + + + +Deploy your Angular frontend to any hosting platform — Vercel, Netlify, AWS, or your own infrastructure. Since streamResource() is a stateless client, your frontend has no server-side state requirements. + +```bash +ng build --configuration production +# Deploy dist/ to your hosting platform +``` + - -Your Angular app is a stateless client. All agent state lives on LangGraph Platform. This means you can deploy your Angular app anywhere — CDN, edge, SSR — without state management concerns. + +Your Angular app is a stateless client. All agent state — threads, checkpoints, memory — lives on LangGraph Platform. This means you can deploy your frontend anywhere (CDN, edge, SSR) without state management concerns. Scale your frontend independently of your agent infrastructure. 
## What's Next diff --git a/apps/website/src/components/docs/ArchFlowDiagram.tsx b/apps/website/src/components/docs/ArchFlowDiagram.tsx new file mode 100644 index 000000000..bef621fd7 --- /dev/null +++ b/apps/website/src/components/docs/ArchFlowDiagram.tsx @@ -0,0 +1,169 @@ +'use client'; +import { useState, useEffect, useRef } from 'react'; +import { tokens } from '../../../lib/design-tokens'; + +interface LogEntry { + time: string; + source: 'angular' | 'transport' | 'langgraph' | 'signal'; + message: string; +} + +const SCENARIO: { delay: number; chatBubble?: { role: 'user' | 'assistant'; text: string; streaming?: boolean }; log: LogEntry }[] = [ + { delay: 0, chatBubble: { role: 'user', text: 'How do Angular Signals work with streaming?' }, log: { time: '0.00s', source: 'angular', message: 'chat.submit({ messages: [userMsg] })' } }, + { delay: 800, log: { time: '0.02s', source: 'transport', message: 'POST /threads/t_8f3a/runs/stream → 200' } }, + { delay: 1200, log: { time: '0.04s', source: 'langgraph', message: 'Executing node: call_model (gpt-5-mini)' } }, + { delay: 2200, log: { time: '0.82s', source: 'langgraph', message: 'SSE event: { type: "values", messages: [...] 
}' } }, + { delay: 2600, log: { time: '0.84s', source: 'transport', message: 'Received chunk → messages$.next([...])' } }, + { delay: 2800, log: { time: '0.85s', source: 'signal', message: 'messages() updated → 2 messages' } }, + { delay: 3000, chatBubble: { role: 'assistant', text: 'Angular Signals', streaming: true }, log: { time: '0.86s', source: 'signal', message: 'status() → "loading"' } }, + { delay: 3400, chatBubble: { role: 'assistant', text: 'Angular Signals provide a synchronous', streaming: true }, log: { time: '1.12s', source: 'transport', message: 'Received chunk → values event' } }, + { delay: 3900, chatBubble: { role: 'assistant', text: 'Angular Signals provide a synchronous, reactive way to', streaming: true }, log: { time: '1.45s', source: 'signal', message: 'messages() updated → streaming token' } }, + { delay: 4500, chatBubble: { role: 'assistant', text: 'Angular Signals provide a synchronous, reactive way to track streaming state.', streaming: true }, log: { time: '1.82s', source: 'langgraph', message: 'SSE event: { type: "values", status: "done" }' } }, + { delay: 5200, chatBubble: { role: 'assistant', text: 'Angular Signals provide a synchronous, reactive way to track streaming state. Each token updates the Signal, and OnPush change detection re-renders automatically.' 
}, log: { time: '2.10s', source: 'signal', message: 'status() → "resolved" ✓' } }, + { delay: 6000, log: { time: '2.12s', source: 'angular', message: 'Template re-rendered (OnPush) — 1 component' } }, +]; + +const SOURCE_COLORS: Record = { + angular: { bg: 'rgba(221,0,49,0.08)', text: '#c62828', label: 'ANGULAR' }, + transport: { bg: 'rgba(100,80,200,0.08)', text: '#5e35b1', label: 'TRANSPORT' }, + langgraph: { bg: 'rgba(0,64,144,0.08)', text: '#004090', label: 'LANGGRAPH' }, + signal: { bg: 'rgba(16,185,129,0.08)', text: '#059669', label: 'SIGNAL' }, +}; + +export function ArchFlowDiagram() { + const [logs, setLogs] = useState([]); + const [bubbles, setBubbles] = useState<{ role: 'user' | 'assistant'; text: string; streaming?: boolean }[]>([]); + const [cycle, setCycle] = useState(0); + const logRef = useRef(null); + + useEffect(() => { + const timeouts: ReturnType[] = []; + + const runScenario = () => { + setLogs([]); + setBubbles([]); + + SCENARIO.forEach((step, i) => { + timeouts.push(setTimeout(() => { + setLogs(prev => [...prev, step.log]); + if (step.chatBubble) { + setBubbles(prev => { + const existing = prev.findIndex(b => b.role === step.chatBubble!.role && b.role === 'assistant'); + if (existing >= 0 && step.chatBubble!.role === 'assistant') { + const updated = [...prev]; + updated[existing] = step.chatBubble!; + return updated; + } + return [...prev, step.chatBubble!]; + }); + } + if (logRef.current) logRef.current.scrollTop = logRef.current.scrollHeight; + }, step.delay)); + }); + + // Restart after completion + timeouts.push(setTimeout(() => { + setCycle(c => c + 1); + }, 8000)); + }; + + runScenario(); + return () => timeouts.forEach(clearTimeout); + }, [cycle]); + + return ( +
+ {/* Header bar */} +
+
+
+
+
+
+ streamResource() — live architecture flow + localhost:4200 +
+ +
+ {/* Left: Chat simulation */} +
+
Chat Interface
+ +
+ {bubbles.map((b, i) => ( +
+ {b.role === 'assistant' && ( +
AI
+ )} +
+ {b.text} + {b.streaming && } +
+
+ ))} +
+
+ + {/* Right: Console log */} +
+
Developer Console
+ + {logs.map((log, i) => { + const sc = SOURCE_COLORS[log.source]; + return ( +
+ {log.time} + {sc.label} + {log.message} +
+ ); + })} + + {logs.length === 0 && ( +
Waiting for interaction...
+ )} +
+
+ + +
+ ); +} diff --git a/apps/website/src/components/docs/MdxRenderer.tsx b/apps/website/src/components/docs/MdxRenderer.tsx index d1f6001c8..aa67747eb 100644 --- a/apps/website/src/components/docs/MdxRenderer.tsx +++ b/apps/website/src/components/docs/MdxRenderer.tsx @@ -5,6 +5,8 @@ import { Steps, Step } from './mdx/Steps'; import { Tabs, Tab } from './mdx/Tabs'; import { Card, CardGroup } from './mdx/Card'; import { CodeGroup } from './mdx/CodeGroup'; +import { Pre } from './mdx/CodeBlock'; +import { ArchFlowDiagram } from './ArchFlowDiagram'; import { DocsBreadcrumb } from './DocsBreadcrumb'; import { DocsPrevNext } from './DocsPrevNext'; import rehypePrettyCode from 'rehype-pretty-code'; @@ -19,6 +21,8 @@ const mdxComponents = { Card, CardGroup, CodeGroup, + ArchFlowDiagram, + pre: Pre, }; const rehypeOptions = { diff --git a/apps/website/src/components/docs/mdx/CodeBlock.tsx b/apps/website/src/components/docs/mdx/CodeBlock.tsx new file mode 100644 index 000000000..6b4d10647 --- /dev/null +++ b/apps/website/src/components/docs/mdx/CodeBlock.tsx @@ -0,0 +1,42 @@ +'use client'; +import { useRef, useState } from 'react'; + +export function Pre({ children, ...props }: React.HTMLAttributes) { + const ref = useRef(null); + const [copied, setCopied] = useState(false); + + const copy = async () => { + const text = ref.current?.textContent ?? ''; + await navigator.clipboard.writeText(text); + setCopied(true); + setTimeout(() => setCopied(false), 2000); + }; + + return ( +
+
{children}
+ +
+ ); +} diff --git a/apps/website/src/components/docs/mdx/Tabs.tsx b/apps/website/src/components/docs/mdx/Tabs.tsx index 772f8756d..89fac6851 100644 --- a/apps/website/src/components/docs/mdx/Tabs.tsx +++ b/apps/website/src/components/docs/mdx/Tabs.tsx @@ -1,11 +1,23 @@ 'use client'; -import { useState, Children } from 'react'; +import { useState, Children, isValidElement } from 'react'; import { tokens } from '../../../../lib/design-tokens'; +interface TabProps { + label?: string; + children: React.ReactNode; +} + export function Tabs({ items, children }: { items?: string[]; children: React.ReactNode }) { const [active, setActive] = useState(0); const tabs = Children.toArray(children); - const labels = items ?? tabs.map((_, i) => `Tab ${i + 1}`); + + // Extract labels: from items prop, from Tab label prop, or fallback + const labels = items ?? tabs.map((child, i) => { + if (isValidElement(child) && child.props.label) { + return child.props.label; + } + return `Tab ${i + 1}`; + }); return (
@@ -40,6 +52,6 @@ export function Tabs({ items, children }: { items?: string[]; children: React.Re ); } -export function Tab({ children }: { children: React.ReactNode }) { +export function Tab({ children }: TabProps) { return
{children}
; } From 158e80c9a72fe19d061959f17041b4fb63185464 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 11:02:18 -0700 Subject: [PATCH 002/187] feat: gradient feature chips, copy icon, TOC fix (#9) - Add FeatureChips component for Key Concepts (gradient chips, horizontal scroll) - Redesign copy button with SVG icons (copy/check), better visibility - Fix code block border (darker, more visible on light backgrounds) - Fix TOC sticky positioning with alignSelf and right padding - Replace CardGroup in Key Concepts with FeatureChips --- .../docs-v2/getting-started/introduction.mdx | 29 +------ apps/website/src/app/global.css | 4 +- apps/website/src/components/docs/DocsTOC.tsx | 4 +- .../src/components/docs/MdxRenderer.tsx | 2 + .../src/components/docs/mdx/CodeBlock.tsx | 57 ++++++++++---- .../src/components/docs/mdx/FeatureChips.tsx | 78 +++++++++++++++++++ 6 files changed, 129 insertions(+), 45 deletions(-) create mode 100644 apps/website/src/components/docs/mdx/FeatureChips.tsx diff --git a/apps/website/content/docs-v2/getting-started/introduction.mdx b/apps/website/content/docs-v2/getting-started/introduction.mdx index eddc6d57d..574c57b4d 100644 --- a/apps/website/content/docs-v2/getting-started/introduction.mdx +++ b/apps/website/content/docs-v2/getting-started/introduction.mdx @@ -242,34 +242,9 @@ Open `http://localhost:4200` and start chatting with your agent. 
Messages stream ## Key Concepts -Here's what streamResource() gives you out of the box: +Everything streamResource() gives you out of the box — click any to learn more: - - - `chat.messages()` — live message list that updates as each token arrives from the agent - - - `chat.status()` — current state: idle, loading, resolved, or error - - - `threadId` option — conversations survive page refreshes via localStorage or backend - - - `chat.interrupt()` — agent pauses for human approval, your UI handles the decision - - - `chat.history()` — full checkpoint timeline for debugging and branching - - - `chat.subagents()` — track delegated agent work across multiple graphs - - - `chat.toolCalls()` — see what tools the agent is invoking in real-time - - - `MockStreamTransport` — deterministic testing without a running server - - + ## Deploy to Production diff --git a/apps/website/src/app/global.css b/apps/website/src/app/global.css index 4dfc73682..42a4a63f2 100644 --- a/apps/website/src/app/global.css +++ b/apps/website/src/app/global.css @@ -91,8 +91,8 @@ html { .docs-prose [data-rehype-pretty-code-figure] pre { padding: 1.25rem 1.5rem; border-radius: 0.75rem; - border: 1px solid rgba(255, 255, 255, 0.6); - box-shadow: 0 4px 24px rgba(0, 0, 0, 0.06); + border: 1px solid rgba(0, 0, 0, 0.1); + box-shadow: 0 2px 12px rgba(0, 0, 0, 0.08), 0 0 0 1px rgba(0, 0, 0, 0.04); overflow-x: auto; font-size: 0.8rem; line-height: 1.7; diff --git a/apps/website/src/components/docs/DocsTOC.tsx b/apps/website/src/components/docs/DocsTOC.tsx index 9c12354c9..8ae074112 100644 --- a/apps/website/src/components/docs/DocsTOC.tsx +++ b/apps/website/src/components/docs/DocsTOC.tsx @@ -29,8 +29,8 @@ export function DocsTOC({ headings }: { headings: DocHeading[] }) { if (headings.length === 0) return null; return ( -
); diff --git a/apps/website/src/components/docs/mdx/FeatureChips.tsx b/apps/website/src/components/docs/mdx/FeatureChips.tsx new file mode 100644 index 000000000..e888e7cff --- /dev/null +++ b/apps/website/src/components/docs/mdx/FeatureChips.tsx @@ -0,0 +1,78 @@ +'use client'; +import Link from 'next/link'; +import { tokens } from '../../../../lib/design-tokens'; + +interface ChipData { + icon: string; + title: string; + signal: string; + href: string; + gradient: string; + border: string; +} + +const CHIPS: ChipData[] = [ + { icon: '⚡', title: 'Messages', signal: 'chat.messages()', href: '/docs/guides/streaming', gradient: 'linear-gradient(135deg, rgba(0,64,144,0.06), rgba(100,195,253,0.08))', border: 'rgba(0,64,144,0.1)' }, + { icon: '📡', title: 'Status', signal: 'chat.status()', href: '/docs/guides/streaming', gradient: 'linear-gradient(135deg, rgba(100,80,200,0.06), rgba(140,120,220,0.08))', border: 'rgba(100,80,200,0.1)' }, + { icon: '💾', title: 'Persistence', signal: 'threadId', href: '/docs/guides/persistence', gradient: 'linear-gradient(135deg, rgba(16,185,129,0.06), rgba(52,199,89,0.08))', border: 'rgba(16,185,129,0.1)' }, + { icon: '✋', title: 'Interrupts', signal: 'chat.interrupt()', href: '/docs/guides/interrupts', gradient: 'linear-gradient(135deg, rgba(232,147,12,0.06), rgba(245,180,60,0.08))', border: 'rgba(232,147,12,0.1)' }, + { icon: '⏪', title: 'Time Travel', signal: 'chat.history()', href: '/docs/guides/time-travel', gradient: 'linear-gradient(135deg, rgba(221,0,49,0.05), rgba(255,100,130,0.07))', border: 'rgba(221,0,49,0.08)' }, + { icon: '🔀', title: 'Subagents', signal: 'chat.subagents()', href: '/docs/guides/subgraphs', gradient: 'linear-gradient(135deg, rgba(0,64,144,0.05), rgba(0,100,180,0.07))', border: 'rgba(0,64,144,0.08)' }, + { icon: '🔧', title: 'Tool Calls', signal: 'chat.toolCalls()', href: '/docs/guides/streaming', gradient: 'linear-gradient(135deg, rgba(100,80,200,0.05), rgba(120,100,210,0.07))', border: 'rgba(100,80,200,0.08)' 
}, + { icon: '🧪', title: 'Testing', signal: 'MockTransport', href: '/docs/guides/testing', gradient: 'linear-gradient(135deg, rgba(16,185,129,0.05), rgba(40,200,140,0.07))', border: 'rgba(16,185,129,0.08)' }, +]; + +export function FeatureChips() { + return ( +
+ {CHIPS.map((chip) => ( + +
{ + e.currentTarget.style.transform = 'translateY(-2px)'; + e.currentTarget.style.boxShadow = '0 4px 16px rgba(0,0,0,0.06)'; + }} + onMouseLeave={(e) => { + e.currentTarget.style.transform = 'translateY(0)'; + e.currentTarget.style.boxShadow = 'none'; + }}> +
{chip.icon}
+
{chip.title}
+
{chip.signal}
+
+ + ))} +
+ ); +} From fae8af8fa46d345b99a6d6950798e7bac2af3f75 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 14:56:48 -0700 Subject: [PATCH 003/187] docs: improve all 18 docs pages to intro quality (#10) * docs: add plan for improving all docs pages to intro quality * docs(website): expand streaming guide with modes, errors, throttle * docs(website): expand time-travel guide with history UI and debugging * docs(website): expand API reference stubs with intros and examples * docs(website): expand subgraphs guide with orchestrator and error handling * docs(website): add What's Next navigation to all docs pages --------- Co-authored-by: Claude Sonnet 4.6 --- .../docs-v2/api/fetch-stream-transport.mdx | 21 ++ .../docs-v2/api/mock-stream-transport.mdx | 29 +++ .../docs-v2/api/provide-stream-resource.mdx | 25 +++ .../content/docs-v2/api/stream-resource.mdx | 23 +++ .../docs-v2/concepts/agent-architecture.mdx | 14 ++ .../docs-v2/concepts/angular-signals.mdx | 14 ++ .../docs-v2/concepts/langgraph-basics.mdx | 14 ++ .../docs-v2/concepts/state-management.mdx | 14 ++ .../content/docs-v2/guides/deployment.mdx | 17 ++ .../content/docs-v2/guides/interrupts.mdx | 17 ++ .../website/content/docs-v2/guides/memory.mdx | 17 ++ .../content/docs-v2/guides/persistence.mdx | 17 ++ .../content/docs-v2/guides/streaming.mdx | 182 ++++++++++++++++-- .../content/docs-v2/guides/subgraphs.mdx | 147 +++++++++++++- .../content/docs-v2/guides/testing.mdx | 17 ++ .../content/docs-v2/guides/time-travel.mdx | 135 ++++++++++++- .../2026-04-04-docs-pages-improvement.md | 171 ++++++++++++++++ 17 files changed, 847 insertions(+), 27 deletions(-) create mode 100644 docs/superpowers/plans/2026-04-04-docs-pages-improvement.md diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index 86856dc4f..afb74b5f2 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ 
b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -1,3 +1,24 @@ # FetchStreamTransport +`FetchStreamTransport` is the production-ready transport that opens a real server-sent event connection using the browser's `fetch` API and reads a `ReadableStream` response body. It is the default transport you register with `provideStreamResource` in production builds. + +You rarely need to interact with `FetchStreamTransport` directly — simply provide it once at the application level and every `streamResource` will use it automatically. You would reach for it explicitly only when constructing a resource outside the normal DI tree or when you need to override the transport for a single resource while keeping the global default intact. + +```ts +import { inject } from '@angular/core'; +import { streamResource, FetchStreamTransport } from '@ngxp/stream-resource'; + +// Override transport for a single resource +const events = streamResource({ + url: () => '/api/events', + transport: inject(FetchStreamTransport), +}); +``` + + + `FetchStreamTransport` implements the `StreamTransport` interface. You can + create custom transports (e.g. WebSocket-backed) by implementing the same + interface and providing them in place of this class. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index 3ed2407e0..fbf014cd6 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -1,3 +1,32 @@ # MockStreamTransport +`MockStreamTransport` is a test-friendly transport that replaces real network calls with an in-memory event emitter. Use it in unit and component tests to push values on demand and assert against your component's reactive state without a running server. 
+ +```ts +import { TestBed } from '@angular/core/testing'; +import { + provideStreamResource, + MockStreamTransport, +} from '@ngxp/stream-resource'; + +beforeEach(() => { + TestBed.configureTestingModule({ + providers: [provideStreamResource({ transport: MockStreamTransport })], + }); +}); + +it('reflects streamed value', () => { + const transport = TestBed.inject(MockStreamTransport); + // Emit a value into the stream + transport.emit('/api/repos/42', { id: 42, name: 'my-repo' }); + // Assert your component's signal updated accordingly +}); +``` + + + Because `MockStreamTransport` is synchronous by default, you can emit values + and assert state changes in the same test tick — no `fakeAsync` or `tick` + required. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 59d469a11..6e618bdb1 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -1,3 +1,28 @@ # provideStreamResource() +`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. Call it inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. + +```ts +import { bootstrapApplication } from '@angular/platform-browser'; +import { + provideStreamResource, + FetchStreamTransport, +} from '@ngxp/stream-resource'; +import { AppComponent } from './app/app.component'; + +bootstrapApplication(AppComponent, { + providers: [ + provideStreamResource({ + transport: FetchStreamTransport, + }), + ], +}); +``` + + + Swap `FetchStreamTransport` for `MockStreamTransport` (or any custom class + implementing the `StreamTransport` interface) to change the transport for all + resources at once — useful for testing or SSR. 
+ + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index fa1c3417c..719efab1f 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -1,3 +1,26 @@ # streamResource() +`streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. + +```ts +import { streamResource } from '@ngxp/stream-resource'; + +// Inside a component or service with injection context +const repo = streamResource({ + url: () => `/api/repos/${this.repoId()}`, + transport: inject(FetchStreamTransport), +}); + +// Use in template +// repo.value() — latest emitted value (or undefined) +// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +``` + + + `streamResource` must be called during construction, inside an injection + context (e.g. a component constructor, field initializer, or a function + passed to `runInInjectionContext`). Calling it outside an injection context + will throw. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/concepts/agent-architecture.mdx b/apps/website/content/docs-v2/concepts/agent-architecture.mdx index 3d84d0d24..32a334de6 100644 --- a/apps/website/content/docs-v2/concepts/agent-architecture.mdx +++ b/apps/website/content/docs-v2/concepts/agent-architecture.mdx @@ -53,3 +53,17 @@ streamResource() supports these patterns through the `subagents()` and `activeSu Most applications only need a single agent with tools. Add subagents when you need true task delegation with isolated state. + +## What's Next + + + + Learn the graph, node, and edge model that agents are built on. 
+ + + Compose agents into multi-agent pipelines using subgraphs. + + + Pause agent execution and wait for human approval mid-run. + + diff --git a/apps/website/content/docs-v2/concepts/angular-signals.mdx b/apps/website/content/docs-v2/concepts/angular-signals.mdx index cd9677f72..5fb2a5887 100644 --- a/apps/website/content/docs-v2/concepts/angular-signals.mdx +++ b/apps/website/content/docs-v2/concepts/angular-signals.mdx @@ -59,3 +59,17 @@ Unlike traditional Angular HTTP patterns, streamResource doesn't use Observables Signals are simpler for UI state. They synchronously read the latest value, compose with computed(), and integrate with Angular's template syntax. streamResource handles the async SSE connection internally and surfaces results as Signals. + +## What's Next + + + + Understand how LangGraph agent state flows into Angular Signals. + + + See Signals in action with token-by-token streaming responses. + + + Full reference for every Signal exposed by streamResource. + + diff --git a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx index 046e64cf2..4a8e27d94 100644 --- a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx +++ b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx @@ -49,3 +49,17 @@ streamResource({ ... }) For deeper LangGraph concepts (persistence, interrupts, memory), see the individual guide pages. + +## What's Next + + + + Understand the planning, tool-calling, and execution lifecycle. + + + Stream token-by-token responses from your LangGraph agent. + + + Learn how streamResource exposes agent state as Angular Signals. 
+ + diff --git a/apps/website/content/docs-v2/concepts/state-management.mdx b/apps/website/content/docs-v2/concepts/state-management.mdx index 8e9274cad..6c1c9e94a 100644 --- a/apps/website/content/docs-v2/concepts/state-management.mdx +++ b/apps/website/content/docs-v2/concepts/state-management.mdx @@ -66,4 +66,18 @@ Every state update from the agent creates a new signal value. Angular's change d const hasErrors = computed(() => agent.value().analysis.issues.length > 0 ); + +## What's Next + + + + Learn how streamResource uses Signals for reactive rendering. + + + Persist thread state so users can resume conversations later. + + + Preserve context across sessions with LangGraph's memory store. + + ``` diff --git a/apps/website/content/docs-v2/guides/deployment.mdx b/apps/website/content/docs-v2/guides/deployment.mdx index ff23ad567..91abe0c21 100644 --- a/apps/website/content/docs-v2/guides/deployment.mdx +++ b/apps/website/content/docs-v2/guides/deployment.mdx @@ -88,3 +88,20 @@ Store threadId in localStorage or a backend so users can resume conversations. Set `throttle` option if token-by-token updates are too frequent for your UI. + +## What's Next + + + + Test agent interactions deterministically before deploying. + + + Store thread IDs so users can resume conversations across sessions. + + + Tune streaming options like throttle for production performance. + + + Full reference for provideStreamResource configuration options. + + diff --git a/apps/website/content/docs-v2/guides/interrupts.mdx b/apps/website/content/docs-v2/guides/interrupts.mdx index b17d8f5ef..8063b852f 100644 --- a/apps/website/content/docs-v2/guides/interrupts.mdx +++ b/apps/website/content/docs-v2/guides/interrupts.mdx @@ -76,3 +76,20 @@ interruptCount = computed(() => agent.interrupts().length); Use the BagTemplate generic parameter to type your interrupt payloads for full TypeScript safety. + +## What's Next + + + + Resume conversations across page refreshes with thread persistence. 
+ + + Stream token-by-token responses and tool progress in real time. + + + Script interrupt events deterministically with MockStreamTransport. + + + Full reference for streamResource options and returned signals. + + diff --git a/apps/website/content/docs-v2/guides/memory.mdx b/apps/website/content/docs-v2/guides/memory.mdx index b72d55fe2..f2da7b9d5 100644 --- a/apps/website/content/docs-v2/guides/memory.mdx +++ b/apps/website/content/docs-v2/guides/memory.mdx @@ -63,3 +63,20 @@ const agent = streamResource({ The agent controls what gets stored in memory. streamResource() just surfaces the current state. Design your agent's state schema to include the fields you want to persist. + +## What's Next + + + + Save thread IDs and resume conversations across sessions. + + + Replay and branch agent runs from any past checkpoint. + + + Understand how agent state flows into Angular Signals. + + + Test memory and state behavior with MockStreamTransport. + + diff --git a/apps/website/content/docs-v2/guides/persistence.mdx b/apps/website/content/docs-v2/guides/persistence.mdx index 3132b920c..2eb55eca9 100644 --- a/apps/website/content/docs-v2/guides/persistence.mdx +++ b/apps/website/content/docs-v2/guides/persistence.mdx @@ -87,3 +87,20 @@ When a connection drops, streamResource() can rejoin an in-progress run. await chat.joinStream(runId, lastEventId); // Picks up from where the connection was lost ``` + +## What's Next + + + + Pause agent execution and wait for human input with interrupt signals. + + + Preserve context across sessions using LangGraph's memory store. + + + Stream token-by-token responses and tool progress in real time. + + + Test agent interactions deterministically with MockStreamTransport. 
+ + diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index 55d2fde6b..b3dc03962 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -1,6 +1,6 @@ # Streaming -StreamResource provides token-by-token streaming from LangGraph agents via Server-Sent Events (SSE). Every update lands directly in Angular Signals. +StreamResource provides token-by-token streaming from LangGraph agents via Server-Sent Events (SSE). Every update lands directly in Angular Signals — no subscriptions, no manual change detection. Make sure you've completed the Installation guide first. @@ -8,29 +8,45 @@ Make sure you've completed the Installation guide first. ## Basic streaming - - +Create a `streamResource` in your component, pass it a message, and bind to the resulting signals. + + + ```typescript -// chat.component.ts -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'chat_agent', -}); +import { Component, computed } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { BaseMessage } from '@langchain/core/messages'; + +@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +export class ChatComponent { + readonly chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); -// Status updates as streaming progresses -const isStreaming = computed(() => chat.status() === 'streaming'); + readonly isStreaming = computed(() => this.chat.status() === 'streaming'); + + send(text: string) { + this.chat.stream({ messages: [{ role: 'user', content: text }] }); + } +} ``` - + ```html - + + @for (msg of chat.messages(); track $index) {

{{ msg.content }}

} + +@if (chat.status() === 'error') { +

{{ chat.error()?.message }}

+} ```
@@ -38,16 +54,152 @@ const isStreaming = computed(() => chat.status() === 'streaming'); ## Stream status -The `status()` signal reports the current state: +The `status()` signal reports the current lifecycle state of the SSE connection: -No active stream. Ready to send a message. +No active stream. The resource is ready to accept a new message. -Tokens are arriving. Messages update in real-time. +Tokens are arriving over the SSE connection. Signal values update in real-time with each chunk. -Something went wrong. Check the error() signal for details. +The connection was interrupted or the agent returned an error. Inspect `error()` for the full details. + +## Stream modes + +LangGraph supports three stream modes. Pass `streamMode` to control what each SSE chunk contains. + + + + +```typescript +// Receives the full agent state after every node execution. +// Best for message-based chat interfaces. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'values', +}); + +// chat.messages() always contains the complete message list +``` + + + + +```typescript +// Streams individual message tokens as they are generated. +// Best for token-by-token rendering with lowest perceived latency. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'messages', +}); +``` + + + + +```typescript +// Emits raw LangGraph run events (on_chain_start, on_llm_stream, etc.). +// Best for advanced observability or custom progress indicators. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'events', +}); +``` + + + + + +Use `values` for most chat UIs — it gives you a consistent, complete state snapshot. Switch to `messages` only when you need sub-token latency or are rendering a live typing cursor. + + +## Error handling + +If the SSE connection drops or the agent throws, `status()` transitions to `'error'` and `error()` is populated. 
Use these signals to render fallback UI and retry. + + + + +```typescript +import { Component, computed, effect } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { BaseMessage } from '@langchain/core/messages'; + +@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +export class ChatComponent { + readonly chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + readonly hasError = computed(() => this.chat.status() === 'error'); + + retry() { + // Re-stream using the same thread so context is preserved + this.chat.stream(); + } +} +``` + + + + +```html +@if (hasError()) { +
+

{{ chat.error()?.message }}

+ +
+} +``` + +
+
+ + +`error()` surfaces both transport-level failures (lost connection, 5xx) and application-level errors returned by the agent graph. Check `error().cause` for the underlying HTTP status when you need to distinguish them. + + +## Throttle configuration + +By default StreamResource emits a signal update for every incoming SSE chunk. On fast connections this can trigger hundreds of renders per second. Use the `throttle` option to coalesce updates. + +```typescript +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + // Batch incoming chunks and flush at most once every 50 ms + throttle: 50, +}); +``` + +The value is in milliseconds. A `throttle` of `0` (default) disables batching and passes every chunk through immediately. Good starting values: + +| Use case | Recommended throttle | +|---|---| +| Token-by-token typing effect | 0 ms (disabled) | +| Standard chat bubble | 50 ms | +| Background summarisation | 150 ms | + + +Each call to `chat.stream()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. + + +## What's Next + + + + Resume conversations across page reloads using thread IDs and checkpointers. + + + Pause agent execution mid-stream to collect human input before continuing. + + + Unit-test components that use streamResource with the built-in test harness. + + + Full option reference for streamResource(), including all configuration keys. + + diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index ed76391bf..2663160e1 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -8,7 +8,7 @@ LangGraph calls them subgraphs (modular graph composition). 
Deep Agents calls th ## Tracking subagent execution -The `subagents()` signal contains a Map of active subagent streams. +The `subagents()` signal contains a Map of active subagent streams. Use it to inspect the full set of delegated tasks and their current state. ```typescript const orchestrator = streamResource({ @@ -16,20 +16,25 @@ const orchestrator = streamResource({ subagentToolNames: ['research', 'analyze', 'summarize'], }); -// All subagent streams +// All subagent streams (active and completed) const subagents = computed(() => orchestrator.subagents()); // Only active ones const running = computed(() => orchestrator.activeSubagents()); const runningCount = computed(() => running().length); + +// React to count changes +effect(() => { + console.log(`${runningCount()} subagents currently running`); +}); ``` ## Subagent stream details -Each `SubagentStreamRef` provides its own signals. +Each `SubagentStreamRef` exposes its own reactive signals — status, messages, and errors — so you can surface granular progress in your UI. ```typescript -// Access a specific subagent +// Access a specific subagent by its tool call ID const researchAgent = computed(() => orchestrator.subagents().get('research-tool-call-id') ); @@ -37,7 +42,89 @@ const researchAgent = computed(() => // Track its progress const researchStatus = computed(() => researchAgent()?.status()); const researchMessages = computed(() => researchAgent()?.messages() ?? []); +const researchError = computed(() => researchAgent()?.error()); +``` + +## Orchestrator pattern + +The orchestrator pattern delegates specialised work to subagents and merges their results. Each subagent runs its own graph independently while the parent coordinates. 
+ +```typescript +const pipeline = streamResource({ + assistantId: 'pipeline-orchestrator', + subagentToolNames: ['fetch-data', 'transform', 'validate', 'publish'], + filterSubagentMessages: true, +}); + +// Derive a summary of all subagent states +const pipelineStatus = computed(() => { + const agents = pipeline.subagents(); + const entries = [...agents.entries()]; + + return { + total: entries.length, + pending: entries.filter(([, a]) => a.status() === 'pending').length, + running: entries.filter(([, a]) => a.status() === 'streaming').length, + done: entries.filter(([, a]) => a.status() === 'complete').length, + failed: entries.filter(([, a]) => a.status() === 'error').length, + }; +}); +``` + +## Subagent progress UI + +Render live progress for each subagent using the signals above. + + + +```typescript +import { computed } from '@angular/core'; + +@Component({ + selector: 'app-subagent-progress', + template: ` + @for (entry of subagentEntries(); track entry[0]) { +
+ {{ entry[0] }} + {{ entry[1].status() }} + @if (entry[1].status() === 'error') { + {{ entry[1].error()?.message }} + } +
+ } + `, +}) +export class SubagentProgressComponent { + orchestrator = inject(OrchestratorService).resource; + + subagentEntries = computed(() => + [...this.orchestrator.subagents().entries()] + ); +} +``` +
+ +```html + +@for (entry of subagentEntries(); track entry[0]) { +
+ {{ entry[0] }} + + {{ entry[1].status() }} + + + @if (entry[1].status() === 'streaming') { + + } + + @if (entry[1].status() === 'error') { +

{{ entry[1].error()?.message }}

+ } +
+} ``` +
+
## Filtering subagent messages @@ -57,3 +144,55 @@ const parentMessages = computed(() => orchestrator.messages()); Set `subagentToolNames` to the tool names that spawn subagents. streamResource() uses this to identify which tool calls create subagent streams. + +## Error handling per subagent + +Each subagent exposes its own `error()` signal so failures are isolated — one subagent failing does not stop the others. + +```typescript +const agents = orchestrator.subagents(); + +for (const [id, agent] of agents) { + effect(() => { + const err = agent.error(); + if (err) { + console.error(`Subagent ${id} failed:`, err.message); + // Retry, surface to user, or fall back gracefully + } + }); +} + +// Collect all failed subagents reactively +const failedAgents = computed(() => + [...orchestrator.subagents().entries()].filter( + ([, agent]) => agent.status() === 'error' + ) +); +``` + + +Always check `failedAgents()` before presenting final results. A completed orchestrator can still have subagents that errored — success at the top level does not guarantee all delegates succeeded. + + +## When to use subagents vs a single agent + + +Use **subagents** when tasks are independent and can run in parallel, when each task needs its own context window, or when you want isolated error boundaries. Use a **single agent** for sequential reasoning, tasks that share tightly coupled state, or when latency from spawning subagents outweighs the parallelism benefit. + + +## What's Next + + + + Understand how streamResource() surfaces tokens, status, and errors in real time. + + + Write unit and integration tests for orchestrator graphs and subagent interactions. + + + Full reference for streamResource() options, signals, and subagent configuration. + + + Patterns for retries, fallbacks, and surfacing errors from deeply nested agents. 
+ + diff --git a/apps/website/content/docs-v2/guides/testing.mdx b/apps/website/content/docs-v2/guides/testing.mdx index 5b1cad068..0d0f7f80f 100644 --- a/apps/website/content/docs-v2/guides/testing.mdx +++ b/apps/website/content/docs-v2/guides/testing.mdx @@ -104,3 +104,20 @@ it('should surface errors', () => { streamResource() must be called within an Angular injection context. In tests, wrap calls in `TestBed.runInInjectionContext()`. + +## What's Next + + + + Understand the SSE event model your tests simulate. + + + Test human-in-the-loop approval flows with scripted interrupt events. + + + Configure streamResource() for production LangGraph Cloud. + + + Full reference for MockStreamTransport options and methods. + + diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index d45cab714..37501fbb9 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -1,14 +1,14 @@ # Time Travel -Time travel lets you inspect earlier states and replay alternate execution paths. streamResource() exposes the full checkpoint history and branch navigation through Angular Signals. +Time travel lets you inspect earlier states and replay alternate execution paths. `streamResource()` exposes the full checkpoint history and branch navigation through Angular Signals. Use it to debug agent decisions, explore alternate paths, and build undo/redo experiences. -Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. +Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. Time travel works with any LangGraph agent that persists checkpoints to a thread. ## Browsing execution history -The `history()` signal contains an array of `ThreadState` checkpoints. +The `history()` signal contains an array of `ThreadState` checkpoints ordered from oldest to newest. 
Each checkpoint captures the complete agent state at that point in execution, including messages, intermediate results, and any custom state fields. ```typescript const agent = streamResource({ @@ -19,14 +19,21 @@ const agent = streamResource({ // Full execution timeline const checkpoints = computed(() => agent.history()); const checkpointCount = computed(() => agent.history().length); + +// Access a specific checkpoint +const latestCheckpoint = computed(() => { + const history = agent.history(); + return history[history.length - 1]; +}); ``` +Each `ThreadState` entry exposes `checkpoint`, `metadata`, `created_at`, and the full `values` snapshot, giving you complete visibility into every step of execution. + ## Forking from a checkpoint -Submit with a specific checkpoint to branch execution from an earlier state. +Submit with a specific checkpoint to branch execution from an earlier state. This creates a new branch in the thread graph while leaving the original path intact. ```typescript -// Fork from the 3rd checkpoint with new input forkFromCheckpoint(index: number) { const checkpoint = this.agent.history()[index]; this.agent.submit( @@ -34,22 +41,134 @@ forkFromCheckpoint(index: number) { { checkpoint: checkpoint.checkpoint } ); } + +// Fork with a completely different input +retryWithAlternative(index: number, newInput: string) { + const checkpoint = this.agent.history()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: newInput }] }, + { checkpoint: checkpoint.checkpoint } + ); +} ``` ## Branch navigation -Use `branch()` and `setBranch()` to navigate between execution branches. +Use `branch()` and `setBranch()` to navigate between execution branches. Branches are automatically created when you fork from a checkpoint. 
```typescript -// Current branch +// Current branch identifier const activeBranch = computed(() => agent.branch()); +// All available branches (if exposed by your graph) +const allBranches = computed(() => agent.history() + .map(s => s.metadata?.branch) + .filter(Boolean) +); + // Switch to a different branch selectBranch(branchId: string) { agent.setBranch(branchId); } ``` +## Building a history UI + +Expose checkpoint history directly in your component to let users scrub through execution steps or rewind to any earlier state. + + + +```typescript +import { Component, inject, computed } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { AgentService } from './agent.service'; + +@Component({ + selector: 'app-history-viewer', + templateUrl: './history-viewer.component.html', +}) +export class HistoryViewerComponent { + private agentService = inject(AgentService); + readonly agent = this.agentService.agent; + + readonly checkpoints = computed(() => this.agent.history()); + readonly activeIndex = computed(() => + this.checkpoints().length - 1 + ); + + fork(index: number) { + const checkpoint = this.checkpoints()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: 'Try a different approach' }] }, + { checkpoint: checkpoint.checkpoint } + ); + } + + formatTime(isoString: string): string { + return new Date(isoString).toLocaleTimeString(); + } +} +``` + + +```html +
    + @for (cp of checkpoints(); track cp.checkpoint.id; let i = $index) { +
  • + Step {{ i + 1 }} + {{ formatTime(cp.created_at) }} + +
  • + } +
+``` +
+
+ +## Comparing checkpoints + +Diff two checkpoints to understand exactly what changed between execution steps. This is useful for understanding tool call results, message additions, or state mutations. + +```typescript +compareCheckpoints(indexA: number, indexB: number) { + const history = this.agent.history(); + const stateA = history[indexA]?.values; + const stateB = history[indexB]?.values; + + if (!stateA || !stateB) return null; + + // Compare message counts + const messagesAdded = (stateB.messages?.length ?? 0) + - (stateA.messages?.length ?? 0); + + // Identify changed keys + const changedKeys = Object.keys({ ...stateA, ...stateB }).filter( + key => JSON.stringify(stateA[key]) !== JSON.stringify(stateB[key]) + ); + + return { messagesAdded, changedKeys }; +} +``` + +Use the comparison result to render a diff view, highlight changed fields in your UI, or log what the agent modified during a specific step. + -Time travel is most useful during development. Inspect why an agent chose a particular path, then fork to test alternatives without restarting the conversation. +Time travel is most useful during development. Inspect why an agent chose a particular path by comparing adjacent checkpoints, then fork to test alternatives without restarting the conversation. Combine `history()` with Angular DevTools to watch checkpoint arrays update in real time as the agent streams. + +## What's Next + + + + Configure thread storage so checkpoints survive page reloads and are available across sessions. + + + Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. + + + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. + + + Deep dive into branch management, merging strategies, and presenting multi-branch UIs to end users. 
+ + diff --git a/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md b/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md new file mode 100644 index 000000000..f45b73d92 --- /dev/null +++ b/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md @@ -0,0 +1,171 @@ +# Docs Pages Improvement Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Bring all 18 docs pages up to the quality level of the introduction page — expanded content, proper MDX components, navigation sections, and consistent design patterns. + +**Architecture:** Each task updates one or more MDX files. Changes are content-only (no new components needed). All pages should use: glass Callouts with SVG icons, labeled Tabs, code blocks with copy buttons (automatic via Pre component), and "What's Next" CardGroup at the bottom. + +**Baseline:** The introduction page (292 lines) sets the quality bar with: animated diagram, FeatureChips, expanded code examples, Callouts, Steps, Tabs with labels, and CardGroup navigation. + +--- + +## Audit Summary + +| Quality | Pages | Action | +|---------|-------|--------| +| **THIN (needs major expansion)** | streaming (53), time-travel (55), subgraphs (59), 4 API stubs (3 each) | Double or triple content | +| **ADEQUATE (needs polish)** | persistence (89), interrupts (78), memory (65), testing (106), deployment (90), langgraph-basics (51), agent-architecture (55) | Add nav section, expand examples, add Callouts | +| **GOOD (minor polish)** | quickstart (130), installation (102), angular-signals (61), state-management (69) | Add nav section where missing | + +## Common Improvements for ALL Pages + +Every page should get: +1. **"What's Next" CardGroup** at the bottom (links to 2-4 related pages) +2. 
**At least one Callout** (tip, info, or warning) for key insights +3. **Tab-labeled code examples** where showing TypeScript + Template patterns + +--- + +### Task 1: Expand Streaming Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/streaming.mdx` + +Expand from 53 to ~120 lines. Add: +- Stream modes explanation (values, messages, events) +- Error handling during streaming +- Throttle configuration +- Template patterns with `@if` / `@for` +- Callout about SSE connection behavior +- "What's Next" CardGroup + +--- + +### Task 2: Expand Time Travel Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/time-travel.mdx` + +Expand from 55 to ~100 lines. Add: +- UI pattern for building a history timeline +- Tabs showing TypeScript + Template for history display +- Comparing checkpoints +- Callout about debugging workflow +- "What's Next" CardGroup + +--- + +### Task 3: Expand Subgraphs Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/subgraphs.mdx` + +Expand from 59 to ~100 lines. Add: +- Orchestrator pattern with code example +- Tabs showing TypeScript + Template for subagent UI +- Error handling per subagent +- Callout about when to use subagents vs single agent +- "What's Next" CardGroup + +--- + +### Task 4: Expand API Reference Stubs (THIN → ADEQUATE) + +**Files:** +- `apps/website/content/docs-v2/api/stream-resource.mdx` +- `apps/website/content/docs-v2/api/provide-stream-resource.mdx` +- `apps/website/content/docs-v2/api/fetch-stream-transport.mdx` +- `apps/website/content/docs-v2/api/mock-stream-transport.mdx` + +Each API page should have a brief intro paragraph and a usage example before the auto-generated content. ~15-20 lines each. 
+ +--- + +### Task 5: Polish Persistence Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/persistence.mdx` + +Add: +- "What's Next" CardGroup +- Callout about production persistence patterns +- Tab labels using `label` prop if not already + +--- + +### Task 6: Polish Interrupts Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/interrupts.mdx` + +Add: +- Multi-step approval pattern +- "What's Next" CardGroup +- Callout about timeout handling + +--- + +### Task 7: Polish Memory Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/memory.mdx` + +Add: +- Tabs for TypeScript + Template patterns +- "What's Next" CardGroup +- Callout about memory best practices + +--- + +### Task 8: Polish Testing Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/testing.mdx` + +Add: +- "What's Next" CardGroup +- Integration test example with TestBed + +--- + +### Task 9: Polish Deployment Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/deployment.mdx` + +Add: +- "What's Next" CardGroup +- Monitoring/observability section +- CORS configuration callout + +--- + +### Task 10: Polish Concept Pages (ADEQUATE → GOOD) + +**Files:** +- `apps/website/content/docs-v2/concepts/langgraph-basics.mdx` +- `apps/website/content/docs-v2/concepts/agent-architecture.mdx` + +Add: +- "What's Next" CardGroup to both +- Code examples with Tabs where appropriate + +--- + +### Task 11: Add Navigation to Good Pages + +**Files:** +- `apps/website/content/docs-v2/concepts/angular-signals.mdx` +- `apps/website/content/docs-v2/concepts/state-management.mdx` + +Add: +- "What's Next" CardGroup (these are the only good pages missing it) + +--- + +### Task 12: Final Build Verification + +- [ ] Build website: `npx nx build website --skip-nx-cache` +- [ ] Verify all 19 pages render +- [ ] Spot-check 5 pages for CardGroup, Callouts, and code blocks + +--- + +## Execution Strategy + +Tasks 1-3 (THIN pages) 
are the priority — these need the most work. +Tasks 4-11 are polish passes that can be parallelized. +All tasks are independent of each other. From 032f99c3b88a7c7933fe6265d840bb20f0605899 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 14:56:48 -0700 Subject: [PATCH 004/187] docs: improve all 18 docs pages to intro quality (#10) * docs: add plan for improving all docs pages to intro quality * docs(website): expand streaming guide with modes, errors, throttle * docs(website): expand time-travel guide with history UI and debugging * docs(website): expand API reference stubs with intros and examples * docs(website): expand subgraphs guide with orchestrator and error handling * docs(website): add What's Next navigation to all docs pages Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- .../docs-v2/api/fetch-stream-transport.mdx | 21 ++ .../docs-v2/api/mock-stream-transport.mdx | 29 +++ .../docs-v2/api/provide-stream-resource.mdx | 25 +++ .../content/docs-v2/api/stream-resource.mdx | 23 +++ .../docs-v2/concepts/agent-architecture.mdx | 14 ++ .../docs-v2/concepts/angular-signals.mdx | 14 ++ .../docs-v2/concepts/langgraph-basics.mdx | 14 ++ .../docs-v2/concepts/state-management.mdx | 14 ++ .../content/docs-v2/guides/deployment.mdx | 17 ++ .../content/docs-v2/guides/interrupts.mdx | 17 ++ .../website/content/docs-v2/guides/memory.mdx | 17 ++ .../content/docs-v2/guides/persistence.mdx | 17 ++ .../content/docs-v2/guides/streaming.mdx | 182 ++++++++++++++++-- .../content/docs-v2/guides/subgraphs.mdx | 147 +++++++++++++- .../content/docs-v2/guides/testing.mdx | 17 ++ .../content/docs-v2/guides/time-travel.mdx | 135 ++++++++++++- .../2026-04-04-docs-pages-improvement.md | 171 ++++++++++++++++ 17 files changed, 847 insertions(+), 27 deletions(-) create mode 100644 docs/superpowers/plans/2026-04-04-docs-pages-improvement.md diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx 
b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index 86856dc4f..afb74b5f2 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -1,3 +1,24 @@ # FetchStreamTransport +`FetchStreamTransport` is the production-ready transport that opens a real server-sent event connection using the browser's `fetch` API and reads a `ReadableStream` response body. It is the default transport you register with `provideStreamResource` in production builds. + +You rarely need to interact with `FetchStreamTransport` directly — simply provide it once at the application level and every `streamResource` will use it automatically. You would reach for it explicitly only when constructing a resource outside the normal DI tree or when you need to override the transport for a single resource while keeping the global default intact. + +```ts +import { inject } from '@angular/core'; +import { streamResource, FetchStreamTransport } from '@ngxp/stream-resource'; + +// Override transport for a single resource +const events = streamResource({ + url: () => '/api/events', + transport: inject(FetchStreamTransport), +}); +``` + + + `FetchStreamTransport` implements the `StreamTransport` interface. You can + create custom transports (e.g. WebSocket-backed) by implementing the same + interface and providing them in place of this class. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index 3ed2407e0..fbf014cd6 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -1,3 +1,32 @@ # MockStreamTransport +`MockStreamTransport` is a test-friendly transport that replaces real network calls with an in-memory event emitter. 
Use it in unit and component tests to push values on demand and assert against your component's reactive state without a running server. + +```ts +import { TestBed } from '@angular/core/testing'; +import { + provideStreamResource, + MockStreamTransport, +} from '@ngxp/stream-resource'; + +beforeEach(() => { + TestBed.configureTestingModule({ + providers: [provideStreamResource({ transport: MockStreamTransport })], + }); +}); + +it('reflects streamed value', () => { + const transport = TestBed.inject(MockStreamTransport); + // Emit a value into the stream + transport.emit('/api/repos/42', { id: 42, name: 'my-repo' }); + // Assert your component's signal updated accordingly +}); +``` + + + Because `MockStreamTransport` is synchronous by default, you can emit values + and assert state changes in the same test tick — no `fakeAsync` or `tick` + required. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 59d469a11..6e618bdb1 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -1,3 +1,28 @@ # provideStreamResource() +`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. Call it inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. 
+ +```ts +import { bootstrapApplication } from '@angular/platform-browser'; +import { + provideStreamResource, + FetchStreamTransport, +} from '@ngxp/stream-resource'; +import { AppComponent } from './app/app.component'; + +bootstrapApplication(AppComponent, { + providers: [ + provideStreamResource({ + transport: FetchStreamTransport, + }), + ], +}); +``` + + + Swap `FetchStreamTransport` for `MockStreamTransport` (or any custom class + implementing the `StreamTransport` interface) to change the transport for all + resources at once — useful for testing or SSR. + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index fa1c3417c..719efab1f 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -1,3 +1,26 @@ # streamResource() +`streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. + +```ts +import { streamResource } from '@ngxp/stream-resource'; + +// Inside a component or service with injection context +const repo = streamResource({ + url: () => `/api/repos/${this.repoId()}`, + transport: inject(FetchStreamTransport), +}); + +// Use in template +// repo.value() — latest emitted value (or undefined) +// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +``` + + + `streamResource` must be called during construction, inside an injection + context (e.g. a component constructor, field initializer, or a function + passed to `runInInjectionContext`). Calling it outside an injection context + will throw. 
+ + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/concepts/agent-architecture.mdx b/apps/website/content/docs-v2/concepts/agent-architecture.mdx index 3d84d0d24..32a334de6 100644 --- a/apps/website/content/docs-v2/concepts/agent-architecture.mdx +++ b/apps/website/content/docs-v2/concepts/agent-architecture.mdx @@ -53,3 +53,17 @@ streamResource() supports these patterns through the `subagents()` and `activeSu Most applications only need a single agent with tools. Add subagents when you need true task delegation with isolated state. + +## What's Next + + + + Learn the graph, node, and edge model that agents are built on. + + + Compose agents into multi-agent pipelines using subgraphs. + + + Pause agent execution and wait for human approval mid-run. + + diff --git a/apps/website/content/docs-v2/concepts/angular-signals.mdx b/apps/website/content/docs-v2/concepts/angular-signals.mdx index cd9677f72..5fb2a5887 100644 --- a/apps/website/content/docs-v2/concepts/angular-signals.mdx +++ b/apps/website/content/docs-v2/concepts/angular-signals.mdx @@ -59,3 +59,17 @@ Unlike traditional Angular HTTP patterns, streamResource doesn't use Observables Signals are simpler for UI state. They synchronously read the latest value, compose with computed(), and integrate with Angular's template syntax. streamResource handles the async SSE connection internally and surfaces results as Signals. + +## What's Next + + + + Understand how LangGraph agent state flows into Angular Signals. + + + See Signals in action with token-by-token streaming responses. + + + Full reference for every Signal exposed by streamResource. 
+ + diff --git a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx index 046e64cf2..4a8e27d94 100644 --- a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx +++ b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx @@ -49,3 +49,17 @@ streamResource({ ... }) For deeper LangGraph concepts (persistence, interrupts, memory), see the individual guide pages. + +## What's Next + + + + Understand the planning, tool-calling, and execution lifecycle. + + + Stream token-by-token responses from your LangGraph agent. + + + Learn how streamResource exposes agent state as Angular Signals. + + diff --git a/apps/website/content/docs-v2/concepts/state-management.mdx b/apps/website/content/docs-v2/concepts/state-management.mdx index 8e9274cad..6c1c9e94a 100644 --- a/apps/website/content/docs-v2/concepts/state-management.mdx +++ b/apps/website/content/docs-v2/concepts/state-management.mdx @@ -66,4 +66,18 @@ Every state update from the agent creates a new signal value. Angular's change d const hasErrors = computed(() => agent.value().analysis.issues.length > 0 ); + +## What's Next + + + + Learn how streamResource uses Signals for reactive rendering. + + + Persist thread state so users can resume conversations later. + + + Preserve context across sessions with LangGraph's memory store. + + ``` diff --git a/apps/website/content/docs-v2/guides/deployment.mdx b/apps/website/content/docs-v2/guides/deployment.mdx index ff23ad567..91abe0c21 100644 --- a/apps/website/content/docs-v2/guides/deployment.mdx +++ b/apps/website/content/docs-v2/guides/deployment.mdx @@ -88,3 +88,20 @@ Store threadId in localStorage or a backend so users can resume conversations. Set `throttle` option if token-by-token updates are too frequent for your UI. + +## What's Next + + + + Test agent interactions deterministically before deploying. + + + Store thread IDs so users can resume conversations across sessions. 
+ + + Tune streaming options like throttle for production performance. + + + Full reference for provideStreamResource configuration options. + + diff --git a/apps/website/content/docs-v2/guides/interrupts.mdx b/apps/website/content/docs-v2/guides/interrupts.mdx index b17d8f5ef..8063b852f 100644 --- a/apps/website/content/docs-v2/guides/interrupts.mdx +++ b/apps/website/content/docs-v2/guides/interrupts.mdx @@ -76,3 +76,20 @@ interruptCount = computed(() => agent.interrupts().length); Use the BagTemplate generic parameter to type your interrupt payloads for full TypeScript safety. + +## What's Next + + + + Resume conversations across page refreshes with thread persistence. + + + Stream token-by-token responses and tool progress in real time. + + + Script interrupt events deterministically with MockStreamTransport. + + + Full reference for streamResource options and returned signals. + + diff --git a/apps/website/content/docs-v2/guides/memory.mdx b/apps/website/content/docs-v2/guides/memory.mdx index b72d55fe2..f2da7b9d5 100644 --- a/apps/website/content/docs-v2/guides/memory.mdx +++ b/apps/website/content/docs-v2/guides/memory.mdx @@ -63,3 +63,20 @@ const agent = streamResource({ The agent controls what gets stored in memory. streamResource() just surfaces the current state. Design your agent's state schema to include the fields you want to persist. + +## What's Next + + + + Save thread IDs and resume conversations across sessions. + + + Replay and branch agent runs from any past checkpoint. + + + Understand how agent state flows into Angular Signals. + + + Test memory and state behavior with MockStreamTransport. 
+ + diff --git a/apps/website/content/docs-v2/guides/persistence.mdx b/apps/website/content/docs-v2/guides/persistence.mdx index 3132b920c..2eb55eca9 100644 --- a/apps/website/content/docs-v2/guides/persistence.mdx +++ b/apps/website/content/docs-v2/guides/persistence.mdx @@ -87,3 +87,20 @@ When a connection drops, streamResource() can rejoin an in-progress run. await chat.joinStream(runId, lastEventId); // Picks up from where the connection was lost ``` + +## What's Next + + + + Pause agent execution and wait for human input with interrupt signals. + + + Preserve context across sessions using LangGraph's memory store. + + + Stream token-by-token responses and tool progress in real time. + + + Test agent interactions deterministically with MockStreamTransport. + + diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index 55d2fde6b..b3dc03962 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -1,6 +1,6 @@ # Streaming -StreamResource provides token-by-token streaming from LangGraph agents via Server-Sent Events (SSE). Every update lands directly in Angular Signals. +StreamResource provides token-by-token streaming from LangGraph agents via Server-Sent Events (SSE). Every update lands directly in Angular Signals — no subscriptions, no manual change detection. Make sure you've completed the Installation guide first. @@ -8,29 +8,45 @@ Make sure you've completed the Installation guide first. ## Basic streaming - - +Create a `streamResource` in your component, pass it a message, and bind to the resulting signals. 
+ + + ```typescript -// chat.component.ts -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'chat_agent', -}); +import { Component, computed } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { BaseMessage } from '@langchain/core/messages'; + +@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +export class ChatComponent { + readonly chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); -// Status updates as streaming progresses -const isStreaming = computed(() => chat.status() === 'streaming'); + readonly isStreaming = computed(() => this.chat.status() === 'streaming'); + + send(text: string) { + this.chat.stream({ messages: [{ role: 'user', content: text }] }); + } +} ``` - + ```html - + + @for (msg of chat.messages(); track $index) {

{{ msg.content }}

} + +@if (chat.status() === 'error') { +

{{ chat.error()?.message }}

+} ```
@@ -38,16 +54,152 @@ const isStreaming = computed(() => chat.status() === 'streaming'); ## Stream status -The `status()` signal reports the current state: +The `status()` signal reports the current lifecycle state of the SSE connection: -No active stream. Ready to send a message. +No active stream. The resource is ready to accept a new message. -Tokens are arriving. Messages update in real-time. +Tokens are arriving over the SSE connection. Signal values update in real-time with each chunk. -Something went wrong. Check the error() signal for details. +The connection was interrupted or the agent returned an error. Inspect `error()` for the full details. + +## Stream modes + +LangGraph supports three stream modes. Pass `streamMode` to control what each SSE chunk contains. + + + + +```typescript +// Receives the full agent state after every node execution. +// Best for message-based chat interfaces. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'values', +}); + +// chat.messages() always contains the complete message list +``` + + + + +```typescript +// Streams individual message tokens as they are generated. +// Best for token-by-token rendering with lowest perceived latency. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'messages', +}); +``` + + + + +```typescript +// Emits raw LangGraph run events (on_chain_start, on_llm_stream, etc.). +// Best for advanced observability or custom progress indicators. +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + streamMode: 'events', +}); +``` + + + + + +Use `values` for most chat UIs — it gives you a consistent, complete state snapshot. Switch to `messages` only when you need sub-token latency or are rendering a live typing cursor. + + +## Error handling + +If the SSE connection drops or the agent throws, `status()` transitions to `'error'` and `error()` is populated. 
Use these signals to render fallback UI and retry. + + + + +```typescript +import { Component, computed, effect } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { BaseMessage } from '@langchain/core/messages'; + +@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +export class ChatComponent { + readonly chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + readonly hasError = computed(() => this.chat.status() === 'error'); + + retry() { + // Re-stream using the same thread so context is preserved + this.chat.stream(); + } +} +``` + + + + +```html +@if (hasError()) { +
+

{{ chat.error()?.message }}

+ +
+} +``` + +
+
+ + +`error()` surfaces both transport-level failures (lost connection, 5xx) and application-level errors returned by the agent graph. Check `error().cause` for the underlying HTTP status when you need to distinguish them. + + +## Throttle configuration + +By default StreamResource emits a signal update for every incoming SSE chunk. On fast connections this can trigger hundreds of renders per second. Use the `throttle` option to coalesce updates. + +```typescript +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + // Batch incoming chunks and flush at most once every 50 ms + throttle: 50, +}); +``` + +The value is in milliseconds. A `throttle` of `0` (default) disables batching and passes every chunk through immediately. Good starting values: + +| Use case | Recommended throttle | +|---|---| +| Token-by-token typing effect | 0 ms (disabled) | +| Standard chat bubble | 50 ms | +| Background summarisation | 150 ms | + + +Each call to `chat.stream()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. + + +## What's Next + + + + Resume conversations across page reloads using thread IDs and checkpointers. + + + Pause agent execution mid-stream to collect human input before continuing. + + + Unit-test components that use streamResource with the built-in test harness. + + + Full option reference for streamResource(), including all configuration keys. + + diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index ed76391bf..2663160e1 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -8,7 +8,7 @@ LangGraph calls them subgraphs (modular graph composition). 
Deep Agents calls th ## Tracking subagent execution -The `subagents()` signal contains a Map of active subagent streams. +The `subagents()` signal contains a Map of active subagent streams. Use it to inspect the full set of delegated tasks and their current state. ```typescript const orchestrator = streamResource({ @@ -16,20 +16,25 @@ const orchestrator = streamResource({ subagentToolNames: ['research', 'analyze', 'summarize'], }); -// All subagent streams +// All subagent streams (active and completed) const subagents = computed(() => orchestrator.subagents()); // Only active ones const running = computed(() => orchestrator.activeSubagents()); const runningCount = computed(() => running().length); + +// React to count changes +effect(() => { + console.log(`${runningCount()} subagents currently running`); +}); ``` ## Subagent stream details -Each `SubagentStreamRef` provides its own signals. +Each `SubagentStreamRef` exposes its own reactive signals — status, messages, and errors — so you can surface granular progress in your UI. ```typescript -// Access a specific subagent +// Access a specific subagent by its tool call ID const researchAgent = computed(() => orchestrator.subagents().get('research-tool-call-id') ); @@ -37,7 +42,89 @@ const researchAgent = computed(() => // Track its progress const researchStatus = computed(() => researchAgent()?.status()); const researchMessages = computed(() => researchAgent()?.messages() ?? []); +const researchError = computed(() => researchAgent()?.error()); +``` + +## Orchestrator pattern + +The orchestrator pattern delegates specialised work to subagents and merges their results. Each subagent runs its own graph independently while the parent coordinates. 
+ +```typescript +const pipeline = streamResource({ + assistantId: 'pipeline-orchestrator', + subagentToolNames: ['fetch-data', 'transform', 'validate', 'publish'], + filterSubagentMessages: true, +}); + +// Derive a summary of all subagent states +const pipelineStatus = computed(() => { + const agents = pipeline.subagents(); + const entries = [...agents.entries()]; + + return { + total: entries.length, + pending: entries.filter(([, a]) => a.status() === 'pending').length, + running: entries.filter(([, a]) => a.status() === 'streaming').length, + done: entries.filter(([, a]) => a.status() === 'complete').length, + failed: entries.filter(([, a]) => a.status() === 'error').length, + }; +}); +``` + +## Subagent progress UI + +Render live progress for each subagent using the signals above. + + + +```typescript +import { computed } from '@angular/core'; + +@Component({ + selector: 'app-subagent-progress', + template: ` + @for (entry of subagentEntries(); track entry[0]) { +
+ {{ entry[0] }} + {{ entry[1].status() }} + @if (entry[1].status() === 'error') { + {{ entry[1].error()?.message }} + } +
+ } + `, +}) +export class SubagentProgressComponent { + orchestrator = inject(OrchestratorService).resource; + + subagentEntries = computed(() => + [...this.orchestrator.subagents().entries()] + ); +} +``` +
+ +```html + +@for (entry of subagentEntries(); track entry[0]) { +
+ {{ entry[0] }} + + {{ entry[1].status() }} + + + @if (entry[1].status() === 'streaming') { + + } + + @if (entry[1].status() === 'error') { +

{{ entry[1].error()?.message }}

+ } +
+} ``` +
+
## Filtering subagent messages @@ -57,3 +144,55 @@ const parentMessages = computed(() => orchestrator.messages()); Set `subagentToolNames` to the tool names that spawn subagents. streamResource() uses this to identify which tool calls create subagent streams. + +## Error handling per subagent + +Each subagent exposes its own `error()` signal so failures are isolated — one subagent failing does not stop the others. + +```typescript +const agents = orchestrator.subagents(); + +for (const [id, agent] of agents) { + effect(() => { + const err = agent.error(); + if (err) { + console.error(`Subagent ${id} failed:`, err.message); + // Retry, surface to user, or fall back gracefully + } + }); +} + +// Collect all failed subagents reactively +const failedAgents = computed(() => + [...orchestrator.subagents().entries()].filter( + ([, agent]) => agent.status() === 'error' + ) +); +``` + + +Always check `failedAgents()` before presenting final results. A completed orchestrator can still have subagents that errored — success at the top level does not guarantee all delegates succeeded. + + +## When to use subagents vs a single agent + + +Use **subagents** when tasks are independent and can run in parallel, when each task needs its own context window, or when you want isolated error boundaries. Use a **single agent** for sequential reasoning, tasks that share tightly coupled state, or when latency from spawning subagents outweighs the parallelism benefit. + + +## What's Next + + + + Understand how streamResource() surfaces tokens, status, and errors in real time. + + + Write unit and integration tests for orchestrator graphs and subagent interactions. + + + Full reference for streamResource() options, signals, and subagent configuration. + + + Patterns for retries, fallbacks, and surfacing errors from deeply nested agents. 
+ + diff --git a/apps/website/content/docs-v2/guides/testing.mdx b/apps/website/content/docs-v2/guides/testing.mdx index 5b1cad068..0d0f7f80f 100644 --- a/apps/website/content/docs-v2/guides/testing.mdx +++ b/apps/website/content/docs-v2/guides/testing.mdx @@ -104,3 +104,20 @@ it('should surface errors', () => { streamResource() must be called within an Angular injection context. In tests, wrap calls in `TestBed.runInInjectionContext()`. + +## What's Next + + + + Understand the SSE event model your tests simulate. + + + Test human-in-the-loop approval flows with scripted interrupt events. + + + Configure streamResource() for production LangGraph Cloud. + + + Full reference for MockStreamTransport options and methods. + + diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index d45cab714..37501fbb9 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -1,14 +1,14 @@ # Time Travel -Time travel lets you inspect earlier states and replay alternate execution paths. streamResource() exposes the full checkpoint history and branch navigation through Angular Signals. +Time travel lets you inspect earlier states and replay alternate execution paths. `streamResource()` exposes the full checkpoint history and branch navigation through Angular Signals. Use it to debug agent decisions, explore alternate paths, and build undo/redo experiences. -Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. +Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. Time travel works with any LangGraph agent that persists checkpoints to a thread. ## Browsing execution history -The `history()` signal contains an array of `ThreadState` checkpoints. +The `history()` signal contains an array of `ThreadState` checkpoints ordered from oldest to newest. 
Each checkpoint captures the complete agent state at that point in execution, including messages, intermediate results, and any custom state fields. ```typescript const agent = streamResource({ @@ -19,14 +19,21 @@ const agent = streamResource({ // Full execution timeline const checkpoints = computed(() => agent.history()); const checkpointCount = computed(() => agent.history().length); + +// Access a specific checkpoint +const latestCheckpoint = computed(() => { + const history = agent.history(); + return history[history.length - 1]; +}); ``` +Each `ThreadState` entry exposes `checkpoint`, `metadata`, `created_at`, and the full `values` snapshot, giving you complete visibility into every step of execution. + ## Forking from a checkpoint -Submit with a specific checkpoint to branch execution from an earlier state. +Submit with a specific checkpoint to branch execution from an earlier state. This creates a new branch in the thread graph while leaving the original path intact. ```typescript -// Fork from the 3rd checkpoint with new input forkFromCheckpoint(index: number) { const checkpoint = this.agent.history()[index]; this.agent.submit( @@ -34,22 +41,134 @@ forkFromCheckpoint(index: number) { { checkpoint: checkpoint.checkpoint } ); } + +// Fork with a completely different input +retryWithAlternative(index: number, newInput: string) { + const checkpoint = this.agent.history()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: newInput }] }, + { checkpoint: checkpoint.checkpoint } + ); +} ``` ## Branch navigation -Use `branch()` and `setBranch()` to navigate between execution branches. +Use `branch()` and `setBranch()` to navigate between execution branches. Branches are automatically created when you fork from a checkpoint. 
```typescript -// Current branch +// Current branch identifier const activeBranch = computed(() => agent.branch()); +// All available branches (if exposed by your graph) +const allBranches = computed(() => agent.history() + .map(s => s.metadata?.branch) + .filter(Boolean) +); + // Switch to a different branch selectBranch(branchId: string) { agent.setBranch(branchId); } ``` +## Building a history UI + +Expose checkpoint history directly in your component to let users scrub through execution steps or rewind to any earlier state. + + + +```typescript +import { Component, inject, computed } from '@angular/core'; +import { streamResource } from '@stream-resource/angular'; +import { AgentService } from './agent.service'; + +@Component({ + selector: 'app-history-viewer', + templateUrl: './history-viewer.component.html', +}) +export class HistoryViewerComponent { + private agentService = inject(AgentService); + readonly agent = this.agentService.agent; + + readonly checkpoints = computed(() => this.agent.history()); + readonly activeIndex = computed(() => + this.checkpoints().length - 1 + ); + + fork(index: number) { + const checkpoint = this.checkpoints()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: 'Try a different approach' }] }, + { checkpoint: checkpoint.checkpoint } + ); + } + + formatTime(isoString: string): string { + return new Date(isoString).toLocaleTimeString(); + } +} +``` + + +```html +
    + @for (cp of checkpoints(); track cp.checkpoint.id; let i = $index) { +
  • + Step {{ i + 1 }} + {{ formatTime(cp.created_at) }} + +
  • + } +
+``` +
+
+ +## Comparing checkpoints + +Diff two checkpoints to understand exactly what changed between execution steps. This is useful for understanding tool call results, message additions, or state mutations. + +```typescript +compareCheckpoints(indexA: number, indexB: number) { + const history = this.agent.history(); + const stateA = history[indexA]?.values; + const stateB = history[indexB]?.values; + + if (!stateA || !stateB) return null; + + // Compare message counts + const messagesAdded = (stateB.messages?.length ?? 0) + - (stateA.messages?.length ?? 0); + + // Identify changed keys + const changedKeys = Object.keys({ ...stateA, ...stateB }).filter( + key => JSON.stringify(stateA[key]) !== JSON.stringify(stateB[key]) + ); + + return { messagesAdded, changedKeys }; +} +``` + +Use the comparison result to render a diff view, highlight changed fields in your UI, or log what the agent modified during a specific step. + -Time travel is most useful during development. Inspect why an agent chose a particular path, then fork to test alternatives without restarting the conversation. +Time travel is most useful during development. Inspect why an agent chose a particular path by comparing adjacent checkpoints, then fork to test alternatives without restarting the conversation. Combine `history()` with Angular DevTools to watch checkpoint arrays update in real time as the agent streams. + +## What's Next + + + + Configure thread storage so checkpoints survive page reloads and are available across sessions. + + + Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. + + + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. + + + Deep dive into branch management, merging strategies, and presenting multi-branch UIs to end users. 
+ + diff --git a/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md b/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md new file mode 100644 index 000000000..f45b73d92 --- /dev/null +++ b/docs/superpowers/plans/2026-04-04-docs-pages-improvement.md @@ -0,0 +1,171 @@ +# Docs Pages Improvement Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Bring all 18 docs pages up to the quality level of the introduction page — expanded content, proper MDX components, navigation sections, and consistent design patterns. + +**Architecture:** Each task updates one or more MDX files. Changes are content-only (no new components needed). All pages should use: glass Callouts with SVG icons, labeled Tabs, code blocks with copy buttons (automatic via Pre component), and "What's Next" CardGroup at the bottom. + +**Baseline:** The introduction page (292 lines) sets the quality bar with: animated diagram, FeatureChips, expanded code examples, Callouts, Steps, Tabs with labels, and CardGroup navigation. + +--- + +## Audit Summary + +| Quality | Pages | Action | +|---------|-------|--------| +| **THIN (needs major expansion)** | streaming (53), time-travel (55), subgraphs (59), 4 API stubs (3 each) | Double or triple content | +| **ADEQUATE (needs polish)** | persistence (89), interrupts (78), memory (65), testing (106), deployment (90), langgraph-basics (51), agent-architecture (55) | Add nav section, expand examples, add Callouts | +| **GOOD (minor polish)** | quickstart (130), installation (102), angular-signals (61), state-management (69) | Add nav section where missing | + +## Common Improvements for ALL Pages + +Every page should get: +1. **"What's Next" CardGroup** at the bottom (links to 2-4 related pages) +2. 
**At least one Callout** (tip, info, or warning) for key insights +3. **Tab-labeled code examples** where showing TypeScript + Template patterns + +--- + +### Task 1: Expand Streaming Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/streaming.mdx` + +Expand from 53 to ~120 lines. Add: +- Stream modes explanation (values, messages, events) +- Error handling during streaming +- Throttle configuration +- Template patterns with `@if` / `@for` +- Callout about SSE connection behavior +- "What's Next" CardGroup + +--- + +### Task 2: Expand Time Travel Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/time-travel.mdx` + +Expand from 55 to ~100 lines. Add: +- UI pattern for building a history timeline +- Tabs showing TypeScript + Template for history display +- Comparing checkpoints +- Callout about debugging workflow +- "What's Next" CardGroup + +--- + +### Task 3: Expand Subgraphs Guide (THIN → GOOD) + +**File:** `apps/website/content/docs-v2/guides/subgraphs.mdx` + +Expand from 59 to ~100 lines. Add: +- Orchestrator pattern with code example +- Tabs showing TypeScript + Template for subagent UI +- Error handling per subagent +- Callout about when to use subagents vs single agent +- "What's Next" CardGroup + +--- + +### Task 4: Expand API Reference Stubs (THIN → ADEQUATE) + +**Files:** +- `apps/website/content/docs-v2/api/stream-resource.mdx` +- `apps/website/content/docs-v2/api/provide-stream-resource.mdx` +- `apps/website/content/docs-v2/api/fetch-stream-transport.mdx` +- `apps/website/content/docs-v2/api/mock-stream-transport.mdx` + +Each API page should have a brief intro paragraph and a usage example before the auto-generated content. ~15-20 lines each. 
+ +--- + +### Task 5: Polish Persistence Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/persistence.mdx` + +Add: +- "What's Next" CardGroup +- Callout about production persistence patterns +- Tab labels using `label` prop if not already + +--- + +### Task 6: Polish Interrupts Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/interrupts.mdx` + +Add: +- Multi-step approval pattern +- "What's Next" CardGroup +- Callout about timeout handling + +--- + +### Task 7: Polish Memory Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/memory.mdx` + +Add: +- Tabs for TypeScript + Template patterns +- "What's Next" CardGroup +- Callout about memory best practices + +--- + +### Task 8: Polish Testing Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/testing.mdx` + +Add: +- "What's Next" CardGroup +- Integration test example with TestBed + +--- + +### Task 9: Polish Deployment Guide (ADEQUATE → GOOD) + +**File:** `apps/website/content/docs-v2/guides/deployment.mdx` + +Add: +- "What's Next" CardGroup +- Monitoring/observability section +- CORS configuration callout + +--- + +### Task 10: Polish Concept Pages (ADEQUATE → GOOD) + +**Files:** +- `apps/website/content/docs-v2/concepts/langgraph-basics.mdx` +- `apps/website/content/docs-v2/concepts/agent-architecture.mdx` + +Add: +- "What's Next" CardGroup to both +- Code examples with Tabs where appropriate + +--- + +### Task 11: Add Navigation to Good Pages + +**Files:** +- `apps/website/content/docs-v2/concepts/angular-signals.mdx` +- `apps/website/content/docs-v2/concepts/state-management.mdx` + +Add: +- "What's Next" CardGroup (these are the only good pages missing it) + +--- + +### Task 12: Final Build Verification + +- [ ] Build website: `npx nx build website --skip-nx-cache` +- [ ] Verify all 19 pages render +- [ ] Spot-check 5 pages for CardGroup, Callouts, and code blocks + +--- + +## Execution Strategy + +Tasks 1-3 (THIN pages) 
are the priority — these need the most work. +Tasks 4-11 are polish passes that can be parallelized. +All tasks are independent of each other. From 421b5fc2ed450ee9418e01eac884a0889b36e2c9 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 16:12:58 -0700 Subject: [PATCH 005/187] docs: expand LangGraph Basics with 4 agent patterns + fix data flow (#11) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(website): massively expand LangGraph Basics with agent patterns From 66 to 377 lines. Covers: - Core concepts: nodes, edges, state with detailed code - 4 agent patterns: ReAct, human-in-the-loop, multi-agent, persistence - Each pattern with Python graph code + Angular streamResource connection - Signal mapping table showing every LangGraph concept → Signal - Graph API vs Functional API comparison - Expanded What's Next with 6 cards * fix(website): replace ASCII data flow diagram with Steps component --- .../docs-v2/concepts/langgraph-basics.mdx | 388 ++++++++++++++++-- 1 file changed, 353 insertions(+), 35 deletions(-) diff --git a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx index 4a8e27d94..7e9e10536 100644 --- a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx +++ b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx @@ -1,65 +1,383 @@ # LangGraph Basics -LangGraph is a framework for building stateful AI agents as directed graphs. This page explains the core concepts for Angular developers who are new to agent development. +LangGraph is a framework for building stateful AI agents as directed graphs. If you're an Angular developer building AI-powered applications, this page teaches you how LangGraph agents work and why streamResource() is the natural bridge between your frontend and your agent backend. -## Graphs, nodes, and edges + +Graphs give you explicit control over agent behavior. 
Instead of a black-box prompt-and-pray approach, you define exactly how your agent reasons, when it calls tools, and where it pauses for human input. Every step is visible, testable, and debuggable. + + +## The Core Concepts + +A LangGraph agent has three building blocks: + +### Nodes — Functions That Do Work + +A node is a Python function that receives the current state, does something, and returns updated state. Every node has the same signature: + +```python +def my_node(state: State, config: RunnableConfig) -> dict: + # Read from state + messages = state["messages"] + + # Do work (call LLM, query DB, invoke tool) + response = llm.invoke(messages) + + # Return state updates (merged into existing state) + return {"messages": [response]} +``` + + +Nodes don't replace state — they return updates that get **merged** into the existing state. For lists like messages, LangGraph uses reducers (like `operator.add`) to accumulate entries instead of overwriting. + + +### Edges — Connections Between Nodes + +Edges define the execution flow. There are two types: + +**Normal edges** — always route to the next node: +```python +builder.add_edge(START, "call_model") # Start → call_model +builder.add_edge("call_model", END) # call_model → End +``` + +**Conditional edges** — route based on state: +```python +def should_continue(state: State) -> str: + last_msg = state["messages"][-1] + if last_msg.tool_calls: + return "tools" # Agent wants to use a tool + return END # Agent is done, return response + +builder.add_conditional_edges("call_model", should_continue) +``` + +### State — The Shared Memory + +All nodes read from and write to a shared state object. 
You define its shape as a Python `TypedDict`: + +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class State(TypedDict): + messages: Annotated[list, add] # Accumulates messages + plan: list[str] # Agent's current plan + results: dict # Tool results +``` + +This state is exactly what streamResource() exposes to your Angular app through Signals. + +## Building Your First Agent + +Here's the simplest possible agent — a chat model that takes messages and responds: + + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") -A LangGraph agent is a directed graph where: +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +# Build the graph: START → call_model → END +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +graph = builder.compile() +``` + + + + +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./src/chat_agent/agent.py:graph" + }, + "env": ".env", + "python_version": "3.12" +} +``` + + + + +```typescript +// This is all you need on the Angular side +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', +}); + +// chat.messages() updates as the agent streams its response +// chat.status() tells you if it's idle, loading, or done +``` + + + + +## Agent Patterns + +The power of LangGraph is in the patterns you can build. Each pattern maps to specific streamResource() signals. + +### Pattern 1: ReAct Agent (Tool Calling) + +The agent reasons, decides to call a tool, observes the result, and loops until it has an answer. 
+ +```python +from langgraph.prebuilt import ToolNode + +@tool +def search_docs(query: str) -> str: + """Search the knowledge base.""" + return vector_store.similarity_search(query) + +tools = [search_docs] + +def call_model(state: State) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +def should_continue(state: State) -> str: + if state["messages"][-1].tool_calls: + return "tools" + return END + +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") # Loop back after tool execution + +graph = builder.compile() +``` + +**Angular connection:** Track tool execution in real-time: +```typescript +const agent = streamResource({ + assistantId: 'react_agent', +}); + +// Watch tools execute +const activeTools = computed(() => agent.toolProgress()); +const completedTools = computed(() => agent.toolCalls()); +``` + +### Pattern 2: Human-in-the-Loop (Approval) + +The agent proposes an action and pauses. Your Angular UI shows an approval dialog. The user decides, and the agent resumes. 
+ +```python +from langgraph.types import Interrupt + +def propose_action(state: State) -> dict: + action = llm.invoke(state["messages"]) + # Pause execution — Angular will show approval UI + raise Interrupt(value={ + "action": "send_email", + "to": "client@example.com", + "body": action.content, + }) + +def execute_action(state: State) -> dict: + # Only runs after human approves + send_email(state["pending_action"]) + return {"messages": [{"role": "assistant", "content": "Email sent."}]} +``` + +**Angular connection:** The interrupt surfaces automatically: +```typescript +const agent = streamResource({ + assistantId: 'approval_agent', +}); + +// Show approval UI when agent pauses +const pendingAction = computed(() => agent.interrupt()); + +// User clicks approve → resume the agent +approve() { + agent.submit(null, { resume: { approved: true } }); +} +``` + +### Pattern 3: Multi-Agent Orchestration + +A supervisor agent delegates work to specialist sub-agents. Each sub-agent is its own graph. 
+ +```python +def supervisor(state: State) -> dict: + routing = llm.invoke([ + {"role": "system", "content": "Route to: researcher, analyst, or writer"}, + *state["messages"] + ]) + return {"next_agent": routing.tool_calls[0].args["agent"]} + +builder = StateGraph(State) +builder.add_node("supervisor", supervisor) +builder.add_node("researcher", researcher_subgraph) +builder.add_node("analyst", analyst_subgraph) +builder.add_conditional_edges("supervisor", lambda s: s["next_agent"]) +``` + +**Angular connection:** Track each sub-agent independently: +```typescript +const orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['researcher', 'analyst', 'writer'], +}); + +// See all active sub-agents +const workers = computed(() => orchestrator.activeSubagents()); +const workerCount = computed(() => workers().length); +``` + +### Pattern 4: Persistent Conversations + +Thread-based persistence means conversations survive page refreshes, browser restarts, and even server deployments. 
+ +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) + +# Each thread_id is a persistent conversation +result = graph.invoke( + {"messages": [user_message]}, + config={"configurable": {"thread_id": "user_123_session"}} +) +``` + +**Angular connection:** Thread persistence is built into streamResource: +```typescript +const chat = streamResource({ + assistantId: 'chat_agent', + threadId: signal(localStorage.getItem('threadId')), + onThreadId: (id) => localStorage.setItem('threadId', id), +}); + +// User returns tomorrow — same thread, full history restored +// No code needed — streamResource handles it +``` + +## How streamResource() Bridges the Gap + +Here's why streamResource() is the natural Angular companion for LangGraph: + + + - -Each node performs one action — calling an LLM, querying a database, or making an API request. Nodes receive state and return updated state. + +Calls `submit({ messages: [userMsg] })` to send user input - -Edges connect nodes. Conditional edges route execution based on state, enabling branching logic. + +Passes input to the transport layer - -All nodes read from and write to a shared state object. This state is what streamResource() exposes through its signals. + +Sends HTTP POST to LangGraph Platform, opens SSE connection + + +Executes graph nodes, calls tools, streams SSE events back + + +Parses SSE chunks into BehaviorSubjects + + +Converts BehaviorSubjects to Angular Signals via `toSignal()` + + +Templates re-render automatically via OnPush change detection -## How streamResource connects + + -Your Angular app doesn't run the graph — LangGraph Platform does. streamResource() is the bridge: +```typescript +// Every LangGraph concept maps to a Signal: -1. Your component calls `submit()` with user input -2. FetchStreamTransport sends an HTTP POST to LangGraph Platform -3. 
The platform runs the graph and streams state updates via SSE -4. streamResource() updates its Signals as events arrive -5. Angular re-renders your templates automatically +// Agent state values +agent.value() // Signal — full state object -## State design +// Conversation +agent.messages() // Signal — message history -The generic type parameter in `streamResource()` defines your agent's state shape. +// Lifecycle +agent.status() // Signal — idle/loading/done +agent.isLoading() // Signal — is the agent running? -```typescript -// Simple chat state -streamResource<{ messages: BaseMessage[] }>({ ... }) - -// Rich agent state with custom fields -interface AgentState { - messages: BaseMessage[]; - plan: string[]; - currentStep: number; - results: Record; -} -streamResource({ ... }) +// Human-in-the-loop +agent.interrupt() // Signal — agent is paused + +// Debugging +agent.history() // Signal — checkpoint timeline +agent.branch() // Signal — time-travel branch + +// Multi-agent +agent.subagents() // Signal — delegated agents +agent.activeSubagents() // Signal — running workers +agent.toolCalls() // Signal — tool results ``` - -For deeper LangGraph concepts (persistence, interrupts, memory), see the individual guide pages. + + + + +You don't configure SSE, parse events, manage WebSocket connections, or handle reconnection. streamResource() does all of that. You call `submit()` and read Signals — that's the entire API surface for your Angular code. 
+## Graph API vs Functional API + +LangGraph offers two ways to define agents: + +**Graph API** (recommended for most cases): +```python +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_edge(START, "model") +graph = builder.compile() +``` + +**Functional API** (for simpler workflows): +```python +from langgraph.func import entrypoint, task + +@entrypoint +async def agent(messages): + response = await call_model(messages) + return response +``` + +Both APIs produce the same output and work identically with streamResource(). Choose the Graph API when you need conditional routing, subgraphs, or interrupts. Choose the Functional API for simple, linear workflows. + ## What's Next - Understand the planning, tool-calling, and execution lifecycle. + Deep dive into the planning, tool-calling, and execution lifecycle - Stream token-by-token responses from your LangGraph agent. + Stream token-by-token responses with multiple stream modes + + + Build human-in-the-loop approval flows + + + Compose multi-agent systems with orchestrators + + + Thread-based conversation persistence - Learn how streamResource exposes agent state as Angular Signals. + How Signals power streamResource's reactive model From c9fc71c390b54d612f4aeabcdaa1ba56d267fea0 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 16:12:58 -0700 Subject: [PATCH 006/187] docs: expand LangGraph Basics with 4 agent patterns + fix data flow (#11) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(website): massively expand LangGraph Basics with agent patterns From 66 to 377 lines. 
Covers: - Core concepts: nodes, edges, state with detailed code - 4 agent patterns: ReAct, human-in-the-loop, multi-agent, persistence - Each pattern with Python graph code + Angular streamResource connection - Signal mapping table showing every LangGraph concept → Signal - Graph API vs Functional API comparison - Expanded What's Next with 6 cards * fix(website): replace ASCII data flow diagram with Steps component --- .../docs-v2/concepts/langgraph-basics.mdx | 388 ++++++++++++++++-- 1 file changed, 353 insertions(+), 35 deletions(-) diff --git a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx index 4a8e27d94..7e9e10536 100644 --- a/apps/website/content/docs-v2/concepts/langgraph-basics.mdx +++ b/apps/website/content/docs-v2/concepts/langgraph-basics.mdx @@ -1,65 +1,383 @@ # LangGraph Basics -LangGraph is a framework for building stateful AI agents as directed graphs. This page explains the core concepts for Angular developers who are new to agent development. +LangGraph is a framework for building stateful AI agents as directed graphs. If you're an Angular developer building AI-powered applications, this page teaches you how LangGraph agents work and why streamResource() is the natural bridge between your frontend and your agent backend. -## Graphs, nodes, and edges + +Graphs give you explicit control over agent behavior. Instead of a black-box prompt-and-pray approach, you define exactly how your agent reasons, when it calls tools, and where it pauses for human input. Every step is visible, testable, and debuggable. + + +## The Core Concepts + +A LangGraph agent has three building blocks: + +### Nodes — Functions That Do Work + +A node is a Python function that receives the current state, does something, and returns updated state. 
Every node has the same signature: + +```python +def my_node(state: State, config: RunnableConfig) -> dict: + # Read from state + messages = state["messages"] + + # Do work (call LLM, query DB, invoke tool) + response = llm.invoke(messages) + + # Return state updates (merged into existing state) + return {"messages": [response]} +``` + + +Nodes don't replace state — they return updates that get **merged** into the existing state. For lists like messages, LangGraph uses reducers (like `operator.add`) to accumulate entries instead of overwriting. + + +### Edges — Connections Between Nodes + +Edges define the execution flow. There are two types: + +**Normal edges** — always route to the next node: +```python +builder.add_edge(START, "call_model") # Start → call_model +builder.add_edge("call_model", END) # call_model → End +``` + +**Conditional edges** — route based on state: +```python +def should_continue(state: State) -> str: + last_msg = state["messages"][-1] + if last_msg.tool_calls: + return "tools" # Agent wants to use a tool + return END # Agent is done, return response + +builder.add_conditional_edges("call_model", should_continue) +``` + +### State — The Shared Memory + +All nodes read from and write to a shared state object. You define its shape as a Python `TypedDict`: + +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class State(TypedDict): + messages: Annotated[list, add] # Accumulates messages + plan: list[str] # Agent's current plan + results: dict # Tool results +``` + +This state is exactly what streamResource() exposes to your Angular app through Signals. 
+ +## Building Your First Agent + +Here's the simplest possible agent — a chat model that takes messages and responds: + + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") -A LangGraph agent is a directed graph where: +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +# Build the graph: START → call_model → END +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +graph = builder.compile() +``` + + + + +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./src/chat_agent/agent.py:graph" + }, + "env": ".env", + "python_version": "3.12" +} +``` + + + + +```typescript +// This is all you need on the Angular side +const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', +}); + +// chat.messages() updates as the agent streams its response +// chat.status() tells you if it's idle, loading, or done +``` + + + + +## Agent Patterns + +The power of LangGraph is in the patterns you can build. Each pattern maps to specific streamResource() signals. + +### Pattern 1: ReAct Agent (Tool Calling) + +The agent reasons, decides to call a tool, observes the result, and loops until it has an answer. 
+ +```python +from langgraph.prebuilt import ToolNode + +@tool +def search_docs(query: str) -> str: + """Search the knowledge base.""" + return vector_store.similarity_search(query) + +tools = [search_docs] + +def call_model(state: State) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +def should_continue(state: State) -> str: + if state["messages"][-1].tool_calls: + return "tools" + return END + +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") # Loop back after tool execution + +graph = builder.compile() +``` + +**Angular connection:** Track tool execution in real-time: +```typescript +const agent = streamResource({ + assistantId: 'react_agent', +}); + +// Watch tools execute +const activeTools = computed(() => agent.toolProgress()); +const completedTools = computed(() => agent.toolCalls()); +``` + +### Pattern 2: Human-in-the-Loop (Approval) + +The agent proposes an action and pauses. Your Angular UI shows an approval dialog. The user decides, and the agent resumes. 
+ +```python +from langgraph.types import Interrupt + +def propose_action(state: State) -> dict: + action = llm.invoke(state["messages"]) + # Pause execution — Angular will show approval UI + raise Interrupt(value={ + "action": "send_email", + "to": "client@example.com", + "body": action.content, + }) + +def execute_action(state: State) -> dict: + # Only runs after human approves + send_email(state["pending_action"]) + return {"messages": [{"role": "assistant", "content": "Email sent."}]} +``` + +**Angular connection:** The interrupt surfaces automatically: +```typescript +const agent = streamResource({ + assistantId: 'approval_agent', +}); + +// Show approval UI when agent pauses +const pendingAction = computed(() => agent.interrupt()); + +// User clicks approve → resume the agent +approve() { + agent.submit(null, { resume: { approved: true } }); +} +``` + +### Pattern 3: Multi-Agent Orchestration + +A supervisor agent delegates work to specialist sub-agents. Each sub-agent is its own graph. 
+ +```python +def supervisor(state: State) -> dict: + routing = llm.invoke([ + {"role": "system", "content": "Route to: researcher, analyst, or writer"}, + *state["messages"] + ]) + return {"next_agent": routing.tool_calls[0].args["agent"]} + +builder = StateGraph(State) +builder.add_node("supervisor", supervisor) +builder.add_node("researcher", researcher_subgraph) +builder.add_node("analyst", analyst_subgraph) +builder.add_conditional_edges("supervisor", lambda s: s["next_agent"]) +``` + +**Angular connection:** Track each sub-agent independently: +```typescript +const orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['researcher', 'analyst', 'writer'], +}); + +// See all active sub-agents +const workers = computed(() => orchestrator.activeSubagents()); +const workerCount = computed(() => workers().length); +``` + +### Pattern 4: Persistent Conversations + +Thread-based persistence means conversations survive page refreshes, browser restarts, and even server deployments. 
+ +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) + +# Each thread_id is a persistent conversation +result = graph.invoke( + {"messages": [user_message]}, + config={"configurable": {"thread_id": "user_123_session"}} +) +``` + +**Angular connection:** Thread persistence is built into streamResource: +```typescript +const chat = streamResource({ + assistantId: 'chat_agent', + threadId: signal(localStorage.getItem('threadId')), + onThreadId: (id) => localStorage.setItem('threadId', id), +}); + +// User returns tomorrow — same thread, full history restored +// No code needed — streamResource handles it +``` + +## How streamResource() Bridges the Gap + +Here's why streamResource() is the natural Angular companion for LangGraph: + + + - -Each node performs one action — calling an LLM, querying a database, or making an API request. Nodes receive state and return updated state. + +Calls `submit({ messages: [userMsg] })` to send user input - -Edges connect nodes. Conditional edges route execution based on state, enabling branching logic. + +Passes input to the transport layer - -All nodes read from and write to a shared state object. This state is what streamResource() exposes through its signals. + +Sends HTTP POST to LangGraph Platform, opens SSE connection + + +Executes graph nodes, calls tools, streams SSE events back + + +Parses SSE chunks into BehaviorSubjects + + +Converts BehaviorSubjects to Angular Signals via `toSignal()` + + +Templates re-render automatically via OnPush change detection -## How streamResource connects + + -Your Angular app doesn't run the graph — LangGraph Platform does. streamResource() is the bridge: +```typescript +// Every LangGraph concept maps to a Signal: -1. Your component calls `submit()` with user input -2. FetchStreamTransport sends an HTTP POST to LangGraph Platform -3. 
The platform runs the graph and streams state updates via SSE -4. streamResource() updates its Signals as events arrive -5. Angular re-renders your templates automatically +// Agent state values +agent.value() // Signal — full state object -## State design +// Conversation +agent.messages() // Signal — message history -The generic type parameter in `streamResource()` defines your agent's state shape. +// Lifecycle +agent.status() // Signal — idle/loading/done +agent.isLoading() // Signal — is the agent running? -```typescript -// Simple chat state -streamResource<{ messages: BaseMessage[] }>({ ... }) - -// Rich agent state with custom fields -interface AgentState { - messages: BaseMessage[]; - plan: string[]; - currentStep: number; - results: Record; -} -streamResource({ ... }) +// Human-in-the-loop +agent.interrupt() // Signal — agent is paused + +// Debugging +agent.history() // Signal — checkpoint timeline +agent.branch() // Signal — time-travel branch + +// Multi-agent +agent.subagents() // Signal — delegated agents +agent.activeSubagents() // Signal — running workers +agent.toolCalls() // Signal — tool results ``` - -For deeper LangGraph concepts (persistence, interrupts, memory), see the individual guide pages. + + + + +You don't configure SSE, parse events, manage WebSocket connections, or handle reconnection. streamResource() does all of that. You call `submit()` and read Signals — that's the entire API surface for your Angular code. 
+## Graph API vs Functional API + +LangGraph offers two ways to define agents: + +**Graph API** (recommended for most cases): +```python +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_edge(START, "model") +graph = builder.compile() +``` + +**Functional API** (for simpler workflows): +```python +from langgraph.func import entrypoint, task + +@entrypoint +async def agent(messages): + response = await call_model(messages) + return response +``` + +Both APIs produce the same output and work identically with streamResource(). Choose the Graph API when you need conditional routing, subgraphs, or interrupts. Choose the Functional API for simple, linear workflows. + ## What's Next - Understand the planning, tool-calling, and execution lifecycle. + Deep dive into the planning, tool-calling, and execution lifecycle - Stream token-by-token responses from your LangGraph agent. + Stream token-by-token responses with multiple stream modes + + + Build human-in-the-loop approval flows + + + Compose multi-agent systems with orchestrators + + + Thread-based conversation persistence - Learn how streamResource exposes agent state as Angular Signals. 
+ How Signals power streamResource's reactive model From 7dfc91591c7515b7506edfc88a1f7c3bf389aca2 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 17:01:14 -0700 Subject: [PATCH 007/187] =?UTF-8?q?docs:=20comprehensive=20overhaul=20?= =?UTF-8?q?=E2=80=94=208=20pages=20rewritten,=203300+=20lines=20added=20(#?= =?UTF-8?q?12)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: add comprehensive docs overhaul master plan (15 tasks, 3 phases) * fix(website): resolve import paths, broken links, code fence, .tsx extensions * fix(website): convert all Tabs to label prop syntax * docs(website): rewrite Angular Signals concept with streaming lifecycle and Python code * docs(website): rewrite State Management with Python reducers and TypeScript mapping * docs(website): rewrite Memory guide with Python Store API and Angular patterns * docs(website): rewrite Agent Architecture with full Python patterns and Angular mapping * docs(website): rewrite Deployment guide with full LangGraph Cloud + Angular deployment * docs(website): rewrite Persistence guide with Python checkpointers and thread UI * docs(website): rewrite Testing guide with comprehensive mock patterns * docs(website): rewrite Interrupts guide with Python interrupt code and approval component * fix(website): fix Callout type='warn' → type='warning' + strip code fence titles --- .../docs-v2/api/fetch-stream-transport.mdx | 2 +- .../docs-v2/api/mock-stream-transport.mdx | 2 +- .../docs-v2/api/provide-stream-resource.mdx | 2 +- .../content/docs-v2/api/stream-resource.mdx | 2 +- .../docs-v2/concepts/agent-architecture.mdx | 684 +++++++++++++++++- .../docs-v2/concepts/angular-signals.mdx | 542 +++++++++++++- .../docs-v2/concepts/state-management.mdx | 536 +++++++++++++- .../docs-v2/getting-started/installation.mdx | 6 +- .../docs-v2/getting-started/quickstart.mdx | 6 +- .../content/docs-v2/guides/deployment.mdx | 410 +++++++++-- 
.../content/docs-v2/guides/interrupts.mdx | 546 +++++++++++++- .../website/content/docs-v2/guides/memory.mdx | 412 ++++++++++- .../content/docs-v2/guides/persistence.mdx | 313 +++++++- .../content/docs-v2/guides/streaming.mdx | 20 +- .../content/docs-v2/guides/subgraphs.mdx | 11 +- .../content/docs-v2/guides/testing.mdx | 500 +++++++++++-- .../content/docs-v2/guides/time-travel.mdx | 11 +- apps/website/next-env.d.ts | 2 +- .../2026-04-04-docs-comprehensive-overhaul.md | 234 ++++++ 19 files changed, 3908 insertions(+), 333 deletions(-) create mode 100644 docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index afb74b5f2..be313baed 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -6,7 +6,7 @@ You rarely need to interact with `FetchStreamTransport` directly — simply prov ```ts import { inject } from '@angular/core'; -import { streamResource, FetchStreamTransport } from '@ngxp/stream-resource'; +import { streamResource, FetchStreamTransport } from '@cacheplane/stream-resource'; // Override transport for a single resource const events = streamResource({ diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index fbf014cd6..d9ebd13c8 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -7,7 +7,7 @@ import { TestBed } from '@angular/core/testing'; import { provideStreamResource, MockStreamTransport, -} from '@ngxp/stream-resource'; +} from '@cacheplane/stream-resource'; beforeEach(() => { TestBed.configureTestingModule({ diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 
6e618bdb1..4863cf489 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -7,7 +7,7 @@ import { bootstrapApplication } from '@angular/platform-browser'; import { provideStreamResource, FetchStreamTransport, -} from '@ngxp/stream-resource'; +} from '@cacheplane/stream-resource'; import { AppComponent } from './app/app.component'; bootstrapApplication(AppComponent, { diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index 719efab1f..e383d3164 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -3,7 +3,7 @@ `streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. ```ts -import { streamResource } from '@ngxp/stream-resource'; +import { streamResource } from '@cacheplane/stream-resource'; // Inside a component or service with injection context const repo = streamResource({ diff --git a/apps/website/content/docs-v2/concepts/agent-architecture.mdx b/apps/website/content/docs-v2/concepts/agent-architecture.mdx index 32a334de6..2571643ed 100644 --- a/apps/website/content/docs-v2/concepts/agent-architecture.mdx +++ b/apps/website/content/docs-v2/concepts/agent-architecture.mdx @@ -1,69 +1,701 @@ # Agent Architecture -How AI agents work — the planning, execution, and tool-calling lifecycle that streamResource() connects your Angular app to. +How AI agents work — the planning, execution, and tool-calling lifecycle that streamResource() connects your Angular app to. This page shows you the Python patterns that power modern agents and exactly how each pattern surfaces in Angular through `@cacheplane/stream-resource`. 
-## The agent loop + +Every section below shows the Python backend code first, then the Angular frontend code that consumes it. You need both halves to build a production agent application — LangGraph handles the intelligence, streamResource() handles the reactivity. + + +## The Agent Loop -An AI agent follows a cycle: +Every agent follows a five-phase cycle. Understanding this cycle is critical because each phase maps to a specific streamResource() signal in your Angular app. - -The user sends a message via `submit()`. streamResource() posts it to LangGraph Platform. + +The user sends a message. On the Angular side, `submit()` posts input to LangGraph Platform. On the Python side, the message lands in the graph's `messages` state key. + +```python +class AgentState(TypedDict): + messages: Annotated[list, add] + plan: list[str] + tool_results: dict +``` + -The LLM decides what to do next — respond directly, call a tool, or delegate to a subagent. +The LLM examines the full message history plus any accumulated state. It decides what to do next — respond directly, call one or more tools, or delegate to a subagent. + +```python +def plan(state: AgentState, config: RunnableConfig) -> dict: + system = """You are a research assistant. Given the conversation, + decide whether to respond directly, search for information, + or analyze data. Use tools when the user needs factual answers.""" + + response = llm.bind_tools(tools).invoke([ + {"role": "system", "content": system}, + *state["messages"], + ]) + return {"messages": [response]} +``` + -Tools run (database queries, API calls, code execution). Results feed back into state. +If the LLM decided to call tools, LangGraph routes to the tool node. Tools run — database queries, API calls, code execution — and their results feed back into state as `ToolMessage` entries. 
+ +```python +from langgraph.prebuilt import ToolNode + +tool_node = ToolNode(tools) +# LangGraph automatically calls each tool the LLM requested +# and appends ToolMessage results to state["messages"] +``` + -The agent streams its response token-by-token. streamResource() updates the `messages()` signal in real-time. +After tools finish (or if no tools were needed), the agent streams its final response token by token. streamResource() updates the `messages()` signal in real time so your Angular template re-renders incrementally. + +```typescript +// Angular side — messages update as tokens arrive +@if (agent.isLoading()) { + +} +@for (msg of agent.messages(); track msg.id) { + +} +``` + -State is checkpointed. The agent may loop back to Plan, or finish. +LangGraph checkpoints the full state — messages, tool results, plan, everything. The agent may loop back to Plan (if tools returned data that needs further reasoning) or finish. The checkpoint is what enables time-travel debugging via `history()`. + +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) +``` + -## Tool calling +## ReAct Pattern + +ReAct (Reason + Act) is the most common agent pattern. The agent reasons about the user's question, decides to call a tool, observes the result, and loops until it has enough information to answer. 
+ + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.prebuilt import ToolNode +from langchain_openai import ChatOpenAI +from langchain_core.tools import tool +from typing_extensions import TypedDict, Annotated +from operator import add + +# --- State --- +class AgentState(TypedDict): + messages: Annotated[list, add] + +# --- Tools --- +@tool +def search_docs(query: str) -> str: + """Search the knowledge base for relevant documents.""" + results = vector_store.similarity_search(query, k=3) + return "\n\n".join(doc.page_content for doc in results) + +@tool +def query_database(sql: str) -> str: + """Run a read-only SQL query against the analytics database.""" + rows = db.execute(text(sql)).fetchall() + return json.dumps([dict(r) for r in rows]) + +@tool +def get_weather(city: str) -> str: + """Get current weather for a city.""" + resp = httpx.get(f"https://api.weather.com/v1/{city}") + return resp.json()["summary"] + +tools = [search_docs, query_database, get_weather] + +# --- LLM with tools bound --- +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: AgentState) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +# --- Routing --- +def should_continue(state: AgentState) -> str: + last_message = state["messages"][-1] + if last_message.tool_calls: + return "tools" + return END + +# --- Graph --- +builder = StateGraph(AgentState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) + +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") # After tools, reason again + +graph = builder.compile() +``` + + + + +```typescript +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface AgentState { + messages: BaseMessage[]; +} + +@Component({ + selector: 'app-react-agent', + 
changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (msg of messages(); track msg.id) { + + } + + @if (activeTools().length) { + + } + + @for (result of completedTools(); track result.id) { + + } + `, +}) +export class ReactAgentComponent { + agent = streamResource({ + assistantId: 'react_agent', + }); + + messages = this.agent.messages; -Agents extend their capabilities through tools. streamResource() tracks tool execution: + // Tools currently executing (spinner, progress bar) + activeTools = computed(() => this.agent.toolProgress()); + + // Tools that finished with results (expandable cards) + completedTools = computed(() => this.agent.toolCalls()); + + send(text: string) { + this.agent.submit({ + messages: [{ role: 'human', content: text }], + }); + } +} +``` + + + + +The key insight: `should_continue` is the decision point. If the LLM's response contains `tool_calls`, the graph routes to the `tools` node. If not, it ends. After tools execute, the graph loops back to `model` so the LLM can reason about the tool results. This loop continues until the LLM responds without requesting any tools. + +## Tool Calling Deep Dive + +Tools are how agents interact with the outside world. Understanding both the Python definition and the Angular consumption is essential. + +### Defining Tools in Python + +Every tool is a Python function decorated with `@tool`. 
LangGraph converts the function signature and docstring into the JSON schema that the LLM uses to decide when and how to call it: + +```python +from langchain_core.tools import tool +from pydantic import BaseModel, Field + +# Simple tool — args inferred from function signature +@tool +def calculate(expression: str) -> str: + """Evaluate a mathematical expression and return the result.""" + return str(eval(expression)) # Use a sandbox in production + +# Structured tool — explicit schema with validation +class EmailInput(BaseModel): + to: str = Field(description="Recipient email address") + subject: str = Field(description="Email subject line") + body: str = Field(description="Email body content") + +@tool(args_schema=EmailInput) +def send_email(to: str, subject: str, body: str) -> str: + """Send an email to the specified recipient.""" + mail_service.send(to=to, subject=subject, body=body) + return f"Email sent to {to}" +``` + + +The LLM reads the docstring to decide when to call a tool. A vague docstring like "does stuff" means the LLM will not know when to use it. Be specific: what the tool does, what it returns, when to use it. + + +### How Tools Surface in Angular + +When the agent calls a tool, streamResource() exposes the execution lifecycle through two signals: + + + ```typescript +// toolProgress() — tools currently executing +// Updates in real time as tools start and complete + const agent = streamResource({ - assistantId: 'research_agent', + assistantId: 'react_agent', }); -// Currently executing tools -const tools = computed(() => agent.toolProgress()); +// Each entry has: name, args, status +const activeTools = computed(() => agent.toolProgress()); + +// Template usage +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (tool of activeTools(); track tool.id) { +
+ + Running {{ tool.name }}... +
{{ tool.args | json }}
+
+ } + `, +}) +export class ToolProgressComponent { + activeTools = computed(() => this.agent.toolProgress()); +} +``` + +
+ + +```typescript +// toolCalls() — completed tool calls with results +// Available after each tool finishes -// Completed tool calls with results const completedTools = computed(() => agent.toolCalls()); + +// Each entry has: name, args, result, duration +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (call of completedTools(); track call.id) { +
+ + {{ call.name }} + {{ call.duration }}ms + +
+

Input

+
{{ call.args | json }}
+
+
+

Output

+
{{ call.result }}
+
+
+ } + `, +}) +export class ToolResultsComponent { + completedTools = computed(() => this.agent.toolCalls()); +} ``` -## Multi-agent patterns +
+
+ +### Tool Execution Flow + +The full lifecycle from Python tool definition to Angular UI update: + + + +The model returns an `AIMessage` with a `tool_calls` array. Each entry specifies the tool name and arguments. + + +The `should_continue` conditional edge detects `tool_calls` and routes to the `tools` node. + + +`ToolNode` calls the Python function. The result is wrapped in a `ToolMessage` and appended to state. + + +LangGraph Platform streams the tool call and result as SSE events to the Angular client. + + +`toolProgress()` updates during execution. `toolCalls()` updates when the tool completes. Both trigger OnPush change detection. + + + +## Multi-Agent Architecture + +When a single agent with tools is not enough, you can compose multiple agents into a supervisor-worker architecture. A supervisor agent receives the user's request, decides which specialist to delegate to, and synthesizes the final answer. + + + + +```python +from langgraph.graph import END, START, StateGraph +from langchain_openai import ChatOpenAI +from typing import Literal +from typing_extensions import TypedDict, Annotated +from operator import add + +class OrchestratorState(TypedDict): + messages: Annotated[list, add] + next_agent: str + research_output: str + analysis_output: str + +llm = ChatOpenAI(model="gpt-5-mini") + +# --- Supervisor --- +def supervisor(state: OrchestratorState) -> dict: + response = llm.bind_tools([route_tool]).invoke([ + {"role": "system", "content": """You are a supervisor. 
+ Route to 'researcher' for fact-finding, + 'analyst' for data analysis, + 'writer' for drafting content, + or 'finish' if the task is complete."""}, + *state["messages"], + ]) + destination = response.tool_calls[0]["args"]["agent"] + return {"next_agent": destination, "messages": [response]} + +# --- Specialist subagents (each is its own compiled graph) --- +researcher_graph = build_researcher_agent() +analyst_graph = build_analyst_agent() +writer_graph = build_writer_agent() -Complex tasks use multiple agents working together: +# --- Routing --- +def route_to_agent(state: OrchestratorState) -> str: + return state["next_agent"] -- **Orchestrator** — one agent delegates to specialized subagents -- **Pipeline** — agents process sequentially, each refining the output -- **Debate** — agents review each other's work +# --- Orchestrator graph --- +builder = StateGraph(OrchestratorState) +builder.add_node("supervisor", supervisor) +builder.add_node("researcher", researcher_graph) +builder.add_node("analyst", analyst_graph) +builder.add_node("writer", writer_graph) -streamResource() supports these patterns through the `subagents()` and `activeSubagents()` signals. 
+builder.add_edge(START, "supervisor") +builder.add_conditional_edges("supervisor", route_to_agent, { + "researcher": "researcher", + "analyst": "analyst", + "writer": "writer", + "finish": END, +}) +# After each specialist, return to supervisor +builder.add_edge("researcher", "supervisor") +builder.add_edge("analyst", "supervisor") +builder.add_edge("writer", "supervisor") + +graph = builder.compile() +``` + + + + +```typescript +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface OrchestratorState { + messages: BaseMessage[]; + next_agent: string; + research_output: string; + analysis_output: string; +} + +@Component({ + selector: 'app-multi-agent', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + + + + +
+

All Subagents

+ @for (entry of allSubagents(); track entry[0]) { + + } +
+ `, +}) +export class MultiAgentComponent { + orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['researcher', 'analyst', 'writer'], + }); + + messages = this.orchestrator.messages; + + // Currently running subagents with live status + activeWorkers = computed(() => this.orchestrator.activeSubagents()); + + // Full map of all subagents (active + completed) + allSubagents = computed(() => + Array.from(this.orchestrator.subagents().entries()) + ); + + send(text: string) { + this.orchestrator.submit({ + messages: [{ role: 'human', content: text }], + }); + } +} +``` + +
+
+ + +The `subagentToolNames` option tells streamResource() which graph nodes are subagents. Without it, subagent execution looks like regular tool calls. With it, `activeSubagents()` and `subagents()` provide dedicated tracking with isolated message histories. + + +## Error Handling and Recovery + +Agents fail. Tools throw exceptions, APIs time out, LLMs hallucinate invalid tool arguments. A robust architecture handles all of these gracefully. + +### Python-Side Error Handling + +```python +from langchain_core.tools import tool, ToolException + +@tool(handle_tool_error=True) +def query_database(sql: str) -> str: + """Run a read-only SQL query against the analytics database.""" + if "DROP" in sql.upper() or "DELETE" in sql.upper(): + raise ToolException("Destructive queries are not allowed.") + try: + rows = db.execute(text(sql)).fetchall() + return json.dumps([dict(r) for r in rows]) + except Exception as e: + raise ToolException(f"Query failed: {str(e)}") +``` + +When `handle_tool_error=True` is set, LangGraph catches `ToolException` and feeds the error message back to the LLM as a `ToolMessage`. The LLM sees the error and can retry with corrected arguments or explain the failure to the user. 
+ +### How Errors Surface in Angular + +```typescript +const agent = streamResource({ + assistantId: 'react_agent', +}); + +// The error() signal captures both transport and agent errors +const error = computed(() => agent.error()); + +// In your template +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @if (error()) { + + } + `, +}) +export class AgentComponent { + error = computed(() => this.agent.error()); + + retry() { + // Re-submit the last message to retry + this.agent.submit(this.lastInput); + } +} +``` + +### Error Recovery Strategies + +| Error type | Python behavior | Angular signal | +|---|---|---| +| Tool throws `ToolException` | Error fed back to LLM, agent retries | `toolCalls()` shows error in result | +| Tool throws unexpected error | LangGraph catches it, marks tool as failed | `error()` fires with details | +| LLM returns invalid tool args | ToolNode validation fails, error fed to LLM | `toolProgress()` shows failed status | +| Transport error (network) | N/A | `error()` fires, `status()` becomes `'error'` | +| Agent exceeds recursion limit | Graph raises `GraphRecursionError` | `error()` fires with recursion message | + + +LangGraph defaults to 25 recursion steps. If your agent loops between `model` and `tools` more than 25 times, it stops with a `GraphRecursionError`. Increase the limit in production with `graph.compile(recursion_limit=50)` or redesign the agent to converge faster. + + +## Checkpointing and Debugging + +Every time a node completes, LangGraph saves a checkpoint — a full snapshot of the agent's state at that moment. streamResource() exposes this checkpoint timeline to Angular, giving you time-travel debugging for free. 
+ +### How Checkpoints Work + +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) + +# Every node execution creates a checkpoint: +# checkpoint_1: after "model" (LLM decided to call search_docs) +# checkpoint_2: after "tools" (search_docs returned results) +# checkpoint_3: after "model" (LLM responded with final answer) +``` + +### Exposing Checkpoints in Angular + +```typescript +const agent = streamResource({ + assistantId: 'react_agent', + threadId: signal('thread_abc123'), +}); + +// Full checkpoint timeline — every state snapshot +const timeline = computed(() => agent.history()); + +// Current branch (for time-travel) +const branch = computed(() => agent.branch()); +``` + +### Building a Debug Timeline + +```typescript +@Component({ + selector: 'app-debug-timeline', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` +
+ @for (checkpoint of history(); track checkpoint.id) { + + } +
+ +
+

State at checkpoint

+
{{ selectedState() | json }}
+
+ `, +}) +export class DebugTimelineComponent { + history = computed(() => this.agent.history()); + currentCheckpoint = signal(null); + + selectedState = computed(() => { + const id = this.currentCheckpoint(); + return this.history().find(c => c.id === id)?.state; + }); + + timeTravel(checkpointId: string) { + this.currentCheckpoint.set(checkpointId); + this.agent.submit(null, { checkpoint: checkpointId }); + } +} +``` + + +When you submit from a previous checkpoint, LangGraph creates a new branch from that point. The original timeline is preserved. The `branch()` signal tells you which branch is currently active. See the [Time Travel guide](/docs/guides/time-travel) for the full walkthrough. + + +## Choosing an Architecture + +Not every application needs a multi-agent swarm. Here is a decision guide for picking the right level of complexity. + +### Single Agent with Tools + +**Use when:** Most applications. The user has a conversation, the agent calls tools as needed, and responds. + +```python +# Simple, powerful, covers 80% of use cases +builder = StateGraph(AgentState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") +graph = builder.compile() +``` + +**Angular signals used:** `messages()`, `toolCalls()`, `toolProgress()`, `status()` + +### Single Agent with Human-in-the-Loop + +**Use when:** The agent takes high-stakes actions (sending emails, modifying data, making purchases) that need human approval. 
+ +```python +from langgraph.types import Interrupt + +def propose_action(state: AgentState) -> dict: + plan = llm.invoke(state["messages"]) + raise Interrupt(value={"action": plan.content, "requires_approval": True}) + +def execute_action(state: AgentState) -> dict: + # Only runs after human approves + return perform_action(state["pending_action"]) +``` + +**Angular signals used:** `messages()`, `interrupt()`, `status()` plus `submit(null, { resume })` to approve + +### Multi-Agent Supervisor + +**Use when:** The task naturally decomposes into specialist roles (researcher, analyst, writer), and each specialist needs its own tools, prompts, and reasoning chain. + +```python +builder = StateGraph(OrchestratorState) +builder.add_node("supervisor", supervisor) +builder.add_node("researcher", researcher_subgraph) +builder.add_node("analyst", analyst_subgraph) +builder.add_conditional_edges("supervisor", route_to_agent) +``` + +**Angular signals used:** `messages()`, `subagents()`, `activeSubagents()`, `toolCalls()`, `status()` + +### Decision Matrix + +| Factor | Single agent | Single + approval | Multi-agent | +|---|---|---|---| +| Tool count | 1-10 | 1-10 | 10+ across specialists | +| Task complexity | Single domain | Single domain, high stakes | Cross-domain | +| Latency budget | Low | Medium (human wait) | Higher (multiple LLM calls) | +| State isolation | Shared | Shared + interrupt | Isolated per subagent | +| Angular complexity | Low | Medium | Higher | -Most applications only need a single agent with tools. Add subagents when you need true task delegation with isolated state. +Begin with a single agent and tools. Add human-in-the-loop when you need approval flows. Graduate to multi-agent only when a single agent's context window cannot hold all the tools and instructions it needs. ## What's Next - Learn the graph, node, and edge model that agents are built on. + Learn the graph, node, and edge primitives that agents are built on. 
- - Compose agents into multi-agent pipelines using subgraphs. + + Stream token-by-token responses with multiple stream modes. - Pause agent execution and wait for human approval mid-run. + Build human-in-the-loop approval flows that pause and resume agents. + + + Compose multi-agent systems with orchestrators and specialist workers. + + + Debug agents by stepping through checkpoint history and branching. + + + How Signals power the reactive model behind streamResource(). diff --git a/apps/website/content/docs-v2/concepts/angular-signals.mdx b/apps/website/content/docs-v2/concepts/angular-signals.mdx index 5fb2a5887..d476ee891 100644 --- a/apps/website/content/docs-v2/concepts/angular-signals.mdx +++ b/apps/website/content/docs-v2/concepts/angular-signals.mdx @@ -1,75 +1,559 @@ # Angular Signals -streamResource() is built on Angular Signals — the reactive primitive introduced in Angular 16+. Every property on a StreamResourceRef is a Signal, making it work seamlessly with OnPush change detection, computed values, and effect callbacks. +Angular Signals are the reactive primitive that powers streamResource(). If you're coming from a Python AI/agent background and wondering how Angular handles real-time streaming data, this page is your guide. Every property on a StreamResourceRef is a Signal, which means your templates update automatically as tokens arrive — no manual subscriptions, no async pipes, no RxJS boilerplate. -## Signals primer + +Think of Signals like a Python property with built-in change notification. When the value changes, every consumer — templates, computed values, effects — re-evaluates automatically. If you've used Pydantic models with validators that react to field changes, Signals are the Angular equivalent but deeply integrated into the rendering engine. + + +## What Are Angular Signals? -A Signal is a reactive value container. When a Signal's value changes, Angular automatically re-renders any template that reads it. 
+A Signal is a reactive value container introduced in Angular 16+. You create one, read it by calling it like a function, and Angular tracks which templates and computations depend on it. ```typescript -// streamResource returns Signals, not Observables -const chat = streamResource({ assistantId: 'agent' }); +import { signal, computed } from '@angular/core'; + +// Create a writable signal +const count = signal(0); + +// Read the current value — call it like a function +console.log(count()); // 0 + +// Update the value +count.set(1); +count.update(prev => prev + 1); -chat.messages() // Signal — call to read -chat.status() // Signal -chat.error() // Signal -chat.isLoading() // Signal (computed) +// Derive new values with computed() +const doubled = computed(() => count() * 2); +console.log(doubled()); // 4 ``` -## Computed values +The key insight: Angular knows which Signals a template reads. When those Signals change, Angular re-renders only the affected parts of the DOM. No diffing the entire tree, no zone.js overhead. -Use `computed()` to derive new Signals from streamResource signals. +## How streamResource Uses Signals Internally + +Under the hood, streamResource() receives Server-Sent Events (SSE) over HTTP and feeds them into RxJS BehaviorSubjects. It then converts those BehaviorSubjects into Angular Signals using `toSignal()`. This is the bridge between the async streaming world and Angular's synchronous reactivity model. + + + ```typescript -const lastMessage = computed(() => - chat.messages().at(-1)?.content ?? '' -); +// Simplified view of what streamResource does internally: + +// 1. SSE events arrive as an observable stream +const messages$ = new BehaviorSubject([]); +const status$ = new BehaviorSubject('idle'); + +// 2. Each SSE chunk updates the BehaviorSubject +transport.onChunk(chunk => { + messages$.next([...messages$.getValue(), chunk.message]); +}); + +// 3. 
BehaviorSubjects become Signals via toSignal() +const messages = toSignal(messages$, { initialValue: [] }); +const status = toSignal(status$, { initialValue: 'idle' }); + +// 4. Your component reads pure Signals — no RxJS knowledge needed +``` + + + + +```typescript +import { streamResource } from '@cacheplane/stream-resource'; + +// You never touch BehaviorSubjects or toSignal() yourself. +// streamResource() hands you clean Signals: +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +chat.messages(); // Signal +chat.status(); // Signal +chat.error(); // Signal +chat.isLoading(); // Signal +chat.value(); // Signal +``` + + + + + +The BehaviorSubject-to-Signal conversion means you get the best of both worlds: RxJS handles the async SSE transport (reconnection, backpressure, error recovery), while Signals handle the synchronous UI reactivity (change detection, template binding, computed derivations). You only interact with the Signal side. + + +## The Streaming Lifecycle as Signals + +Every streamResource() instance moves through a lifecycle: **idle**, **loading**, tokens arriving, then **resolved** (or **error**). The `status()` Signal reflects each transition in real time. + + + +The resource has been created but no request has been submitted yet. All Signals hold their initial values. + +```typescript +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +console.log(chat.status()); // 'idle' +console.log(chat.messages()); // [] +console.log(chat.isLoading()); // false +``` + + + +After calling `submit()`, the status transitions to `'loading'`. The SSE connection is open and the agent is processing. 
+ +```typescript +chat.submit({ messages: [{ role: 'user', content: 'Explain quantum computing' }] }); + +console.log(chat.status()); // 'loading' +console.log(chat.isLoading()); // true +console.log(chat.messages()); // [] (no tokens yet) +``` + -const messageCount = computed(() => - chat.messages().length + +As the agent generates tokens, the `messages()` Signal updates with each chunk. The status remains `'loading'` throughout. + +```typescript +// After first few tokens arrive: +console.log(chat.status()); // 'loading' (still streaming) +console.log(chat.messages()); // [AIMessageChunk("Quantum computing uses...")] + +// After more tokens: +console.log(chat.messages()); // [AIMessageChunk("Quantum computing uses qubits...")] +// The message content grows as tokens stream in +``` + + + +The agent has finished. All tokens have arrived. The status transitions to `'resolved'`. + +```typescript +console.log(chat.status()); // 'resolved' +console.log(chat.isLoading()); // false +console.log(chat.messages()); // [AIMessage("Quantum computing uses qubits to...")] +``` + + + +If the agent fails or the connection drops, the status transitions to `'error'` and the `error()` Signal contains the failure details. + +```typescript +console.log(chat.status()); // 'error' +console.log(chat.error()); // HttpErrorResponse { status: 500, ... } +console.log(chat.isLoading()); // false +``` + + + +## Composing Derived State with computed() + +`computed()` lets you derive new Signals from streamResource Signals. These derived Signals update automatically whenever their dependencies change — during streaming, that means every time a new token arrives. 
+ +```typescript +import { computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +// Count all messages in the conversation +const messageCount = computed(() => chat.messages().length); + +// Get the last message (useful for showing the latest response) +const lastMessage = computed(() => chat.messages().at(-1)); + +// Extract just the assistant's messages +const assistantMessages = computed(() => + chat.messages().filter(m => m._getType() === 'ai') ); -const isIdle = computed(() => - chat.status() === 'idle' +// Track which tools the agent is actively calling +const activeTools = computed(() => + chat.messages() + .filter(m => m._getType() === 'ai') + .flatMap(m => m.tool_calls ?? []) + .filter(tc => !tc.result) ); + +// Build a user-facing error message +const errorDisplay = computed(() => { + const err = chat.error(); + if (!err) return null; + if (err instanceof HttpErrorResponse) { + return err.status === 429 + ? 'Rate limited. Please wait a moment.' + : `Server error (${err.status})`; + } + return 'An unexpected error occurred.'; +}); + +// Combine multiple signals into a single view model +const viewModel = computed(() => ({ + messages: chat.messages(), + isStreaming: chat.isLoading(), + canSend: chat.status() !== 'loading', + messageCount: messageCount(), + error: errorDisplay(), +})); +``` + + +A `computed()` only re-evaluates when one of its dependencies actually changes, and it caches the result. If `chat.messages()` emits the same reference, downstream computeds skip their work entirely. This matters for high-frequency streaming where tokens arrive rapidly. + + +## Side Effects with effect() + +Use `effect()` when a Signal change should trigger work that lives outside the template — logging, analytics, scrolling, persisting state. Effects run in the injection context and are automatically cleaned up when the component is destroyed. 
+ +```typescript +import { effect } from '@angular/core'; + +// Log errors for observability +effect(() => { + const err = chat.error(); + if (err) { + console.error('[StreamResource] Agent error:', err); + this.analytics.track('agent_error', { error: err }); + } +}); + +// Auto-scroll to bottom when new messages arrive +effect(() => { + const msgs = chat.messages(); + if (msgs.length > 0) { + // Schedule after Angular renders the new message + setTimeout(() => { + this.chatContainer.nativeElement.scrollTo({ + top: this.chatContainer.nativeElement.scrollHeight, + behavior: 'smooth', + }); + }); + } +}); + +// Track streaming duration for performance monitoring +effect(() => { + const status = chat.status(); + if (status === 'loading') { + this.streamStart = performance.now(); + } + if (status === 'resolved' && this.streamStart) { + const duration = performance.now() - this.streamStart; + this.analytics.track('stream_duration_ms', { duration }); + this.streamStart = null; + } +}); +``` + + +Writing to a Signal inside an `effect()` can create infinite loops. If you need to transform one Signal into another, use `computed()` instead. Reserve `effect()` for side effects that leave the reactive graph — DOM manipulation, logging, analytics, network calls. + + +## Template Patterns + +Angular's new control flow syntax (`@if`, `@for`, `@switch`) works naturally with Signals. Here's a complete chat template that handles every lifecycle state. + +```typescript +import { ChangeDetectionStrategy, Component, computed, effect, ElementRef, ViewChild } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + + @switch (chat.status()) { + @case ('loading') { +
+ Agent is responding... +
+ } + @case ('error') { +
+ {{ errorDisplay() }} + +
+ } + } + + +
+ @for (message of chat.messages(); track $index) { + @switch (message._getType()) { + @case ('human') { +
+ {{ message.content }} +
+ } + @case ('ai') { +
+ {{ message.content }} + + + @for (tool of message.tool_calls ?? []; track tool.id) { +
+ Called: {{ tool.name }} +
+ } +
+ } + @case ('tool') { +
+ {{ message.name }}: {{ message.content }} +
+ } + } + } @empty { +
+ Send a message to start the conversation. +
+ } +
+ + +
+ + +
+ `, +}) +export class ChatComponent { + @ViewChild('chatContainer') chatContainer!: ElementRef; + + chat = streamResource({ + assistantId: 'chat_agent', + }); + + errorDisplay = computed(() => { + const err = this.chat.error(); + if (!err) return ''; + return err instanceof HttpErrorResponse + ? `Error ${err.status}: ${err.statusText}` + : 'Connection lost. Please retry.'; + }); + + scrollEffect = effect(() => { + const msgs = this.chat.messages(); + if (msgs.length) { + setTimeout(() => + this.chatContainer?.nativeElement.scrollTo({ + top: this.chatContainer.nativeElement.scrollHeight, + behavior: 'smooth', + }) + ); + } + }); + + send(event: Event) { + event.preventDefault(); + const input = (event.target as HTMLFormElement).querySelector('input')!; + const content = input.value.trim(); + if (!content) return; + + this.chat.submit({ + messages: [{ role: 'user', content }], + }); + input.value = ''; + } + + retry() { + this.chat.submit({ + messages: [{ role: 'user', content: 'Please try again.' }], + }); + } +} +``` + +## OnPush Change Detection + +Every component using streamResource() should use `ChangeDetectionStrategy.OnPush`. Here's why it works and why it's efficient. + +With the default change detection strategy, Angular checks every component in the tree on every browser event — clicks, timers, HTTP responses. For a streaming agent emitting dozens of tokens per second, that means hundreds of unnecessary checks across your entire app. + +With OnPush, Angular only checks a component when: + +1. An `@Input()` reference changes +2. An event fires inside the component's template +3. A **Signal** that the template reads changes + +Since streamResource() exposes Signals, condition 3 handles everything. When a new token arrives and `messages()` updates, Angular marks only the components reading that Signal for check — not the entire tree. 
+ +```typescript +@Component({ + // Always use OnPush with streamResource + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` +

{{ chat.messages().length }} messages

+ @if (chat.isLoading()) { + + } + `, +}) +export class ChatComponent { + chat = streamResource({ assistantId: 'chat_agent' }); +} ``` -## OnPush change detection + +With older Observable-based patterns, you had to call `ChangeDetectorRef.markForCheck()` or use the `async` pipe to trigger OnPush updates. Signals do this automatically. When a Signal's value changes, Angular's internal notification system marks the component dirty — zero manual intervention. + + +## Python Agent to Angular Signals + +The real power of streamResource() is how it pairs a Python LangGraph agent with Angular Signals. The agent defines the logic; Signals surface the results in real time. + + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI +from langchain_core.tools import tool + +llm = ChatOpenAI(model="gpt-5-mini", streaming=True) + +@tool +def search_knowledge_base(query: str) -> str: + """Search internal documentation for relevant information.""" + results = vector_store.similarity_search(query, k=3) + return "\n".join(r.page_content for r in results) + +tools = [search_knowledge_base] + +def call_model(state: MessagesState) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +def should_continue(state: MessagesState) -> str: + last_msg = state["messages"][-1] + if last_msg.tool_calls: + return "tools" + return END -Because Signals trigger change detection automatically, streamResource works perfectly with `ChangeDetectionStrategy.OnPush`. 
+# Build the graph +builder = StateGraph(MessagesState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") + +graph = builder.compile() +``` + + + ```typescript +import { ChangeDetectionStrategy, Component, computed, effect } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + @Component({ + selector: 'app-chat', changeDetection: ChangeDetectionStrategy.OnPush, template: ` @for (msg of chat.messages(); track $index) { -

{{ msg.content }}

+ @switch (msg._getType()) { + @case ('human') { +
{{ msg.content }}
+ } + @case ('ai') { +
+ {{ msg.content }} + @for (tc of msg.tool_calls ?? []; track tc.id) { + {{ tc.name }} + } +
+ } + @case ('tool') { +
{{ msg.name }}: {{ msg.content }}
+ } + } + } + + @if (chat.isLoading()) { +
Agent is thinking...
} `, }) export class ChatComponent { - chat = streamResource({ assistantId: 'agent' }); + chat = streamResource({ + assistantId: 'chat_agent', + }); + + // Derived state from the Python agent's output + toolsUsed = computed(() => + this.chat.messages() + .filter(m => m._getType() === 'tool') + .map(m => m.name) + ); + + hasError = computed(() => this.chat.status() === 'error'); + + sendMessage(content: string) { + this.chat.submit({ + messages: [{ role: 'user', content }], + }); + } } ``` -## No RxJS required +
+
-Unlike traditional Angular HTTP patterns, streamResource doesn't use Observables. There are no subscriptions to manage, no async pipes needed, and no memory leak risks. +When the Python agent calls `search_knowledge_base`, the tool call streams to Angular as a message. When the tool returns, the result streams as another message. The agent's final response streams token by token. Every one of these events updates the `messages()` Signal, and your template re-renders the new content automatically. - -Signals are simpler for UI state. They synchronously read the latest value, compose with computed(), and integrate with Angular's template syntax. streamResource handles the async SSE connection internally and surfaces results as Signals. +## Performance: Signals vs Alternatives + +High-frequency token streaming puts unique pressure on a frontend framework. Here's why Signals with OnPush outperform the alternatives. + +| Approach | Token update cost | Memory overhead | Cleanup required | +|---|---|---|---| +| **Signals + OnPush** | Marks only reading components | None beyond Signal | Automatic | +| Observable + async pipe | Creates/destroys subscriptions per `@if` block | Subscription objects | Pipe handles it | +| Observable + manual subscribe | Full component check if you forget `markForCheck()` | Subscription tracking | Manual unsubscribe | +| Default change detection | Checks entire component tree | None | None | + +For a typical chat UI receiving 30-50 tokens per second: + +- **Signals + OnPush**: Only the message list component and its direct ancestors are checked. The sidebar, header, settings panel — all skipped. +- **Default strategy**: Every component in the tree is checked 30-50 times per second, even components with no streaming data. +- **Observable + async pipe**: Works correctly but creates and destroys subscriptions each time an `@if` or `@for` block re-evaluates, adding GC pressure during rapid streaming. 
+ + +Signals use referential equality (`===`) by default. streamResource() creates new array references for `messages()` only when the array actually changes (a new token arrives). Between updates, reading `messages()` returns the same reference and skips downstream recomputation. For custom equality, pass an `equal` function when creating a `computed()`. ## What's Next - Understand how LangGraph agent state flows into Angular Signals. + How LangGraph agent state flows into Angular Signals and how to structure complex state. - See Signals in action with token-by-token streaming responses. + Configure stream modes, handle token-by-token rendering, and manage concurrent streams. + + + Understand the Python agent patterns that produce the events Signals consume. - Full reference for every Signal exposed by streamResource. + Full reference for every Signal, method, and option on StreamResourceRef. + + + Build human-in-the-loop approval flows that pause and resume the agent. + + + Deep dive into change detection optimization for streaming applications. diff --git a/apps/website/content/docs-v2/concepts/state-management.mdx b/apps/website/content/docs-v2/concepts/state-management.mdx index 6c1c9e94a..68397a602 100644 --- a/apps/website/content/docs-v2/concepts/state-management.mdx +++ b/apps/website/content/docs-v2/concepts/state-management.mdx @@ -1,83 +1,541 @@ # State Management -How state flows through streamResource() — from LangGraph's server-side state machine to Angular Signals in your templates. +How agent state flows from LangGraph's server-side state machine into Angular Signals — and why the separation between server state and UI state makes your app simpler, not more complex. -## State lives on the server + +LangGraph Platform owns the state. Angular owns the view. `streamResource()` is the read-only bridge between them. You never manually sync, serialize, or manage agent state in your Angular code. 
+ + +## State Lives on the Server + +In a traditional Angular app, state lives in an NgRx store or a signals-based service. In a LangGraph app, **the agent's state lives on the server** — in LangGraph Platform's checkpoint store. Your Angular app is a stateless view layer that reads state through signals as the agent streams it back. + +This inversion is intentional. Agent state can span multiple LLM calls, tool executions, and human-in-the-loop interrupts. It needs to survive browser refreshes, reconnections, and even server deployments. A server-side checkpoint store handles all of that automatically. Your Angular app just calls `.submit()` and reads signals. + + + +Your Angular component calls `agent.submit({ messages: [userMsg] })`. No state is stored in the component. + + +`@cacheplane/stream-resource` forwards the input to `FetchStreamTransport`, which opens an HTTP POST and SSE connection to LangGraph Platform. + + +The agent runs its nodes — calling the LLM, invoking tools, checking conditions — and streams SSE events back with incremental state updates. + + +Incoming SSE chunks are parsed and pushed into BehaviorSubjects — one per signal type. + + +BehaviorSubjects are converted to Angular Signals via `toSignal()`. Every update triggers Angular's change detection automatically. + + +Components using `OnPush` re-render only when signal values change. No manual `detectChanges()`, no zone triggers, no subscriptions to manage. + + + +## Python State Design + +On the Python side, your agent's state is a `TypedDict`. The fields you define here are exactly what `streamResource()` exposes in TypeScript. Getting the Python state design right is the most important architectural decision in your agent. + +### The TypedDict Pattern + +Every LangGraph state is a `TypedDict`. Fields can be plain values or annotated with reducers that control how updates are merged. 
-Unlike traditional Angular state management (NgRx, signals stores), agent state lives on the LangGraph Platform. Your Angular app is a stateless view layer. + + +```python +from typing_extensions import TypedDict + +class ChatState(TypedDict): + messages: list # Will be replaced on each update + session_id: str # Single value, replaced on update + turn_count: int # Single value, replaced on update ``` -LangGraph Platform (source of truth) - ↓ SSE stream -FetchStreamTransport (transport layer) - ↓ events -streamResource() (signal conversion) - ↓ Signals -Angular templates (reactive rendering) + + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class AgentState(TypedDict): + # Annotated[list, add] means: append new items, don't replace + messages: Annotated[list, add] + tool_results: Annotated[list, add] + + # Plain fields: each update replaces the previous value + status: str + current_plan: list[str] ``` -## The state shape + + + +```python +from langgraph.graph import MessagesState + +# MessagesState is a built-in TypedDict that pre-wires +# messages: Annotated[list[AnyMessage], add_messages] +# add_messages handles deduplication, type coercion, and ordering + +class ProjectState(MessagesState): + # Extend with your own fields + files: Annotated[list[str], add] # Accumulates file paths + analysis: dict # Latest analysis result + progress: int # 0–100 progress value +``` + + + + +### Reducers: How State Merges + +When a node returns `{"messages": [new_msg]}`, LangGraph doesn't replace the messages list — it **calls the reducer** to merge the update. This is what `Annotated[list, add]` means: use Python's `operator.add` to concatenate lists. 
+ +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class ResearchState(TypedDict): + # Each node can append to these — they accumulate across the run + messages: Annotated[list, add] + sources: Annotated[list[str], add] + findings: Annotated[list[str], add] + + # These are replaced (last write wins) + query: str + model: str + confidence: float + +def researcher_node(state: ResearchState) -> dict: + result = llm.invoke(state["messages"]) + new_sources = extract_sources(result.content) + + # Returns partial state — only fields being updated + # LangGraph merges this into the existing state + return { + "messages": [result], # Appended via reducer + "sources": new_sources, # Appended via reducer + "confidence": 0.87, # Replaced + } +``` + + +Nodes return only the fields they change. LangGraph merges partial updates into the full state object. This is why you can have 10 nodes each updating different fields without conflicts. + + +## TypeScript Interface Mapping + +The TypeScript interface you pass to `streamResource()` is your contract with the Python state. Every Python state field maps to a TypeScript property. The types don't need to match exactly — they just need to be compatible with the JSON that LangGraph streams back. + + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import MessagesState + +class ProjectState(MessagesState): + # From MessagesState: messages: Annotated[list[AnyMessage], add_messages] + files: Annotated[list[str], add] + analysis: dict[str, any] | None + progress: int + plan: Annotated[list[str], add] + error: str | None +``` -Your state type defines what the agent manages. The `value()` signal exposes the full state object. 
+ + ```typescript +import { BaseMessage } from '@langchain/core/messages'; + interface ProjectState { + // Maps from MessagesState.messages messages: BaseMessage[]; + + // Maps from Python fields (reducers are transparent — you see the final list) files: string[]; - analysis: { score: number; issues: string[] }; + analysis: { score: number; issues: string[]; summary: string } | null; + progress: number; + plan: string[]; + error: string | null; } const agent = streamResource({ assistantId: 'project_agent', }); - -// Access any state field as a reactive value -const files = computed(() => agent.value().files); -const score = computed(() => agent.value().analysis.score); ``` -## Thread state vs application state + + - -Thread state (managed by LangGraph) and application state (managed by Angular) are separate concerns. Don't try to sync them — read thread state from signals, manage UI state with Angular signals. - +| Python type | TypeScript type | +|-------------|-----------------| +| `str` | `string` | +| `int` / `float` | `number` | +| `bool` | `boolean` | +| `list[str]` | `string[]` | +| `dict[str, any]` | `Record` | +| `TypedDict` | `interface` or `type` | +| `str \| None` | `string \| null` | +| `list[AnyMessage]` | `BaseMessage[]` | +| `Annotated[list, add]` | Same as the list type — reducer is invisible | + + + + +Once you define the interface, every field is accessible via `agent.value()`: ```typescript -// Thread state — from the agent -const messages = agent.messages(); // Read-only signal -const agentStatus = agent.status(); // Read-only signal +// Full typed state object +const state = agent.value(); // Signal + +// Computed values from nested fields +const score = computed(() => agent.value().analysis?.score ?? 
0); +const fileCount = computed(() => agent.value().files.length); +const isDone = computed(() => agent.value().progress === 100); + +// Direct messages access (shortcut for agent.value().messages) +const messages = agent.messages(); // Signal +``` -// Application state — your Angular code -const sidebarOpen = signal(true); // Your UI state -const selectedTab = signal('chat'); // Your UI state +## State Updates During Streaming + +The agent doesn't wait until it's finished to send state updates. It streams partial state updates as each node completes. Your Angular signals update incrementally throughout the run. + +### How Partial Updates Arrive + +LangGraph streams in `values` mode by default — each SSE event contains the full state snapshot after a node completes. In `messages` mode, you get individual message tokens as they're generated. + +```typescript +const agent = streamResource({ + assistantId: 'project_agent', + // Default: values mode — full state after each node + // streamMode: 'messages' — token-by-token for text fields +}); ``` -## State updates are immutable +### Signals Update Mid-Stream -Every state update from the agent creates a new signal value. Angular's change detection picks this up automatically. +Because every state update is a new signal value, your templates reflect the agent's progress in real time — without polling, without timers, without manual state management. ```typescript -// This works with OnPush because the Signal reference changes -@for (msg of agent.messages(); track $index) { -

{{ msg.content }}

+@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + +

Files processed: {{ agent.value().files.length }}

+ + + + + + @for (step of agent.value().plan; track step) { +
  • {{ step }}
  • + } + + + @for (msg of agent.messages(); track $index) { + + } + ` +}) +export class ProjectComponent { + readonly agent = streamResource({ + assistantId: 'project_agent', + }); } +``` + +### Immutability and OnPush -// Computed values re-evaluate when dependencies change +Every signal update produces a new object reference. Angular's `OnPush` change detection compares references — when a signal emits a new value, the component re-renders. You never need to clone objects or call `markForCheck()` manually. + +```typescript +// Safe: computed() re-evaluates when agent.value() changes const hasErrors = computed(() => - agent.value().analysis.issues.length > 0 + (agent.value().analysis?.issues ?? []).length > 0 ); +// Safe: @for tracks by identity, not index, for stable DOM +// track $index is fine for messages since they always append +@for (msg of agent.messages(); track $index) { + +} + +// Safe: null-coalescing handles state fields not yet populated +const score = computed(() => agent.value().analysis?.score ?? 0); +``` + + +`streamResource()` uses `toSignal()` internally with `requireSync: false`. Signals always have a value — even before the first stream update. You never need to handle `undefined` explicitly for the signal itself, though individual state fields may be `null` until the agent populates them. + + +## Thread State vs Application State + +There are two kinds of state in a LangGraph Angular app, and keeping them separate makes your code much easier to reason about. + +**Thread state** is owned by LangGraph Platform. You read it through `streamResource()` signals. You never write to it directly — you only send new input via `.submit()`. + +**Application state** is owned by your Angular component or service. It's UI-only: sidebar visibility, active tab, selected message, form input values. It has nothing to do with the agent. 
+ +```typescript +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + // --- Thread state (from agent, read-only) --- + readonly agent = streamResource({ + assistantId: 'chat_agent', + }); + + // Convenience computed values from thread state + readonly messages = this.agent.messages; // Signal + readonly isLoading = this.agent.isLoading; // Signal + readonly interrupted = this.agent.interrupt; // Signal + + // --- Application state (your Angular signals) --- + readonly sidebarOpen = signal(true); + readonly activeTab = signal<'chat' | 'history' | 'settings'>('chat'); + readonly inputText = signal(''); + readonly selectedMessageId = signal(null); + + // --- Actions --- + send() { + const text = this.inputText(); + if (!text.trim()) return; + this.agent.submit({ messages: [{ role: 'user', content: text }] }); + this.inputText.set(''); // UI state — clear the input + } + + approve() { + this.agent.submit(null, { resume: { approved: true } }); + } +} +``` + + +A common mistake is copying `agent.messages()` into a local signal to "control" it. This creates stale state bugs and defeats the purpose of the reactive signal model. Read thread state directly from `agent.*` signals and derive what you need with `computed()`. + + +## The Checkpoint Model + +LangGraph Platform persists state at every node boundary using a checkpoint store. Each checkpoint is an immutable snapshot of the full state at a point in time. + +``` +Thread: "user_123_session" +│ +├── Checkpoint 1 ← After call_model: { messages: [HumanMessage, AIMessage] } +├── Checkpoint 2 ← After tool_node: { messages: [..., ToolMessage] } +├── Checkpoint 3 ← After call_model: { messages: [..., AIMessage("Here's what I found...")] } +└── (current) +``` + +### What This Means for Your Angular App + +**Resumable threads** — If the user refreshes the page or closes the browser, the thread is still there. 
Pass the same `threadId` and `streamResource()` will restore the full conversation history automatically. + +**Time travel** — You can fork a thread at any checkpoint and replay it with different input. This powers the time-travel debugging guides. + +**Interrupt persistence** — When the agent raises an `Interrupt`, the checkpoint captures everything. The agent can be resumed hours or days later. + +```typescript +const agent = streamResource({ + assistantId: 'chat_agent', + + // Same threadId = restored conversation history + threadId: signal(this.route.snapshot.params['threadId']), + + // New threadId auto-created for new conversations + onThreadId: (id) => this.router.navigate(['/chat', id]), +}); + +// Read checkpoint history for time-travel UI +const history = agent.history(); // Signal +const branch = agent.branch(); // Signal — active branch ID +``` + +For full checkpoint and time-travel patterns, see the [Persistence guide](/docs/guides/persistence) and [Time Travel guide](/docs/guides/time-travel). + +## Custom State Fields + +`messages` is just one field. Real agents carry rich state: structured plans, tool results, progress indicators, metadata, and more. Every custom field you define in Python is available in your TypeScript interface. 
+ + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import MessagesState +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +class ResearchState(MessagesState): + # Accumulating lists — each node can append + plan: Annotated[list[str], add] + sources: Annotated[list[str], add] + findings: Annotated[list[str], add] + + # Scalar progress + progress: int # 0–100 + + # Structured results + report: dict | None # Final report when complete + + # Agent metadata + query: str + model_used: str + +def planner_node(state: ResearchState) -> dict: + steps = llm.invoke([ + {"role": "system", "content": "Break this query into research steps."}, + *state["messages"] + ]) + plan_items = steps.content.split("\n") + return { + "plan": plan_items, # Appended via reducer + "progress": 10, + "model_used": "gpt-5-mini", + } + +def researcher_node(state: ResearchState) -> dict: + # Runs once per plan step in a loop + for step in state["plan"]: + result = search(step) + yield { + "findings": [result], # Each iteration appends + "progress": state["progress"] + (80 // len(state["plan"])), + } +``` + + + + +```typescript +import { BaseMessage } from '@langchain/core/messages'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface ResearchState { + messages: BaseMessage[]; + plan: string[]; + sources: string[]; + findings: string[]; + progress: number; + report: { + title: string; + summary: string; + sections: { heading: string; content: string }[]; + } | null; + query: string; + model_used: string; +} + +// In your component: +readonly agent = streamResource({ + assistantId: 'research_agent', +}); +``` + + + + +```typescript +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + +
    +
    +
    +

    {{ agent.value().progress }}% complete

    + + +
      + @for (step of agent.value().plan; track step) { +
    1. {{ step }}
    2. + } +
    + + +
      + @for (finding of agent.value().findings; track finding) { +
    • {{ finding }}
    • + } +
    + + + @if (agent.value().report; as report) { +
    +

    {{ report.title }}

    +

    {{ report.summary }}

    + @for (section of report.sections; track section.heading) { +
    +

    {{ section.heading }}

    +

    {{ section.content }}

    +
    + } +
    + } + ` +}) +export class ResearchComponent { + readonly agent = streamResource({ + assistantId: 'research_agent', + }); + + startResearch(query: string) { + this.agent.submit({ + messages: [{ role: 'user', content: query }], + }); + } +} +``` + +
    +
    + +### Derived State with computed() + +You rarely need to consume `agent.value()` raw in your template. Use `computed()` to derive clean, focused values: + +```typescript +readonly agent = streamResource({ + assistantId: 'research_agent', +}); + +// Derived signals — recalculate only when their dependencies change +readonly progress = computed(() => this.agent.value().progress); +readonly isPlanning = computed(() => this.agent.value().plan.length === 0 && this.agent.isLoading()); +readonly sourceCount = computed(() => this.agent.value().sources.length); +readonly hasReport = computed(() => this.agent.value().report !== null); +readonly reportTitle = computed(() => this.agent.value().report?.title ?? ''); +``` + ## What's Next - Learn how streamResource uses Signals for reactive rendering. + How streamResource() uses Angular Signals for zero-subscription reactive rendering. + + + Configure stream modes — values, messages, events — for different use cases. - Persist thread state so users can resume conversations later. + Thread-based conversation persistence and checkpoint configuration. + + + Fork threads at any checkpoint and replay with different input. - - Preserve context across sessions with LangGraph's memory store. + + Human-in-the-loop approval flows and how interrupt state surfaces in Angular. + + + Nodes, edges, and the graph execution model behind the state machine. 
-``` diff --git a/apps/website/content/docs-v2/getting-started/installation.mdx b/apps/website/content/docs-v2/getting-started/installation.mdx index f06f1f942..8200f7f67 100644 --- a/apps/website/content/docs-v2/getting-started/installation.mdx +++ b/apps/website/content/docs-v2/getting-started/installation.mdx @@ -48,8 +48,8 @@ Any option passed to `streamResource()` directly overrides the global provider c ## Environment setup - - + + For local development, run a LangGraph server: @@ -61,7 +61,7 @@ langgraph dev ``` - + For production, point to your LangGraph Cloud deployment: diff --git a/apps/website/content/docs-v2/getting-started/quickstart.mdx b/apps/website/content/docs-v2/getting-started/quickstart.mdx index 96f8fcff6..de1aee2fb 100644 --- a/apps/website/content/docs-v2/getting-started/quickstart.mdx +++ b/apps/website/content/docs-v2/getting-started/quickstart.mdx @@ -33,8 +33,8 @@ export const appConfig: ApplicationConfig = { Use `streamResource()` in a component field initializer. Every property on the returned ref is an Angular Signal. - - + + ```typescript // chat.component.ts @@ -67,7 +67,7 @@ export class ChatComponent { ``` - + ```html diff --git a/apps/website/content/docs-v2/guides/deployment.mdx b/apps/website/content/docs-v2/guides/deployment.mdx index 91abe0c21..79e0faea7 100644 --- a/apps/website/content/docs-v2/guides/deployment.mdx +++ b/apps/website/content/docs-v2/guides/deployment.mdx @@ -1,91 +1,407 @@ # Deployment -Configure streamResource() for production with LangGraph Cloud, environment-based URLs, and error handling patterns. +Deploy your LangGraph agent to the cloud and ship your Angular frontend to production with environment-based configuration, authentication, error handling, and observability. -## Production configuration +## Python: LangGraph Cloud deployment -Point `apiUrl` to your LangGraph Cloud deployment. +Your agent code needs a `langgraph.json` manifest at the project root. 
This file tells LangGraph Cloud how to build and serve your agent. - - +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./agent/graph.py:graph" + }, + "env": ".env" +} +``` -```typescript -// app.config.ts -provideStreamResource({ - apiUrl: environment.langgraphUrl, -}) +The `graphs` key maps an assistant ID (used by `streamResource()` on the Angular side) to the Python module path and graph variable. The `env` key points to a file with secrets like `OPENAI_API_KEY` that will be injected at runtime. + +### Agent entry point + +```python +from langchain_openai import ChatOpenAI +from langgraph.graph import StateGraph, MessagesState + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState): + return {"messages": [llm.invoke(state["messages"])]} + +graph = StateGraph(MessagesState) +graph.add_node("model", call_model) +graph.set_entry_point("model") +graph = graph.compile() ``` +### Push and deploy + +```bash +# Initialize and push to GitHub +git init && git add . && git commit -m "initial agent" +gh repo create my-agent --public --source=. --push + +# Deploy via CLI (alternative to the LangSmith UI) +pip install langgraph-cli +langgraph deploy --project my-agent +``` + +The CLI watches your repository and builds a container image on LangGraph Cloud. First deployments take roughly 10-15 minutes. Subsequent pushes to the default branch trigger automatic redeployments. + +## LangSmith deployment walkthrough + +The LangSmith UI provides a visual deployment flow if you prefer not to use the CLI. + + + + +Navigate to [smith.langchain.com](https://smith.langchain.com) and click **Deployments** in the left sidebar, then **+ New Deployment**. + + + + +Authorize LangSmith to access your GitHub account. Select the repository containing your `langgraph.json`. LangSmith auto-detects the manifest and shows the graphs it found. + + + + +Add secrets like `OPENAI_API_KEY` in the deployment settings. 
These are encrypted at rest and injected into your container at runtime. You can also set `LANGCHAIN_TRACING_V2=true` here to enable automatic tracing. + + + + +Click **Deploy**. Once the build succeeds, you will see a deployment URL like `https://my-agent-abc123.langgraph.app`. Copy this URL for your Angular environment configuration. + + + + +## Angular: environment configuration + +Angular uses file-based environment replacement at build time rather than `process.env`. Create separate environment files for development and production. + + + + ```typescript -// environment.prod.ts export const environment = { - langgraphUrl: 'https://your-project.langgraph.app', + production: false, + langgraphUrl: 'http://localhost:2024', + langsmithApiKey: '', // not needed locally }; ``` - + ```typescript -// app.config.ts -provideStreamResource({ - apiUrl: 'https://your-project.langgraph.app', -}) +export const environment = { + production: true, + langgraphUrl: 'https://my-agent-abc123.langgraph.app', + langsmithApiKey: 'lsv2_pt_xxxxxxxx', +}; ``` +Wire the environment into `provideStreamResource()`: + +```typescript +import { provideStreamResource } from '@cacheplane/stream-resource'; +import { environment } from '../environments/environment'; + +export const appConfig: ApplicationConfig = { + providers: [ + provideStreamResource({ + apiUrl: environment.langgraphUrl, + }), + ], +}; +``` + +Angular CLI replaces `environment.ts` with `environment.prod.ts` during `ng build --configuration production` automatically via the `fileReplacements` array in `angular.json`. + +## Authentication + +### API key for LangGraph Platform + +LangGraph Cloud deployments require an API key on every request. The recommended approach is an Angular HTTP interceptor that attaches the key as a header. 
+ +```typescript +import { HttpInterceptorFn } from '@angular/common/http'; +import { environment } from '../environments/environment'; + +export const langGraphAuthInterceptor: HttpInterceptorFn = (req, next) => { + if (req.url.startsWith(environment.langgraphUrl)) { + const cloned = req.clone({ + setHeaders: { + 'x-api-key': environment.langsmithApiKey, + }, + }); + return next(cloned); + } + return next(req); +}; +``` + +Register the interceptor in your application config: + +```typescript +import { provideHttpClient, withInterceptors } from '@angular/common/http'; +import { langGraphAuthInterceptor } from './auth.interceptor'; + +export const appConfig: ApplicationConfig = { + providers: [ + provideHttpClient(withInterceptors([langGraphAuthInterceptor])), + provideStreamResource({ + apiUrl: environment.langgraphUrl, + }), + ], +}; +``` + + +Add `environment.prod.ts` to `.gitignore`. In CI, generate it from environment variables or inject secrets at build time. + + +### User-level authentication + +If your app has its own user authentication (JWT, session cookies), you can add a second interceptor or extend the one above to forward identity headers that your agent can use for per-user scoping. + +## CORS configuration + +When your Angular frontend and LangGraph backend are on different origins, you must configure CORS on the LangGraph side. + +In `langgraph.json`, add an `http` section: + +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./agent/graph.py:graph" + }, + "http": { + "cors": { + "allow_origins": ["https://your-angular-app.com"], + "allow_methods": ["GET", "POST", "PUT", "DELETE", "OPTIONS"], + "allow_headers": ["Content-Type", "x-api-key", "Authorization"], + "allow_credentials": true + } + } +} +``` + + +During local development with `langgraph dev`, CORS is permissive by default. You only need explicit CORS configuration for production deployments. + + ## Error boundaries -Handle errors gracefully in production. 
+Production apps need graceful error handling. Build a reactive error boundary using `streamResource()` signals. ```typescript -const chat = streamResource({ - assistantId: 'chat_agent', -}); +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; -// Reactive error display -hasError = computed(() => chat.status() === 'error'); -errorMessage = computed(() => { - const err = chat.error(); - return err instanceof Error ? err.message : 'Something went wrong'; -}); +@Component({ + selector: 'app-chat', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @if (hasError()) { +
    +

    {{ errorMessage() }}

    + +
    + } + `, +}) +export class ChatComponent { + chat = streamResource({ + assistantId: 'chat_agent', + }); + + hasError = computed(() => this.chat.status() === 'error'); + + errorMessage = computed(() => { + const err = this.chat.error(); + if (err instanceof HttpErrorResponse) { + switch (err.status) { + case 401: return 'Authentication failed. Please check your API key.'; + case 429: return 'Rate limit exceeded. Please wait a moment.'; + case 503: return 'Agent is starting up. Please try again shortly.'; + default: return 'Something went wrong. Please try again.'; + } + } + return err instanceof Error ? err.message : 'An unexpected error occurred.'; + }); + + retry(): void { + this.chat.reload(); + } +} +``` + +### Retry with exponential backoff -// Retry after error -retry() { - chat.reload(); +For automated retries (network blips, transient 5xx errors), wrap `.submit()` with a backoff utility: + +```typescript +export async function retrySubmit( + chat: ReturnType, + input: Record, + maxAttempts = 3, +): Promise { + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + chat.submit(input); + return; + } catch { + if (attempt === maxAttempts - 1) throw new Error('Max retries exceeded'); + await new Promise(r => setTimeout(r, 1000 * 2 ** attempt)); + } + } } ``` -## Recovering interrupted streams +## Stream recovery -Use `joinStream()` to reconnect to a running stream after a network interruption. +Use `joinStream()` to reconnect to a running agent execution after a network interruption, page refresh, or navigation event. 
```typescript -// If you know the run ID (e.g., from a status endpoint) -await chat.joinStream(runId, lastEventId); -// Resumes streaming from where it left off +// Store the run ID when starting a stream +const runId = this.chat.runId(); +localStorage.setItem('activeRunId', runId); + +// After reconnecting, resume from where the stream left off +const savedRunId = localStorage.getItem('activeRunId'); +if (savedRunId) { + await this.chat.joinStream(savedRunId, lastEventId); +} ``` +`joinStream()` replays any events the client missed, then switches to live streaming. This works because all state lives on the LangGraph Platform, and the SSE endpoint supports event ID-based resumption. + -streamResource() is a stateless client. All state lives on the LangGraph Platform. This means your Angular app can be deployed anywhere (CDN, edge, SSR) without state management concerns. +`streamResource()` is a stateless client. All state lives on the LangGraph Platform. This means your Angular app can be deployed anywhere (CDN, edge, SSR) without state management concerns. Scale your frontend independently of your agent infrastructure. -## Checklist +## CI/CD pipeline + +A typical pipeline deploys the Python agent and Angular frontend in parallel since they are independent artifacts. 
+ +```yaml +name: Deploy +on: + push: + branches: [main] + +jobs: + deploy-agent: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + - run: pip install langgraph-cli + - run: langgraph deploy --project my-agent + env: + LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }} + + deploy-angular: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: '22' + - run: npm ci + - name: Generate production environment + run: | + cat > src/environments/environment.prod.ts << 'EOF' + export const environment = { + production: true, + langgraphUrl: '${{ secrets.LANGGRAPH_URL }}', + langsmithApiKey: '${{ secrets.LANGSMITH_API_KEY }}', + }; + EOF + - run: npx ng build --configuration production + - name: Deploy to hosting + run: | + # Replace with your hosting provider's CLI + # e.g., npx vercel deploy --prod dist/my-app/browser + echo "Deploy dist/ to your hosting platform" +``` + +## Monitoring + +### LangSmith observability + +When `LANGCHAIN_TRACING_V2=true` is set in your agent environment, every run is automatically traced in LangSmith. No code changes are needed. 
+ +Key metrics to track in production: + +| Metric | Where to find it | Why it matters | +|--------|-------------------|----------------| +| End-to-end latency | LangSmith Runs tab | Directly affects user-perceived responsiveness | +| Error rate | LangSmith Runs tab, filter by error | Spike detection for broken tools or provider outages | +| Token usage | LangSmith per-run detail | Cost control and budget alerting | +| Time to first token | Angular performance monitoring | Stream startup latency visible to users | +| Thread count | LangGraph Platform dashboard | Capacity planning | + +### Client-side monitoring + +Track stream health from your Angular app: + +```typescript +const status = this.chat.status(); // 'idle' | 'streaming' | 'error' +const isStreaming = this.chat.isStreaming(); + +// Log stream lifecycle for your APM tool +effect(() => { + const s = this.chat.status(); + if (s === 'error') { + this.analytics.trackError('stream_error', this.chat.error()); + } +}); +``` + +## Deployment checklist -Point to your LangGraph Cloud deployment URL. +Point `provideStreamResource({ apiUrl })` to your LangGraph Cloud deployment URL via `environment.prod.ts`. + + +Add an HTTP interceptor to attach `x-api-key` headers to all LangGraph requests. + + +Add your Angular app's origin to the `allow_origins` list in `langgraph.json`. + + +Show user-friendly error messages for 401, 429, 503, and network failures. Provide retry buttons. - -Show user-friendly error messages and retry buttons. + +Store `runId` and use `joinStream()` to reconnect after network interruptions. -Store threadId in localStorage or a backend so users can resume conversations. +Store `threadId` in `localStorage` or a backend so users can resume conversations across sessions. -Set `throttle` option if token-by-token updates are too frequent for your UI. +Set the `throttle` option if token-by-token updates are too frequent for your UI rendering. 
+ + +Set `LANGCHAIN_TRACING_V2=true` in your agent environment for production observability. + + +Add `environment.prod.ts` to `.gitignore`. Generate it from CI secrets at build time. + + +Automate agent and Angular deployments on push to your main branch. + + +Confirm LangSmith traces are arriving and set up alerts for error rate spikes and latency regressions. @@ -93,15 +409,21 @@ Set `throttle` option if token-by-token updates are too frequent for your UI. - Test agent interactions deterministically before deploying. + Test agent interactions deterministically before deploying to production. Store thread IDs so users can resume conversations across sessions. - Tune streaming options like throttle for production performance. + Tune streaming options like throttle and stream modes for production performance. + + + Understand the agent patterns your deployment will serve. Full reference for provideStreamResource configuration options. + + Deep dive into error recovery patterns beyond basic error boundaries. + diff --git a/apps/website/content/docs-v2/guides/interrupts.mdx b/apps/website/content/docs-v2/guides/interrupts.mdx index 8063b852f..bb3bb7a2a 100644 --- a/apps/website/content/docs-v2/guides/interrupts.mdx +++ b/apps/website/content/docs-v2/guides/interrupts.mdx @@ -1,95 +1,561 @@ # Interrupts -Interrupts let your LangGraph agent pause execution and wait for human input. streamResource() surfaces interrupts as Angular Signals, making it easy to build approval flows, confirmation dialogs, and human-in-the-loop experiences. +Interrupts let your LangGraph agent pause mid-execution and hand control to a human. The agent proposes an action, the graph freezes, your Angular UI shows an approval dialog, the user decides, and the agent resumes with the human's decision. streamResource() surfaces interrupts as Angular Signals, so building approval flows, confirmation dialogs, and multi-step review experiences requires no manual event wiring. 
-Use interrupts for human approval, late-binding decisions, or any step where the agent needs external input before continuing. +Use interrupts when an agent action is irreversible (sending an email, placing an order, deleting data), when the agent needs a human decision it cannot make on its own, or when compliance requires explicit approval before execution. -## Basic interrupt handling +## The Interrupt Lifecycle -When an agent interrupts, the `interrupt()` signal contains the interrupt data. +Before diving into code, understand the five-stage lifecycle that every interrupt follows: - - + + +The agent reasons about the user's request and determines an action that requires human approval. It builds a structured payload describing what it wants to do. + + +The agent node calls `raise Interrupt(value={...})`, which freezes the graph. The interrupt payload is persisted in the checkpoint and streamed to the client. + + +streamResource() updates the `interrupt()` signal. Your Angular template detects the change through OnPush change detection and renders an approval dialog with the interrupt payload. + + +The user reviews the proposed action and clicks Approve or Reject. Your component calls `agent.submit()` with a resume payload containing the decision. + + +LangGraph resumes the graph from the interrupted checkpoint. The next node receives the human's decision and either executes or aborts the action. + + + +## Python: Raising an Interrupt + +An interrupt is raised inside any graph node by calling `raise Interrupt(value={...})`. The value can be any JSON-serializable object — it becomes the payload your Angular component displays. 
+ + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.types import Interrupt, Command +from langchain_openai import ChatOpenAI +from typing_extensions import TypedDict, Annotated +from operator import add + +llm = ChatOpenAI(model="gpt-5-mini") + +class State(TypedDict): + messages: Annotated[list, add] + proposed_action: dict + approval_result: dict + +def plan_action(state: State) -> dict: + """Agent analyzes the request and proposes an action.""" + response = llm.invoke([ + {"role": "system", "content": ( + "Analyze the user's request. If it requires sending " + "an email, modifying data, or any irreversible action, " + "return a JSON action plan with keys: action, target, " + "description, risk_level." + )}, + *state["messages"] + ]) + action = parse_json(response.content) + return { + "proposed_action": action, + "messages": [response], + } + +def request_approval(state: State) -> dict: + """Pause the graph and ask the human for approval.""" + action = state["proposed_action"] + raise Interrupt(value={ + "action": action["action"], + "target": action["target"], + "description": action["description"], + "risk_level": action.get("risk_level", "medium"), + }) + +def execute_action(state: State) -> dict: + """Run the approved action or explain the rejection.""" + result = state.get("approval_result", {}) + if result.get("approved"): + # Execute the real action + outcome = perform_action(state["proposed_action"]) + return { + "messages": [{"role": "assistant", "content": ( + f"Done. {outcome}" + )}] + } + else: + reason = result.get("reason", "No reason given") + return { + "messages": [{"role": "assistant", "content": ( + f"Action cancelled. 
Reason: {reason}" + )}] + } + +# Build the graph: plan → approve → execute +builder = StateGraph(State) +builder.add_node("plan", plan_action) +builder.add_node("approve", request_approval) +builder.add_node("execute", execute_action) +builder.add_edge(START, "plan") +builder.add_edge("plan", "approve") +builder.add_edge("approve", "execute") +builder.add_edge("execute", END) + +graph = builder.compile() +``` + + + + +```json +{ + "dependencies": ["."], + "graphs": { + "approval_agent": "./src/approval_agent/agent.py:graph" + }, + "env": ".env", + "python_version": "3.12" +} +``` + + + + + +Place the `raise Interrupt()` call in its own dedicated node. This gives you a clean three-node pattern (plan, approve, execute) where the interrupt sits between reasoning and action. If you raise an interrupt inside a node that also does work, the work before the interrupt runs twice on resume. + + +## Angular: Building an Approval Component + +When the agent raises an interrupt, streamResource() populates the `interrupt()` signal with the interrupt payload. Your component reads this signal to render a dialog and calls `submit()` to resume. 
+ + + ```typescript -// approval.component.ts +import { + Component, + computed, + signal, + ChangeDetectionStrategy, +} from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + interface ApprovalPayload { action: string; + target: string; description: string; - risk: 'low' | 'medium' | 'high'; + risk_level: 'low' | 'medium' | 'high'; } -const agent = streamResource({ - assistantId: 'approval_agent', -}); +interface AgentState { + messages: BaseMessage[]; + proposed_action: ApprovalPayload; + approval_result: { approved: boolean; reason?: string }; +} + +@Component({ + selector: 'app-approval', + templateUrl: './approval.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ApprovalComponent { + agent = streamResource({ + assistantId: 'approval_agent', + }); + + messages = computed(() => this.agent.messages()); + pendingApproval = computed(() => this.agent.interrupt()); + isLoading = computed(() => this.agent.isLoading()); + + rejectionReason = signal(''); + + riskClass = computed(() => { + const interrupt = this.pendingApproval(); + if (!interrupt) return ''; + const level = interrupt.value?.risk_level ?? 'medium'; + return `risk-${level}`; + }); + + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } -// Check for pending interrupts -pendingApproval = computed(() => agent.interrupt()); + approve() { + this.agent.submit(null, { + resume: { approved: true }, + }); + } + + reject() { + this.agent.submit(null, { + resume: { + approved: false, + reason: this.rejectionReason() || 'User rejected', + }, + }); + this.rejectionReason.set(''); + } +} ``` - + ```html - + +
+ @for (msg of messages(); track msg.id) {
    {{ msg.content }}
    + } + + @if (isLoading()) { +
    Agent is working...
    + } +
    + + @if (pendingApproval(); as approval) { -
    -

    Agent needs approval

    -

    {{ approval.value.description }}

    -

    Risk level: {{ approval.value.risk }}

    - - +
    +

    Agent Needs Approval

    + +
    +
    Action
    +
    {{ approval.value.action }}
    + +
    Target
    +
    {{ approval.value.target }}
    + +
    Description
    +
    {{ approval.value.description }}
    + +
    Risk Level
    +
    + + {{ approval.value.risk_level | titlecase }} + +
    +
    + +
    + + +
    + +
    + + +
    } + + +@if (!pendingApproval()) { +
    + + +
    +} ``` -## Resuming from an interrupt +## Multi-Step Approval Pattern + +Some workflows require multiple approvals in sequence. For example, an agent that plans a multi-step deployment might need approval at each stage. Each node in the graph can raise its own interrupt. + + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.types import Interrupt +from typing_extensions import TypedDict, Annotated +from operator import add + +class DeployState(TypedDict): + messages: Annotated[list, add] + plan: list[dict] + current_step: int + completed_steps: list[str] + +def create_plan(state: DeployState) -> dict: + """Generate a multi-step deployment plan.""" + plan = [ + {"step": "backup", "description": "Back up current database"}, + {"step": "migrate", "description": "Run schema migrations"}, + {"step": "deploy", "description": "Deploy new application version"}, + ] + return {"plan": plan, "current_step": 0} + +def approve_step(state: DeployState) -> dict: + """Interrupt for each step that needs approval.""" + step_index = state["current_step"] + step = state["plan"][step_index] + raise Interrupt(value={ + "step_number": step_index + 1, + "total_steps": len(state["plan"]), + "step": step["step"], + "description": step["description"], + "completed": state.get("completed_steps", []), + }) + +def execute_step(state: DeployState) -> dict: + """Execute the approved step and advance.""" + step = state["plan"][state["current_step"]] + # ... perform the actual deployment step ... + return { + "completed_steps": [step["step"]], + "current_step": state["current_step"] + 1, + "messages": [{"role": "assistant", "content": ( + f"Completed: {step['description']}" + )}], + } -Call `submit()` with the resume payload to continue execution. 
+def should_continue(state: DeployState) -> str: + if state["current_step"] < len(state["plan"]): + return "approve_step" + return END + +builder = StateGraph(DeployState) +builder.add_node("create_plan", create_plan) +builder.add_node("approve_step", approve_step) +builder.add_node("execute_step", execute_step) +builder.add_edge(START, "create_plan") +builder.add_edge("create_plan", "approve_step") +builder.add_edge("approve_step", "execute_step") +builder.add_conditional_edges("execute_step", should_continue) + +graph = builder.compile() +``` + + + ```typescript -approve() { - this.agent.submit(null, { resume: { approved: true } }); +import { + Component, + computed, + ChangeDetectionStrategy, +} from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + +interface StepApproval { + step_number: number; + total_steps: number; + step: string; + description: string; + completed: string[]; +} + +@Component({ + selector: 'app-deploy-approval', + templateUrl: './approval.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class DeployApprovalComponent { + agent = streamResource<{ + messages: BaseMessage[]; + plan: { step: string; description: string }[]; + current_step: number; + completed_steps: string[]; + }>({ + assistantId: 'deploy_agent', + }); + + currentStep = computed(() => { + const interrupt = this.agent.interrupt(); + return interrupt?.value as StepApproval | null; + }); + + progress = computed(() => { + const step = this.currentStep(); + if (!step) return 0; + return (step.completed.length / step.total_steps) * 100; + }); + + allInterrupts = computed(() => this.agent.interrupts()); + + approveStep() { + this.agent.submit(null, { resume: { approved: true } }); + } + + abortDeploy() { + this.agent.submit(null, { + resume: { approved: false, reason: 'Deployment aborted by user' }, + }); + } } +``` + + + + +```html +@if (currentStep(); as step) { +
    +

    Step {{ step.step_number }} of {{ step.total_steps }}

    + + +
    +
    +
    -reject() { - this.agent.submit(null, { resume: { approved: false, reason: 'User rejected' } }); + + @if (step.completed.length) { +
      + @for (done of step.completed; track done) { +
    • {{ done }}
    • + } +
    + } + + +
    + {{ step.step }} +

    {{ step.description }}

    +
    + +
    + + +
    +
    } ``` -## Multiple interrupts +
    +
    + +## Typed Interrupt Payloads with BagTemplate -The `interrupts()` signal tracks all interrupts received during a run, not just the current one. +By default, `interrupt()` returns an untyped object. The BagTemplate generic parameter on streamResource() lets you define the exact shape of your interrupt payloads, giving you full TypeScript safety throughout your component. + +BagTemplate is a type parameter on the streamResource configuration that maps signal names to their types. When you specify an interrupt type through BagTemplate, the `interrupt()` signal returns a properly typed object instead of `unknown`. This means your template expressions, computed signals, and event handlers all benefit from compile-time checking. ```typescript -// Track interrupt history -allInterrupts = computed(() => agent.interrupts()); -latestInterrupt = computed(() => agent.interrupt()); -interruptCount = computed(() => agent.interrupts().length); +import { streamResource, BagTemplate } from '@cacheplane/stream-resource'; + +// Define the exact shape of your interrupt payload +interface DeployApproval { + step_number: number; + total_steps: number; + step: string; + description: string; + completed: string[]; +} + +// Pass the interrupt type via BagTemplate +const agent = streamResource< + DeployState, + BagTemplate<{ interrupt: DeployApproval }> +>({ + assistantId: 'deploy_agent', +}); + +// Now interrupt() is typed — no casting needed +const step = agent.interrupt(); +// ^? Signal<{ value: DeployApproval } | null> + +// TypeScript catches errors at compile time +const num = step?.value.step_number; // number — correct +const bad = step?.value.nonexistent; // Error — property doesn't exist ``` - -Use the BagTemplate generic parameter to type your interrupt payloads for full TypeScript safety. + +Define your interrupt payload interfaces alongside your Python state schema. This creates a contract between your agent and your UI. 
When the Python payload shape changes, the TypeScript interface should change too. Consider generating types from a shared schema to keep them in sync. + + +## Timeout Handling + +Interrupts pause graph execution indefinitely by default — the agent waits until a human responds. In production, you often need to handle cases where no one responds within a reasonable time. There are two strategies for managing interrupt timeouts. + +**Server-side timeout with a background task:** Schedule a background job that checks for stale interrupts and resumes them with a default decision. + +```python +async def check_stale_interrupts(): + """Periodic task to auto-reject stale interrupts.""" + threads = await client.threads.search( + status="interrupted", + metadata={"interrupt_type": "approval"}, + ) + for thread in threads: + created = thread.updated_at + if (now() - created).total_seconds() > 3600: # 1 hour timeout + await client.runs.create( + thread["thread_id"], + assistant_id="approval_agent", + input=None, + command={"resume": { + "approved": False, + "reason": "Auto-rejected: approval timeout", + }}, + ) +``` + +**Client-side timeout in Angular:** Use a timer in your component to auto-reject if the user does not act. + +```typescript +import { effect } from '@angular/core'; +import { timer } from 'rxjs'; + +// Watch for interrupts and start a timeout +effect(() => { + const interrupt = this.agent.interrupt(); + if (interrupt) { + const sub = timer(5 * 60 * 1000).subscribe(() => { + // Auto-reject after 5 minutes of inaction + this.agent.submit(null, { + resume: { approved: false, reason: 'Approval timeout' }, + }); + }); + // Clean up if user responds before timeout + return () => sub.unsubscribe(); + } +}); +``` + + +Avoid running both server-side and client-side timeouts simultaneously. If both fire, the second resume call will fail because the graph already moved past the interrupt. 
Choose server-side timeouts for reliability (works even if the browser closes) or client-side timeouts for immediacy. + + + +Because interrupts are checkpointed, the user can close their browser, come back hours later, and still approve or reject the pending action. The graph state is frozen in the checkpoint store, not in browser memory. ## What's Next + + Give your agent short-term and long-term memory with the Store API. + - Resume conversations across page refreshes with thread persistence. + Configure checkpointers that keep interrupt state across deployments. - Stream token-by-token responses and tool progress in real time. + Stream token-by-token responses alongside interrupt events. Script interrupt events deterministically with MockStreamTransport. - - Full reference for streamResource options and returned signals. - diff --git a/apps/website/content/docs-v2/guides/memory.mdx b/apps/website/content/docs-v2/guides/memory.mdx index f2da7b9d5..ca507fdcc 100644 --- a/apps/website/content/docs-v2/guides/memory.mdx +++ b/apps/website/content/docs-v2/guides/memory.mdx @@ -1,82 +1,414 @@ # Memory -Memory in LangGraph preserves useful context that later steps can read back. streamResource() exposes memory through the messages and state signals, with thread persistence providing cross-session continuity. +Memory gives your LangGraph agent the ability to recall past interactions, user preferences, and learned facts. There are two distinct kinds: short-term memory scoped to a single thread (conversation), and long-term memory that persists across threads using the LangGraph Store API. streamResource() surfaces both through Angular Signals so your components stay reactive without manual state wiring. -Short-term memory lives within a thread (conversation history). Long-term memory persists across threads via LangGraph's memory store. 
+Short-term memory lives within a thread — it is the conversation history plus any custom state fields your agent accumulates during a run. Long-term memory lives in the LangGraph Store and survives across threads, users, and sessions. Think of short-term as "what happened in this conversation" and long-term as "what the agent knows about this user." -## Short-term memory (thread-scoped) +## Agent State with Custom Memory Fields -Every message in a thread is automatically preserved. When you reconnect with the same `threadId`, the full conversation history is restored. +Every LangGraph agent has a state schema. You control what the agent remembers by adding fields to that schema. Messages accumulate automatically, but you can define any additional fields the agent should track. -```typescript -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'memory_agent', - threadId: signal(userId()), // User-specific thread -}); + + -// Messages accumulate across the conversation -const messageCount = computed(() => chat.messages().length); +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import END, START, StateGraph +from langchain_openai import ChatOpenAI -// Resume where you left off on next visit -// threadId persists, so history is restored -``` +llm = ChatOpenAI(model="gpt-5-mini") + +class State(TypedDict): + messages: Annotated[list, add] + user_preferences: dict # Accumulated user preferences + conversation_summary: str # Rolling summary of past context + mentioned_topics: list[str] # Topics the user has brought up + +def call_model(state: State) -> dict: + system = "You are a helpful assistant." 
+ if state.get("conversation_summary"): + system += f"\n\nPrevious context: {state['conversation_summary']}" + if state.get("user_preferences"): + system += f"\n\nUser preferences: {state['user_preferences']}" -## Accessing agent state as memory + response = llm.invoke([ + {"role": "system", "content": system}, + *state["messages"] + ]) + return {"messages": [response]} -The `value()` signal contains the full agent state, which can include custom memory fields. +def update_memory(state: State) -> dict: + """Extract preferences and topics from the latest exchange.""" + extraction = llm.invoke([ + {"role": "system", "content": ( + "Extract any user preferences and topics from " + "this conversation. Return JSON with keys: " + "preferences (dict), topics (list[str]), summary (str)." + )}, + *state["messages"][-4:] # Last two exchanges + ]) + parsed = parse_json(extraction.content) + return { + "user_preferences": { + **state.get("user_preferences", {}), + **parsed.get("preferences", {}), + }, + "mentioned_topics": parsed.get("topics", []), + "conversation_summary": parsed.get("summary", ""), + } + +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_node("update_memory", update_memory) +builder.add_edge(START, "model") +builder.add_edge("model", "update_memory") +builder.add_edge("update_memory", END) + +graph = builder.compile() +``` + + + ```typescript +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + interface AgentState { messages: BaseMessage[]; - userPreferences: { theme: string; language: string }; - projectContext: { name: string; files: string[] }; + user_preferences: Record; + conversation_summary: string; + mentioned_topics: string[]; } -const agent = streamResource({ - assistantId: 'context_agent', - threadId: signal(projectId()), -}); +@Component({ + selector: 'app-memory-chat', + templateUrl: './memory.component.html', + 
changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class MemoryChatComponent { + agent = streamResource({ + assistantId: 'memory_agent', + threadId: signal(localStorage.getItem('memory-thread')), + onThreadId: (id) => localStorage.setItem('memory-thread', id), + }); + + // Reactive memory signals derived from agent state + preferences = computed(() => this.agent.value()?.user_preferences ?? {}); + summary = computed(() => this.agent.value()?.conversation_summary ?? ''); + topics = computed(() => this.agent.value()?.mentioned_topics ?? []); + messages = computed(() => this.agent.messages()); -// Read memory fields from agent state -const prefs = computed(() => agent.value().userPreferences); -const context = computed(() => agent.value().projectContext); + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } +} ``` -## Cross-session memory + + + +```html +
+ @for (msg of messages(); track msg.id) {
    {{ msg.content }}
    + } + + @if (agent.isLoading()) { +
    Agent is thinking...
    + } +
    + + + +``` + +
    +
    + + +When `update_memory` returns `user_preferences`, the dict is merged into the existing state. For list fields using the `Annotated[list, add]` reducer, new items are appended. Design your state schema with these merge semantics in mind. + + +## Short-Term Memory (Thread-Scoped) + +Short-term memory is the simplest form: the conversation history and any accumulated state fields within a single thread. Every message, tool call, and state update is automatically checkpointed. When a user reconnects with the same `threadId`, the full history is restored. -Thread persistence enables memory that spans sessions. The agent decides what to store in its state. +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) + +# Every invocation within the same thread accumulates state +result = graph.invoke( + {"messages": [{"role": "user", "content": "I prefer dark mode"}]}, + config={"configurable": {"thread_id": "user_42_session"}} +) + +# Later invocation — same thread, memory intact +result = graph.invoke( + {"messages": [{"role": "user", "content": "What theme do I like?"}]}, + config={"configurable": {"thread_id": "user_42_session"}} +) +# Agent responds: "You mentioned you prefer dark mode." +``` + +On the Angular side, thread-scoped memory requires no extra code. 
The `threadId` signal handles it: ```typescript -// User returns days later — same threadId resumes context -const agent = streamResource({ +const chat = streamResource({ assistantId: 'memory_agent', - threadId: signal(localStorage.getItem('agent-thread')), - onThreadId: (id) => localStorage.setItem('agent-thread', id), + threadId: signal(userId()), // Same user = same thread = same memory }); -// Agent recalls past decisions, preferences, and context -// No explicit memory management needed on the Angular side +// chat.messages() restores full history on reconnect +// chat.value() restores all custom state fields +``` + +## Long-Term Memory (Cross-Thread) with the Store API + +Short-term memory disappears when you start a new thread. For knowledge that should persist across conversations — user preferences, learned facts, project context — use the LangGraph Store API. The Store is a key-value layer that any node can read from and write to, independent of the current thread. + + + + +```python +from langgraph.graph import END, START, StateGraph, MessagesState +from langgraph.store.base import BaseStore +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def recall_memories(state: MessagesState, *, store: BaseStore, config) -> dict: + """Load long-term memories for this user before responding.""" + user_id = config["configurable"]["user_id"] + + # Fetch all memories in this user's namespace + memories = store.search(("memories", user_id)) + memory_text = "\n".join( + f"- {m.value['content']}" for m in memories + ) + + system = ( + "You are a helpful assistant with long-term memory.\n\n" + f"What you remember about this user:\n{memory_text}" + ) + response = llm.invoke([ + {"role": "system", "content": system}, + *state["messages"] + ]) + return {"messages": [response]} + +def save_memories(state: MessagesState, *, store: BaseStore, config) -> dict: + """Extract and persist new facts to the Store.""" + user_id = 
config["configurable"]["user_id"] + + extraction = llm.invoke([ + {"role": "system", "content": ( + "Extract new facts about the user from the latest " + "exchange. Return a JSON list of strings. " + "Return [] if nothing new." + )}, + *state["messages"][-4:] + ]) + facts = parse_json(extraction.content) + + for fact in facts: + store.put( + ("memories", user_id), + key=str(uuid4()), + value={"content": fact}, + ) + + return {} + +builder = StateGraph(MessagesState) +builder.add_node("recall", recall_memories) +builder.add_node("save", save_memories) +builder.add_edge(START, "recall") +builder.add_edge("recall", "save") +builder.add_edge("save", END) + +graph = builder.compile() +``` + + + + +```typescript +import { Component, computed, signal, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-longterm-chat', + templateUrl: './memory.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class LongTermChatComponent { + // Each conversation gets a new thread, but the agent + // remembers the user across all of them via the Store. + agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'memory_agent', + config: { configurable: { user_id: 'user_42' } }, + }); + + messages = computed(() => this.agent.messages()); + + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } +} +``` + + + + +```html +
+ @for (msg of messages(); track msg.id) {
    {{ msg.content }}
    + } + + @if (agent.isLoading()) { +
    Thinking...
    + } + +
    + + +
    +
    +``` + +
    +
    + + +The checkpointer saves thread state (short-term memory). The Store saves cross-thread knowledge (long-term memory). They serve different purposes and you will typically use both. The checkpointer is configured at compile time; the Store is injected into nodes that declare a `store` parameter. + + +## Semantic Memory with Vector Search + +For agents that accumulate hundreds or thousands of memories, keyword matching is not enough. The Store API supports semantic search with embeddings, so your agent can retrieve the most relevant memories for any given context. + +```python +from langchain_openai import OpenAIEmbeddings +from langgraph.store.base import BaseStore + +def recall_relevant(state: MessagesState, *, store: BaseStore, config) -> dict: + """Retrieve memories semantically related to the current question.""" + user_id = config["configurable"]["user_id"] + query = state["messages"][-1].content + + # Vector search — returns memories ranked by cosine similarity + results = store.search( + ("memories", user_id), + query=query, + limit=5, + ) + + memory_text = "\n".join( + f"- [{r.score:.2f}] {r.value['content']}" for r in results + ) + + response = llm.invoke([ + {"role": "system", "content": ( + "Relevant memories (similarity score in brackets):\n" + f"{memory_text}\n\n" + "Use these memories to personalize your response." + )}, + *state["messages"] + ]) + return {"messages": [response]} +``` + +The `store.search()` call accepts a `query` string and returns results ranked by vector similarity. You control how many results to retrieve with the `limit` parameter. Each result includes a `score` field (0 to 1) indicating how relevant the memory is to the query. + + +Semantic search requires an embedding model configured on the Store. LangGraph Platform handles this configuration in `langgraph.json`. When running locally, pass the embeddings provider when constructing your Store instance. 
+ + +## Surfacing Memory in Angular with value() + +The `value()` signal is the primary way memory surfaces in your Angular components. It contains the full agent state object, including all custom memory fields. Because it is a Signal, your template re-renders automatically through OnPush change detection whenever the agent state changes. + +```typescript +// The value() signal contains everything the agent knows +const state = agent.value(); + +// Access specific memory fields +const prefs = state?.user_preferences; +const summary = state?.conversation_summary; +const topics = state?.mentioned_topics; + +// Compose derived signals for template binding +const hasMemory = computed(() => { + const val = agent.value(); + return val?.conversation_summary || val?.mentioned_topics?.length; +}); ``` - -The agent controls what gets stored in memory. streamResource() just surfaces the current state. Design your agent's state schema to include the fields you want to persist. +For long-term memory stored in the Store, the agent must explicitly include retrieved memories in its response or state output. The Store lives server-side; your Angular app only sees what the agent puts into the thread state. + +## Memory Best Practices + + +Every field in your state schema is persisted by the checkpointer. Only include fields the agent actively uses. Avoid dumping raw LLM outputs into state — extract structured data instead. + + + +Thread state grows with every message and state update. For long-running conversations, consider summarizing older messages into a `conversation_summary` field and trimming the message list. This keeps checkpoints small and LLM context windows manageable. + + + +Use hierarchical namespaces like `("memories", user_id)` or `("project", project_id, "notes")` to keep long-term memories organized. This also makes cleanup straightforward — delete an entire namespace when a user requests data removal. 
## What's Next - Save thread IDs and resume conversations across sessions. + Configure checkpointers and thread storage for production deployments. Replay and branch agent runs from any past checkpoint. - - Understand how agent state flows into Angular Signals. + + Pause for human input before the agent acts on its memory. - - Test memory and state behavior with MockStreamTransport. + + How agent state flows from LangGraph into Angular Signals. diff --git a/apps/website/content/docs-v2/guides/persistence.mdx b/apps/website/content/docs-v2/guides/persistence.mdx index 2eb55eca9..73b62cc72 100644 --- a/apps/website/content/docs-v2/guides/persistence.mdx +++ b/apps/website/content/docs-v2/guides/persistence.mdx @@ -1,47 +1,260 @@ # Persistence -Thread persistence keeps conversations alive across page refreshes, browser restarts, and session changes. streamResource() manages thread state through the `threadId` signal and `onThreadId` callback. +Thread persistence keeps conversations alive across page refreshes, browser restarts, and server deployments. This guide covers configuring checkpointers on the Python side and wiring up thread management in your Angular components with streamResource(). -LangGraph checkpoints state at every super-step. streamResource() connects to these checkpoints via thread IDs, letting you resume exactly where you left off. +LangGraph checkpoints agent state at every super-step. Each checkpoint is keyed by a thread ID. streamResource() connects to these checkpoints automatically, so your users resume exactly where they left off — even if your server restarted between sessions. -## Basic thread persistence +## Python: Checkpointer Setup -Save the thread ID to localStorage so conversations survive page refreshes. +Every LangGraph agent needs a checkpointer to persist state between invocations. The checkpointer you choose depends on your environment. 
- - + + + +```python +from langgraph.checkpoint.memory import MemorySaver +from langgraph.graph import START, END, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState) -> dict: + return {"messages": [llm.invoke(state["messages"])]} + +builder = StateGraph(MessagesState) +builder.add_node("model", call_model) +builder.add_edge(START, "model") +builder.add_edge("model", END) + +# MemorySaver stores checkpoints in-process memory +# Fast for development — lost when the process restarts +graph = builder.compile(checkpointer=MemorySaver()) +``` + + + + +```python +from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver +from langgraph.graph import START, END, MessagesState, StateGraph + +# Persists to a local file — survives restarts, zero infrastructure +async with AsyncSqliteSaver.from_conn_string("checkpoints.db") as checkpointer: + builder = StateGraph(MessagesState) + builder.add_node("model", call_model) + builder.add_edge(START, "model") + builder.add_edge("model", END) + + graph = builder.compile(checkpointer=checkpointer) +``` + + + + +```python +from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver +from langgraph.graph import START, END, MessagesState, StateGraph + +DATABASE_URL = "postgresql://user:pass@localhost:5432/myapp" + +async with AsyncPostgresSaver.from_conn_string(DATABASE_URL) as checkpointer: + # Run migrations once on startup + await checkpointer.setup() + + builder = StateGraph(MessagesState) + builder.add_node("model", call_model) + builder.add_edge(START, "model") + builder.add_edge("model", END) + + graph = builder.compile(checkpointer=checkpointer) +``` + + + + + +MemorySaver is for development only — all state vanishes when the process exits. For anything users depend on, use PostgresSaver. SqliteSaver is a middle ground for prototypes and single-server deployments where you need persistence without a database. 
+ + +## Python: Thread IDs in Graph Invocation + +The thread ID is how LangGraph associates a conversation with its checkpoint history. Pass it in the `configurable` dict every time you invoke the graph: + +```python +# First message creates the thread +result = graph.invoke( + {"messages": [{"role": "user", "content": "What is LangGraph?"}]}, + config={"configurable": {"thread_id": "user_123"}} +) + +# Second message continues the same conversation +result = graph.invoke( + {"messages": [{"role": "user", "content": "How does it handle state?"}]}, + config={"configurable": {"thread_id": "user_123"}} +) +# The agent sees both messages — the full history is restored from the checkpoint +``` + + +Use stable, user-scoped identifiers for thread IDs. A common pattern is `f"{user_id}_{session_id}"` — this prevents cross-user data leaks and lets one user have multiple conversations. + + +## Angular: Basic Thread Persistence + +Save the thread ID to localStorage so conversations survive page refreshes. streamResource() handles thread creation and restoration automatically. 
+ + + ```typescript -// chat.component.ts -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'chat_agent', - threadId: signal(localStorage.getItem('threadId')), - onThreadId: (id) => localStorage.setItem('threadId', id), -}); +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { signal } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + // Restore thread from localStorage on mount + threadId: signal(localStorage.getItem('threadId')), + // Persist thread ID whenever a new thread is created + onThreadId: (id) => localStorage.setItem('threadId', id), + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} ``` - + ```html - - + @for (msg of chat.messages(); track $index) { -

    {{ msg.content }}

    +
    +

    {{ msg.content }}

    +
    +} + +@if (chat.isLoading()) { +
    Agent is thinking...
    +} +``` + +
    +
    + +## Angular: Thread-List Component + +A real chat application needs a sidebar showing all conversations. Here is a full thread-list component that manages multiple threads alongside your chat resource. + + + + +```typescript +import { ChangeDetectionStrategy, Component, signal, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface Thread { + id: string; + title: string; + updatedAt: Date; +} + +@Component({ + selector: 'app-thread-list', + templateUrl: './thread-list.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ThreadListComponent { + threads = signal(this.loadThreads()); + activeThreadId = signal(null); + + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + threadId: this.activeThreadId, + onThreadId: (id) => { + this.activeThreadId.set(id); + this.addThread(id, 'New conversation'); + }, + }); + + activeThread = computed(() => + this.threads().find((t) => t.id === this.activeThreadId()) + ); + + selectThread(id: string) { + this.activeThreadId.set(id); + } + + newConversation() { + this.chat.switchThread(null); + // A new thread ID is assigned on the next submit + } + + private addThread(id: string, title: string) { + this.threads.update((list) => [ + { id, title, updatedAt: new Date() }, + ...list.filter((t) => t.id !== id), + ]); + this.saveThreads(); + } + + private loadThreads(): Thread[] { + return JSON.parse(localStorage.getItem('threads') ?? '[]'); + } + + private saveThreads() { + localStorage.setItem('threads', JSON.stringify(this.threads())); + } } ``` + + + +```html + + +
    + @if (chat.isThreadLoading()) { +
    Loading conversation...
    + } @else { + @for (msg of chat.messages(); track $index) { +
    {{ msg.content }}
    + } + } +
    +``` +
    -## Reactive thread switching +## Reactive Thread Switching -Pass a Signal as `threadId` to reactively switch between conversations. +When you pass a Signal as `threadId`, streamResource() reacts to every change. Set the signal and the conversation switches automatically — no imperative calls needed. ```typescript -// conversation-list.component.ts activeThreadId = signal(null); chat = streamResource<{ messages: BaseMessage[] }>({ @@ -50,57 +263,89 @@ chat = streamResource<{ messages: BaseMessage[] }>({ onThreadId: (id) => this.activeThreadId.set(id), }); -// Switch to a different conversation +// Clicking a thread in the sidebar triggers a reactive switch selectThread(id: string) { this.activeThreadId.set(id); - // streamResource automatically loads the new thread's state + // streamResource detects the signal change, fetches the thread's + // checkpoint from the server, and updates all derived signals } ``` -Use the `isThreadLoading()` signal to show a loading indicator while thread state is being fetched from the server. +Use the `isThreadLoading()` signal to show a skeleton UI while streamResource() fetches checkpoint state from the server. This avoids a flash of empty content when switching threads. -## Manual thread switching +## Manual Thread Switching -Use `switchThread()` for imperative thread changes that also reset derived state. +Use `switchThread()` for imperative thread changes. This is useful when you want to explicitly control when the switch happens — for example, after an animation completes or a modal closes. 
```typescript -// Reset and start a new conversation +// Start a fresh conversation (null = new thread on next submit) newConversation() { this.chat.switchThread(null); - // Creates a new thread on next submit } -// Switch to a specific thread +// Jump to a specific thread loadConversation(threadId: string) { this.chat.switchThread(threadId); } + +// Fork a conversation — create a new thread from current state +forkConversation() { + this.chat.switchThread(null); + this.chat.submit({ + messages: this.chat.messages(), + }); +} ``` -## Checkpoint recovery +## Checkpoint Recovery -When a connection drops, streamResource() can rejoin an in-progress run. +When a connection drops mid-stream, `joinStream()` reconnects to an in-progress run without restarting the agent. This prevents duplicate work and lost tokens. ```typescript -// Rejoin a running stream +// Rejoin a running stream after a network interruption await chat.joinStream(runId, lastEventId); -// Picks up from where the connection was lost +// Picks up from the last event — no duplicate agent execution ``` + +In most cases streamResource() handles reconnection internally. Use `joinStream()` directly only when you need explicit control — for example, when restoring a run ID from a URL parameter after a full page reload. + + +## Thread Lifecycle + + + +streamResource() reads the `threadId` signal. If it contains a value, the existing thread's checkpoint is fetched from the server. + + +If `threadId` is null, streamResource() creates a new thread via the LangGraph API and fires `onThreadId` with the new ID. + + +Each super-step is checkpointed server-side. The `messages()` signal updates in real time as events arrive. + + +Setting the `threadId` signal (or calling `switchThread()`) loads the target thread's latest checkpoint. All signals update to reflect the restored state. + + +`joinStream()` reconnects to the in-progress run. The agent does not restart — streaming resumes from the last received event. 
+ + + ## What's Next - Pause agent execution and wait for human input with interrupt signals. + Pause agent execution and wait for human approval before continuing. - Preserve context across sessions using LangGraph's memory store. + Preserve long-term context across sessions with LangGraph's memory store. Stream token-by-token responses and tool progress in real time. - Test agent interactions deterministically with MockStreamTransport. + Test thread persistence and switching deterministically with MockStreamTransport. diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index b3dc03962..0a9df886a 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -15,7 +15,7 @@ Create a `streamResource` in your component, pass it a message, and bind to the ```typescript import { Component, computed } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) @@ -24,10 +24,10 @@ export class ChatComponent { assistantId: 'chat_agent', }); - readonly isStreaming = computed(() => this.chat.status() === 'streaming'); + readonly isStreaming = computed(() => this.chat.status() === 'loading'); send(text: string) { - this.chat.stream({ messages: [{ role: 'user', content: text }] }); + this.chat.submit({ messages: [{ role: 'user', content: text }] }); } } ``` @@ -126,7 +126,7 @@ If the SSE connection drops or the agent throws, `status()` transitions to `'err ```typescript import { Component, computed, effect } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', 
templateUrl: './chat.component.html' }) @@ -139,7 +139,7 @@ export class ChatComponent { retry() { // Re-stream using the same thread so context is preserved - this.chat.stream(); + this.chat.submit(); } } ``` @@ -184,22 +184,22 @@ The value is in milliseconds. A `throttle` of `0` (default) disables batching an | Background summarisation | 150 ms | -Each call to `chat.stream()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. +Each call to `chat.submit()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. ## What's Next - + Resume conversations across page reloads using thread IDs and checkpointers. - + Pause agent execution mid-stream to collect human input before continuing. - + Unit-test components that use streamResource with the built-in test harness. - + Full option reference for streamResource(), including all configuration keys. diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index 2663160e1..5519b47be 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -76,7 +76,7 @@ const pipelineStatus = computed(() => { Render live progress for each subagent using the signals above. - + ```typescript import { computed } from '@angular/core'; @@ -183,16 +183,13 @@ Use **subagents** when tasks are independent and can run in parallel, when each ## What's Next - + Understand how streamResource() surfaces tokens, status, and errors in real time. - + Write unit and integration tests for orchestrator graphs and subagent interactions. - + Full reference for streamResource() options, signals, and subagent configuration. 
- - Patterns for retries, fallbacks, and surfacing errors from deeply nested agents. - diff --git a/apps/website/content/docs-v2/guides/testing.mdx b/apps/website/content/docs-v2/guides/testing.mdx index 0d0f7f80f..453bd54ce 100644 --- a/apps/website/content/docs-v2/guides/testing.mdx +++ b/apps/website/content/docs-v2/guides/testing.mdx @@ -1,19 +1,55 @@ # Testing -MockStreamTransport lets you test agent interactions deterministically without a running LangGraph server. Script exact event sequences and step through them in your Angular test specs. +MockStreamTransport lets you test agent interactions deterministically without a running LangGraph server. Script exact event sequences, step through streaming lifecycles, and verify every signal transition in your Angular test specs. -MockStreamTransport eliminates network dependencies, timing issues, and server state. Every test run produces identical results. +MockStreamTransport eliminates network dependencies, timing issues, and server state. Every test run produces identical results. Your CI pipeline stays green. -## Basic test setup +## Python: Testing the Agent -Create a MockStreamTransport with scripted events and pass it to streamResource. +Before testing the Angular side, make sure your agent logic is correct. LangGraph agents are plain Python functions — test them directly with pytest. 
+ +```python +import pytest +from langchain_core.messages import HumanMessage +from my_agent.agent import graph + +@pytest.mark.asyncio +async def test_agent_responds(): + result = await graph.ainvoke( + {"messages": [HumanMessage(content="Hello")]}, + config={"configurable": {"thread_id": "test_1"}}, + ) + assert len(result["messages"]) >= 2 + assert result["messages"][-1].type == "ai" + +@pytest.mark.asyncio +async def test_agent_uses_tools(): + result = await graph.ainvoke( + {"messages": [HumanMessage(content="Search for LangGraph docs")]}, + config={"configurable": {"thread_id": "test_2"}}, + ) + # Verify the agent called the search tool + tool_messages = [m for m in result["messages"] if m.type == "tool"] + assert len(tool_messages) > 0 +``` + + +With MemorySaver and a mocked LLM, agent tests run in milliseconds. Use `langchain_core.language_models.FakeListChatModel` to remove the LLM dependency entirely. + + +## MockStreamTransport: Basic Setup + +On the Angular side, MockStreamTransport replaces the real HTTP transport. Create it inside `TestBed.runInInjectionContext` so streamResource() has access to Angular's dependency injection. + + + ```typescript import { TestBed } from '@angular/core/testing'; -import { MockStreamTransport } from '@cacheplane/stream-resource'; -import type { StreamEvent } from '@cacheplane/stream-resource'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; +import type { BaseMessage } from '@cacheplane/stream-resource'; describe('ChatComponent', () => { it('should display agent messages', () => { @@ -25,9 +61,12 @@ describe('ChatComponent', () => { transport, }); - // Emit a values event + // Emit a values event — simulates the agent responding transport.emit([ - { type: 'values', messages: [{ role: 'assistant', content: 'Hello!' }] }, + { + type: 'values', + messages: [{ role: 'assistant', content: 'Hello!' 
}], + }, ]); expect(chat.messages().length).toBe(1); @@ -37,72 +76,441 @@ describe('ChatComponent', () => { }); ``` -## Scripting event sequences + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} +``` + + + + +## Scripted Event Sequences -Pass event batches to the constructor for sequential playback. +Pass event batches to the constructor for sequential playback. Each call to `nextBatch()` advances one step — giving you frame-by-frame control over what the component sees. ```typescript const transport = new MockStreamTransport([ - // Batch 1: Initial response + // Batch 1: Agent starts thinking [{ type: 'values', messages: [{ role: 'assistant', content: 'Analyzing...' }] }], - // Batch 2: Final response - [{ type: 'values', messages: [{ role: 'assistant', content: 'Done!' }] }], + // Batch 2: Agent finishes + [{ type: 'values', messages: [{ role: 'assistant', content: 'Here is your answer.' 
}] }], ]); -// Advance through batches -const batch1 = transport.nextBatch(); // First batch -const batch2 = transport.nextBatch(); // Second batch +TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + chat.submit({ messages: [{ role: 'user', content: 'Explain signals' }] }); + + // Step through each batch + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Analyzing...'); + + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Here is your answer.'); +}); ``` -## Testing interrupts +## Testing the Streaming Lifecycle + +The most common test pattern verifies the full submit-to-resolved lifecycle: submit triggers loading, values arrive, and the status settles to resolved. -Script an interrupt event to test human-in-the-loop flows. + + ```typescript -it('should handle interrupts', () => { - const transport = new MockStreamTransport(); +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('streaming lifecycle', () => { + it('should transition through loading → values → resolved', () => { + const transport = new MockStreamTransport([ + [{ type: 'values', messages: [{ role: 'assistant', content: 'Thinking...' }] }], + [{ type: 'values', messages: [{ role: 'assistant', content: 'Done!' 
}] }], + ]); - TestBed.runInInjectionContext(() => { - const agent = streamResource({ - assistantId: 'approval_agent', - transport, + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + // Initial state + expect(chat.status()).toBe('idle'); + expect(chat.messages()).toEqual([]); + + // Submit triggers loading + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + expect(chat.status()).toBe('loading'); + expect(chat.isLoading()).toBe(true); + + // First batch — partial response + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Thinking...'); + expect(chat.status()).toBe('loading'); + + // Second batch — final response + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Done!'); + + // Stream completes + transport.complete(); + expect(chat.status()).toBe('resolved'); + expect(chat.isLoading()).toBe(false); }); + }); +}); +``` - // Emit an interrupt - transport.emit([ - { type: 'interrupt', value: { action: 'delete', risk: 'high' } }, - ]); + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; - expect(agent.interrupt()).toBeDefined(); - expect(agent.interrupt()?.value.risk).toBe('high'); +@Component({ + selector: 'app-chat', + template: ` + @if (chat.isLoading()) { +
    Thinking...
    + } + @for (msg of chat.messages(); track $index) { +
    {{ msg.content }}
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} +``` + +
    +
    + +## Testing Interrupts + +Script an interrupt event to test human-in-the-loop flows. Verify the interrupt signal surfaces the payload, then resume and confirm the agent continues. + + + + +```typescript +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('interrupt handling', () => { + it('should surface interrupt and resume on approval', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'approval_agent', + transport, + }); + + // Agent hits an interrupt + transport.emit([ + { + type: 'interrupt', + value: { action: 'delete_account', risk: 'high' }, + }, + ]); + + // Verify interrupt signal + expect(agent.interrupt()).toBeDefined(); + expect(agent.interrupt()?.value.action).toBe('delete_account'); + expect(agent.interrupt()?.value.risk).toBe('high'); + + // User approves — resume the agent + agent.submit(null, { resume: { approved: true } }); + + // Agent continues after approval + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Account deleted.' }], + }, + ]); + + expect(agent.interrupt()).toBeNull(); + expect(agent.messages()[0].content).toBe('Account deleted.'); + }); }); }); ``` -## Testing errors + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-approval', + template: ` + @if (agent.interrupt(); as interrupt) { +
    +

    Action: {{ interrupt.value.action }}

    +

    Risk: {{ interrupt.value.risk }}

    + + +
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ApprovalComponent { + agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'approval_agent', + }); + + approve() { + this.agent.submit(null, { resume: { approved: true } }); + } + + reject() { + this.agent.submit(null, { resume: { approved: false } }); + } +} +``` + +
    +
    + +## Testing Errors + +Inject errors with `emitError()` to verify your component handles failures gracefully. -Inject errors to test error handling. + + ```typescript -it('should surface errors', () => { - const transport = new MockStreamTransport(); +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('error handling', () => { + it('should surface errors and set error status', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); - TestBed.runInInjectionContext(() => { - const chat = streamResource({ - assistantId: 'test_agent', - transport, + // Simulate a connection failure + transport.emitError(new Error('Connection lost')); + + expect(chat.error()).toBeDefined(); + expect(chat.error()?.message).toBe('Connection lost'); + expect(chat.status()).toBe('error'); + expect(chat.isLoading()).toBe(false); }); + }); + + it('should recover from errors on retry', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); - transport.emitError(new Error('Connection lost')); + // First attempt fails + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + transport.emitError(new Error('Timeout')); + expect(chat.status()).toBe('error'); - expect(chat.error()).toBeDefined(); - expect(chat.status()).toBe('error'); + // Retry succeeds + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Sorry for the delay!' 
}], + }, + ]); + + expect(chat.status()).not.toBe('error'); + expect(chat.messages()[0].content).toBe('Sorry for the delay!'); + }); }); }); ``` - -streamResource() must be called within an Angular injection context. In tests, wrap calls in `TestBed.runInInjectionContext()`. + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + template: ` + @if (chat.error(); as err) { +
    +

    {{ err.message }}

    + +
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + private lastMessage = ''; + + send(text: string) { + this.lastMessage = text; + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } + + retry() { + this.send(this.lastMessage); + } +} +``` + +
    +
    + +## Testing Thread Switching + +Verify that switching threads loads the correct conversation state and clears the previous thread's messages. + +```typescript +describe('thread switching', () => { + it('should load new thread state on switch', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const threadId = signal('thread_A'); + + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + threadId, + transport, + }); + + // Thread A has messages + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Thread A response' }], + }, + ]); + expect(chat.messages()[0].content).toBe('Thread A response'); + + // Switch to thread B + chat.switchThread('thread_B'); + + // Thread B loads its own state + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Thread B response' }], + }, + ]); + expect(chat.messages()[0].content).toBe('Thread B response'); + }); + }); + + it('should create a new thread when switching to null', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + // Start a conversation + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Hello' }], + }, + ]); + + // Switch to new thread + chat.switchThread(null); + expect(chat.messages()).toEqual([]); + }); + }); +}); +``` + +## Test Setup Workflow + + + +Make sure `@cacheplane/stream-resource` is available in your test environment. MockStreamTransport ships with the main package — no extra install needed. + + +Instantiate `MockStreamTransport` with optional pre-scripted batches for sequential playback, or leave it empty for imperative `emit()` calls. + + +Call `TestBed.runInInjectionContext(() => { ... 
})` so streamResource() can access Angular's injector for signal creation and cleanup. + + +Pass the transport to streamResource() via the `transport` option. All other options (assistantId, threadId, onThreadId) work identically to production code. + + +Use `transport.emit()` for ad-hoc events, `transport.nextBatch()` for pre-scripted sequences, or `transport.emitError()` for failure scenarios. + + +Read signals like `chat.messages()`, `chat.status()`, `chat.interrupt()`, and `chat.error()` to verify your component reacts correctly. + + + +## Integration Testing + +For end-to-end confidence, run tests against a real LangGraph dev server. The LangGraph CLI starts a local server that your tests can hit directly. + +```bash +# Start the dev server +langgraph dev --config langgraph.json + +# Run Angular tests against it (no MockStreamTransport needed) +ng test --watch=false +``` + + +Integration tests hit a real server and (potentially) a real LLM. Reserve them for CI pipelines or pre-release smoke tests. Use MockStreamTransport for the vast majority of your test suite — it runs in milliseconds with zero external dependencies. ## What's Next @@ -112,10 +520,10 @@ streamResource() must be called within an Angular injection context. In tests, w Understand the SSE event model your tests simulate. - Test human-in-the-loop approval flows with scripted interrupt events. + Build human-in-the-loop approval flows tested with scripted interrupt events. - - Configure streamResource() for production LangGraph Cloud. + + Thread persistence patterns that pair with thread-switching tests. Full reference for MockStreamTransport options and methods. 
diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index 37501fbb9..743025e54 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -80,7 +80,7 @@ Expose checkpoint history directly in your component to let users scrub through ```typescript import { Component, inject, computed } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { AgentService } from './agent.service'; @Component({ @@ -159,16 +159,13 @@ Time travel is most useful during development. Inspect why an agent chose a part ## What's Next - + Configure thread storage so checkpoints survive page reloads and are available across sessions. - + Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. - + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. - - Deep dive into branch management, merging strategies, and presenting multi-branch UIs to end users. - diff --git a/apps/website/next-env.d.ts b/apps/website/next-env.d.ts index c4b7818fb..fdbfe5258 100644 --- a/apps/website/next-env.d.ts +++ b/apps/website/next-env.d.ts @@ -1,6 +1,6 @@ /// /// -import "./.next/dev/types/routes.d.ts"; +import "./../../dist/apps/website/.next/types/routes.d.ts"; // NOTE: This file should not be edited // see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md b/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md new file mode 100644 index 000000000..409188dc1 --- /dev/null +++ b/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md @@ -0,0 +1,234 @@ +# Comprehensive Docs Overhaul — Master Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Bring all 18 docs pages to gold standard quality — every page shows both Python agent code AND Angular streamResource code, uses correct MDX syntax, has 200+ lines of rich content, and tells the product story. + +**Architecture:** Each task rewrites one MDX file. The gold standard is `introduction.mdx` (337 lines) and `langgraph-basics.mdx` (384 lines). Every page should pair Python LangGraph patterns with Angular streamResource consumption, use correct Tab label syntax, include Callouts, Steps, and CardGroup navigation. + +**Tech Stack:** MDX with custom components (Callout, Steps, Tabs/Tab with label prop, CardGroup/Card, FeatureChips, ArchFlowDiagram) + +--- + +## Phase 0: Critical Fixes (do first, affects all pages) + +### Task 0: Fix Cross-Cutting Issues + +**Files:** Multiple + +- [ ] **Step 1: Fix import path inconsistency** + +Search all MDX and TSX files for `@ngxp/stream-resource` and `@stream-resource/angular`. Replace ALL with `@cacheplane/stream-resource`. + +Run: `grep -rn "@ngxp/stream-resource\|@stream-resource/angular" apps/website/content/docs-v2/ apps/website/src/` + +Replace all occurrences with `@cacheplane/stream-resource`. + +- [ ] **Step 2: Fix API method inconsistency** + +Search for `.stream(` in docs (should be `.submit(`). Search for `status() === 'streaming'` (should be `status() === 'loading'`). 
+ +- [ ] **Step 3: Fix broken links** + +Search for `/docs-v2/` (should be `/docs/`). Search for `/docs/guides/branching` and `/docs/guides/error-handling` (don't exist — remove or replace). + +- [ ] **Step 4: Fix unclosed code fence in state-management.mdx** + +Line ~60 has an unclosed TypeScript code fence that swallows the rest of the page. + +- [ ] **Step 5: Fix .tsx file extensions** + +Search for `.tsx` in Tab labels (should be `.ts` — this is Angular, not React). + +- [ ] **Step 6: Commit** + +```bash +git add -A +git commit -m "fix(website): resolve import paths, API naming, broken links, code fence" +``` + +--- + +## Phase 1: Rewrite THIN Pages (highest impact) + +Each page below needs to be expanded to 200+ lines with Python + Angular code pairs. + +### Task 1: Rewrite `concepts/angular-signals.mdx` (76 → 250+ lines) + +Current: Surface-level primer. No Python code. No streaming lifecycle explanation. + +New content needed: +- How `toSignal()` converts BehaviorSubjects internally +- Streaming lifecycle: idle → loading → streaming tokens → resolved +- `computed()` for derived AI state (message count, last message, tool progress) +- `effect()` for side effects (analytics, logging, error reporting) +- A complete component example showing all signal patterns +- Performance: why Signals + OnPush is efficient for high-frequency streaming +- Python agent code showing what produces the streaming events that Signals consume + +### Task 2: Rewrite `concepts/agent-architecture.mdx` (70 → 250+ lines) + +Current: 5-bullet overview, single code snippet, 3-line pattern list. 
+ +New content needed: +- Full ReAct agent pattern with Python code + Angular streamResource code +- Tool calling: Python `@tool` decorator → Angular `toolCalls()` signal +- Multi-agent: Python supervisor graph → Angular `subagents()` signal +- Error handling and recovery patterns +- Planning phase: how LLMs decide actions +- Checkpointing: how `history()` and `branch()` expose decisions + +### Task 3: Rewrite `concepts/state-management.mdx` (83 → 200+ lines) + +Current: Has syntax error (unclosed code fence). No Python code. ASCII diagram. + +New content needed: +- Fix unclosed code fence +- Python TypedDict with reducers → TypeScript interface mapping +- How `Annotated[list, add]` works and why messages accumulate +- State updates during streaming (partial values) +- Checkpoint model: persistence, restore, branching +- Tabs showing Python state definition + Angular consumption +- Replace ASCII diagram with Steps component + +### Task 4: Rewrite `guides/memory.mdx` (83 → 200+ lines) + +Current: Thinnest guide. No Tabs, no Python, no template code. + +New content needed: +- Python: agent state with memory fields, LangGraph Store API +- Short-term (thread-scoped) vs long-term (cross-thread) memory +- Semantic memory with vector search +- Tabs: TypeScript component + Angular template for memory-aware UI +- How memory updates surface through `value()` signal + +### Task 5: Rewrite `guides/interrupts.mdx` (96 → 200+ lines) + +Current: No Python code. Dangling reference to BagTemplate. Tab syntax wrong. + +New content needed: +- Python: `raise Interrupt(value={...})` in agent node +- Python: graph structure with approval node +- Full approval component: TypeScript + Template in Tabs +- Multi-step approval pattern +- Typed interrupt payloads with BagTemplate (explain the reference) +- Steps component for interrupt lifecycle +- Fix Tab syntax to use `label` prop + +### Task 6: Rewrite `guides/persistence.mdx` (107 → 200+ lines) + +Current: No Python code. 
Tab syntax wrong. + +New content needed: +- Python: checkpointer setup (MemorySaver, PostgresSaver) +- Python: thread_id in graph invocation +- Full thread-list component: TypeScript + Template +- Thread switching UI pattern +- Fix Tab syntax to use `label` prop + +### Task 7: Rewrite `guides/testing.mdx` (124 → 200+ lines) + +Current: No Tabs, no Python, no template code. + +New content needed: +- Python: how to test the agent side +- Tabs: spec file + component file pairs +- Testing subagent interactions +- Testing interrupts and thread switching +- Integration testing with real LangGraph dev server +- Steps for test setup workflow + +### Task 8: Rewrite `guides/deployment.mdx` (108 → 200+ lines) + +Current: Tab syntax wrong. Introduction page has better deployment content. + +New content needed: +- Python: LangGraph Cloud deployment (langgraph.json, CLI) +- LangSmith deployment walkthrough +- Authentication / API key configuration +- CORS configuration for SSE +- CI/CD pipeline example +- Monitoring and health checks +- Fix Tab syntax to use `label` prop + +--- + +## Phase 2: Polish CLOSE Pages + +### Task 9: Polish `guides/streaming.mdx` (206 lines — fix issues) + +Fix: +- Import path: `@stream-resource/angular` → `@cacheplane/stream-resource` +- `.stream()` → `.submit()` +- `'streaming'` status → `'loading'` +- Add Python agent showing `stream_mode` configuration +- Add `ChangeDetectionStrategy.OnPush` to component + +### Task 10: Polish `guides/time-travel.mdx` (175 lines — fix issues) + +Fix: +- `.tsx` extension in Tab label → `.ts` +- Remove broken link to `/docs-v2/guides/branching` +- Add Python checkpointer setup code +- Expand to 200+ lines + +### Task 11: Polish `guides/subgraphs.mdx` (199 lines — fix issues) + +Fix: +- `.tsx` extension in Tab label → `.ts` +- Remove broken link to `/docs-v2/guides/error-handling` +- Add Python subgraph composition code + +### Task 12: Polish `getting-started/quickstart.mdx` (131 lines) + +Fix: +- Tab syntax: 
`items={[...]}` → `` +- Replace plain `##` numbered headings with `/` +- Add `ChangeDetectionStrategy.OnPush` +- Add error display (`chat.error()`) to template +- Add agent setup context or link + +### Task 13: Polish `getting-started/installation.mdx` (103 lines) + +Fix: +- Tab syntax: `items={[...]}` → `` +- Fix `process.env` error → use Angular `environment.ts` +- Fix verify example (needs injection context) +- Add troubleshooting section +- Expand "Next steps" to 4+ cards + +--- + +## Phase 3: Expand API Pages + +### Task 14: Expand 4 API Reference Pages + +Fix import path `@ngxp/stream-resource` → `@cacheplane/stream-resource` in all 4. +Add "What's Next" CardGroup to all 4. +Expand intros with more context about when/why to use each. + +--- + +## Execution Strategy + +**Phase 0** (Task 0): Do first — fixes affect all pages. Single commit. +**Phase 1** (Tasks 1-8): Highest impact. 8 full rewrites. Dispatch as parallel subagents. +**Phase 2** (Tasks 9-13): Polish passes. 5 targeted fixes. Dispatch as parallel subagents. +**Phase 3** (Task 14): API pages. Single task. + +Total: 15 tasks, ~14 files rewritten. 
+ +## Quality Checklist (apply to every page) + +- [ ] 200+ lines of content +- [ ] Python LangGraph code showing the agent/server pattern +- [ ] Angular streamResource code showing the frontend consumption +- [ ] Both paired together to tell the product story +- [ ] All imports use `@cacheplane/stream-resource` +- [ ] All Tab components use `` syntax +- [ ] `ChangeDetectionStrategy.OnPush` in component examples +- [ ] At least 2 Callouts (tip, info, or warning) +- [ ] "What's Next" CardGroup with 4+ cards +- [ ] No broken links +- [ ] Correct API method names (`.submit()`, not `.stream()`) +- [ ] Correct status values (`'loading'`, not `'streaming'`) From ea05e773859fcddfa1f88f24e05f949f1ba7d09a Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 17:01:14 -0700 Subject: [PATCH 008/187] =?UTF-8?q?docs:=20comprehensive=20overhaul=20?= =?UTF-8?q?=E2=80=94=208=20pages=20rewritten,=203300+=20lines=20added=20(#?= =?UTF-8?q?12)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: add comprehensive docs overhaul master plan (15 tasks, 3 phases) * fix(website): resolve import paths, broken links, code fence, .tsx extensions * fix(website): convert all Tabs to label prop syntax * docs(website): rewrite Angular Signals concept with streaming lifecycle and Python code * docs(website): rewrite State Management with Python reducers and TypeScript mapping * docs(website): rewrite Memory guide with Python Store API and Angular patterns * docs(website): rewrite Agent Architecture with full Python patterns and Angular mapping * docs(website): rewrite Deployment guide with full LangGraph Cloud + Angular deployment * docs(website): rewrite Persistence guide with Python checkpointers and thread UI * docs(website): rewrite Testing guide with comprehensive mock patterns * docs(website): rewrite Interrupts guide with Python interrupt code and approval component * fix(website): fix Callout type='warn' → type='warning' + strip code 
fence titles --- .../docs-v2/api/fetch-stream-transport.mdx | 2 +- .../docs-v2/api/mock-stream-transport.mdx | 2 +- .../docs-v2/api/provide-stream-resource.mdx | 2 +- .../content/docs-v2/api/stream-resource.mdx | 2 +- .../docs-v2/concepts/agent-architecture.mdx | 684 +++++++++++++++++- .../docs-v2/concepts/angular-signals.mdx | 542 +++++++++++++- .../docs-v2/concepts/state-management.mdx | 536 +++++++++++++- .../docs-v2/getting-started/installation.mdx | 6 +- .../docs-v2/getting-started/quickstart.mdx | 6 +- .../content/docs-v2/guides/deployment.mdx | 410 +++++++++-- .../content/docs-v2/guides/interrupts.mdx | 546 +++++++++++++- .../website/content/docs-v2/guides/memory.mdx | 412 ++++++++++- .../content/docs-v2/guides/persistence.mdx | 313 +++++++- .../content/docs-v2/guides/streaming.mdx | 20 +- .../content/docs-v2/guides/subgraphs.mdx | 11 +- .../content/docs-v2/guides/testing.mdx | 500 +++++++++++-- .../content/docs-v2/guides/time-travel.mdx | 11 +- apps/website/next-env.d.ts | 2 +- .../2026-04-04-docs-comprehensive-overhaul.md | 234 ++++++ 19 files changed, 3908 insertions(+), 333 deletions(-) create mode 100644 docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index afb74b5f2..be313baed 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -6,7 +6,7 @@ You rarely need to interact with `FetchStreamTransport` directly — simply prov ```ts import { inject } from '@angular/core'; -import { streamResource, FetchStreamTransport } from '@ngxp/stream-resource'; +import { streamResource, FetchStreamTransport } from '@cacheplane/stream-resource'; // Override transport for a single resource const events = streamResource({ diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx 
b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index fbf014cd6..d9ebd13c8 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -7,7 +7,7 @@ import { TestBed } from '@angular/core/testing'; import { provideStreamResource, MockStreamTransport, -} from '@ngxp/stream-resource'; +} from '@cacheplane/stream-resource'; beforeEach(() => { TestBed.configureTestingModule({ diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 6e618bdb1..4863cf489 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -7,7 +7,7 @@ import { bootstrapApplication } from '@angular/platform-browser'; import { provideStreamResource, FetchStreamTransport, -} from '@ngxp/stream-resource'; +} from '@cacheplane/stream-resource'; import { AppComponent } from './app/app.component'; bootstrapApplication(AppComponent, { diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index 719efab1f..e383d3164 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -3,7 +3,7 @@ `streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. 
```ts -import { streamResource } from '@ngxp/stream-resource'; +import { streamResource } from '@cacheplane/stream-resource'; // Inside a component or service with injection context const repo = streamResource({ diff --git a/apps/website/content/docs-v2/concepts/agent-architecture.mdx b/apps/website/content/docs-v2/concepts/agent-architecture.mdx index 32a334de6..2571643ed 100644 --- a/apps/website/content/docs-v2/concepts/agent-architecture.mdx +++ b/apps/website/content/docs-v2/concepts/agent-architecture.mdx @@ -1,69 +1,701 @@ # Agent Architecture -How AI agents work — the planning, execution, and tool-calling lifecycle that streamResource() connects your Angular app to. +How AI agents work — the planning, execution, and tool-calling lifecycle that streamResource() connects your Angular app to. This page shows you the Python patterns that power modern agents and exactly how each pattern surfaces in Angular through `@cacheplane/stream-resource`. -## The agent loop + +Every section below shows the Python backend code first, then the Angular frontend code that consumes it. You need both halves to build a production agent application — LangGraph handles the intelligence, streamResource() handles the reactivity. + + +## The Agent Loop -An AI agent follows a cycle: +Every agent follows a five-phase cycle. Understanding this cycle is critical because each phase maps to a specific streamResource() signal in your Angular app. - -The user sends a message via `submit()`. streamResource() posts it to LangGraph Platform. + +The user sends a message. On the Angular side, `submit()` posts input to LangGraph Platform. On the Python side, the message lands in the graph's `messages` state key. + +```python +class AgentState(TypedDict): + messages: Annotated[list, add] + plan: list[str] + tool_results: dict +``` + -The LLM decides what to do next — respond directly, call a tool, or delegate to a subagent. +The LLM examines the full message history plus any accumulated state. 
It decides what to do next — respond directly, call one or more tools, or delegate to a subagent. + +```python +def plan(state: AgentState, config: RunnableConfig) -> dict: + system = """You are a research assistant. Given the conversation, + decide whether to respond directly, search for information, + or analyze data. Use tools when the user needs factual answers.""" + + response = llm.bind_tools(tools).invoke([ + {"role": "system", "content": system}, + *state["messages"], + ]) + return {"messages": [response]} +``` + -Tools run (database queries, API calls, code execution). Results feed back into state. +If the LLM decided to call tools, LangGraph routes to the tool node. Tools run — database queries, API calls, code execution — and their results feed back into state as `ToolMessage` entries. + +```python +from langgraph.prebuilt import ToolNode + +tool_node = ToolNode(tools) +# LangGraph automatically calls each tool the LLM requested +# and appends ToolMessage results to state["messages"] +``` + -The agent streams its response token-by-token. streamResource() updates the `messages()` signal in real-time. +After tools finish (or if no tools were needed), the agent streams its final response token by token. streamResource() updates the `messages()` signal in real time so your Angular template re-renders incrementally. + +```typescript +// Angular side — messages update as tokens arrive +@if (agent.isLoading()) { + +} +@for (msg of agent.messages(); track msg.id) { + +} +``` + -State is checkpointed. The agent may loop back to Plan, or finish. +LangGraph checkpoints the full state — messages, tool results, plan, everything. The agent may loop back to Plan (if tools returned data that needs further reasoning) or finish. The checkpoint is what enables time-travel debugging via `history()`. 
+ +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) +``` + -## Tool calling +## ReAct Pattern + +ReAct (Reason + Act) is the most common agent pattern. The agent reasons about the user's question, decides to call a tool, observes the result, and loops until it has enough information to answer. + + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.prebuilt import ToolNode +from langchain_openai import ChatOpenAI +from langchain_core.tools import tool +from typing_extensions import TypedDict, Annotated +from operator import add + +# --- State --- +class AgentState(TypedDict): + messages: Annotated[list, add] + +# --- Tools --- +@tool +def search_docs(query: str) -> str: + """Search the knowledge base for relevant documents.""" + results = vector_store.similarity_search(query, k=3) + return "\n\n".join(doc.page_content for doc in results) + +@tool +def query_database(sql: str) -> str: + """Run a read-only SQL query against the analytics database.""" + rows = db.execute(text(sql)).fetchall() + return json.dumps([dict(r) for r in rows]) + +@tool +def get_weather(city: str) -> str: + """Get current weather for a city.""" + resp = httpx.get(f"https://api.weather.com/v1/{city}") + return resp.json()["summary"] + +tools = [search_docs, query_database, get_weather] + +# --- LLM with tools bound --- +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: AgentState) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +# --- Routing --- +def should_continue(state: AgentState) -> str: + last_message = state["messages"][-1] + if last_message.tool_calls: + return "tools" + return END + +# --- Graph --- +builder = StateGraph(AgentState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) + +builder.add_edge(START, 
"model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") # After tools, reason again + +graph = builder.compile() +``` + + + + +```typescript +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface AgentState { + messages: BaseMessage[]; +} + +@Component({ + selector: 'app-react-agent', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (msg of messages(); track msg.id) { + + } + + @if (activeTools().length) { + + } + + @for (result of completedTools(); track result.id) { + + } + `, +}) +export class ReactAgentComponent { + agent = streamResource({ + assistantId: 'react_agent', + }); + + messages = this.agent.messages; -Agents extend their capabilities through tools. streamResource() tracks tool execution: + // Tools currently executing (spinner, progress bar) + activeTools = computed(() => this.agent.toolProgress()); + + // Tools that finished with results (expandable cards) + completedTools = computed(() => this.agent.toolCalls()); + + send(text: string) { + this.agent.submit({ + messages: [{ role: 'human', content: text }], + }); + } +} +``` + + + + +The key insight: `should_continue` is the decision point. If the LLM's response contains `tool_calls`, the graph routes to the `tools` node. If not, it ends. After tools execute, the graph loops back to `model` so the LLM can reason about the tool results. This loop continues until the LLM responds without requesting any tools. + +## Tool Calling Deep Dive + +Tools are how agents interact with the outside world. Understanding both the Python definition and the Angular consumption is essential. + +### Defining Tools in Python + +Every tool is a Python function decorated with `@tool`. 
LangGraph converts the function signature and docstring into the JSON schema that the LLM uses to decide when and how to call it: + +```python +from langchain_core.tools import tool +from pydantic import BaseModel, Field + +# Simple tool — args inferred from function signature +@tool +def calculate(expression: str) -> str: + """Evaluate a mathematical expression and return the result.""" + return str(eval(expression)) # Use a sandbox in production + +# Structured tool — explicit schema with validation +class EmailInput(BaseModel): + to: str = Field(description="Recipient email address") + subject: str = Field(description="Email subject line") + body: str = Field(description="Email body content") + +@tool(args_schema=EmailInput) +def send_email(to: str, subject: str, body: str) -> str: + """Send an email to the specified recipient.""" + mail_service.send(to=to, subject=subject, body=body) + return f"Email sent to {to}" +``` + + +The LLM reads the docstring to decide when to call a tool. A vague docstring like "does stuff" means the LLM will not know when to use it. Be specific: what the tool does, what it returns, when to use it. + + +### How Tools Surface in Angular + +When the agent calls a tool, streamResource() exposes the execution lifecycle through two signals: + + + ```typescript +// toolProgress() — tools currently executing +// Updates in real time as tools start and complete + const agent = streamResource({ - assistantId: 'research_agent', + assistantId: 'react_agent', }); -// Currently executing tools -const tools = computed(() => agent.toolProgress()); +// Each entry has: name, args, status +const activeTools = computed(() => agent.toolProgress()); + +// Template usage +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (tool of activeTools(); track tool.id) { +
    + + Running {{ tool.name }}... +
    {{ tool.args | json }}
    +
    + } + `, +}) +export class ToolProgressComponent { + activeTools = computed(() => this.agent.toolProgress()); +} +``` + +
    + + +```typescript +// toolCalls() — completed tool calls with results +// Available after each tool finishes -// Completed tool calls with results const completedTools = computed(() => agent.toolCalls()); + +// Each entry has: name, args, result, duration +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @for (call of completedTools(); track call.id) { +
    + + {{ call.name }} + {{ call.duration }}ms + +
    +

    Input

    +
    {{ call.args | json }}
    +
    +
    +

    Output

    +
    {{ call.result }}
    +
    +
    + } + `, +}) +export class ToolResultsComponent { + completedTools = computed(() => this.agent.toolCalls()); +} ``` -## Multi-agent patterns +
    +
    + +### Tool Execution Flow + +The full lifecycle from Python tool definition to Angular UI update: + + + +The model returns an `AIMessage` with a `tool_calls` array. Each entry specifies the tool name and arguments. + + +The `should_continue` conditional edge detects `tool_calls` and routes to the `tools` node. + + +`ToolNode` calls the Python function. The result is wrapped in a `ToolMessage` and appended to state. + + +LangGraph Platform streams the tool call and result as SSE events to the Angular client. + + +`toolProgress()` updates during execution. `toolCalls()` updates when the tool completes. Both trigger OnPush change detection. + + + +## Multi-Agent Architecture + +When a single agent with tools is not enough, you can compose multiple agents into a supervisor-worker architecture. A supervisor agent receives the user's request, decides which specialist to delegate to, and synthesizes the final answer. + + + + +```python +from langgraph.graph import END, START, StateGraph +from langchain_openai import ChatOpenAI +from typing import Literal +from typing_extensions import TypedDict, Annotated +from operator import add + +class OrchestratorState(TypedDict): + messages: Annotated[list, add] + next_agent: str + research_output: str + analysis_output: str + +llm = ChatOpenAI(model="gpt-5-mini") + +# --- Supervisor --- +def supervisor(state: OrchestratorState) -> dict: + response = llm.bind_tools([route_tool]).invoke([ + {"role": "system", "content": """You are a supervisor. 
+ Route to 'researcher' for fact-finding, + 'analyst' for data analysis, + 'writer' for drafting content, + or 'finish' if the task is complete."""}, + *state["messages"], + ]) + destination = response.tool_calls[0]["args"]["agent"] + return {"next_agent": destination, "messages": [response]} + +# --- Specialist subagents (each is its own compiled graph) --- +researcher_graph = build_researcher_agent() +analyst_graph = build_analyst_agent() +writer_graph = build_writer_agent() -Complex tasks use multiple agents working together: +# --- Routing --- +def route_to_agent(state: OrchestratorState) -> str: + return state["next_agent"] -- **Orchestrator** — one agent delegates to specialized subagents -- **Pipeline** — agents process sequentially, each refining the output -- **Debate** — agents review each other's work +# --- Orchestrator graph --- +builder = StateGraph(OrchestratorState) +builder.add_node("supervisor", supervisor) +builder.add_node("researcher", researcher_graph) +builder.add_node("analyst", analyst_graph) +builder.add_node("writer", writer_graph) -streamResource() supports these patterns through the `subagents()` and `activeSubagents()` signals. 
+builder.add_edge(START, "supervisor") +builder.add_conditional_edges("supervisor", route_to_agent, { + "researcher": "researcher", + "analyst": "analyst", + "writer": "writer", + "finish": END, +}) +# After each specialist, return to supervisor +builder.add_edge("researcher", "supervisor") +builder.add_edge("analyst", "supervisor") +builder.add_edge("writer", "supervisor") + +graph = builder.compile() +``` + + + + +```typescript +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface OrchestratorState { + messages: BaseMessage[]; + next_agent: string; + research_output: string; + analysis_output: string; +} + +@Component({ + selector: 'app-multi-agent', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + + + + +
    +

    All Subagents

    + @for (entry of allSubagents(); track entry[0]) { + + } +
    + `, +}) +export class MultiAgentComponent { + orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['researcher', 'analyst', 'writer'], + }); + + messages = this.orchestrator.messages; + + // Currently running subagents with live status + activeWorkers = computed(() => this.orchestrator.activeSubagents()); + + // Full map of all subagents (active + completed) + allSubagents = computed(() => + Array.from(this.orchestrator.subagents().entries()) + ); + + send(text: string) { + this.orchestrator.submit({ + messages: [{ role: 'human', content: text }], + }); + } +} +``` + +
    +
    + + +The `subagentToolNames` option tells streamResource() which graph nodes are subagents. Without it, subagent execution looks like regular tool calls. With it, `activeSubagents()` and `subagents()` provide dedicated tracking with isolated message histories. + + +## Error Handling and Recovery + +Agents fail. Tools throw exceptions, APIs time out, LLMs hallucinate invalid tool arguments. A robust architecture handles all of these gracefully. + +### Python-Side Error Handling + +```python +from langchain_core.tools import tool, ToolException + +@tool(handle_tool_error=True) +def query_database(sql: str) -> str: + """Run a read-only SQL query against the analytics database.""" + if "DROP" in sql.upper() or "DELETE" in sql.upper(): + raise ToolException("Destructive queries are not allowed.") + try: + rows = db.execute(text(sql)).fetchall() + return json.dumps([dict(r) for r in rows]) + except Exception as e: + raise ToolException(f"Query failed: {str(e)}") +``` + +When `handle_tool_error=True` is set, LangGraph catches `ToolException` and feeds the error message back to the LLM as a `ToolMessage`. The LLM sees the error and can retry with corrected arguments or explain the failure to the user. 
+ 
+### How Errors Surface in Angular
+
+```typescript
+const agent = streamResource({
+  assistantId: 'react_agent',
+});
+
+// The error() signal captures both transport and agent errors
+const error = computed(() => agent.error());
+
+// In your template
+@Component({
+  changeDetection: ChangeDetectionStrategy.OnPush,
+  template: `
+    @if (error()) {
+      
+    }
+  `,
+})
+export class AgentComponent {
+  error = computed(() => this.agent.error());
+
+  retry() {
+    // Re-submit the last message to retry
+    this.agent.submit(this.lastInput);
+  }
+}
+```
+
+### Error Recovery Strategies
+
+| Error type | Python behavior | Angular signal |
+|---|---|---|
+| Tool throws `ToolException` | Error fed back to LLM, agent retries | `toolCalls()` shows error in result |
+| Tool throws unexpected error | LangGraph catches it, marks tool as failed | `error()` fires with details |
+| LLM returns invalid tool args | ToolNode validation fails, error fed to LLM | `toolProgress()` shows failed status |
+| Transport error (network) | N/A | `error()` fires, `status()` becomes `'error'` |
+| Agent exceeds recursion limit | Graph raises `GraphRecursionError` | `error()` fires with recursion message |
+
+
+LangGraph defaults to 25 recursion steps. If your agent loops between `model` and `tools` more than 25 times, it stops with a `GraphRecursionError`. Increase the limit in production by passing `{"recursion_limit": 50}` in the run config when invoking the graph (`recursion_limit` is a config option, not a `compile()` argument), or redesign the agent to converge faster.
+
+
+## Checkpointing and Debugging
+
+Every time a node completes, LangGraph saves a checkpoint — a full snapshot of the agent's state at that moment. streamResource() exposes this checkpoint timeline to Angular, giving you time-travel debugging for free. 
+ 
+### How Checkpoints Work
+
+```python
+from langgraph.checkpoint.postgres import PostgresSaver
+
+checkpointer = PostgresSaver.from_conn_string(DATABASE_URL)
+graph = builder.compile(checkpointer=checkpointer)
+
+# Every node execution creates a checkpoint:
+# checkpoint_1: after "model" (LLM decided to call search_docs)
+# checkpoint_2: after "tools" (search_docs returned results)
+# checkpoint_3: after "model" (LLM responded with final answer)
+```
+
+### Exposing Checkpoints in Angular
+
+```typescript
+const agent = streamResource({
+  assistantId: 'react_agent',
+  threadId: signal('thread_abc123'),
+});
+
+// Full checkpoint timeline — every state snapshot
+const timeline = computed(() => agent.history());
+
+// Current branch (for time-travel)
+const branch = computed(() => agent.branch());
+```
+
+### Building a Debug Timeline
+
+```typescript
+@Component({
+  selector: 'app-debug-timeline',
+  changeDetection: ChangeDetectionStrategy.OnPush,
+  template: `
+    
    + @for (checkpoint of history(); track checkpoint.id) { + + } +
    + +
    +

    State at checkpoint

    +
    {{ selectedState() | json }}
    +
    + `, +}) +export class DebugTimelineComponent { + history = computed(() => this.agent.history()); + currentCheckpoint = signal(null); + + selectedState = computed(() => { + const id = this.currentCheckpoint(); + return this.history().find(c => c.id === id)?.state; + }); + + timeTravel(checkpointId: string) { + this.currentCheckpoint.set(checkpointId); + this.agent.submit(null, { checkpoint: checkpointId }); + } +} +``` + + +When you submit from a previous checkpoint, LangGraph creates a new branch from that point. The original timeline is preserved. The `branch()` signal tells you which branch is currently active. See the [Time Travel guide](/docs/guides/time-travel) for the full walkthrough. + + +## Choosing an Architecture + +Not every application needs a multi-agent swarm. Here is a decision guide for picking the right level of complexity. + +### Single Agent with Tools + +**Use when:** Most applications. The user has a conversation, the agent calls tools as needed, and responds. + +```python +# Simple, powerful, covers 80% of use cases +builder = StateGraph(AgentState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") +graph = builder.compile() +``` + +**Angular signals used:** `messages()`, `toolCalls()`, `toolProgress()`, `status()` + +### Single Agent with Human-in-the-Loop + +**Use when:** The agent takes high-stakes actions (sending emails, modifying data, making purchases) that need human approval. 
+ 
+```python
+from langgraph.types import interrupt
+
+def propose_action(state: AgentState) -> dict:
+    plan = llm.invoke(state["messages"])
+    approval = interrupt({"action": plan.content, "requires_approval": True})
+
+def execute_action(state: AgentState) -> dict:
+    # Only runs after human approves
+    return perform_action(state["pending_action"])
+```
+
+**Angular signals used:** `messages()`, `interrupt()`, `status()` plus `submit(null, { resume })` to approve
+
+### Multi-Agent Supervisor
+
+**Use when:** The task naturally decomposes into specialist roles (researcher, analyst, writer), and each specialist needs its own tools, prompts, and reasoning chain.
+
+```python
+builder = StateGraph(OrchestratorState)
+builder.add_node("supervisor", supervisor)
+builder.add_node("researcher", researcher_subgraph)
+builder.add_node("analyst", analyst_subgraph)
+builder.add_conditional_edges("supervisor", route_to_agent)
+```
+
+**Angular signals used:** `messages()`, `subagents()`, `activeSubagents()`, `toolCalls()`, `status()`
+
+### Decision Matrix
+
+| Factor | Single agent | Single + approval | Multi-agent |
+|---|---|---|---|
+| Tool count | 1-10 | 1-10 | 10+ across specialists |
+| Task complexity | Single domain | Single domain, high stakes | Cross-domain |
+| Latency budget | Low | Medium (human wait) | Higher (multiple LLM calls) |
+| State isolation | Shared | Shared + interrupt | Isolated per subagent |
+| Angular complexity | Low | Medium | Higher |
 
-Most applications only need a single agent with tools. Add subagents when you need true task delegation with isolated state.
+Begin with a single agent and tools. Add human-in-the-loop when you need approval flows. Graduate to multi-agent only when a single agent's context window cannot hold all the tools and instructions it needs.
 
 ## What's Next
 
 
   
-    Learn the graph, node, and edge model that agents are built on.
+    Learn the graph, node, and edge primitives that agents are built on. 
- - Compose agents into multi-agent pipelines using subgraphs. + + Stream token-by-token responses with multiple stream modes. - Pause agent execution and wait for human approval mid-run. + Build human-in-the-loop approval flows that pause and resume agents. + + + Compose multi-agent systems with orchestrators and specialist workers. + + + Debug agents by stepping through checkpoint history and branching. + + + How Signals power the reactive model behind streamResource(). diff --git a/apps/website/content/docs-v2/concepts/angular-signals.mdx b/apps/website/content/docs-v2/concepts/angular-signals.mdx index 5fb2a5887..d476ee891 100644 --- a/apps/website/content/docs-v2/concepts/angular-signals.mdx +++ b/apps/website/content/docs-v2/concepts/angular-signals.mdx @@ -1,75 +1,559 @@ # Angular Signals -streamResource() is built on Angular Signals — the reactive primitive introduced in Angular 16+. Every property on a StreamResourceRef is a Signal, making it work seamlessly with OnPush change detection, computed values, and effect callbacks. +Angular Signals are the reactive primitive that powers streamResource(). If you're coming from a Python AI/agent background and wondering how Angular handles real-time streaming data, this page is your guide. Every property on a StreamResourceRef is a Signal, which means your templates update automatically as tokens arrive — no manual subscriptions, no async pipes, no RxJS boilerplate. -## Signals primer + +Think of Signals like a Python property with built-in change notification. When the value changes, every consumer — templates, computed values, effects — re-evaluates automatically. If you've used Pydantic models with validators that react to field changes, Signals are the Angular equivalent but deeply integrated into the rendering engine. + + +## What Are Angular Signals? -A Signal is a reactive value container. When a Signal's value changes, Angular automatically re-renders any template that reads it. 
+A Signal is a reactive value container introduced in Angular 16+. You create one, read it by calling it like a function, and Angular tracks which templates and computations depend on it. ```typescript -// streamResource returns Signals, not Observables -const chat = streamResource({ assistantId: 'agent' }); +import { signal, computed } from '@angular/core'; + +// Create a writable signal +const count = signal(0); + +// Read the current value — call it like a function +console.log(count()); // 0 + +// Update the value +count.set(1); +count.update(prev => prev + 1); -chat.messages() // Signal — call to read -chat.status() // Signal -chat.error() // Signal -chat.isLoading() // Signal (computed) +// Derive new values with computed() +const doubled = computed(() => count() * 2); +console.log(doubled()); // 4 ``` -## Computed values +The key insight: Angular knows which Signals a template reads. When those Signals change, Angular re-renders only the affected parts of the DOM. No diffing the entire tree, no zone.js overhead. -Use `computed()` to derive new Signals from streamResource signals. +## How streamResource Uses Signals Internally + +Under the hood, streamResource() receives Server-Sent Events (SSE) over HTTP and feeds them into RxJS BehaviorSubjects. It then converts those BehaviorSubjects into Angular Signals using `toSignal()`. This is the bridge between the async streaming world and Angular's synchronous reactivity model. + + + ```typescript -const lastMessage = computed(() => - chat.messages().at(-1)?.content ?? '' -); +// Simplified view of what streamResource does internally: + +// 1. SSE events arrive as an observable stream +const messages$ = new BehaviorSubject([]); +const status$ = new BehaviorSubject('idle'); + +// 2. Each SSE chunk updates the BehaviorSubject +transport.onChunk(chunk => { + messages$.next([...messages$.getValue(), chunk.message]); +}); + +// 3. 
BehaviorSubjects become Signals via toSignal() +const messages = toSignal(messages$, { initialValue: [] }); +const status = toSignal(status$, { initialValue: 'idle' }); + +// 4. Your component reads pure Signals — no RxJS knowledge needed +``` + + + + +```typescript +import { streamResource } from '@cacheplane/stream-resource'; + +// You never touch BehaviorSubjects or toSignal() yourself. +// streamResource() hands you clean Signals: +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +chat.messages(); // Signal +chat.status(); // Signal +chat.error(); // Signal +chat.isLoading(); // Signal +chat.value(); // Signal +``` + + + + + +The BehaviorSubject-to-Signal conversion means you get the best of both worlds: RxJS handles the async SSE transport (reconnection, backpressure, error recovery), while Signals handle the synchronous UI reactivity (change detection, template binding, computed derivations). You only interact with the Signal side. + + +## The Streaming Lifecycle as Signals + +Every streamResource() instance moves through a lifecycle: **idle**, **loading**, tokens arriving, then **resolved** (or **error**). The `status()` Signal reflects each transition in real time. + + + +The resource has been created but no request has been submitted yet. All Signals hold their initial values. + +```typescript +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +console.log(chat.status()); // 'idle' +console.log(chat.messages()); // [] +console.log(chat.isLoading()); // false +``` + + + +After calling `submit()`, the status transitions to `'loading'`. The SSE connection is open and the agent is processing. 
+ +```typescript +chat.submit({ messages: [{ role: 'user', content: 'Explain quantum computing' }] }); + +console.log(chat.status()); // 'loading' +console.log(chat.isLoading()); // true +console.log(chat.messages()); // [] (no tokens yet) +``` + -const messageCount = computed(() => - chat.messages().length + +As the agent generates tokens, the `messages()` Signal updates with each chunk. The status remains `'loading'` throughout. + +```typescript +// After first few tokens arrive: +console.log(chat.status()); // 'loading' (still streaming) +console.log(chat.messages()); // [AIMessageChunk("Quantum computing uses...")] + +// After more tokens: +console.log(chat.messages()); // [AIMessageChunk("Quantum computing uses qubits...")] +// The message content grows as tokens stream in +``` + + + +The agent has finished. All tokens have arrived. The status transitions to `'resolved'`. + +```typescript +console.log(chat.status()); // 'resolved' +console.log(chat.isLoading()); // false +console.log(chat.messages()); // [AIMessage("Quantum computing uses qubits to...")] +``` + + + +If the agent fails or the connection drops, the status transitions to `'error'` and the `error()` Signal contains the failure details. + +```typescript +console.log(chat.status()); // 'error' +console.log(chat.error()); // HttpErrorResponse { status: 500, ... } +console.log(chat.isLoading()); // false +``` + + + +## Composing Derived State with computed() + +`computed()` lets you derive new Signals from streamResource Signals. These derived Signals update automatically whenever their dependencies change — during streaming, that means every time a new token arrives. 
+ +```typescript +import { computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +const chat = streamResource({ + assistantId: 'chat_agent', +}); + +// Count all messages in the conversation +const messageCount = computed(() => chat.messages().length); + +// Get the last message (useful for showing the latest response) +const lastMessage = computed(() => chat.messages().at(-1)); + +// Extract just the assistant's messages +const assistantMessages = computed(() => + chat.messages().filter(m => m._getType() === 'ai') ); -const isIdle = computed(() => - chat.status() === 'idle' +// Track which tools the agent is actively calling +const activeTools = computed(() => + chat.messages() + .filter(m => m._getType() === 'ai') + .flatMap(m => m.tool_calls ?? []) + .filter(tc => !tc.result) ); + +// Build a user-facing error message +const errorDisplay = computed(() => { + const err = chat.error(); + if (!err) return null; + if (err instanceof HttpErrorResponse) { + return err.status === 429 + ? 'Rate limited. Please wait a moment.' + : `Server error (${err.status})`; + } + return 'An unexpected error occurred.'; +}); + +// Combine multiple signals into a single view model +const viewModel = computed(() => ({ + messages: chat.messages(), + isStreaming: chat.isLoading(), + canSend: chat.status() !== 'loading', + messageCount: messageCount(), + error: errorDisplay(), +})); +``` + + +A `computed()` only re-evaluates when one of its dependencies actually changes, and it caches the result. If `chat.messages()` emits the same reference, downstream computeds skip their work entirely. This matters for high-frequency streaming where tokens arrive rapidly. + + +## Side Effects with effect() + +Use `effect()` when a Signal change should trigger work that lives outside the template — logging, analytics, scrolling, persisting state. Effects run in the injection context and are automatically cleaned up when the component is destroyed. 
+ +```typescript +import { effect } from '@angular/core'; + +// Log errors for observability +effect(() => { + const err = chat.error(); + if (err) { + console.error('[StreamResource] Agent error:', err); + this.analytics.track('agent_error', { error: err }); + } +}); + +// Auto-scroll to bottom when new messages arrive +effect(() => { + const msgs = chat.messages(); + if (msgs.length > 0) { + // Schedule after Angular renders the new message + setTimeout(() => { + this.chatContainer.nativeElement.scrollTo({ + top: this.chatContainer.nativeElement.scrollHeight, + behavior: 'smooth', + }); + }); + } +}); + +// Track streaming duration for performance monitoring +effect(() => { + const status = chat.status(); + if (status === 'loading') { + this.streamStart = performance.now(); + } + if (status === 'resolved' && this.streamStart) { + const duration = performance.now() - this.streamStart; + this.analytics.track('stream_duration_ms', { duration }); + this.streamStart = null; + } +}); +``` + + +Writing to a Signal inside an `effect()` can create infinite loops. If you need to transform one Signal into another, use `computed()` instead. Reserve `effect()` for side effects that leave the reactive graph — DOM manipulation, logging, analytics, network calls. + + +## Template Patterns + +Angular's new control flow syntax (`@if`, `@for`, `@switch`) works naturally with Signals. Here's a complete chat template that handles every lifecycle state. + +```typescript +import { ChangeDetectionStrategy, Component, computed, effect, ElementRef, ViewChild } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + + @switch (chat.status()) { + @case ('loading') { +
    + Agent is responding... +
    + } + @case ('error') { +
    + {{ errorDisplay() }} + +
    + } + } + + +
    + @for (message of chat.messages(); track $index) { + @switch (message._getType()) { + @case ('human') { +
    + {{ message.content }} +
    + } + @case ('ai') { +
    + {{ message.content }} + + + @for (tool of message.tool_calls ?? []; track tool.id) { +
    + Called: {{ tool.name }} +
    + } +
    + } + @case ('tool') { +
    + {{ message.name }}: {{ message.content }} +
    + } + } + } @empty { +
    + Send a message to start the conversation. +
    + } +
    + + +
    + + +
    + `, +}) +export class ChatComponent { + @ViewChild('chatContainer') chatContainer!: ElementRef; + + chat = streamResource({ + assistantId: 'chat_agent', + }); + + errorDisplay = computed(() => { + const err = this.chat.error(); + if (!err) return ''; + return err instanceof HttpErrorResponse + ? `Error ${err.status}: ${err.statusText}` + : 'Connection lost. Please retry.'; + }); + + scrollEffect = effect(() => { + const msgs = this.chat.messages(); + if (msgs.length) { + setTimeout(() => + this.chatContainer?.nativeElement.scrollTo({ + top: this.chatContainer.nativeElement.scrollHeight, + behavior: 'smooth', + }) + ); + } + }); + + send(event: Event) { + event.preventDefault(); + const input = (event.target as HTMLFormElement).querySelector('input')!; + const content = input.value.trim(); + if (!content) return; + + this.chat.submit({ + messages: [{ role: 'user', content }], + }); + input.value = ''; + } + + retry() { + this.chat.submit({ + messages: [{ role: 'user', content: 'Please try again.' }], + }); + } +} +``` + +## OnPush Change Detection + +Every component using streamResource() should use `ChangeDetectionStrategy.OnPush`. Here's why it works and why it's efficient. + +With the default change detection strategy, Angular checks every component in the tree on every browser event — clicks, timers, HTTP responses. For a streaming agent emitting dozens of tokens per second, that means hundreds of unnecessary checks across your entire app. + +With OnPush, Angular only checks a component when: + +1. An `@Input()` reference changes +2. An event fires inside the component's template +3. A **Signal** that the template reads changes + +Since streamResource() exposes Signals, condition 3 handles everything. When a new token arrives and `messages()` updates, Angular marks only the components reading that Signal for check — not the entire tree. 
+ +```typescript +@Component({ + // Always use OnPush with streamResource + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` +

    {{ chat.messages().length }} messages

    + @if (chat.isLoading()) { + + } + `, +}) +export class ChatComponent { + chat = streamResource({ assistantId: 'chat_agent' }); +} ``` -## OnPush change detection + +With older Observable-based patterns, you had to call `ChangeDetectorRef.markForCheck()` or use the `async` pipe to trigger OnPush updates. Signals do this automatically. When a Signal's value changes, Angular's internal notification system marks the component dirty — zero manual intervention. + + +## Python Agent to Angular Signals + +The real power of streamResource() is how it pairs a Python LangGraph agent with Angular Signals. The agent defines the logic; Signals surface the results in real time. + + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI +from langchain_core.tools import tool + +llm = ChatOpenAI(model="gpt-5-mini", streaming=True) + +@tool +def search_knowledge_base(query: str) -> str: + """Search internal documentation for relevant information.""" + results = vector_store.similarity_search(query, k=3) + return "\n".join(r.page_content for r in results) + +tools = [search_knowledge_base] + +def call_model(state: MessagesState) -> dict: + response = llm.bind_tools(tools).invoke(state["messages"]) + return {"messages": [response]} + +def should_continue(state: MessagesState) -> str: + last_msg = state["messages"][-1] + if last_msg.tool_calls: + return "tools" + return END -Because Signals trigger change detection automatically, streamResource works perfectly with `ChangeDetectionStrategy.OnPush`. 
+# Build the graph +builder = StateGraph(MessagesState) +builder.add_node("model", call_model) +builder.add_node("tools", ToolNode(tools)) +builder.add_edge(START, "model") +builder.add_conditional_edges("model", should_continue) +builder.add_edge("tools", "model") + +graph = builder.compile() +``` + + + ```typescript +import { ChangeDetectionStrategy, Component, computed, effect } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + @Component({ + selector: 'app-chat', changeDetection: ChangeDetectionStrategy.OnPush, template: ` @for (msg of chat.messages(); track $index) { -

    {{ msg.content }}

    + @switch (msg._getType()) { + @case ('human') { +
    {{ msg.content }}
    + } + @case ('ai') { +
    + {{ msg.content }} + @for (tc of msg.tool_calls ?? []; track tc.id) { + {{ tc.name }} + } +
    + } + @case ('tool') { +
    {{ msg.name }}: {{ msg.content }}
    + } + } + } + + @if (chat.isLoading()) { +
    Agent is thinking...
    } `, }) export class ChatComponent { - chat = streamResource({ assistantId: 'agent' }); + chat = streamResource({ + assistantId: 'chat_agent', + }); + + // Derived state from the Python agent's output + toolsUsed = computed(() => + this.chat.messages() + .filter(m => m._getType() === 'tool') + .map(m => m.name) + ); + + hasError = computed(() => this.chat.status() === 'error'); + + sendMessage(content: string) { + this.chat.submit({ + messages: [{ role: 'user', content }], + }); + } } ``` -## No RxJS required +
    +
    -Unlike traditional Angular HTTP patterns, streamResource doesn't use Observables. There are no subscriptions to manage, no async pipes needed, and no memory leak risks. +When the Python agent calls `search_knowledge_base`, the tool call streams to Angular as a message. When the tool returns, the result streams as another message. The agent's final response streams token by token. Every one of these events updates the `messages()` Signal, and your template re-renders the new content automatically. - -Signals are simpler for UI state. They synchronously read the latest value, compose with computed(), and integrate with Angular's template syntax. streamResource handles the async SSE connection internally and surfaces results as Signals. +## Performance: Signals vs Alternatives + +High-frequency token streaming puts unique pressure on a frontend framework. Here's why Signals with OnPush outperform the alternatives. + +| Approach | Token update cost | Memory overhead | Cleanup required | +|---|---|---|---| +| **Signals + OnPush** | Marks only reading components | None beyond Signal | Automatic | +| Observable + async pipe | Creates/destroys subscriptions per `@if` block | Subscription objects | Pipe handles it | +| Observable + manual subscribe | Full component check if you forget `markForCheck()` | Subscription tracking | Manual unsubscribe | +| Default change detection | Checks entire component tree | None | None | + +For a typical chat UI receiving 30-50 tokens per second: + +- **Signals + OnPush**: Only the message list component and its direct ancestors are checked. The sidebar, header, settings panel — all skipped. +- **Default strategy**: Every component in the tree is checked 30-50 times per second, even components with no streaming data. +- **Observable + async pipe**: Works correctly but creates and destroys subscriptions each time an `@if` or `@for` block re-evaluates, adding GC pressure during rapid streaming. 
+ + +Signals use referential equality (`===`) by default. streamResource() creates new array references for `messages()` only when the array actually changes (a new token arrives). Between updates, reading `messages()` returns the same reference and skips downstream recomputation. For custom equality, pass an `equal` function when creating a `computed()`. ## What's Next - Understand how LangGraph agent state flows into Angular Signals. + How LangGraph agent state flows into Angular Signals and how to structure complex state. - See Signals in action with token-by-token streaming responses. + Configure stream modes, handle token-by-token rendering, and manage concurrent streams. + + + Understand the Python agent patterns that produce the events Signals consume. - Full reference for every Signal exposed by streamResource. + Full reference for every Signal, method, and option on StreamResourceRef. + + + Build human-in-the-loop approval flows that pause and resume the agent. + + + Deep dive into change detection optimization for streaming applications. diff --git a/apps/website/content/docs-v2/concepts/state-management.mdx b/apps/website/content/docs-v2/concepts/state-management.mdx index 6c1c9e94a..68397a602 100644 --- a/apps/website/content/docs-v2/concepts/state-management.mdx +++ b/apps/website/content/docs-v2/concepts/state-management.mdx @@ -1,83 +1,541 @@ # State Management -How state flows through streamResource() — from LangGraph's server-side state machine to Angular Signals in your templates. +How agent state flows from LangGraph's server-side state machine into Angular Signals — and why the separation between server state and UI state makes your app simpler, not more complex. -## State lives on the server + +LangGraph Platform owns the state. Angular owns the view. `streamResource()` is the read-only bridge between them. You never manually sync, serialize, or manage agent state in your Angular code. 
+ + +## State Lives on the Server + +In a traditional Angular app, state lives in an NgRx store or a signals-based service. In a LangGraph app, **the agent's state lives on the server** — in LangGraph Platform's checkpoint store. Your Angular app is a stateless view layer that reads state through signals as the agent streams it back. + +This inversion is intentional. Agent state can span multiple LLM calls, tool executions, and human-in-the-loop interrupts. It needs to survive browser refreshes, reconnections, and even server deployments. A server-side checkpoint store handles all of that automatically. Your Angular app just calls `.submit()` and reads signals. + + + +Your Angular component calls `agent.submit({ messages: [userMsg] })`. No state is stored in the component. + + +`@cacheplane/stream-resource` forwards the input to `FetchStreamTransport`, which opens an HTTP POST and SSE connection to LangGraph Platform. + + +The agent runs its nodes — calling the LLM, invoking tools, checking conditions — and streams SSE events back with incremental state updates. + + +Incoming SSE chunks are parsed and pushed into BehaviorSubjects — one per signal type. + + +BehaviorSubjects are converted to Angular Signals via `toSignal()`. Every update triggers Angular's change detection automatically. + + +Components using `OnPush` re-render only when signal values change. No manual `detectChanges()`, no zone triggers, no subscriptions to manage. + + + +## Python State Design + +On the Python side, your agent's state is a `TypedDict`. The fields you define here are exactly what `streamResource()` exposes in TypeScript. Getting the Python state design right is the most important architectural decision in your agent. + +### The TypedDict Pattern + +Every LangGraph state is a `TypedDict`. Fields can be plain values or annotated with reducers that control how updates are merged. 
-Unlike traditional Angular state management (NgRx, signals stores), agent state lives on the LangGraph Platform. Your Angular app is a stateless view layer. + + +```python +from typing_extensions import TypedDict + +class ChatState(TypedDict): + messages: list # Will be replaced on each update + session_id: str # Single value, replaced on update + turn_count: int # Single value, replaced on update ``` -LangGraph Platform (source of truth) - ↓ SSE stream -FetchStreamTransport (transport layer) - ↓ events -streamResource() (signal conversion) - ↓ Signals -Angular templates (reactive rendering) + + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class AgentState(TypedDict): + # Annotated[list, add] means: append new items, don't replace + messages: Annotated[list, add] + tool_results: Annotated[list, add] + + # Plain fields: each update replaces the previous value + status: str + current_plan: list[str] ``` -## The state shape + + + +```python +from langgraph.graph import MessagesState + +# MessagesState is a built-in TypedDict that pre-wires +# messages: Annotated[list[AnyMessage], add_messages] +# add_messages handles deduplication, type coercion, and ordering + +class ProjectState(MessagesState): + # Extend with your own fields + files: Annotated[list[str], add] # Accumulates file paths + analysis: dict # Latest analysis result + progress: int # 0–100 progress value +``` + + + + +### Reducers: How State Merges + +When a node returns `{"messages": [new_msg]}`, LangGraph doesn't replace the messages list — it **calls the reducer** to merge the update. This is what `Annotated[list, add]` means: use Python's `operator.add` to concatenate lists. 
+ +```python +from typing_extensions import TypedDict, Annotated +from operator import add + +class ResearchState(TypedDict): + # Each node can append to these — they accumulate across the run + messages: Annotated[list, add] + sources: Annotated[list[str], add] + findings: Annotated[list[str], add] + + # These are replaced (last write wins) + query: str + model: str + confidence: float + +def researcher_node(state: ResearchState) -> dict: + result = llm.invoke(state["messages"]) + new_sources = extract_sources(result.content) + + # Returns partial state — only fields being updated + # LangGraph merges this into the existing state + return { + "messages": [result], # Appended via reducer + "sources": new_sources, # Appended via reducer + "confidence": 0.87, # Replaced + } +``` + + +Nodes return only the fields they change. LangGraph merges partial updates into the full state object. This is why you can have 10 nodes each updating different fields without conflicts. + + +## TypeScript Interface Mapping + +The TypeScript interface you pass to `streamResource()` is your contract with the Python state. Every Python state field maps to a TypeScript property. The types don't need to match exactly — they just need to be compatible with the JSON that LangGraph streams back. + + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import MessagesState + +class ProjectState(MessagesState): + # From MessagesState: messages: Annotated[list[AnyMessage], add_messages] + files: Annotated[list[str], add] + analysis: dict[str, any] | None + progress: int + plan: Annotated[list[str], add] + error: str | None +``` -Your state type defines what the agent manages. The `value()` signal exposes the full state object. 
+ + ```typescript +import { BaseMessage } from '@langchain/core/messages'; + interface ProjectState { + // Maps from MessagesState.messages messages: BaseMessage[]; + + // Maps from Python fields (reducers are transparent — you see the final list) files: string[]; - analysis: { score: number; issues: string[] }; + analysis: { score: number; issues: string[]; summary: string } | null; + progress: number; + plan: string[]; + error: string | null; } const agent = streamResource({ assistantId: 'project_agent', }); - -// Access any state field as a reactive value -const files = computed(() => agent.value().files); -const score = computed(() => agent.value().analysis.score); ``` -## Thread state vs application state + + - -Thread state (managed by LangGraph) and application state (managed by Angular) are separate concerns. Don't try to sync them — read thread state from signals, manage UI state with Angular signals. - +| Python type | TypeScript type | +|-------------|-----------------| +| `str` | `string` | +| `int` / `float` | `number` | +| `bool` | `boolean` | +| `list[str]` | `string[]` | +| `dict[str, any]` | `Record` | +| `TypedDict` | `interface` or `type` | +| `str \| None` | `string \| null` | +| `list[AnyMessage]` | `BaseMessage[]` | +| `Annotated[list, add]` | Same as the list type — reducer is invisible | + + + + +Once you define the interface, every field is accessible via `agent.value()`: ```typescript -// Thread state — from the agent -const messages = agent.messages(); // Read-only signal -const agentStatus = agent.status(); // Read-only signal +// Full typed state object +const state = agent.value(); // Signal + +// Computed values from nested fields +const score = computed(() => agent.value().analysis?.score ?? 
0); +const fileCount = computed(() => agent.value().files.length); +const isDone = computed(() => agent.value().progress === 100); + +// Direct messages access (shortcut for agent.value().messages) +const messages = agent.messages(); // Signal +``` -// Application state — your Angular code -const sidebarOpen = signal(true); // Your UI state -const selectedTab = signal('chat'); // Your UI state +## State Updates During Streaming + +The agent doesn't wait until it's finished to send state updates. It streams partial state updates as each node completes. Your Angular signals update incrementally throughout the run. + +### How Partial Updates Arrive + +LangGraph streams in `values` mode by default — each SSE event contains the full state snapshot after a node completes. In `messages` mode, you get individual message tokens as they're generated. + +```typescript +const agent = streamResource({ + assistantId: 'project_agent', + // Default: values mode — full state after each node + // streamMode: 'messages' — token-by-token for text fields +}); ``` -## State updates are immutable +### Signals Update Mid-Stream -Every state update from the agent creates a new signal value. Angular's change detection picks this up automatically. +Because every state update is a new signal value, your templates reflect the agent's progress in real time — without polling, without timers, without manual state management. ```typescript -// This works with OnPush because the Signal reference changes -@for (msg of agent.messages(); track $index) { -

    {{ msg.content }}

    +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + +

    Files processed: {{ agent.value().files.length }}

    + + + + + + @for (step of agent.value().plan; track step) { +
  • {{ step }}
  • + } + + + @for (msg of agent.messages(); track $index) { + + } + ` +}) +export class ProjectComponent { + readonly agent = streamResource({ + assistantId: 'project_agent', + }); } +``` + +### Immutability and OnPush -// Computed values re-evaluate when dependencies change +Every signal update produces a new object reference. Angular's `OnPush` change detection compares references — when a signal emits a new value, the component re-renders. You never need to clone objects or call `markForCheck()` manually. + +```typescript +// Safe: computed() re-evaluates when agent.value() changes const hasErrors = computed(() => - agent.value().analysis.issues.length > 0 + (agent.value().analysis?.issues ?? []).length > 0 ); +// Safe: @for tracks by identity, not index, for stable DOM +// track $index is fine for messages since they always append +@for (msg of agent.messages(); track $index) { + +} + +// Safe: null-coalescing handles state fields not yet populated +const score = computed(() => agent.value().analysis?.score ?? 0); +``` + + +`streamResource()` uses `toSignal()` internally with `requireSync: false`. Signals always have a value — even before the first stream update. You never need to handle `undefined` explicitly for the signal itself, though individual state fields may be `null` until the agent populates them. + + +## Thread State vs Application State + +There are two kinds of state in a LangGraph Angular app, and keeping them separate makes your code much easier to reason about. + +**Thread state** is owned by LangGraph Platform. You read it through `streamResource()` signals. You never write to it directly — you only send new input via `.submit()`. + +**Application state** is owned by your Angular component or service. It's UI-only: sidebar visibility, active tab, selected message, form input values. It has nothing to do with the agent. 
+ +```typescript +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + // --- Thread state (from agent, read-only) --- + readonly agent = streamResource({ + assistantId: 'chat_agent', + }); + + // Convenience computed values from thread state + readonly messages = this.agent.messages; // Signal + readonly isLoading = this.agent.isLoading; // Signal + readonly interrupted = this.agent.interrupt; // Signal + + // --- Application state (your Angular signals) --- + readonly sidebarOpen = signal(true); + readonly activeTab = signal<'chat' | 'history' | 'settings'>('chat'); + readonly inputText = signal(''); + readonly selectedMessageId = signal(null); + + // --- Actions --- + send() { + const text = this.inputText(); + if (!text.trim()) return; + this.agent.submit({ messages: [{ role: 'user', content: text }] }); + this.inputText.set(''); // UI state — clear the input + } + + approve() { + this.agent.submit(null, { resume: { approved: true } }); + } +} +``` + + +A common mistake is copying `agent.messages()` into a local signal to "control" it. This creates stale state bugs and defeats the purpose of the reactive signal model. Read thread state directly from `agent.*` signals and derive what you need with `computed()`. + + +## The Checkpoint Model + +LangGraph Platform persists state at every node boundary using a checkpoint store. Each checkpoint is an immutable snapshot of the full state at a point in time. + +``` +Thread: "user_123_session" +│ +├── Checkpoint 1 ← After call_model: { messages: [HumanMessage, AIMessage] } +├── Checkpoint 2 ← After tool_node: { messages: [..., ToolMessage] } +├── Checkpoint 3 ← After call_model: { messages: [..., AIMessage("Here's what I found...")] } +└── (current) +``` + +### What This Means for Your Angular App + +**Resumable threads** — If the user refreshes the page or closes the browser, the thread is still there. 
Pass the same `threadId` and `streamResource()` will restore the full conversation history automatically. + +**Time travel** — You can fork a thread at any checkpoint and replay it with different input. This powers the time-travel debugging guides. + +**Interrupt persistence** — When the agent raises an `Interrupt`, the checkpoint captures everything. The agent can be resumed hours or days later. + +```typescript +const agent = streamResource({ + assistantId: 'chat_agent', + + // Same threadId = restored conversation history + threadId: signal(this.route.snapshot.params['threadId']), + + // New threadId auto-created for new conversations + onThreadId: (id) => this.router.navigate(['/chat', id]), +}); + +// Read checkpoint history for time-travel UI +const history = agent.history(); // Signal +const branch = agent.branch(); // Signal — active branch ID +``` + +For full checkpoint and time-travel patterns, see the [Persistence guide](/docs/guides/persistence) and [Time Travel guide](/docs/guides/time-travel). + +## Custom State Fields + +`messages` is just one field. Real agents carry rich state: structured plans, tool results, progress indicators, metadata, and more. Every custom field you define in Python is available in your TypeScript interface. 
+ + + + +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import MessagesState +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +class ResearchState(MessagesState): + # Accumulating lists — each node can append + plan: Annotated[list[str], add] + sources: Annotated[list[str], add] + findings: Annotated[list[str], add] + + # Scalar progress + progress: int # 0–100 + + # Structured results + report: dict | None # Final report when complete + + # Agent metadata + query: str + model_used: str + +def planner_node(state: ResearchState) -> dict: + steps = llm.invoke([ + {"role": "system", "content": "Break this query into research steps."}, + *state["messages"] + ]) + plan_items = steps.content.split("\n") + return { + "plan": plan_items, # Appended via reducer + "progress": 10, + "model_used": "gpt-5-mini", + } + +def researcher_node(state: ResearchState) -> dict: + # Runs once per plan step in a loop + for step in state["plan"]: + result = search(step) + yield { + "findings": [result], # Each iteration appends + "progress": state["progress"] + (80 // len(state["plan"])), + } +``` + + + + +```typescript +import { BaseMessage } from '@langchain/core/messages'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface ResearchState { + messages: BaseMessage[]; + plan: string[]; + sources: string[]; + findings: string[]; + progress: number; + report: { + title: string; + summary: string; + sections: { heading: string; content: string }[]; + } | null; + query: string; + model_used: string; +} + +// In your component: +readonly agent = streamResource({ + assistantId: 'research_agent', +}); +``` + + + + +```typescript +@Component({ + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + +
    +
    +
    +

    {{ agent.value().progress }}% complete

    + + +
      + @for (step of agent.value().plan; track step) { +
    1. {{ step }}
    2. + } +
    + + +
      + @for (finding of agent.value().findings; track finding) { +
    • {{ finding }}
    • + } +
    + + + @if (agent.value().report; as report) { +
    +

    {{ report.title }}

    +

    {{ report.summary }}

    + @for (section of report.sections; track section.heading) { +
    +

    {{ section.heading }}

    +

    {{ section.content }}

    +
    + } +
    + } + ` +}) +export class ResearchComponent { + readonly agent = streamResource({ + assistantId: 'research_agent', + }); + + startResearch(query: string) { + this.agent.submit({ + messages: [{ role: 'user', content: query }], + }); + } +} +``` + +
    +
    + +### Derived State with computed() + +You rarely need to consume `agent.value()` raw in your template. Use `computed()` to derive clean, focused values: + +```typescript +readonly agent = streamResource({ + assistantId: 'research_agent', +}); + +// Derived signals — recalculate only when their dependencies change +readonly progress = computed(() => this.agent.value().progress); +readonly isPlanning = computed(() => this.agent.value().plan.length === 0 && this.agent.isLoading()); +readonly sourceCount = computed(() => this.agent.value().sources.length); +readonly hasReport = computed(() => this.agent.value().report !== null); +readonly reportTitle = computed(() => this.agent.value().report?.title ?? ''); +``` + ## What's Next - Learn how streamResource uses Signals for reactive rendering. + How streamResource() uses Angular Signals for zero-subscription reactive rendering. + + + Configure stream modes — values, messages, events — for different use cases. - Persist thread state so users can resume conversations later. + Thread-based conversation persistence and checkpoint configuration. + + + Fork threads at any checkpoint and replay with different input. - - Preserve context across sessions with LangGraph's memory store. + + Human-in-the-loop approval flows and how interrupt state surfaces in Angular. + + + Nodes, edges, and the graph execution model behind the state machine. 
-``` diff --git a/apps/website/content/docs-v2/getting-started/installation.mdx b/apps/website/content/docs-v2/getting-started/installation.mdx index f06f1f942..8200f7f67 100644 --- a/apps/website/content/docs-v2/getting-started/installation.mdx +++ b/apps/website/content/docs-v2/getting-started/installation.mdx @@ -48,8 +48,8 @@ Any option passed to `streamResource()` directly overrides the global provider c ## Environment setup - - + + For local development, run a LangGraph server: @@ -61,7 +61,7 @@ langgraph dev ``` - + For production, point to your LangGraph Cloud deployment: diff --git a/apps/website/content/docs-v2/getting-started/quickstart.mdx b/apps/website/content/docs-v2/getting-started/quickstart.mdx index 96f8fcff6..de1aee2fb 100644 --- a/apps/website/content/docs-v2/getting-started/quickstart.mdx +++ b/apps/website/content/docs-v2/getting-started/quickstart.mdx @@ -33,8 +33,8 @@ export const appConfig: ApplicationConfig = { Use `streamResource()` in a component field initializer. Every property on the returned ref is an Angular Signal. - - + + ```typescript // chat.component.ts @@ -67,7 +67,7 @@ export class ChatComponent { ``` - + ```html diff --git a/apps/website/content/docs-v2/guides/deployment.mdx b/apps/website/content/docs-v2/guides/deployment.mdx index 91abe0c21..79e0faea7 100644 --- a/apps/website/content/docs-v2/guides/deployment.mdx +++ b/apps/website/content/docs-v2/guides/deployment.mdx @@ -1,91 +1,407 @@ # Deployment -Configure streamResource() for production with LangGraph Cloud, environment-based URLs, and error handling patterns. +Deploy your LangGraph agent to the cloud and ship your Angular frontend to production with environment-based configuration, authentication, error handling, and observability. -## Production configuration +## Python: LangGraph Cloud deployment -Point `apiUrl` to your LangGraph Cloud deployment. +Your agent code needs a `langgraph.json` manifest at the project root. 
This file tells LangGraph Cloud how to build and serve your agent. - - +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./agent/graph.py:graph" + }, + "env": ".env" +} +``` -```typescript -// app.config.ts -provideStreamResource({ - apiUrl: environment.langgraphUrl, -}) +The `graphs` key maps an assistant ID (used by `streamResource()` on the Angular side) to the Python module path and graph variable. The `env` key points to a file with secrets like `OPENAI_API_KEY` that will be injected at runtime. + +### Agent entry point + +```python +from langchain_openai import ChatOpenAI +from langgraph.graph import StateGraph, MessagesState + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState): + return {"messages": [llm.invoke(state["messages"])]} + +graph = StateGraph(MessagesState) +graph.add_node("model", call_model) +graph.set_entry_point("model") +graph = graph.compile() ``` +### Push and deploy + +```bash +# Initialize and push to GitHub +git init && git add . && git commit -m "initial agent" +gh repo create my-agent --public --source=. --push + +# Deploy via CLI (alternative to the LangSmith UI) +pip install langgraph-cli +langgraph deploy --project my-agent +``` + +The CLI watches your repository and builds a container image on LangGraph Cloud. First deployments take roughly 10-15 minutes. Subsequent pushes to the default branch trigger automatic redeployments. + +## LangSmith deployment walkthrough + +The LangSmith UI provides a visual deployment flow if you prefer not to use the CLI. + + + + +Navigate to [smith.langchain.com](https://smith.langchain.com) and click **Deployments** in the left sidebar, then **+ New Deployment**. + + + + +Authorize LangSmith to access your GitHub account. Select the repository containing your `langgraph.json`. LangSmith auto-detects the manifest and shows the graphs it found. + + + + +Add secrets like `OPENAI_API_KEY` in the deployment settings. 
These are encrypted at rest and injected into your container at runtime. You can also set `LANGCHAIN_TRACING_V2=true` here to enable automatic tracing. + + + + +Click **Deploy**. Once the build succeeds, you will see a deployment URL like `https://my-agent-abc123.langgraph.app`. Copy this URL for your Angular environment configuration. + + + + +## Angular: environment configuration + +Angular uses file-based environment replacement at build time rather than `process.env`. Create separate environment files for development and production. + + + + ```typescript -// environment.prod.ts export const environment = { - langgraphUrl: 'https://your-project.langgraph.app', + production: false, + langgraphUrl: 'http://localhost:2024', + langsmithApiKey: '', // not needed locally }; ``` - + ```typescript -// app.config.ts -provideStreamResource({ - apiUrl: 'https://your-project.langgraph.app', -}) +export const environment = { + production: true, + langgraphUrl: 'https://my-agent-abc123.langgraph.app', + langsmithApiKey: 'lsv2_pt_xxxxxxxx', +}; ``` +Wire the environment into `provideStreamResource()`: + +```typescript +import { provideStreamResource } from '@cacheplane/stream-resource'; +import { environment } from '../environments/environment'; + +export const appConfig: ApplicationConfig = { + providers: [ + provideStreamResource({ + apiUrl: environment.langgraphUrl, + }), + ], +}; +``` + +Angular CLI replaces `environment.ts` with `environment.prod.ts` during `ng build --configuration production` automatically via the `fileReplacements` array in `angular.json`. + +## Authentication + +### API key for LangGraph Platform + +LangGraph Cloud deployments require an API key on every request. The recommended approach is an Angular HTTP interceptor that attaches the key as a header. 
+ +```typescript +import { HttpInterceptorFn } from '@angular/common/http'; +import { environment } from '../environments/environment'; + +export const langGraphAuthInterceptor: HttpInterceptorFn = (req, next) => { + if (req.url.startsWith(environment.langgraphUrl)) { + const cloned = req.clone({ + setHeaders: { + 'x-api-key': environment.langsmithApiKey, + }, + }); + return next(cloned); + } + return next(req); +}; +``` + +Register the interceptor in your application config: + +```typescript +import { provideHttpClient, withInterceptors } from '@angular/common/http'; +import { langGraphAuthInterceptor } from './auth.interceptor'; + +export const appConfig: ApplicationConfig = { + providers: [ + provideHttpClient(withInterceptors([langGraphAuthInterceptor])), + provideStreamResource({ + apiUrl: environment.langgraphUrl, + }), + ], +}; +``` + + +Add `environment.prod.ts` to `.gitignore`. In CI, generate it from environment variables or inject secrets at build time. + + +### User-level authentication + +If your app has its own user authentication (JWT, session cookies), you can add a second interceptor or extend the one above to forward identity headers that your agent can use for per-user scoping. + +## CORS configuration + +When your Angular frontend and LangGraph backend are on different origins, you must configure CORS on the LangGraph side. + +In `langgraph.json`, add an `http` section: + +```json +{ + "dependencies": ["."], + "graphs": { + "chat_agent": "./agent/graph.py:graph" + }, + "http": { + "cors": { + "allow_origins": ["https://your-angular-app.com"], + "allow_methods": ["GET", "POST", "PUT", "DELETE", "OPTIONS"], + "allow_headers": ["Content-Type", "x-api-key", "Authorization"], + "allow_credentials": true + } + } +} +``` + + +During local development with `langgraph dev`, CORS is permissive by default. You only need explicit CORS configuration for production deployments. + + ## Error boundaries -Handle errors gracefully in production. 
+Production apps need graceful error handling. Build a reactive error boundary using `streamResource()` signals. ```typescript -const chat = streamResource({ - assistantId: 'chat_agent', -}); +import { ChangeDetectionStrategy, Component, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; -// Reactive error display -hasError = computed(() => chat.status() === 'error'); -errorMessage = computed(() => { - const err = chat.error(); - return err instanceof Error ? err.message : 'Something went wrong'; -}); +@Component({ + selector: 'app-chat', + changeDetection: ChangeDetectionStrategy.OnPush, + template: ` + @if (hasError()) { +
    +

    {{ errorMessage() }}

    + +
    + } + `, +}) +export class ChatComponent { + chat = streamResource({ + assistantId: 'chat_agent', + }); + + hasError = computed(() => this.chat.status() === 'error'); + + errorMessage = computed(() => { + const err = this.chat.error(); + if (err instanceof HttpErrorResponse) { + switch (err.status) { + case 401: return 'Authentication failed. Please check your API key.'; + case 429: return 'Rate limit exceeded. Please wait a moment.'; + case 503: return 'Agent is starting up. Please try again shortly.'; + default: return 'Something went wrong. Please try again.'; + } + } + return err instanceof Error ? err.message : 'An unexpected error occurred.'; + }); + + retry(): void { + this.chat.reload(); + } +} +``` + +### Retry with exponential backoff -// Retry after error -retry() { - chat.reload(); +For automated retries (network blips, transient 5xx errors), wrap `.submit()` with a backoff utility: + +```typescript +export async function retrySubmit( + chat: ReturnType<typeof streamResource>, + input: Record<string, unknown>, + maxAttempts = 3, +): Promise<void> { + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + chat.submit(input); + return; + } catch { + if (attempt === maxAttempts - 1) throw new Error('Max retries exceeded'); + await new Promise(r => setTimeout(r, 1000 * 2 ** attempt)); + } + } } ``` -## Recovering interrupted streams +## Stream recovery -Use `joinStream()` to reconnect to a running stream after a network interruption. +Use `joinStream()` to reconnect to a running agent execution after a network interruption, page refresh, or navigation event. 
```typescript -// If you know the run ID (e.g., from a status endpoint) -await chat.joinStream(runId, lastEventId); -// Resumes streaming from where it left off +// Store the run ID when starting a stream +const runId = this.chat.runId(); +localStorage.setItem('activeRunId', runId); + +// After reconnecting, resume from where the stream left off +const savedRunId = localStorage.getItem('activeRunId'); +if (savedRunId) { + await this.chat.joinStream(savedRunId, lastEventId); +} ``` +`joinStream()` replays any events the client missed, then switches to live streaming. This works because all state lives on the LangGraph Platform, and the SSE endpoint supports event ID-based resumption. + -streamResource() is a stateless client. All state lives on the LangGraph Platform. This means your Angular app can be deployed anywhere (CDN, edge, SSR) without state management concerns. +`streamResource()` is a stateless client. All state lives on the LangGraph Platform. This means your Angular app can be deployed anywhere (CDN, edge, SSR) without state management concerns. Scale your frontend independently of your agent infrastructure. -## Checklist +## CI/CD pipeline + +A typical pipeline deploys the Python agent and Angular frontend in parallel since they are independent artifacts. 
+ +```yaml +name: Deploy +on: + push: + branches: [main] + +jobs: + deploy-agent: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + - run: pip install langgraph-cli + - run: langgraph deploy --project my-agent + env: + LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }} + + deploy-angular: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: '22' + - run: npm ci + - name: Generate production environment + run: | + cat > src/environments/environment.prod.ts << 'EOF' + export const environment = { + production: true, + langgraphUrl: '${{ secrets.LANGGRAPH_URL }}', + langsmithApiKey: '${{ secrets.LANGSMITH_API_KEY }}', + }; + EOF + - run: npx ng build --configuration production + - name: Deploy to hosting + run: | + # Replace with your hosting provider's CLI + # e.g., npx vercel deploy --prod dist/my-app/browser + echo "Deploy dist/ to your hosting platform" +``` + +## Monitoring + +### LangSmith observability + +When `LANGCHAIN_TRACING_V2=true` is set in your agent environment, every run is automatically traced in LangSmith. No code changes are needed. 
+ +Key metrics to track in production: + +| Metric | Where to find it | Why it matters | +|--------|-------------------|----------------| +| End-to-end latency | LangSmith Runs tab | Directly affects user-perceived responsiveness | +| Error rate | LangSmith Runs tab, filter by error | Spike detection for broken tools or provider outages | +| Token usage | LangSmith per-run detail | Cost control and budget alerting | +| Time to first token | Angular performance monitoring | Stream startup latency visible to users | +| Thread count | LangGraph Platform dashboard | Capacity planning | + +### Client-side monitoring + +Track stream health from your Angular app: + +```typescript +const status = this.chat.status(); // 'idle' | 'streaming' | 'error' +const isStreaming = this.chat.isStreaming(); + +// Log stream lifecycle for your APM tool +effect(() => { + const s = this.chat.status(); + if (s === 'error') { + this.analytics.trackError('stream_error', this.chat.error()); + } +}); +``` + +## Deployment checklist -Point to your LangGraph Cloud deployment URL. +Point `provideStreamResource({ apiUrl })` to your LangGraph Cloud deployment URL via `environment.prod.ts`. + + +Add an HTTP interceptor to attach `x-api-key` headers to all LangGraph requests. + + +Add your Angular app's origin to the `allow_origins` list in `langgraph.json`. + + +Show user-friendly error messages for 401, 429, 503, and network failures. Provide retry buttons. - -Show user-friendly error messages and retry buttons. + +Store `runId` and use `joinStream()` to reconnect after network interruptions. -Store threadId in localStorage or a backend so users can resume conversations. +Store `threadId` in `localStorage` or a backend so users can resume conversations across sessions. -Set `throttle` option if token-by-token updates are too frequent for your UI. +Set the `throttle` option if token-by-token updates are too frequent for your UI rendering. 
+ + +Set `LANGCHAIN_TRACING_V2=true` in your agent environment for production observability. + + +Add `environment.prod.ts` to `.gitignore`. Generate it from CI secrets at build time. + + +Automate agent and Angular deployments on push to your main branch. + + +Confirm LangSmith traces are arriving and set up alerts for error rate spikes and latency regressions. @@ -93,15 +409,21 @@ Set `throttle` option if token-by-token updates are too frequent for your UI. - Test agent interactions deterministically before deploying. + Test agent interactions deterministically before deploying to production. Store thread IDs so users can resume conversations across sessions. - Tune streaming options like throttle for production performance. + Tune streaming options like throttle and stream modes for production performance. + + + Understand the agent patterns your deployment will serve. Full reference for provideStreamResource configuration options. + + Deep dive into error recovery patterns beyond basic error boundaries. + diff --git a/apps/website/content/docs-v2/guides/interrupts.mdx b/apps/website/content/docs-v2/guides/interrupts.mdx index 8063b852f..bb3bb7a2a 100644 --- a/apps/website/content/docs-v2/guides/interrupts.mdx +++ b/apps/website/content/docs-v2/guides/interrupts.mdx @@ -1,95 +1,561 @@ # Interrupts -Interrupts let your LangGraph agent pause execution and wait for human input. streamResource() surfaces interrupts as Angular Signals, making it easy to build approval flows, confirmation dialogs, and human-in-the-loop experiences. +Interrupts let your LangGraph agent pause mid-execution and hand control to a human. The agent proposes an action, the graph freezes, your Angular UI shows an approval dialog, the user decides, and the agent resumes with the human's decision. streamResource() surfaces interrupts as Angular Signals, so building approval flows, confirmation dialogs, and multi-step review experiences requires no manual event wiring. 
-Use interrupts for human approval, late-binding decisions, or any step where the agent needs external input before continuing. +Use interrupts when an agent action is irreversible (sending an email, placing an order, deleting data), when the agent needs a human decision it cannot make on its own, or when compliance requires explicit approval before execution. -## Basic interrupt handling +## The Interrupt Lifecycle -When an agent interrupts, the `interrupt()` signal contains the interrupt data. +Before diving into code, understand the five-stage lifecycle that every interrupt follows: - - + + +The agent reasons about the user's request and determines an action that requires human approval. It builds a structured payload describing what it wants to do. + + +The agent node calls `raise Interrupt(value={...})`, which freezes the graph. The interrupt payload is persisted in the checkpoint and streamed to the client. + + +streamResource() updates the `interrupt()` signal. Your Angular template detects the change through OnPush change detection and renders an approval dialog with the interrupt payload. + + +The user reviews the proposed action and clicks Approve or Reject. Your component calls `agent.submit()` with a resume payload containing the decision. + + +LangGraph resumes the graph from the interrupted checkpoint. The next node receives the human's decision and either executes or aborts the action. + + + +## Python: Raising an Interrupt + +An interrupt is raised inside any graph node by calling `raise Interrupt(value={...})`. The value can be any JSON-serializable object — it becomes the payload your Angular component displays. 
+ + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.types import Interrupt, Command +from langchain_openai import ChatOpenAI +from typing_extensions import TypedDict, Annotated +from operator import add + +llm = ChatOpenAI(model="gpt-5-mini") + +class State(TypedDict): + messages: Annotated[list, add] + proposed_action: dict + approval_result: dict + +def plan_action(state: State) -> dict: + """Agent analyzes the request and proposes an action.""" + response = llm.invoke([ + {"role": "system", "content": ( + "Analyze the user's request. If it requires sending " + "an email, modifying data, or any irreversible action, " + "return a JSON action plan with keys: action, target, " + "description, risk_level." + )}, + *state["messages"] + ]) + action = parse_json(response.content) + return { + "proposed_action": action, + "messages": [response], + } + +def request_approval(state: State) -> dict: + """Pause the graph and ask the human for approval.""" + action = state["proposed_action"] + raise Interrupt(value={ + "action": action["action"], + "target": action["target"], + "description": action["description"], + "risk_level": action.get("risk_level", "medium"), + }) + +def execute_action(state: State) -> dict: + """Run the approved action or explain the rejection.""" + result = state.get("approval_result", {}) + if result.get("approved"): + # Execute the real action + outcome = perform_action(state["proposed_action"]) + return { + "messages": [{"role": "assistant", "content": ( + f"Done. {outcome}" + )}] + } + else: + reason = result.get("reason", "No reason given") + return { + "messages": [{"role": "assistant", "content": ( + f"Action cancelled. 
Reason: {reason}" + )}] + } + +# Build the graph: plan → approve → execute +builder = StateGraph(State) +builder.add_node("plan", plan_action) +builder.add_node("approve", request_approval) +builder.add_node("execute", execute_action) +builder.add_edge(START, "plan") +builder.add_edge("plan", "approve") +builder.add_edge("approve", "execute") +builder.add_edge("execute", END) + +graph = builder.compile() +``` + + + + +```json +{ + "dependencies": ["."], + "graphs": { + "approval_agent": "./src/approval_agent/agent.py:graph" + }, + "env": ".env", + "python_version": "3.12" +} +``` + + + + + +Place the `raise Interrupt()` call in its own dedicated node. This gives you a clean three-node pattern (plan, approve, execute) where the interrupt sits between reasoning and action. If you raise an interrupt inside a node that also does work, the work before the interrupt runs twice on resume. + + +## Angular: Building an Approval Component + +When the agent raises an interrupt, streamResource() populates the `interrupt()` signal with the interrupt payload. Your component reads this signal to render a dialog and calls `submit()` to resume. 
+ + + ```typescript -// approval.component.ts +import { + Component, + computed, + signal, + ChangeDetectionStrategy, +} from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + interface ApprovalPayload { action: string; + target: string; description: string; - risk: 'low' | 'medium' | 'high'; + risk_level: 'low' | 'medium' | 'high'; } -const agent = streamResource({ - assistantId: 'approval_agent', -}); +interface AgentState { + messages: BaseMessage[]; + proposed_action: ApprovalPayload; + approval_result: { approved: boolean; reason?: string }; +} + +@Component({ + selector: 'app-approval', + templateUrl: './approval.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ApprovalComponent { + agent = streamResource({ + assistantId: 'approval_agent', + }); + + messages = computed(() => this.agent.messages()); + pendingApproval = computed(() => this.agent.interrupt()); + isLoading = computed(() => this.agent.isLoading()); + + rejectionReason = signal(''); + + riskClass = computed(() => { + const interrupt = this.pendingApproval(); + if (!interrupt) return ''; + const level = interrupt.value?.risk_level ?? 'medium'; + return `risk-${level}`; + }); + + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } -// Check for pending interrupts -pendingApproval = computed(() => agent.interrupt()); + approve() { + this.agent.submit(null, { + resume: { approved: true }, + }); + } + + reject() { + this.agent.submit(null, { + resume: { + approved: false, + reason: this.rejectionReason() || 'User rejected', + }, + }); + this.rejectionReason.set(''); + } +} ``` - + ```html - + +
    + @for (msg of messages(); track msg) { +
    {{ msg.content }}
    + } + + @if (isLoading()) { +
    Agent is working...
    + } +
    + + @if (pendingApproval(); as approval) { -
    -

    Agent needs approval

    -

    {{ approval.value.description }}

    -

    Risk level: {{ approval.value.risk }}

    - - +
    +

    Agent Needs Approval

    + +
    +
    Action
    +
    {{ approval.value.action }}
    + +
    Target
    +
    {{ approval.value.target }}
    + +
    Description
    +
    {{ approval.value.description }}
    + +
    Risk Level
    +
    + + {{ approval.value.risk_level | titlecase }} + +
    +
    + +
    + + +
    + +
    + + +
    } + + +@if (!pendingApproval()) { +
    + + +
    +} ``` -## Resuming from an interrupt +## Multi-Step Approval Pattern + +Some workflows require multiple approvals in sequence. For example, an agent that plans a multi-step deployment might need approval at each stage. Each node in the graph can raise its own interrupt. + + + + +```python +from langgraph.graph import END, START, StateGraph +from langgraph.types import Interrupt +from typing_extensions import TypedDict, Annotated +from operator import add + +class DeployState(TypedDict): + messages: Annotated[list, add] + plan: list[dict] + current_step: int + completed_steps: list[str] + +def create_plan(state: DeployState) -> dict: + """Generate a multi-step deployment plan.""" + plan = [ + {"step": "backup", "description": "Back up current database"}, + {"step": "migrate", "description": "Run schema migrations"}, + {"step": "deploy", "description": "Deploy new application version"}, + ] + return {"plan": plan, "current_step": 0} + +def approve_step(state: DeployState) -> dict: + """Interrupt for each step that needs approval.""" + step_index = state["current_step"] + step = state["plan"][step_index] + raise Interrupt(value={ + "step_number": step_index + 1, + "total_steps": len(state["plan"]), + "step": step["step"], + "description": step["description"], + "completed": state.get("completed_steps", []), + }) + +def execute_step(state: DeployState) -> dict: + """Execute the approved step and advance.""" + step = state["plan"][state["current_step"]] + # ... perform the actual deployment step ... + return { + "completed_steps": [step["step"]], + "current_step": state["current_step"] + 1, + "messages": [{"role": "assistant", "content": ( + f"Completed: {step['description']}" + )}], + } -Call `submit()` with the resume payload to continue execution. 
+def should_continue(state: DeployState) -> str: + if state["current_step"] < len(state["plan"]): + return "approve_step" + return END + +builder = StateGraph(DeployState) +builder.add_node("create_plan", create_plan) +builder.add_node("approve_step", approve_step) +builder.add_node("execute_step", execute_step) +builder.add_edge(START, "create_plan") +builder.add_edge("create_plan", "approve_step") +builder.add_edge("approve_step", "execute_step") +builder.add_conditional_edges("execute_step", should_continue) + +graph = builder.compile() +``` + + + ```typescript -approve() { - this.agent.submit(null, { resume: { approved: true } }); +import { + Component, + computed, + ChangeDetectionStrategy, +} from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + +interface StepApproval { + step_number: number; + total_steps: number; + step: string; + description: string; + completed: string[]; +} + +@Component({ + selector: 'app-deploy-approval', + templateUrl: './approval.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class DeployApprovalComponent { + agent = streamResource<{ + messages: BaseMessage[]; + plan: { step: string; description: string }[]; + current_step: number; + completed_steps: string[]; + }>({ + assistantId: 'deploy_agent', + }); + + currentStep = computed(() => { + const interrupt = this.agent.interrupt(); + return interrupt?.value as StepApproval | null; + }); + + progress = computed(() => { + const step = this.currentStep(); + if (!step) return 0; + return (step.completed.length / step.total_steps) * 100; + }); + + allInterrupts = computed(() => this.agent.interrupts()); + + approveStep() { + this.agent.submit(null, { resume: { approved: true } }); + } + + abortDeploy() { + this.agent.submit(null, { + resume: { approved: false, reason: 'Deployment aborted by user' }, + }); + } } +``` + + + + +```html +@if (currentStep(); as step) { +
    +

    Step {{ step.step_number }} of {{ step.total_steps }}

    + + +
    +
    +
    -reject() { - this.agent.submit(null, { resume: { approved: false, reason: 'User rejected' } }); + + @if (step.completed.length) { +
      + @for (done of step.completed; track done) { +
    • {{ done }}
    • + } +
    + } + + +
    + {{ step.step }} +

    {{ step.description }}

    +
    + +
    + + +
    +
    } ``` -## Multiple interrupts +
    +
    + +## Typed Interrupt Payloads with BagTemplate -The `interrupts()` signal tracks all interrupts received during a run, not just the current one. +By default, `interrupt()` returns an untyped object. The BagTemplate generic parameter on streamResource() lets you define the exact shape of your interrupt payloads, giving you full TypeScript safety throughout your component. + +BagTemplate is a type parameter on the streamResource configuration that maps signal names to their types. When you specify an interrupt type through BagTemplate, the `interrupt()` signal returns a properly typed object instead of `unknown`. This means your template expressions, computed signals, and event handlers all benefit from compile-time checking. ```typescript -// Track interrupt history -allInterrupts = computed(() => agent.interrupts()); -latestInterrupt = computed(() => agent.interrupt()); -interruptCount = computed(() => agent.interrupts().length); +import { streamResource, BagTemplate } from '@cacheplane/stream-resource'; + +// Define the exact shape of your interrupt payload +interface DeployApproval { + step_number: number; + total_steps: number; + step: string; + description: string; + completed: string[]; +} + +// Pass the interrupt type via BagTemplate +const agent = streamResource< + DeployState, + BagTemplate<{ interrupt: DeployApproval }> +>({ + assistantId: 'deploy_agent', +}); + +// Now interrupt() is typed — no casting needed +const step = agent.interrupt(); +// ^? Signal<{ value: DeployApproval } | null> + +// TypeScript catches errors at compile time +const num = step?.value.step_number; // number — correct +const bad = step?.value.nonexistent; // Error — property doesn't exist ``` - -Use the BagTemplate generic parameter to type your interrupt payloads for full TypeScript safety. + +Define your interrupt payload interfaces alongside your Python state schema. This creates a contract between your agent and your UI. 
When the Python payload shape changes, the TypeScript interface should change too. Consider generating types from a shared schema to keep them in sync. + + +## Timeout Handling + +Interrupts pause graph execution indefinitely by default — the agent waits until a human responds. In production, you often need to handle cases where no one responds within a reasonable time. There are two strategies for managing interrupt timeouts. + +**Server-side timeout with a background task:** Schedule a background job that checks for stale interrupts and resumes them with a default decision. + +```python +async def check_stale_interrupts(): + """Periodic task to auto-reject stale interrupts.""" + threads = await client.threads.search( + status="interrupted", + metadata={"interrupt_type": "approval"}, + ) + for thread in threads: + created = thread.updated_at + if (now() - created).total_seconds() > 3600: # 1 hour timeout + await client.runs.create( + thread["thread_id"], + assistant_id="approval_agent", + input=None, + command={"resume": { + "approved": False, + "reason": "Auto-rejected: approval timeout", + }}, + ) +``` + +**Client-side timeout in Angular:** Use a timer in your component to auto-reject if the user does not act. + +```typescript +import { effect } from '@angular/core'; +import { timer } from 'rxjs'; + +// Watch for interrupts and start a timeout +effect(() => { + const interrupt = this.agent.interrupt(); + if (interrupt) { + const sub = timer(5 * 60 * 1000).subscribe(() => { + // Auto-reject after 5 minutes of inaction + this.agent.submit(null, { + resume: { approved: false, reason: 'Approval timeout' }, + }); + }); + // Clean up if user responds before timeout + return () => sub.unsubscribe(); + } +}); +``` + + +Avoid running both server-side and client-side timeouts simultaneously. If both fire, the second resume call will fail because the graph already moved past the interrupt. 
Choose server-side timeouts for reliability (works even if the browser closes) or client-side timeouts for immediacy. + + + +Because interrupts are checkpointed, the user can close their browser, come back hours later, and still approve or reject the pending action. The graph state is frozen in the checkpoint store, not in browser memory. ## What's Next + + Give your agent short-term and long-term memory with the Store API. + - Resume conversations across page refreshes with thread persistence. + Configure checkpointers that keep interrupt state across deployments. - Stream token-by-token responses and tool progress in real time. + Stream token-by-token responses alongside interrupt events. Script interrupt events deterministically with MockStreamTransport. - - Full reference for streamResource options and returned signals. - diff --git a/apps/website/content/docs-v2/guides/memory.mdx b/apps/website/content/docs-v2/guides/memory.mdx index f2da7b9d5..ca507fdcc 100644 --- a/apps/website/content/docs-v2/guides/memory.mdx +++ b/apps/website/content/docs-v2/guides/memory.mdx @@ -1,82 +1,414 @@ # Memory -Memory in LangGraph preserves useful context that later steps can read back. streamResource() exposes memory through the messages and state signals, with thread persistence providing cross-session continuity. +Memory gives your LangGraph agent the ability to recall past interactions, user preferences, and learned facts. There are two distinct kinds: short-term memory scoped to a single thread (conversation), and long-term memory that persists across threads using the LangGraph Store API. streamResource() surfaces both through Angular Signals so your components stay reactive without manual state wiring. -Short-term memory lives within a thread (conversation history). Long-term memory persists across threads via LangGraph's memory store. 
+Short-term memory lives within a thread — it is the conversation history plus any custom state fields your agent accumulates during a run. Long-term memory lives in the LangGraph Store and survives across threads, users, and sessions. Think of short-term as "what happened in this conversation" and long-term as "what the agent knows about this user." -## Short-term memory (thread-scoped) +## Agent State with Custom Memory Fields -Every message in a thread is automatically preserved. When you reconnect with the same `threadId`, the full conversation history is restored. +Every LangGraph agent has a state schema. You control what the agent remembers by adding fields to that schema. Messages accumulate automatically, but you can define any additional fields the agent should track. -```typescript -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'memory_agent', - threadId: signal(userId()), // User-specific thread -}); + + -// Messages accumulate across the conversation -const messageCount = computed(() => chat.messages().length); +```python +from typing_extensions import TypedDict, Annotated +from operator import add +from langgraph.graph import END, START, StateGraph +from langchain_openai import ChatOpenAI -// Resume where you left off on next visit -// threadId persists, so history is restored -``` +llm = ChatOpenAI(model="gpt-5-mini") + +class State(TypedDict): + messages: Annotated[list, add] + user_preferences: dict # Accumulated user preferences + conversation_summary: str # Rolling summary of past context + mentioned_topics: list[str] # Topics the user has brought up + +def call_model(state: State) -> dict: + system = "You are a helpful assistant." 
+ if state.get("conversation_summary"): + system += f"\n\nPrevious context: {state['conversation_summary']}" + if state.get("user_preferences"): + system += f"\n\nUser preferences: {state['user_preferences']}" -## Accessing agent state as memory + response = llm.invoke([ + {"role": "system", "content": system}, + *state["messages"] + ]) + return {"messages": [response]} -The `value()` signal contains the full agent state, which can include custom memory fields. +def update_memory(state: State) -> dict: + """Extract preferences and topics from the latest exchange.""" + extraction = llm.invoke([ + {"role": "system", "content": ( + "Extract any user preferences and topics from " + "this conversation. Return JSON with keys: " + "preferences (dict), topics (list[str]), summary (str)." + )}, + *state["messages"][-4:] # Last two exchanges + ]) + parsed = parse_json(extraction.content) + return { + "user_preferences": { + **state.get("user_preferences", {}), + **parsed.get("preferences", {}), + }, + "mentioned_topics": parsed.get("topics", []), + "conversation_summary": parsed.get("summary", ""), + } + +builder = StateGraph(State) +builder.add_node("model", call_model) +builder.add_node("update_memory", update_memory) +builder.add_edge(START, "model") +builder.add_edge("model", "update_memory") +builder.add_edge("update_memory", END) + +graph = builder.compile() +``` + + + ```typescript +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + interface AgentState { messages: BaseMessage[]; - userPreferences: { theme: string; language: string }; - projectContext: { name: string; files: string[] }; + user_preferences: Record; + conversation_summary: string; + mentioned_topics: string[]; } -const agent = streamResource({ - assistantId: 'context_agent', - threadId: signal(projectId()), -}); +@Component({ + selector: 'app-memory-chat', + templateUrl: './memory.component.html', + 
changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class MemoryChatComponent { + agent = streamResource({ + assistantId: 'memory_agent', + threadId: signal(localStorage.getItem('memory-thread')), + onThreadId: (id) => localStorage.setItem('memory-thread', id), + }); + + // Reactive memory signals derived from agent state + preferences = computed(() => this.agent.value()?.user_preferences ?? {}); + summary = computed(() => this.agent.value()?.conversation_summary ?? ''); + topics = computed(() => this.agent.value()?.mentioned_topics ?? []); + messages = computed(() => this.agent.messages()); -// Read memory fields from agent state -const prefs = computed(() => agent.value().userPreferences); -const context = computed(() => agent.value().projectContext); + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } +} ``` -## Cross-session memory + + + +```html +
    + @for (msg of messages(); track msg) { +
    {{ msg.content }}
    + } + + @if (agent.isLoading()) { +
    Agent is thinking...
    + } +
    + + + +``` + +
    +
    + + +When `update_memory` returns `user_preferences`, the dict is merged into the existing state. For list fields using the `Annotated[list, add]` reducer, new items are appended. Design your state schema with these merge semantics in mind. + + +## Short-Term Memory (Thread-Scoped) + +Short-term memory is the simplest form: the conversation history and any accumulated state fields within a single thread. Every message, tool call, and state update is automatically checkpointed. When a user reconnects with the same `threadId`, the full history is restored. -Thread persistence enables memory that spans sessions. The agent decides what to store in its state. +```python +from langgraph.checkpoint.postgres import PostgresSaver + +checkpointer = PostgresSaver.from_connection_string(DATABASE_URL) +graph = builder.compile(checkpointer=checkpointer) + +# Every invocation within the same thread accumulates state +result = graph.invoke( + {"messages": [{"role": "user", "content": "I prefer dark mode"}]}, + config={"configurable": {"thread_id": "user_42_session"}} +) + +# Later invocation — same thread, memory intact +result = graph.invoke( + {"messages": [{"role": "user", "content": "What theme do I like?"}]}, + config={"configurable": {"thread_id": "user_42_session"}} +) +# Agent responds: "You mentioned you prefer dark mode." +``` + +On the Angular side, thread-scoped memory requires no extra code. 
The `threadId` signal handles it: ```typescript -// User returns days later — same threadId resumes context -const agent = streamResource({ +const chat = streamResource({ assistantId: 'memory_agent', - threadId: signal(localStorage.getItem('agent-thread')), - onThreadId: (id) => localStorage.setItem('agent-thread', id), + threadId: signal(userId()), // Same user = same thread = same memory }); -// Agent recalls past decisions, preferences, and context -// No explicit memory management needed on the Angular side +// chat.messages() restores full history on reconnect +// chat.value() restores all custom state fields +``` + +## Long-Term Memory (Cross-Thread) with the Store API + +Short-term memory disappears when you start a new thread. For knowledge that should persist across conversations — user preferences, learned facts, project context — use the LangGraph Store API. The Store is a key-value layer that any node can read from and write to, independent of the current thread. + + + + +```python +from langgraph.graph import END, START, StateGraph, MessagesState +from langgraph.store.base import BaseStore +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def recall_memories(state: MessagesState, *, store: BaseStore, config) -> dict: + """Load long-term memories for this user before responding.""" + user_id = config["configurable"]["user_id"] + + # Fetch all memories in this user's namespace + memories = store.search(("memories", user_id)) + memory_text = "\n".join( + f"- {m.value['content']}" for m in memories + ) + + system = ( + "You are a helpful assistant with long-term memory.\n\n" + f"What you remember about this user:\n{memory_text}" + ) + response = llm.invoke([ + {"role": "system", "content": system}, + *state["messages"] + ]) + return {"messages": [response]} + +def save_memories(state: MessagesState, *, store: BaseStore, config) -> dict: + """Extract and persist new facts to the Store.""" + user_id = 
config["configurable"]["user_id"] + + extraction = llm.invoke([ + {"role": "system", "content": ( + "Extract new facts about the user from the latest " + "exchange. Return a JSON list of strings. " + "Return [] if nothing new." + )}, + *state["messages"][-4:] + ]) + facts = parse_json(extraction.content) + + for fact in facts: + store.put( + ("memories", user_id), + key=str(uuid4()), + value={"content": fact}, + ) + + return {} + +builder = StateGraph(MessagesState) +builder.add_node("recall", recall_memories) +builder.add_node("save", save_memories) +builder.add_edge(START, "recall") +builder.add_edge("recall", "save") +builder.add_edge("save", END) + +graph = builder.compile() +``` + + + + +```typescript +import { Component, computed, signal, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource, BaseMessage } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-longterm-chat', + templateUrl: './memory.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class LongTermChatComponent { + // Each conversation gets a new thread, but the agent + // remembers the user across all of them via the Store. + agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'memory_agent', + config: { configurable: { user_id: 'user_42' } }, + }); + + messages = computed(() => this.agent.messages()); + + send(input: string) { + this.agent.submit({ + messages: [{ role: 'user', content: input }], + }); + } +} +``` + + + + +```html +
    + @for (msg of messages(); track msg) { +
    {{ msg.content }}
    + } + + @if (agent.isLoading()) { +
    Thinking...
    + } + +
    + + +
    +
    +``` + +
    +
    + + +The checkpointer saves thread state (short-term memory). The Store saves cross-thread knowledge (long-term memory). They serve different purposes and you will typically use both. The checkpointer is configured at compile time; the Store is injected into nodes that declare a `store` parameter. + + +## Semantic Memory with Vector Search + +For agents that accumulate hundreds or thousands of memories, keyword matching is not enough. The Store API supports semantic search with embeddings, so your agent can retrieve the most relevant memories for any given context. + +```python +from langchain_openai import OpenAIEmbeddings +from langgraph.store.base import BaseStore + +def recall_relevant(state: MessagesState, *, store: BaseStore, config) -> dict: + """Retrieve memories semantically related to the current question.""" + user_id = config["configurable"]["user_id"] + query = state["messages"][-1].content + + # Vector search — returns memories ranked by cosine similarity + results = store.search( + ("memories", user_id), + query=query, + limit=5, + ) + + memory_text = "\n".join( + f"- [{r.score:.2f}] {r.value['content']}" for r in results + ) + + response = llm.invoke([ + {"role": "system", "content": ( + "Relevant memories (similarity score in brackets):\n" + f"{memory_text}\n\n" + "Use these memories to personalize your response." + )}, + *state["messages"] + ]) + return {"messages": [response]} +``` + +The `store.search()` call accepts a `query` string and returns results ranked by vector similarity. You control how many results to retrieve with the `limit` parameter. Each result includes a `score` field (0 to 1) indicating how relevant the memory is to the query. + + +Semantic search requires an embedding model configured on the Store. LangGraph Platform handles this configuration in `langgraph.json`. When running locally, pass the embeddings provider when constructing your Store instance. 
+ + +## Surfacing Memory in Angular with value() + +The `value()` signal is the primary way memory surfaces in your Angular components. It contains the full agent state object, including all custom memory fields. Because it is a Signal, your template re-renders automatically through OnPush change detection whenever the agent state changes. + +```typescript +// The value() signal contains everything the agent knows +const state = agent.value(); + +// Access specific memory fields +const prefs = state?.user_preferences; +const summary = state?.conversation_summary; +const topics = state?.mentioned_topics; + +// Compose derived signals for template binding +const hasMemory = computed(() => { + const val = agent.value(); + return val?.conversation_summary || val?.mentioned_topics?.length; +}); ``` - -The agent controls what gets stored in memory. streamResource() just surfaces the current state. Design your agent's state schema to include the fields you want to persist. +For long-term memory stored in the Store, the agent must explicitly include retrieved memories in its response or state output. The Store lives server-side; your Angular app only sees what the agent puts into the thread state. + +## Memory Best Practices + + +Every field in your state schema is persisted by the checkpointer. Only include fields the agent actively uses. Avoid dumping raw LLM outputs into state — extract structured data instead. + + + +Thread state grows with every message and state update. For long-running conversations, consider summarizing older messages into a `conversation_summary` field and trimming the message list. This keeps checkpoints small and LLM context windows manageable. + + + +Use hierarchical namespaces like `("memories", user_id)` or `("project", project_id, "notes")` to keep long-term memories organized. This also makes cleanup straightforward — delete an entire namespace when a user requests data removal. 
## What's Next - Save thread IDs and resume conversations across sessions. + Configure checkpointers and thread storage for production deployments. Replay and branch agent runs from any past checkpoint. - - Understand how agent state flows into Angular Signals. + + Pause for human input before the agent acts on its memory. - - Test memory and state behavior with MockStreamTransport. + + How agent state flows from LangGraph into Angular Signals. diff --git a/apps/website/content/docs-v2/guides/persistence.mdx b/apps/website/content/docs-v2/guides/persistence.mdx index 2eb55eca9..73b62cc72 100644 --- a/apps/website/content/docs-v2/guides/persistence.mdx +++ b/apps/website/content/docs-v2/guides/persistence.mdx @@ -1,47 +1,260 @@ # Persistence -Thread persistence keeps conversations alive across page refreshes, browser restarts, and session changes. streamResource() manages thread state through the `threadId` signal and `onThreadId` callback. +Thread persistence keeps conversations alive across page refreshes, browser restarts, and server deployments. This guide covers configuring checkpointers on the Python side and wiring up thread management in your Angular components with streamResource(). -LangGraph checkpoints state at every super-step. streamResource() connects to these checkpoints via thread IDs, letting you resume exactly where you left off. +LangGraph checkpoints agent state at every super-step. Each checkpoint is keyed by a thread ID. streamResource() connects to these checkpoints automatically, so your users resume exactly where they left off — even if your server restarted between sessions. -## Basic thread persistence +## Python: Checkpointer Setup -Save the thread ID to localStorage so conversations survive page refreshes. +Every LangGraph agent needs a checkpointer to persist state between invocations. The checkpointer you choose depends on your environment. 
- - + + + +```python +from langgraph.checkpoint.memory import MemorySaver +from langgraph.graph import START, END, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState) -> dict: + return {"messages": [llm.invoke(state["messages"])]} + +builder = StateGraph(MessagesState) +builder.add_node("model", call_model) +builder.add_edge(START, "model") +builder.add_edge("model", END) + +# MemorySaver stores checkpoints in-process memory +# Fast for development — lost when the process restarts +graph = builder.compile(checkpointer=MemorySaver()) +``` + + + + +```python +from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver +from langgraph.graph import START, END, MessagesState, StateGraph + +# Persists to a local file — survives restarts, zero infrastructure +async with AsyncSqliteSaver.from_conn_string("checkpoints.db") as checkpointer: + builder = StateGraph(MessagesState) + builder.add_node("model", call_model) + builder.add_edge(START, "model") + builder.add_edge("model", END) + + graph = builder.compile(checkpointer=checkpointer) +``` + + + + +```python +from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver +from langgraph.graph import START, END, MessagesState, StateGraph + +DATABASE_URL = "postgresql://user:pass@localhost:5432/myapp" + +async with AsyncPostgresSaver.from_conn_string(DATABASE_URL) as checkpointer: + # Run migrations once on startup + await checkpointer.setup() + + builder = StateGraph(MessagesState) + builder.add_node("model", call_model) + builder.add_edge(START, "model") + builder.add_edge("model", END) + + graph = builder.compile(checkpointer=checkpointer) +``` + + + + + +MemorySaver is for development only — all state vanishes when the process exits. For anything users depend on, use PostgresSaver. SqliteSaver is a middle ground for prototypes and single-server deployments where you need persistence without a database. 
+ + +## Python: Thread IDs in Graph Invocation + +The thread ID is how LangGraph associates a conversation with its checkpoint history. Pass it in the `configurable` dict every time you invoke the graph: + +```python +# First message creates the thread +result = graph.invoke( + {"messages": [{"role": "user", "content": "What is LangGraph?"}]}, + config={"configurable": {"thread_id": "user_123"}} +) + +# Second message continues the same conversation +result = graph.invoke( + {"messages": [{"role": "user", "content": "How does it handle state?"}]}, + config={"configurable": {"thread_id": "user_123"}} +) +# The agent sees both messages — the full history is restored from the checkpoint +``` + + +Use stable, user-scoped identifiers for thread IDs. A common pattern is `f"{user_id}_{session_id}"` — this prevents cross-user data leaks and lets one user have multiple conversations. + + +## Angular: Basic Thread Persistence + +Save the thread ID to localStorage so conversations survive page refreshes. streamResource() handles thread creation and restoration automatically. 
+ + + ```typescript -// chat.component.ts -const chat = streamResource<{ messages: BaseMessage[] }>({ - assistantId: 'chat_agent', - threadId: signal(localStorage.getItem('threadId')), - onThreadId: (id) => localStorage.setItem('threadId', id), -}); +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { signal } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + // Restore thread from localStorage on mount + threadId: signal(localStorage.getItem('threadId')), + // Persist thread ID whenever a new thread is created + onThreadId: (id) => localStorage.setItem('threadId', id), + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} ``` - + ```html - - + @for (msg of chat.messages(); track $index) { -

    {{ msg.content }}

    +
    +

    {{ msg.content }}

    +
    +} + +@if (chat.isLoading()) { +
    Agent is thinking...
    +} +``` + +
    +
    + +## Angular: Thread-List Component + +A real chat application needs a sidebar showing all conversations. Here is a full thread-list component that manages multiple threads alongside your chat resource. + + + + +```typescript +import { ChangeDetectionStrategy, Component, signal, computed } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +interface Thread { + id: string; + title: string; + updatedAt: Date; +} + +@Component({ + selector: 'app-thread-list', + templateUrl: './thread-list.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ThreadListComponent { + threads = signal(this.loadThreads()); + activeThreadId = signal(null); + + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + threadId: this.activeThreadId, + onThreadId: (id) => { + this.activeThreadId.set(id); + this.addThread(id, 'New conversation'); + }, + }); + + activeThread = computed(() => + this.threads().find((t) => t.id === this.activeThreadId()) + ); + + selectThread(id: string) { + this.activeThreadId.set(id); + } + + newConversation() { + this.chat.switchThread(null); + // A new thread ID is assigned on the next submit + } + + private addThread(id: string, title: string) { + this.threads.update((list) => [ + { id, title, updatedAt: new Date() }, + ...list.filter((t) => t.id !== id), + ]); + this.saveThreads(); + } + + private loadThreads(): Thread[] { + return JSON.parse(localStorage.getItem('threads') ?? '[]'); + } + + private saveThreads() { + localStorage.setItem('threads', JSON.stringify(this.threads())); + } } ``` + + + +```html + + +
    + @if (chat.isThreadLoading()) { +
    Loading conversation...
    + } @else { + @for (msg of chat.messages(); track $index) { +
    {{ msg.content }}
    + } + } +
    +``` +
    -## Reactive thread switching +## Reactive Thread Switching -Pass a Signal as `threadId` to reactively switch between conversations. +When you pass a Signal as `threadId`, streamResource() reacts to every change. Set the signal and the conversation switches automatically — no imperative calls needed. ```typescript -// conversation-list.component.ts activeThreadId = signal(null); chat = streamResource<{ messages: BaseMessage[] }>({ @@ -50,57 +263,89 @@ chat = streamResource<{ messages: BaseMessage[] }>({ onThreadId: (id) => this.activeThreadId.set(id), }); -// Switch to a different conversation +// Clicking a thread in the sidebar triggers a reactive switch selectThread(id: string) { this.activeThreadId.set(id); - // streamResource automatically loads the new thread's state + // streamResource detects the signal change, fetches the thread's + // checkpoint from the server, and updates all derived signals } ``` -Use the `isThreadLoading()` signal to show a loading indicator while thread state is being fetched from the server. +Use the `isThreadLoading()` signal to show a skeleton UI while streamResource() fetches checkpoint state from the server. This avoids a flash of empty content when switching threads. -## Manual thread switching +## Manual Thread Switching -Use `switchThread()` for imperative thread changes that also reset derived state. +Use `switchThread()` for imperative thread changes. This is useful when you want to explicitly control when the switch happens — for example, after an animation completes or a modal closes. 
```typescript -// Reset and start a new conversation +// Start a fresh conversation (null = new thread on next submit) newConversation() { this.chat.switchThread(null); - // Creates a new thread on next submit } -// Switch to a specific thread +// Jump to a specific thread loadConversation(threadId: string) { this.chat.switchThread(threadId); } + +// Fork a conversation — create a new thread from current state +forkConversation() { + this.chat.switchThread(null); + this.chat.submit({ + messages: this.chat.messages(), + }); +} ``` -## Checkpoint recovery +## Checkpoint Recovery -When a connection drops, streamResource() can rejoin an in-progress run. +When a connection drops mid-stream, `joinStream()` reconnects to an in-progress run without restarting the agent. This prevents duplicate work and lost tokens. ```typescript -// Rejoin a running stream +// Rejoin a running stream after a network interruption await chat.joinStream(runId, lastEventId); -// Picks up from where the connection was lost +// Picks up from the last event — no duplicate agent execution ``` + +In most cases streamResource() handles reconnection internally. Use `joinStream()` directly only when you need explicit control — for example, when restoring a run ID from a URL parameter after a full page reload. + + +## Thread Lifecycle + + + +streamResource() reads the `threadId` signal. If it contains a value, the existing thread's checkpoint is fetched from the server. + + +If `threadId` is null, streamResource() creates a new thread via the LangGraph API and fires `onThreadId` with the new ID. + + +Each super-step is checkpointed server-side. The `messages()` signal updates in real time as events arrive. + + +Setting the `threadId` signal (or calling `switchThread()`) loads the target thread's latest checkpoint. All signals update to reflect the restored state. + + +`joinStream()` reconnects to the in-progress run. The agent does not restart — streaming resumes from the last received event. 
+ + + ## What's Next - Pause agent execution and wait for human input with interrupt signals. + Pause agent execution and wait for human approval before continuing. - Preserve context across sessions using LangGraph's memory store. + Preserve long-term context across sessions with LangGraph's memory store. Stream token-by-token responses and tool progress in real time. - Test agent interactions deterministically with MockStreamTransport. + Test thread persistence and switching deterministically with MockStreamTransport. diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index b3dc03962..0a9df886a 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -15,7 +15,7 @@ Create a `streamResource` in your component, pass it a message, and bind to the ```typescript import { Component, computed } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) @@ -24,10 +24,10 @@ export class ChatComponent { assistantId: 'chat_agent', }); - readonly isStreaming = computed(() => this.chat.status() === 'streaming'); + readonly isStreaming = computed(() => this.chat.status() === 'loading'); send(text: string) { - this.chat.stream({ messages: [{ role: 'user', content: text }] }); + this.chat.submit({ messages: [{ role: 'user', content: text }] }); } } ``` @@ -126,7 +126,7 @@ If the SSE connection drops or the agent throws, `status()` transitions to `'err ```typescript import { Component, computed, effect } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', 
templateUrl: './chat.component.html' }) @@ -139,7 +139,7 @@ export class ChatComponent { retry() { // Re-stream using the same thread so context is preserved - this.chat.stream(); + this.chat.submit(); } } ``` @@ -184,22 +184,22 @@ The value is in milliseconds. A `throttle` of `0` (default) disables batching an | Background summarisation | 150 ms | -Each call to `chat.stream()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. +Each call to `chat.submit()` opens a new SSE connection. Connections are automatically closed when the agent run completes or when the Angular component is destroyed — you do not need to manage the lifecycle manually. ## What's Next - + Resume conversations across page reloads using thread IDs and checkpointers. - + Pause agent execution mid-stream to collect human input before continuing. - + Unit-test components that use streamResource with the built-in test harness. - + Full option reference for streamResource(), including all configuration keys. diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index 2663160e1..5519b47be 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -76,7 +76,7 @@ const pipelineStatus = computed(() => { Render live progress for each subagent using the signals above. - + ```typescript import { computed } from '@angular/core'; @@ -183,16 +183,13 @@ Use **subagents** when tasks are independent and can run in parallel, when each ## What's Next - + Understand how streamResource() surfaces tokens, status, and errors in real time. - + Write unit and integration tests for orchestrator graphs and subagent interactions. - + Full reference for streamResource() options, signals, and subagent configuration. 
- - Patterns for retries, fallbacks, and surfacing errors from deeply nested agents. - diff --git a/apps/website/content/docs-v2/guides/testing.mdx b/apps/website/content/docs-v2/guides/testing.mdx index 0d0f7f80f..453bd54ce 100644 --- a/apps/website/content/docs-v2/guides/testing.mdx +++ b/apps/website/content/docs-v2/guides/testing.mdx @@ -1,19 +1,55 @@ # Testing -MockStreamTransport lets you test agent interactions deterministically without a running LangGraph server. Script exact event sequences and step through them in your Angular test specs. +MockStreamTransport lets you test agent interactions deterministically without a running LangGraph server. Script exact event sequences, step through streaming lifecycles, and verify every signal transition in your Angular test specs. -MockStreamTransport eliminates network dependencies, timing issues, and server state. Every test run produces identical results. +MockStreamTransport eliminates network dependencies, timing issues, and server state. Every test run produces identical results. Your CI pipeline stays green. -## Basic test setup +## Python: Testing the Agent -Create a MockStreamTransport with scripted events and pass it to streamResource. +Before testing the Angular side, make sure your agent logic is correct. LangGraph agents are plain Python functions — test them directly with pytest. 
+ +```python +import pytest +from langchain_core.messages import HumanMessage +from my_agent.agent import graph + +@pytest.mark.asyncio +async def test_agent_responds(): + result = await graph.ainvoke( + {"messages": [HumanMessage(content="Hello")]}, + config={"configurable": {"thread_id": "test_1"}}, + ) + assert len(result["messages"]) >= 2 + assert result["messages"][-1].type == "ai" + +@pytest.mark.asyncio +async def test_agent_uses_tools(): + result = await graph.ainvoke( + {"messages": [HumanMessage(content="Search for LangGraph docs")]}, + config={"configurable": {"thread_id": "test_2"}}, + ) + # Verify the agent called the search tool + tool_messages = [m for m in result["messages"] if m.type == "tool"] + assert len(tool_messages) > 0 +``` + + +With MemorySaver and a mocked LLM, agent tests run in milliseconds. Use `langchain_core.language_models.FakeListChatModel` to remove the LLM dependency entirely. + + +## MockStreamTransport: Basic Setup + +On the Angular side, MockStreamTransport replaces the real HTTP transport. Create it inside `TestBed.runInInjectionContext` so streamResource() has access to Angular's dependency injection. + + + ```typescript import { TestBed } from '@angular/core/testing'; -import { MockStreamTransport } from '@cacheplane/stream-resource'; -import type { StreamEvent } from '@cacheplane/stream-resource'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; +import type { BaseMessage } from '@cacheplane/stream-resource'; describe('ChatComponent', () => { it('should display agent messages', () => { @@ -25,9 +61,12 @@ describe('ChatComponent', () => { transport, }); - // Emit a values event + // Emit a values event — simulates the agent responding transport.emit([ - { type: 'values', messages: [{ role: 'assistant', content: 'Hello!' }] }, + { + type: 'values', + messages: [{ role: 'assistant', content: 'Hello!' 
}], + }, ]); expect(chat.messages().length).toBe(1); @@ -37,72 +76,441 @@ describe('ChatComponent', () => { }); ``` -## Scripting event sequences + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} +``` + + + + +## Scripted Event Sequences -Pass event batches to the constructor for sequential playback. +Pass event batches to the constructor for sequential playback. Each call to `nextBatch()` advances one step — giving you frame-by-frame control over what the component sees. ```typescript const transport = new MockStreamTransport([ - // Batch 1: Initial response + // Batch 1: Agent starts thinking [{ type: 'values', messages: [{ role: 'assistant', content: 'Analyzing...' }] }], - // Batch 2: Final response - [{ type: 'values', messages: [{ role: 'assistant', content: 'Done!' }] }], + // Batch 2: Agent finishes + [{ type: 'values', messages: [{ role: 'assistant', content: 'Here is your answer.' 
}] }], ]); -// Advance through batches -const batch1 = transport.nextBatch(); // First batch -const batch2 = transport.nextBatch(); // Second batch +TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + chat.submit({ messages: [{ role: 'user', content: 'Explain signals' }] }); + + // Step through each batch + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Analyzing...'); + + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Here is your answer.'); +}); ``` -## Testing interrupts +## Testing the Streaming Lifecycle + +The most common test pattern verifies the full submit-to-resolved lifecycle: submit triggers loading, values arrive, and the status settles to resolved. -Script an interrupt event to test human-in-the-loop flows. + + ```typescript -it('should handle interrupts', () => { - const transport = new MockStreamTransport(); +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('streaming lifecycle', () => { + it('should transition through loading → values → resolved', () => { + const transport = new MockStreamTransport([ + [{ type: 'values', messages: [{ role: 'assistant', content: 'Thinking...' }] }], + [{ type: 'values', messages: [{ role: 'assistant', content: 'Done!' 
}] }], + ]); - TestBed.runInInjectionContext(() => { - const agent = streamResource({ - assistantId: 'approval_agent', - transport, + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + // Initial state + expect(chat.status()).toBe('idle'); + expect(chat.messages()).toEqual([]); + + // Submit triggers loading + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + expect(chat.status()).toBe('loading'); + expect(chat.isLoading()).toBe(true); + + // First batch — partial response + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Thinking...'); + expect(chat.status()).toBe('loading'); + + // Second batch — final response + transport.nextBatch(); + expect(chat.messages()[0].content).toBe('Done!'); + + // Stream completes + transport.complete(); + expect(chat.status()).toBe('resolved'); + expect(chat.isLoading()).toBe(false); }); + }); +}); +``` - // Emit an interrupt - transport.emit([ - { type: 'interrupt', value: { action: 'delete', risk: 'high' } }, - ]); + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; - expect(agent.interrupt()).toBeDefined(); - expect(agent.interrupt()?.value.risk).toBe('high'); +@Component({ + selector: 'app-chat', + template: ` + @if (chat.isLoading()) { +
    Thinking...
    + } + @for (msg of chat.messages(); track $index) { +
    {{ msg.content }}
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + + send(text: string) { + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } +} +``` + +
    +
    + +## Testing Interrupts + +Script an interrupt event to test human-in-the-loop flows. Verify the interrupt signal surfaces the payload, then resume and confirm the agent continues. + + + + +```typescript +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('interrupt handling', () => { + it('should surface interrupt and resume on approval', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'approval_agent', + transport, + }); + + // Agent hits an interrupt + transport.emit([ + { + type: 'interrupt', + value: { action: 'delete_account', risk: 'high' }, + }, + ]); + + // Verify interrupt signal + expect(agent.interrupt()).toBeDefined(); + expect(agent.interrupt()?.value.action).toBe('delete_account'); + expect(agent.interrupt()?.value.risk).toBe('high'); + + // User approves — resume the agent + agent.submit(null, { resume: { approved: true } }); + + // Agent continues after approval + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Account deleted.' }], + }, + ]); + + expect(agent.interrupt()).toBeNull(); + expect(agent.messages()[0].content).toBe('Account deleted.'); + }); }); }); ``` -## Testing errors + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-approval', + template: ` + @if (agent.interrupt(); as interrupt) { +
    +

    Action: {{ interrupt.value.action }}

    +

    Risk: {{ interrupt.value.risk }}

    + + +
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ApprovalComponent { + agent = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'approval_agent', + }); + + approve() { + this.agent.submit(null, { resume: { approved: true } }); + } + + reject() { + this.agent.submit(null, { resume: { approved: false } }); + } +} +``` + +
    +
    + +## Testing Errors + +Inject errors with `emitError()` to verify your component handles failures gracefully. -Inject errors to test error handling. + + ```typescript -it('should surface errors', () => { - const transport = new MockStreamTransport(); +import { TestBed } from '@angular/core/testing'; +import { MockStreamTransport, streamResource } from '@cacheplane/stream-resource'; + +describe('error handling', () => { + it('should surface errors and set error status', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); - TestBed.runInInjectionContext(() => { - const chat = streamResource({ - assistantId: 'test_agent', - transport, + // Simulate a connection failure + transport.emitError(new Error('Connection lost')); + + expect(chat.error()).toBeDefined(); + expect(chat.error()?.message).toBe('Connection lost'); + expect(chat.status()).toBe('error'); + expect(chat.isLoading()).toBe(false); }); + }); + + it('should recover from errors on retry', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); - transport.emitError(new Error('Connection lost')); + // First attempt fails + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + transport.emitError(new Error('Timeout')); + expect(chat.status()).toBe('error'); - expect(chat.error()).toBeDefined(); - expect(chat.status()).toBe('error'); + // Retry succeeds + chat.submit({ messages: [{ role: 'user', content: 'Hello' }] }); + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Sorry for the delay!' 
}], + }, + ]); + + expect(chat.status()).not.toBe('error'); + expect(chat.messages()[0].content).toBe('Sorry for the delay!'); + }); }); }); ``` - -streamResource() must be called within an Angular injection context. In tests, wrap calls in `TestBed.runInInjectionContext()`. + + + +```typescript +import { ChangeDetectionStrategy, Component } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; + +@Component({ + selector: 'app-chat', + template: ` + @if (chat.error(); as err) { +
    +

    {{ err.message }}

    + +
    + } + `, + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ChatComponent { + chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'chat_agent', + }); + private lastMessage = ''; + + send(text: string) { + this.lastMessage = text; + this.chat.submit({ messages: [{ role: 'user', content: text }] }); + } + + retry() { + this.send(this.lastMessage); + } +} +``` + +
    +
    + +## Testing Thread Switching + +Verify that switching threads loads the correct conversation state and clears the previous thread's messages. + +```typescript +describe('thread switching', () => { + it('should load new thread state on switch', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const threadId = signal('thread_A'); + + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + threadId, + transport, + }); + + // Thread A has messages + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Thread A response' }], + }, + ]); + expect(chat.messages()[0].content).toBe('Thread A response'); + + // Switch to thread B + chat.switchThread('thread_B'); + + // Thread B loads its own state + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Thread B response' }], + }, + ]); + expect(chat.messages()[0].content).toBe('Thread B response'); + }); + }); + + it('should create a new thread when switching to null', () => { + const transport = new MockStreamTransport(); + + TestBed.runInInjectionContext(() => { + const chat = streamResource<{ messages: BaseMessage[] }>({ + assistantId: 'test_agent', + transport, + }); + + // Start a conversation + transport.emit([ + { + type: 'values', + messages: [{ role: 'assistant', content: 'Hello' }], + }, + ]); + + // Switch to new thread + chat.switchThread(null); + expect(chat.messages()).toEqual([]); + }); + }); +}); +``` + +## Test Setup Workflow + + + +Make sure `@cacheplane/stream-resource` is available in your test environment. MockStreamTransport ships with the main package — no extra install needed. + + +Instantiate `MockStreamTransport` with optional pre-scripted batches for sequential playback, or leave it empty for imperative `emit()` calls. + + +Call `TestBed.runInInjectionContext(() => { ... 
})` so streamResource() can access Angular's injector for signal creation and cleanup. + + +Pass the transport to streamResource() via the `transport` option. All other options (assistantId, threadId, onThreadId) work identically to production code. + + +Use `transport.emit()` for ad-hoc events, `transport.nextBatch()` for pre-scripted sequences, or `transport.emitError()` for failure scenarios. + + +Read signals like `chat.messages()`, `chat.status()`, `chat.interrupt()`, and `chat.error()` to verify your component reacts correctly. + + + +## Integration Testing + +For end-to-end confidence, run tests against a real LangGraph dev server. The LangGraph CLI starts a local server that your tests can hit directly. + +```bash +# Start the dev server +langgraph dev --config langgraph.json + +# Run Angular tests against it (no MockStreamTransport needed) +ng test --watch=false +``` + + +Integration tests hit a real server and (potentially) a real LLM. Reserve them for CI pipelines or pre-release smoke tests. Use MockStreamTransport for the vast majority of your test suite — it runs in milliseconds with zero external dependencies. ## What's Next @@ -112,10 +520,10 @@ streamResource() must be called within an Angular injection context. In tests, w Understand the SSE event model your tests simulate. - Test human-in-the-loop approval flows with scripted interrupt events. + Build human-in-the-loop approval flows tested with scripted interrupt events. - - Configure streamResource() for production LangGraph Cloud. + + Thread persistence patterns that pair with thread-switching tests. Full reference for MockStreamTransport options and methods. 
diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index 37501fbb9..743025e54 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -80,7 +80,7 @@ Expose checkpoint history directly in your component to let users scrub through ```typescript import { Component, inject, computed } from '@angular/core'; -import { streamResource } from '@stream-resource/angular'; +import { streamResource } from '@cacheplane/stream-resource'; import { AgentService } from './agent.service'; @Component({ @@ -159,16 +159,13 @@ Time travel is most useful during development. Inspect why an agent chose a part ## What's Next - + Configure thread storage so checkpoints survive page reloads and are available across sessions. - + Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. - + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. - - Deep dive into branch management, merging strategies, and presenting multi-branch UIs to end users. - diff --git a/apps/website/next-env.d.ts b/apps/website/next-env.d.ts index c4b7818fb..fdbfe5258 100644 --- a/apps/website/next-env.d.ts +++ b/apps/website/next-env.d.ts @@ -1,6 +1,6 @@ /// /// -import "./.next/dev/types/routes.d.ts"; +import "./../../dist/apps/website/.next/types/routes.d.ts"; // NOTE: This file should not be edited // see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md b/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md new file mode 100644 index 000000000..409188dc1 --- /dev/null +++ b/docs/superpowers/plans/2026-04-04-docs-comprehensive-overhaul.md @@ -0,0 +1,234 @@ +# Comprehensive Docs Overhaul — Master Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Bring all 18 docs pages to gold standard quality — every page shows both Python agent code AND Angular streamResource code, uses correct MDX syntax, has 200+ lines of rich content, and tells the product story. + +**Architecture:** Each task rewrites one MDX file. The gold standard is `introduction.mdx` (337 lines) and `langgraph-basics.mdx` (384 lines). Every page should pair Python LangGraph patterns with Angular streamResource consumption, use correct Tab label syntax, include Callouts, Steps, and CardGroup navigation. + +**Tech Stack:** MDX with custom components (Callout, Steps, Tabs/Tab with label prop, CardGroup/Card, FeatureChips, ArchFlowDiagram) + +--- + +## Phase 0: Critical Fixes (do first, affects all pages) + +### Task 0: Fix Cross-Cutting Issues + +**Files:** Multiple + +- [ ] **Step 1: Fix import path inconsistency** + +Search all MDX and TSX files for `@ngxp/stream-resource` and `@stream-resource/angular`. Replace ALL with `@cacheplane/stream-resource`. + +Run: `grep -rn "@ngxp/stream-resource\|@stream-resource/angular" apps/website/content/docs-v2/ apps/website/src/` + +Replace all occurrences with `@cacheplane/stream-resource`. + +- [ ] **Step 2: Fix API method inconsistency** + +Search for `.stream(` in docs (should be `.submit(`). Search for `status() === 'streaming'` (should be `status() === 'loading'`). 
+ +- [ ] **Step 3: Fix broken links** + +Search for `/docs-v2/` (should be `/docs/`). Search for `/docs/guides/branching` and `/docs/guides/error-handling` (don't exist — remove or replace). + +- [ ] **Step 4: Fix unclosed code fence in state-management.mdx** + +Line ~60 has an unclosed TypeScript code fence that swallows the rest of the page. + +- [ ] **Step 5: Fix .tsx file extensions** + +Search for `.tsx` in Tab labels (should be `.ts` — this is Angular, not React). + +- [ ] **Step 6: Commit** + +```bash +git add -A +git commit -m "fix(website): resolve import paths, API naming, broken links, code fence" +``` + +--- + +## Phase 1: Rewrite THIN Pages (highest impact) + +Each page below needs to be expanded to 200+ lines with Python + Angular code pairs. + +### Task 1: Rewrite `concepts/angular-signals.mdx` (76 → 250+ lines) + +Current: Surface-level primer. No Python code. No streaming lifecycle explanation. + +New content needed: +- How `toSignal()` converts BehaviorSubjects internally +- Streaming lifecycle: idle → loading → streaming tokens → resolved +- `computed()` for derived AI state (message count, last message, tool progress) +- `effect()` for side effects (analytics, logging, error reporting) +- A complete component example showing all signal patterns +- Performance: why Signals + OnPush is efficient for high-frequency streaming +- Python agent code showing what produces the streaming events that Signals consume + +### Task 2: Rewrite `concepts/agent-architecture.mdx` (70 → 250+ lines) + +Current: 5-bullet overview, single code snippet, 3-line pattern list. 
+ +New content needed: +- Full ReAct agent pattern with Python code + Angular streamResource code +- Tool calling: Python `@tool` decorator → Angular `toolCalls()` signal +- Multi-agent: Python supervisor graph → Angular `subagents()` signal +- Error handling and recovery patterns +- Planning phase: how LLMs decide actions +- Checkpointing: how `history()` and `branch()` expose decisions + +### Task 3: Rewrite `concepts/state-management.mdx` (83 → 200+ lines) + +Current: Has syntax error (unclosed code fence). No Python code. ASCII diagram. + +New content needed: +- Fix unclosed code fence +- Python TypedDict with reducers → TypeScript interface mapping +- How `Annotated[list, add]` works and why messages accumulate +- State updates during streaming (partial values) +- Checkpoint model: persistence, restore, branching +- Tabs showing Python state definition + Angular consumption +- Replace ASCII diagram with Steps component + +### Task 4: Rewrite `guides/memory.mdx` (83 → 200+ lines) + +Current: Thinnest guide. No Tabs, no Python, no template code. + +New content needed: +- Python: agent state with memory fields, LangGraph Store API +- Short-term (thread-scoped) vs long-term (cross-thread) memory +- Semantic memory with vector search +- Tabs: TypeScript component + Angular template for memory-aware UI +- How memory updates surface through `value()` signal + +### Task 5: Rewrite `guides/interrupts.mdx` (96 → 200+ lines) + +Current: No Python code. Dangling reference to BagTemplate. Tab syntax wrong. + +New content needed: +- Python: `raise Interrupt(value={...})` in agent node +- Python: graph structure with approval node +- Full approval component: TypeScript + Template in Tabs +- Multi-step approval pattern +- Typed interrupt payloads with BagTemplate (explain the reference) +- Steps component for interrupt lifecycle +- Fix Tab syntax to use `label` prop + +### Task 6: Rewrite `guides/persistence.mdx` (107 → 200+ lines) + +Current: No Python code. 
Tab syntax wrong. + +New content needed: +- Python: checkpointer setup (MemorySaver, PostgresSaver) +- Python: thread_id in graph invocation +- Full thread-list component: TypeScript + Template +- Thread switching UI pattern +- Fix Tab syntax to use `label` prop + +### Task 7: Rewrite `guides/testing.mdx` (124 → 200+ lines) + +Current: No Tabs, no Python, no template code. + +New content needed: +- Python: how to test the agent side +- Tabs: spec file + component file pairs +- Testing subagent interactions +- Testing interrupts and thread switching +- Integration testing with real LangGraph dev server +- Steps for test setup workflow + +### Task 8: Rewrite `guides/deployment.mdx` (108 → 200+ lines) + +Current: Tab syntax wrong. Introduction page has better deployment content. + +New content needed: +- Python: LangGraph Cloud deployment (langgraph.json, CLI) +- LangSmith deployment walkthrough +- Authentication / API key configuration +- CORS configuration for SSE +- CI/CD pipeline example +- Monitoring and health checks +- Fix Tab syntax to use `label` prop + +--- + +## Phase 2: Polish CLOSE Pages + +### Task 9: Polish `guides/streaming.mdx` (206 lines — fix issues) + +Fix: +- Import path: `@stream-resource/angular` → `@cacheplane/stream-resource` +- `.stream()` → `.submit()` +- `'streaming'` status → `'loading'` +- Add Python agent showing `stream_mode` configuration +- Add `ChangeDetectionStrategy.OnPush` to component + +### Task 10: Polish `guides/time-travel.mdx` (175 lines — fix issues) + +Fix: +- `.tsx` extension in Tab label → `.ts` +- Remove broken link to `/docs-v2/guides/branching` +- Add Python checkpointer setup code +- Expand to 200+ lines + +### Task 11: Polish `guides/subgraphs.mdx` (199 lines — fix issues) + +Fix: +- `.tsx` extension in Tab label → `.ts` +- Remove broken link to `/docs-v2/guides/error-handling` +- Add Python subgraph composition code + +### Task 12: Polish `getting-started/quickstart.mdx` (131 lines) + +Fix: +- Tab syntax: 
`items={[...]}` → `<Tab label="...">`
+- Replace plain `##` numbered headings with `<Steps>`/`<Step>`
+- Add `ChangeDetectionStrategy.OnPush`
+- Add error display (`chat.error()`) to template
+- Add agent setup context or link
+
+### Task 13: Polish `getting-started/installation.mdx` (103 lines)
+
+Fix:
+- Tab syntax: `items={[...]}` → `<Tab label="...">`
+- Fix `process.env` error → use Angular `environment.ts`
+- Fix verify example (needs injection context)
+- Add troubleshooting section
+- Expand "Next steps" to 4+ cards
+
+---
+
+## Phase 3: Expand API Pages
+
+### Task 14: Expand 4 API Reference Pages
+
+Fix import path `@ngxp/stream-resource` → `@cacheplane/stream-resource` in all 4.
+Add "What's Next" CardGroup to all 4.
+Expand intros with more context about when/why to use each.
+
+---
+
+## Execution Strategy
+
+**Phase 0** (Task 0): Do first — fixes affect all pages. Single commit.
+**Phase 1** (Tasks 1-8): Highest impact. 8 full rewrites. Dispatch as parallel subagents.
+**Phase 2** (Tasks 9-13): Polish passes. 5 targeted fixes. Dispatch as parallel subagents.
+**Phase 3** (Task 14): API pages. Single task.
+
+Total: 15 tasks, ~14 files rewritten. 
+ +## Quality Checklist (apply to every page) + +- [ ] 200+ lines of content +- [ ] Python LangGraph code showing the agent/server pattern +- [ ] Angular streamResource code showing the frontend consumption +- [ ] Both paired together to tell the product story +- [ ] All imports use `@cacheplane/stream-resource` +- [ ] All Tab components use `` syntax +- [ ] `ChangeDetectionStrategy.OnPush` in component examples +- [ ] At least 2 Callouts (tip, info, or warning) +- [ ] "What's Next" CardGroup with 4+ cards +- [ ] No broken links +- [ ] Correct API method names (`.submit()`, not `.stream()`) +- [ ] Correct status values (`'loading'`, not `'streaming'`) From 349578a986eeb26686e10bcd935b4430c134cd68 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 17:10:44 -0700 Subject: [PATCH 009/187] =?UTF-8?q?docs:=20Phase=202+3=20=E2=80=94=20polis?= =?UTF-8?q?h=20guides,=20fix=20quickstart/install,=20expand=20API=20pages?= =?UTF-8?q?=20(#13)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(website): polish quickstart and installation with fixes * docs(website): expand API reference pages with navigation and context * docs(website): polish streaming, time-travel, subgraphs with Python code * fix(website): fix Callout type warn in installation --- .../docs-v2/api/fetch-stream-transport.mdx | 43 +++++- .../docs-v2/api/mock-stream-transport.mdx | 87 ++++++++++-- .../docs-v2/api/provide-stream-resource.mdx | 48 ++++++- .../content/docs-v2/api/stream-resource.mdx | 53 ++++++- .../docs-v2/getting-started/installation.mdx | 51 +++++-- .../docs-v2/getting-started/quickstart.mdx | 39 +++-- .../content/docs-v2/guides/streaming.mdx | 66 ++++++++- .../content/docs-v2/guides/subgraphs.mdx | 121 +++++++++++++--- .../content/docs-v2/guides/time-travel.mdx | 134 +++++++++++++++++- 9 files changed, 582 insertions(+), 60 deletions(-) diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx 
b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index be313baed..790698e84 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -2,7 +2,12 @@ `FetchStreamTransport` is the production-ready transport that opens a real server-sent event connection using the browser's `fetch` API and reads a `ReadableStream` response body. It is the default transport you register with `provideStreamResource` in production builds. -You rarely need to interact with `FetchStreamTransport` directly — simply provide it once at the application level and every `streamResource` will use it automatically. You would reach for it explicitly only when constructing a resource outside the normal DI tree or when you need to override the transport for a single resource while keeping the global default intact. +## When you interact with it directly + +In most apps you will never import or inject `FetchStreamTransport` by name — you register it once in `provideStreamResource` and forget about it. The two cases where you reach for it explicitly are: + +1. **Per-resource override** — you want one resource to use a different transport than the global default while everything else stays on `FetchStreamTransport`. +2. **Outside the DI tree** — you are constructing a resource in a context where global providers are not available and you need to supply the transport manually. ```ts import { inject } from '@angular/core'; @@ -15,10 +20,46 @@ const events = streamResource({ }); ``` +## How it works + +`FetchStreamTransport` makes a `fetch` call to the given URL and expects the server to respond with `Content-Type: text/event-stream`. It then reads the `ReadableStream` body line-by-line, parses SSE `data:` fields, and emits each parsed JSON value into the resource signal. 
+ +The transport handles: + +- **Backpressure** — reads chunks at the pace the browser delivers them +- **Cancellation** — aborts the underlying `fetch` when `interrupt()` is called or the resource is destroyed +- **Error propagation** — network errors and non-2xx responses surface through `resource.error()` + `FetchStreamTransport` implements the `StreamTransport` interface. You can create custom transports (e.g. WebSocket-backed) by implementing the same interface and providing them in place of this class. +## What's Next + + + + Learn how the SSE lifecycle maps to resource signals and how to handle reconnects. + + + Server configuration for SSE: headers, timeouts, and edge runtime considerations. + + + The test-time counterpart — push values synchronously without a real server. + + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index d9ebd13c8..31fbb93fd 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -2,31 +2,100 @@ `MockStreamTransport` is a test-friendly transport that replaces real network calls with an in-memory event emitter. Use it in unit and component tests to push values on demand and assert against your component's reactive state without a running server. +## Complete test example + +The pattern below covers the full lifecycle: configure the transport in `TestBed`, create the component, emit values, and assert signal state. 
+ ```ts +import { Component, inject } from '@angular/core'; import { TestBed } from '@angular/core/testing'; import { provideStreamResource, MockStreamTransport, + streamResource, } from '@cacheplane/stream-resource'; -beforeEach(() => { - TestBed.configureTestingModule({ - providers: [provideStreamResource({ transport: MockStreamTransport })], +@Component({ template: '' }) +class RepoComponent { + readonly repo = streamResource<{ name: string }>({ + url: () => '/api/repos/42', }); -}); +} -it('reflects streamed value', () => { - const transport = TestBed.inject(MockStreamTransport); - // Emit a value into the stream - transport.emit('/api/repos/42', { id: 42, name: 'my-repo' }); - // Assert your component's signal updated accordingly +describe('RepoComponent', () => { + let transport: MockStreamTransport; + + beforeEach(() => { + TestBed.configureTestingModule({ + imports: [RepoComponent], + providers: [provideStreamResource({ transport: MockStreamTransport })], + }); + transport = TestBed.inject(MockStreamTransport); + }); + + it('reflects the streamed value', () => { + const fixture = TestBed.createComponent(RepoComponent); + fixture.detectChanges(); + + // Push a value into the stream — synchronous, no fakeAsync needed + transport.emit('/api/repos/42', { name: 'my-repo' }); + fixture.detectChanges(); + + expect(fixture.componentInstance.repo.value()).toEqual({ name: 'my-repo' }); + expect(fixture.componentInstance.repo.status()).toBe('streaming'); + }); + + it('surfaces errors through the error signal', () => { + const fixture = TestBed.createComponent(RepoComponent); + fixture.detectChanges(); + + transport.error('/api/repos/42', new Error('not found')); + fixture.detectChanges(); + + expect(fixture.componentInstance.repo.status()).toBe('error'); + expect(fixture.componentInstance.repo.error()).toBeInstanceOf(Error); + }); }); ``` +## MockStreamTransport API + +| Method | Description | +|--------|-------------| +| `emit(url, value)` | Pushes a single value 
into the stream at the given URL path. | +| `error(url, err)` | Triggers an error on the stream at the given URL path. | +| `complete(url)` | Closes the stream cleanly, as if the server sent the final event. | + Because `MockStreamTransport` is synchronous by default, you can emit values and assert state changes in the same test tick — no `fakeAsync` or `tick` required. +## What's Next + + + + Full testing patterns including component harnesses and multi-stream scenarios. + + + The production transport that MockStreamTransport replaces in tests. + + + Full reference for the primitive you are testing against. + + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 4863cf489..3375f0b2f 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -1,6 +1,8 @@ # provideStreamResource() -`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. Call it inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. +`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. Call it once inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. + +This is the single configuration point for the entire library. Rather than configuring each resource individually, you declare your transport here and every `streamResource` call throughout the app inherits it automatically. 
```ts import { bootstrapApplication } from '@angular/platform-browser'; @@ -19,10 +21,54 @@ bootstrapApplication(AppComponent, { }); ``` +## Global configuration + +| Option | Type | Description | +|--------|------|-------------| +| `transport` | `Type` | The transport class to inject when resources request `StreamTransport`. Required. | + +## Swapping transports by environment + +Because `provideStreamResource` accepts a class token, you can vary the transport based on your environment without touching any component code: + +```ts +// main.ts — production +provideStreamResource({ transport: FetchStreamTransport }) + +// main.spec.ts / TestBed — tests +provideStreamResource({ transport: MockStreamTransport }) +``` + Swap `FetchStreamTransport` for `MockStreamTransport` (or any custom class implementing the `StreamTransport` interface) to change the transport for all resources at once — useful for testing or SSR. +## What's Next + + + + Step-by-step setup guide including peer dependencies and NgModule support. + + + Configure transports for production, SSR, and edge runtimes. + + + Full reference for the core primitive you configure here. + + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index e383d3164..fb4135410 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -2,6 +2,8 @@ `streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. +When the `url` signal changes, the resource tears down the previous connection and opens a fresh one automatically. You never write subscription management or cleanup logic yourself. 
+ ```ts import { streamResource } from '@cacheplane/stream-resource'; @@ -12,10 +14,31 @@ const repo = streamResource({ }); // Use in template -// repo.value() — latest emitted value (or undefined) -// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +// repo.value() — latest emitted value (or undefined) +// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +// repo.error() — the thrown error, when status is 'error' +// repo.interrupt() — call to cancel the stream immediately ``` +## Key signals + +| Signal | Type | Description | +|--------|------|-------------| +| `value()` | `T \| undefined` | The latest value emitted by the stream. Starts as `undefined` and updates with each SSE event. | +| `status()` | `'idle' \| 'loading' \| 'streaming' \| 'error'` | Lifecycle state of the current connection. | +| `error()` | `unknown` | The error thrown when `status()` is `'error'`. `undefined` otherwise. | +| `interrupt()` | `() => void` | Closes the active stream without an error — useful for user-initiated cancellation. | + +## When to use + +Use `streamResource` whenever your UI needs to react to a live data stream from the server: + +- **AI / LLM responses** — stream tokens into a chat bubble as they arrive +- **Live feeds** — stock tickers, activity logs, or progress updates +- **Long-running jobs** — subscribe to backend task progress over SSE + +For plain HTTP requests that return a single value and complete, Angular's built-in `resource()` or `httpResource()` is a better fit. + `streamResource` must be called during construction, inside an injection context (e.g. a component constructor, field initializer, or a function @@ -23,4 +46,30 @@ const repo = streamResource({ will throw. +## What's Next + + + + Build your first streaming component end-to-end in under five minutes. + + + Deep dive into SSE lifecycle, error handling, and reconnect strategies. + + + Understand how stream-resource integrates with Angular's reactivity model. 
+ + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/getting-started/installation.mdx b/apps/website/content/docs-v2/getting-started/installation.mdx index 8200f7f67..83ff4fe0f 100644 --- a/apps/website/content/docs-v2/getting-started/installation.mdx +++ b/apps/website/content/docs-v2/getting-started/installation.mdx @@ -32,11 +32,12 @@ Add `provideStreamResource()` to your application configuration. This sets globa // app.config.ts import { ApplicationConfig } from '@angular/core'; import { provideStreamResource } from '@cacheplane/stream-resource'; +import { environment } from '../environments/environment'; export const appConfig: ApplicationConfig = { providers: [ provideStreamResource({ - apiUrl: process.env['LANGGRAPH_URL'] ?? 'http://localhost:2024', + apiUrl: environment.langgraphUrl, }), ], }; @@ -51,7 +52,14 @@ Any option passed to `streamResource()` directly overrides the global provider c -For local development, run a LangGraph server: +For local development, configure your environment and run a LangGraph server: + +```typescript +// src/environments/environment.ts +export const environment = { + langgraphUrl: 'http://localhost:2024', +}; +``` ```bash # Start LangGraph dev server @@ -66,9 +74,10 @@ langgraph dev For production, point to your LangGraph Cloud deployment: ```typescript -provideStreamResource({ - apiUrl: 'https://your-project.langgraph.app', -}) +// src/environments/environment.prod.ts +export const environment = { + langgraphUrl: 'https://your-project.langgraph.app', +}; ``` @@ -76,19 +85,27 @@ provideStreamResource({ ## Verify installation -Create a minimal test to verify the setup works: +Create a minimal component to verify the setup works. `streamResource()` must be called in an injection context (a component field initializer or inside `inject()`). 
```typescript -import { streamResource } from '@cacheplane/stream-resource'; +// In a component field initializer (injection context) +const test = streamResource({ assistantId: 'chat_agent' }); +console.log(test.status()); // 'idle' — setup is correct +``` -// In a component -const test = streamResource({ - assistantId: 'chat_agent', -}); +## Troubleshooting -// If status() returns 'idle', the setup is correct -console.log(test.status()); // 'idle' -``` + + +**Version mismatch** -- If you see errors about missing APIs or unknown decorators, confirm your Angular version is 20 or later. Run `ng version` to check. Earlier versions do not support the injection context APIs that streamResource() relies on. + +**CORS errors** -- If the browser console shows `Access-Control-Allow-Origin` errors, your LangGraph server is not configured for cross-origin requests. The LangGraph dev server allows all origins by default. For production, make sure your deployment's CORS policy includes your Angular app's domain. + +**Connection refused** -- If you see `ERR_CONNECTION_REFUSED`, verify your LangGraph server is running and that the `apiUrl` matches the correct host and port. Run `langgraph dev` and confirm the server starts at the expected address (default `http://localhost:2024`). + +**"NullInjectorError: No provider for StreamResourceConfig"** -- You forgot to add `provideStreamResource()` to your `appConfig` providers array. See the [Configure the provider](#configure-the-provider) section above. 
+ + ## Next steps @@ -99,4 +116,10 @@ console.log(test.status()); // 'idle' Understand how Signals power streamResource + + Graphs, nodes, edges, and state for Angular developers + + + Complete streamResource() function reference + diff --git a/apps/website/content/docs-v2/getting-started/quickstart.mdx b/apps/website/content/docs-v2/getting-started/quickstart.mdx index de1aee2fb..90cce9b3b 100644 --- a/apps/website/content/docs-v2/getting-started/quickstart.mdx +++ b/apps/website/content/docs-v2/getting-started/quickstart.mdx @@ -6,13 +6,15 @@ Build a streaming chat component with streamResource() in 5 minutes. Angular 20+ project with Node.js 18+. If you need setup help, see the [Installation](/docs/getting-started/installation) guide.
    -## 1. Install + + ```bash npm install @cacheplane/stream-resource ``` -## 2. Configure the provider + + Add `provideStreamResource()` to your application config with your LangGraph Platform URL. @@ -29,7 +31,8 @@ export const appConfig: ApplicationConfig = { }; ``` -## 3. Create a chat component + + Use `streamResource()` in a component field initializer. Every property on the returned ref is an Angular Signal. @@ -38,17 +41,19 @@ Use `streamResource()` in a component field initializer. Every property on the r ```typescript // chat.component.ts -import { Component, signal, computed } from '@angular/core'; +import { Component, ChangeDetectionStrategy, signal, computed } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import type { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class ChatComponent { input = signal(''); + // 'chat_agent' maps to the key in your langgraph.json "graphs" config chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', threadId: signal(localStorage.getItem('threadId')), @@ -82,6 +87,11 @@ export class ChatComponent {
    Agent is thinking...
    } + + @if (chat.error(); as err) { +
    {{ err.message }}
    + } +
    -## 4. Start your LangGraph server + + Make sure your LangGraph agent is running at the URL you configured. @@ -104,7 +115,8 @@ Make sure your LangGraph agent is running at the URL you configured. langgraph dev ``` -## 5. Run your app + + ```bash ng serve @@ -112,9 +124,12 @@ ng serve Open `http://localhost:4200` and start chatting with your agent. + + + ## Next steps - + Learn about token-by-token updates and stream modes @@ -124,7 +139,13 @@ Open `http://localhost:4200` and start chatting with your agent. Add human-in-the-loop approval flows - - Test your agent integration deterministically + + Deep dive into how Signals power streamResource + + + Graphs, nodes, edges, and state for Angular developers + + + Complete streamResource() function reference diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index 0a9df886a..3e10aec03 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -6,19 +6,67 @@ StreamResource provides token-by-token streaming from LangGraph agents via Serve Make sure you've completed the Installation guide first. -## Basic streaming +## How streaming works -Create a `streamResource` in your component, pass it a message, and bind to the resulting signals. +Streaming starts on the agent side. LangGraph's `astream()` method controls what data is sent over the SSE connection. On the Angular side, `streamResource()` consumes those events and maps them to Signals. 
+ + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini", streaming=True) + +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +graph = builder.compile() + +# Stream modes control what SSE chunks contain: + +# "values" — full state snapshot after each node +async for chunk in graph.astream( + {"messages": [("user", "Hello")]}, + stream_mode="values", +): + print(chunk) + +# "messages" — individual message tokens as generated +async for chunk in graph.astream( + {"messages": [("user", "Hello")]}, + stream_mode="messages", +): + print(chunk) + +# "events" — raw run events (on_chain_start, on_llm_stream, etc.) +async for event in graph.astream_events( + {"messages": [("user", "Hello")]}, + version="v2", +): + print(event["event"], event.get("data")) +``` + + ```typescript -import { Component, computed } from '@angular/core'; +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; -@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) export class ChatComponent { readonly chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', @@ -60,7 +108,7 @@ The `status()` signal reports the current lifecycle state of the SSE connection: No active stream. The resource is ready to accept a new message. - + Tokens are arriving over the SSE connection. Signal values update in real-time with each chunk. 
@@ -125,11 +173,15 @@ If the SSE connection drops or the agent throws, `status()` transitions to `'err ```typescript -import { Component, computed, effect } from '@angular/core'; +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; -@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) export class ChatComponent { readonly chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index 5519b47be..3c0f114fa 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -6,6 +6,102 @@ Subgraphs let you compose complex agents from smaller, focused units. streamReso LangGraph calls them subgraphs (modular graph composition). Deep Agents calls them subagents (task delegation). streamResource() supports both patterns through the same API. +## How subgraph composition works + +Subgraph composition starts on the agent side. Each subgraph is a fully compiled `StateGraph` that can be added as a node in a parent graph. 
+ + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +# --- Research subgraph --- +def search_web(state: MessagesState) -> dict: + query = state["messages"][-1].content + results = web_search(query) + return {"messages": [{"role": "assistant", "content": results}]} + +def summarize_results(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +research_builder = StateGraph(MessagesState) +research_builder.add_node("search", search_web) +research_builder.add_node("summarize", summarize_results) +research_builder.add_edge(START, "search") +research_builder.add_edge("search", "summarize") +research_builder.add_edge("summarize", END) + +research_subgraph = research_builder.compile() + +# --- Analysis subgraph --- +def analyze_data(state: MessagesState) -> dict: + response = llm.invoke([ + {"role": "system", "content": "Analyze the data and provide insights."}, + *state["messages"], + ]) + return {"messages": [response]} + +analysis_builder = StateGraph(MessagesState) +analysis_builder.add_node("analyze", analyze_data) +analysis_builder.add_edge(START, "analyze") +analysis_builder.add_edge("analyze", END) + +analysis_subgraph = analysis_builder.compile() + +# --- Parent orchestrator --- +def route_task(state: MessagesState) -> str: + last = state["messages"][-1].content.lower() + if "research" in last or "search" in last: + return "research" + return "analyze" + +builder = StateGraph(MessagesState) +builder.add_node("research", research_subgraph) +builder.add_node("analyze", analysis_subgraph) +builder.add_conditional_edges(START, route_task) +builder.add_edge("research", END) +builder.add_edge("analyze", END) + +graph = builder.compile() +``` + + + + +```typescript +import { Component, computed, inject, effect, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource } from 
'@cacheplane/stream-resource'; + +@Component({ + selector: 'app-orchestrator', + templateUrl: './orchestrator.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class OrchestratorComponent { + readonly orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['research', 'analyze'], + }); + + readonly running = computed(() => this.orchestrator.activeSubagents()); + readonly runningCount = computed(() => this.running().length); + + send(text: string) { + this.orchestrator.submit({ + messages: [{ role: 'user', content: text }], + }); + } +} +``` + + + + ## Tracking subagent execution The `subagents()` signal contains a Map of active subagent streams. Use it to inspect the full set of delegated tasks and their current state. @@ -64,7 +160,7 @@ const pipelineStatus = computed(() => { return { total: entries.length, pending: entries.filter(([, a]) => a.status() === 'pending').length, - running: entries.filter(([, a]) => a.status() === 'streaming').length, + running: entries.filter(([, a]) => a.status() === 'loading').length, done: entries.filter(([, a]) => a.status() === 'complete').length, failed: entries.filter(([, a]) => a.status() === 'error').length, }; @@ -78,21 +174,12 @@ Render live progress for each subagent using the signals above. ```typescript -import { computed } from '@angular/core'; +import { Component, computed, inject, ChangeDetectionStrategy } from '@angular/core'; @Component({ selector: 'app-subagent-progress', - template: ` - @for (entry of subagentEntries(); track entry[0]) { -
    - {{ entry[0] }} - {{ entry[1].status() }} - @if (entry[1].status() === 'error') { - {{ entry[1].error()?.message }} - } -
    - } - `, + templateUrl: './progress-panel.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class SubagentProgressComponent { orchestrator = inject(OrchestratorService).resource; @@ -103,9 +190,8 @@ export class SubagentProgressComponent { } ```
    - + ```html - @for (entry of subagentEntries(); track entry[0]) {
    {{ entry[0] }} @@ -113,7 +199,7 @@ export class SubagentProgressComponent { {{ entry[1].status() }} - @if (entry[1].status() === 'streaming') { + @if (entry[1].status() === 'loading') { } @@ -186,6 +272,9 @@ Use **subagents** when tasks are independent and can run in parallel, when each Understand how streamResource() surfaces tokens, status, and errors in real time. + + Inspect earlier states and replay alternate execution paths with checkpoint history. + Write unit and integration tests for orchestrator graphs and subagent interactions. diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index 743025e54..570c1c084 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -6,6 +6,97 @@ Time travel lets you inspect earlier states and replay alternate execution paths Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. Time travel works with any LangGraph agent that persists checkpoints to a thread. +## How checkpointing works + +Time travel depends on checkpointing on the agent side. LangGraph automatically saves a checkpoint after every node execution when you compile your graph with a checkpointer. 
+ + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langgraph.checkpoint.memory import MemorySaver +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +# Compile with a checkpointer to enable time travel +checkpointer = MemorySaver() +graph = builder.compile(checkpointer=checkpointer) + +# Run the graph with a thread ID +config = {"configurable": {"thread_id": "user_123"}} +result = graph.invoke( + {"messages": [("user", "What is LangGraph?")]}, + config=config, +) + +# Browse checkpoint history server-side +for state in graph.get_state_history(config): + print(f"Step: {state.metadata.get('step', '?')}") + print(f" Checkpoint: {state.config['configurable']['checkpoint_id']}") + print(f" Messages: {len(state.values.get('messages', []))}") + +# Replay from a specific checkpoint +past_config = { + "configurable": { + "thread_id": "user_123", + "checkpoint_id": "", + } +} +past_state = graph.get_state(past_config) +``` + + + + +```typescript +import { Component, inject, computed, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; +import { AgentService } from './agent.service'; + +@Component({ + selector: 'app-history-viewer', + templateUrl: './history-viewer.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class HistoryViewerComponent { + private agentService = inject(AgentService); + readonly agent = this.agentService.agent; + + readonly checkpoints = computed(() => this.agent.history()); + readonly checkpointCount = computed(() => this.agent.history().length); + + readonly activeIndex = computed(() => + 
this.checkpoints().length - 1 + ); + + fork(index: number) { + const checkpoint = this.checkpoints()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: 'Try a different approach' }] }, + { checkpoint: checkpoint.checkpoint } + ); + } + + formatTime(isoString: string): string { + return new Date(isoString).toLocaleTimeString(); + } +} +``` + + + + ## Browsing execution history The `history()` signal contains an array of `ThreadState` checkpoints ordered from oldest to newest. Each checkpoint captures the complete agent state at that point in execution, including messages, intermediate results, and any custom state fields. @@ -79,13 +170,14 @@ Expose checkpoint history directly in your component to let users scrub through ```typescript -import { Component, inject, computed } from '@angular/core'; +import { Component, inject, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { AgentService } from './agent.service'; @Component({ selector: 'app-history-viewer', templateUrl: './history-viewer.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class HistoryViewerComponent { private agentService = inject(AgentService); @@ -152,6 +244,43 @@ compareCheckpoints(indexA: number, indexB: number) { Use the comparison result to render a diff view, highlight changed fields in your UI, or log what the agent modified during a specific step. +## Replaying with modified input + +Combine forking with new input to explore how the agent would have responded differently. This is the core of the undo/redo experience. 
+ +```typescript +@Component({ + selector: 'app-replay', + templateUrl: './replay.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ReplayComponent { + readonly agent = inject(AgentService).agent; + + readonly history = computed(() => this.agent.history()); + readonly canUndo = computed(() => this.history().length > 1); + + undo() { + const history = this.history(); + if (history.length < 2) return; + + // Go back one step + const previousCheckpoint = history[history.length - 2]; + this.agent.submit(undefined, { + checkpoint: previousCheckpoint.checkpoint, + }); + } + + replayWith(index: number, newMessage: string) { + const checkpoint = this.history()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: newMessage }] }, + { checkpoint: checkpoint.checkpoint } + ); + } +} +``` + Time travel is most useful during development. Inspect why an agent chose a particular path by comparing adjacent checkpoints, then fork to test alternatives without restarting the conversation. Combine `history()` with Angular DevTools to watch checkpoint arrays update in real time as the agent streams. @@ -165,6 +294,9 @@ Time travel is most useful during development. Inspect why an agent chose a part Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. + + Compose multi-agent systems with orchestrators and track subagent execution. + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. 
From 14e8c8045e7db930fedc8822625e976848d8d6e3 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sat, 4 Apr 2026 17:10:44 -0700 Subject: [PATCH 010/187] =?UTF-8?q?docs:=20Phase=202+3=20=E2=80=94=20polis?= =?UTF-8?q?h=20guides,=20fix=20quickstart/install,=20expand=20API=20pages?= =?UTF-8?q?=20(#13)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs(website): polish quickstart and installation with fixes * docs(website): expand API reference pages with navigation and context * docs(website): polish streaming, time-travel, subgraphs with Python code * fix(website): fix Callout type warn in installation --- .../docs-v2/api/fetch-stream-transport.mdx | 43 +++++- .../docs-v2/api/mock-stream-transport.mdx | 87 ++++++++++-- .../docs-v2/api/provide-stream-resource.mdx | 48 ++++++- .../content/docs-v2/api/stream-resource.mdx | 53 ++++++- .../docs-v2/getting-started/installation.mdx | 51 +++++-- .../docs-v2/getting-started/quickstart.mdx | 39 +++-- .../content/docs-v2/guides/streaming.mdx | 66 ++++++++- .../content/docs-v2/guides/subgraphs.mdx | 121 +++++++++++++--- .../content/docs-v2/guides/time-travel.mdx | 134 +++++++++++++++++- 9 files changed, 582 insertions(+), 60 deletions(-) diff --git a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx index be313baed..790698e84 100644 --- a/apps/website/content/docs-v2/api/fetch-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/fetch-stream-transport.mdx @@ -2,7 +2,12 @@ `FetchStreamTransport` is the production-ready transport that opens a real server-sent event connection using the browser's `fetch` API and reads a `ReadableStream` response body. It is the default transport you register with `provideStreamResource` in production builds. -You rarely need to interact with `FetchStreamTransport` directly — simply provide it once at the application level and every `streamResource` will use it automatically. 
You would reach for it explicitly only when constructing a resource outside the normal DI tree or when you need to override the transport for a single resource while keeping the global default intact. +## When you interact with it directly + +In most apps you will never import or inject `FetchStreamTransport` by name — you register it once in `provideStreamResource` and forget about it. The two cases where you reach for it explicitly are: + +1. **Per-resource override** — you want one resource to use a different transport than the global default while everything else stays on `FetchStreamTransport`. +2. **Outside the DI tree** — you are constructing a resource in a context where global providers are not available and you need to supply the transport manually. ```ts import { inject } from '@angular/core'; @@ -15,10 +20,46 @@ const events = streamResource({ }); ``` +## How it works + +`FetchStreamTransport` makes a `fetch` call to the given URL and expects the server to respond with `Content-Type: text/event-stream`. It then reads the `ReadableStream` body line-by-line, parses SSE `data:` fields, and emits each parsed JSON value into the resource signal. + +The transport handles: + +- **Backpressure** — reads chunks at the pace the browser delivers them +- **Cancellation** — aborts the underlying `fetch` when `interrupt()` is called or the resource is destroyed +- **Error propagation** — network errors and non-2xx responses surface through `resource.error()` + `FetchStreamTransport` implements the `StreamTransport` interface. You can create custom transports (e.g. WebSocket-backed) by implementing the same interface and providing them in place of this class. +## What's Next + + + + Learn how the SSE lifecycle maps to resource signals and how to handle reconnects. + + + Server configuration for SSE: headers, timeouts, and edge runtime considerations. + + + The test-time counterpart — push values synchronously without a real server. 
+ + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/mock-stream-transport.mdx b/apps/website/content/docs-v2/api/mock-stream-transport.mdx index d9ebd13c8..31fbb93fd 100644 --- a/apps/website/content/docs-v2/api/mock-stream-transport.mdx +++ b/apps/website/content/docs-v2/api/mock-stream-transport.mdx @@ -2,31 +2,100 @@ `MockStreamTransport` is a test-friendly transport that replaces real network calls with an in-memory event emitter. Use it in unit and component tests to push values on demand and assert against your component's reactive state without a running server. +## Complete test example + +The pattern below covers the full lifecycle: configure the transport in `TestBed`, create the component, emit values, and assert signal state. + ```ts +import { Component, inject } from '@angular/core'; import { TestBed } from '@angular/core/testing'; import { provideStreamResource, MockStreamTransport, + streamResource, } from '@cacheplane/stream-resource'; -beforeEach(() => { - TestBed.configureTestingModule({ - providers: [provideStreamResource({ transport: MockStreamTransport })], +@Component({ template: '' }) +class RepoComponent { + readonly repo = streamResource<{ name: string }>({ + url: () => '/api/repos/42', }); -}); +} -it('reflects streamed value', () => { - const transport = TestBed.inject(MockStreamTransport); - // Emit a value into the stream - transport.emit('/api/repos/42', { id: 42, name: 'my-repo' }); - // Assert your component's signal updated accordingly +describe('RepoComponent', () => { + let transport: MockStreamTransport; + + beforeEach(() => { + TestBed.configureTestingModule({ + imports: [RepoComponent], + providers: [provideStreamResource({ transport: MockStreamTransport })], + }); + transport = TestBed.inject(MockStreamTransport); + }); + + it('reflects the streamed value', () => { + const fixture = TestBed.createComponent(RepoComponent); + fixture.detectChanges(); + + // Push a 
value into the stream — synchronous, no fakeAsync needed + transport.emit('/api/repos/42', { name: 'my-repo' }); + fixture.detectChanges(); + + expect(fixture.componentInstance.repo.value()).toEqual({ name: 'my-repo' }); + expect(fixture.componentInstance.repo.status()).toBe('streaming'); + }); + + it('surfaces errors through the error signal', () => { + const fixture = TestBed.createComponent(RepoComponent); + fixture.detectChanges(); + + transport.error('/api/repos/42', new Error('not found')); + fixture.detectChanges(); + + expect(fixture.componentInstance.repo.status()).toBe('error'); + expect(fixture.componentInstance.repo.error()).toBeInstanceOf(Error); + }); }); ``` +## MockStreamTransport API + +| Method | Description | +|--------|-------------| +| `emit(url, value)` | Pushes a single value into the stream at the given URL path. | +| `error(url, err)` | Triggers an error on the stream at the given URL path. | +| `complete(url)` | Closes the stream cleanly, as if the server sent the final event. | + Because `MockStreamTransport` is synchronous by default, you can emit values and assert state changes in the same test tick — no `fakeAsync` or `tick` required. +## What's Next + + + + Full testing patterns including component harnesses and multi-stream scenarios. + + + The production transport that MockStreamTransport replaces in tests. + + + Full reference for the primitive you are testing against. + + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/provide-stream-resource.mdx b/apps/website/content/docs-v2/api/provide-stream-resource.mdx index 4863cf489..3375f0b2f 100644 --- a/apps/website/content/docs-v2/api/provide-stream-resource.mdx +++ b/apps/website/content/docs-v2/api/provide-stream-resource.mdx @@ -1,6 +1,8 @@ # provideStreamResource() -`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. 
Call it inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. +`provideStreamResource` is the provider factory that registers `stream-resource` in Angular's dependency injection system. Call it once inside `bootstrapApplication` (or an `ApplicationConfig`) to configure the transport and any global defaults used by every `streamResource` in your app. + +This is the single configuration point for the entire library. Rather than configuring each resource individually, you declare your transport here and every `streamResource` call throughout the app inherits it automatically. ```ts import { bootstrapApplication } from '@angular/platform-browser'; @@ -19,10 +21,54 @@ bootstrapApplication(AppComponent, { }); ``` +## Global configuration + +| Option | Type | Description | +|--------|------|-------------| +| `transport` | `Type` | The transport class to inject when resources request `StreamTransport`. Required. | + +## Swapping transports by environment + +Because `provideStreamResource` accepts a class token, you can vary the transport based on your environment without touching any component code: + +```ts +// main.ts — production +provideStreamResource({ transport: FetchStreamTransport }) + +// main.spec.ts / TestBed — tests +provideStreamResource({ transport: MockStreamTransport }) +``` + Swap `FetchStreamTransport` for `MockStreamTransport` (or any custom class implementing the `StreamTransport` interface) to change the transport for all resources at once — useful for testing or SSR. +## What's Next + + + + Step-by-step setup guide including peer dependencies and NgModule support. + + + Configure transports for production, SSR, and edge runtimes. + + + Full reference for the core primitive you configure here. 
+ + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/api/stream-resource.mdx b/apps/website/content/docs-v2/api/stream-resource.mdx index e383d3164..fb4135410 100644 --- a/apps/website/content/docs-v2/api/stream-resource.mdx +++ b/apps/website/content/docs-v2/api/stream-resource.mdx @@ -2,6 +2,8 @@ `streamResource` is the core primitive of the library. It creates a reactive resource that opens a server-sent event stream, tracks loading and error states, and exposes the latest emitted value — all within Angular's signal-based reactivity model. +When the `url` signal changes, the resource tears down the previous connection and opens a fresh one automatically. You never write subscription management or cleanup logic yourself. + ```ts import { streamResource } from '@cacheplane/stream-resource'; @@ -12,10 +14,31 @@ const repo = streamResource({ }); // Use in template -// repo.value() — latest emitted value (or undefined) -// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +// repo.value() — latest emitted value (or undefined) +// repo.status() — 'idle' | 'loading' | 'streaming' | 'error' +// repo.error() — the thrown error, when status is 'error' +// repo.interrupt() — call to cancel the stream immediately ``` +## Key signals + +| Signal | Type | Description | +|--------|------|-------------| +| `value()` | `T \| undefined` | The latest value emitted by the stream. Starts as `undefined` and updates with each SSE event. | +| `status()` | `'idle' \| 'loading' \| 'streaming' \| 'error'` | Lifecycle state of the current connection. | +| `error()` | `unknown` | The error thrown when `status()` is `'error'`. `undefined` otherwise. | +| `interrupt()` | `() => void` | Closes the active stream without an error — useful for user-initiated cancellation. 
| + +## When to use + +Use `streamResource` whenever your UI needs to react to a live data stream from the server: + +- **AI / LLM responses** — stream tokens into a chat bubble as they arrive +- **Live feeds** — stock tickers, activity logs, or progress updates +- **Long-running jobs** — subscribe to backend task progress over SSE + +For plain HTTP requests that return a single value and complete, Angular's built-in `resource()` or `httpResource()` is a better fit. + `streamResource` must be called during construction, inside an injection context (e.g. a component constructor, field initializer, or a function @@ -23,4 +46,30 @@ const repo = streamResource({ will throw. +## What's Next + + + + Build your first streaming component end-to-end in under five minutes. + + + Deep dive into SSE lifecycle, error handling, and reconnect strategies. + + + Understand how stream-resource integrates with Angular's reactivity model. + + + {/* Auto-rendered from api-docs.json — see page component */} diff --git a/apps/website/content/docs-v2/getting-started/installation.mdx b/apps/website/content/docs-v2/getting-started/installation.mdx index 8200f7f67..83ff4fe0f 100644 --- a/apps/website/content/docs-v2/getting-started/installation.mdx +++ b/apps/website/content/docs-v2/getting-started/installation.mdx @@ -32,11 +32,12 @@ Add `provideStreamResource()` to your application configuration. This sets globa // app.config.ts import { ApplicationConfig } from '@angular/core'; import { provideStreamResource } from '@cacheplane/stream-resource'; +import { environment } from '../environments/environment'; export const appConfig: ApplicationConfig = { providers: [ provideStreamResource({ - apiUrl: process.env['LANGGRAPH_URL'] ?? 
'http://localhost:2024', + apiUrl: environment.langgraphUrl, }), ], }; @@ -51,7 +52,14 @@ Any option passed to `streamResource()` directly overrides the global provider c -For local development, run a LangGraph server: +For local development, configure your environment and run a LangGraph server: + +```typescript +// src/environments/environment.ts +export const environment = { + langgraphUrl: 'http://localhost:2024', +}; +``` ```bash # Start LangGraph dev server @@ -66,9 +74,10 @@ langgraph dev For production, point to your LangGraph Cloud deployment: ```typescript -provideStreamResource({ - apiUrl: 'https://your-project.langgraph.app', -}) +// src/environments/environment.prod.ts +export const environment = { + langgraphUrl: 'https://your-project.langgraph.app', +}; ``` @@ -76,19 +85,27 @@ provideStreamResource({ ## Verify installation -Create a minimal test to verify the setup works: +Create a minimal component to verify the setup works. `streamResource()` must be called in an injection context (a component field initializer or inside `inject()`). ```typescript -import { streamResource } from '@cacheplane/stream-resource'; +// In a component field initializer (injection context) +const test = streamResource({ assistantId: 'chat_agent' }); +console.log(test.status()); // 'idle' — setup is correct +``` -// In a component -const test = streamResource({ - assistantId: 'chat_agent', -}); +## Troubleshooting -// If status() returns 'idle', the setup is correct -console.log(test.status()); // 'idle' -``` + + +**Version mismatch** -- If you see errors about missing APIs or unknown decorators, confirm your Angular version is 20 or later. Run `ng version` to check. Earlier versions do not support the injection context APIs that streamResource() relies on. + +**CORS errors** -- If the browser console shows `Access-Control-Allow-Origin` errors, your LangGraph server is not configured for cross-origin requests. The LangGraph dev server allows all origins by default. 
For production, make sure your deployment's CORS policy includes your Angular app's domain. + +**Connection refused** -- If you see `ERR_CONNECTION_REFUSED`, verify your LangGraph server is running and that the `apiUrl` matches the correct host and port. Run `langgraph dev` and confirm the server starts at the expected address (default `http://localhost:2024`). + +**"NullInjectorError: No provider for StreamResourceConfig"** -- You forgot to add `provideStreamResource()` to your `appConfig` providers array. See the [Configure the provider](#configure-the-provider) section above. + + ## Next steps @@ -99,4 +116,10 @@ console.log(test.status()); // 'idle' Understand how Signals power streamResource + + Graphs, nodes, edges, and state for Angular developers + + + Complete streamResource() function reference + diff --git a/apps/website/content/docs-v2/getting-started/quickstart.mdx b/apps/website/content/docs-v2/getting-started/quickstart.mdx index de1aee2fb..90cce9b3b 100644 --- a/apps/website/content/docs-v2/getting-started/quickstart.mdx +++ b/apps/website/content/docs-v2/getting-started/quickstart.mdx @@ -6,13 +6,15 @@ Build a streaming chat component with streamResource() in 5 minutes. Angular 20+ project with Node.js 18+. If you need setup help, see the [Installation](/docs/getting-started/installation) guide. -## 1. Install + + ```bash npm install @cacheplane/stream-resource ``` -## 2. Configure the provider + + Add `provideStreamResource()` to your application config with your LangGraph Platform URL. @@ -29,7 +31,8 @@ export const appConfig: ApplicationConfig = { }; ``` -## 3. Create a chat component + + Use `streamResource()` in a component field initializer. Every property on the returned ref is an Angular Signal. @@ -38,17 +41,19 @@ Use `streamResource()` in a component field initializer. 
Every property on the r ```typescript // chat.component.ts -import { Component, signal, computed } from '@angular/core'; +import { Component, ChangeDetectionStrategy, signal, computed } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import type { BaseMessage } from '@langchain/core/messages'; @Component({ selector: 'app-chat', templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class ChatComponent { input = signal(''); + // 'chat_agent' maps to the key in your langgraph.json "graphs" config chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', threadId: signal(localStorage.getItem('threadId')), @@ -82,6 +87,11 @@ export class ChatComponent {
    Agent is thinking...
    } + + @if (chat.error(); as err) { +
    {{ err.message }}
    + } +
    -## 4. Start your LangGraph server + + Make sure your LangGraph agent is running at the URL you configured. @@ -104,7 +115,8 @@ Make sure your LangGraph agent is running at the URL you configured. langgraph dev ``` -## 5. Run your app + + ```bash ng serve @@ -112,9 +124,12 @@ ng serve Open `http://localhost:4200` and start chatting with your agent. + + + ## Next steps - + Learn about token-by-token updates and stream modes @@ -124,7 +139,13 @@ Open `http://localhost:4200` and start chatting with your agent. Add human-in-the-loop approval flows - - Test your agent integration deterministically + + Deep dive into how Signals power streamResource + + + Graphs, nodes, edges, and state for Angular developers + + + Complete streamResource() function reference diff --git a/apps/website/content/docs-v2/guides/streaming.mdx b/apps/website/content/docs-v2/guides/streaming.mdx index 0a9df886a..3e10aec03 100644 --- a/apps/website/content/docs-v2/guides/streaming.mdx +++ b/apps/website/content/docs-v2/guides/streaming.mdx @@ -6,19 +6,67 @@ StreamResource provides token-by-token streaming from LangGraph agents via Serve Make sure you've completed the Installation guide first. -## Basic streaming +## How streaming works -Create a `streamResource` in your component, pass it a message, and bind to the resulting signals. +Streaming starts on the agent side. LangGraph's `astream()` method controls what data is sent over the SSE connection. On the Angular side, `streamResource()` consumes those events and maps them to Signals. 
+ + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini", streaming=True) + +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +graph = builder.compile() + +# Stream modes control what SSE chunks contain: + +# "values" — full state snapshot after each node +async for chunk in graph.astream( + {"messages": [("user", "Hello")]}, + stream_mode="values", +): + print(chunk) + +# "messages" — individual message tokens as generated +async for chunk in graph.astream( + {"messages": [("user", "Hello")]}, + stream_mode="messages", +): + print(chunk) + +# "events" — raw run events (on_chain_start, on_llm_stream, etc.) +async for event in graph.astream_events( + {"messages": [("user", "Hello")]}, + version="v2", +): + print(event["event"], event.get("data")) +``` + + ```typescript -import { Component, computed } from '@angular/core'; +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; -@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) export class ChatComponent { readonly chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', @@ -60,7 +108,7 @@ The `status()` signal reports the current lifecycle state of the SSE connection: No active stream. The resource is ready to accept a new message. - + Tokens are arriving over the SSE connection. Signal values update in real-time with each chunk. 
@@ -125,11 +173,15 @@ If the SSE connection drops or the agent throws, `status()` transitions to `'err ```typescript -import { Component, computed, effect } from '@angular/core'; +import { Component, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { BaseMessage } from '@langchain/core/messages'; -@Component({ selector: 'app-chat', templateUrl: './chat.component.html' }) +@Component({ + selector: 'app-chat', + templateUrl: './chat.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) export class ChatComponent { readonly chat = streamResource<{ messages: BaseMessage[] }>({ assistantId: 'chat_agent', diff --git a/apps/website/content/docs-v2/guides/subgraphs.mdx b/apps/website/content/docs-v2/guides/subgraphs.mdx index 5519b47be..3c0f114fa 100644 --- a/apps/website/content/docs-v2/guides/subgraphs.mdx +++ b/apps/website/content/docs-v2/guides/subgraphs.mdx @@ -6,6 +6,102 @@ Subgraphs let you compose complex agents from smaller, focused units. streamReso LangGraph calls them subgraphs (modular graph composition). Deep Agents calls them subagents (task delegation). streamResource() supports both patterns through the same API. +## How subgraph composition works + +Subgraph composition starts on the agent side. Each subgraph is a fully compiled `StateGraph` that can be added as a node in a parent graph. 
+ + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +# --- Research subgraph --- +def search_web(state: MessagesState) -> dict: + query = state["messages"][-1].content + results = web_search(query) + return {"messages": [{"role": "assistant", "content": results}]} + +def summarize_results(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +research_builder = StateGraph(MessagesState) +research_builder.add_node("search", search_web) +research_builder.add_node("summarize", summarize_results) +research_builder.add_edge(START, "search") +research_builder.add_edge("search", "summarize") +research_builder.add_edge("summarize", END) + +research_subgraph = research_builder.compile() + +# --- Analysis subgraph --- +def analyze_data(state: MessagesState) -> dict: + response = llm.invoke([ + {"role": "system", "content": "Analyze the data and provide insights."}, + *state["messages"], + ]) + return {"messages": [response]} + +analysis_builder = StateGraph(MessagesState) +analysis_builder.add_node("analyze", analyze_data) +analysis_builder.add_edge(START, "analyze") +analysis_builder.add_edge("analyze", END) + +analysis_subgraph = analysis_builder.compile() + +# --- Parent orchestrator --- +def route_task(state: MessagesState) -> str: + last = state["messages"][-1].content.lower() + if "research" in last or "search" in last: + return "research" + return "analyze" + +builder = StateGraph(MessagesState) +builder.add_node("research", research_subgraph) +builder.add_node("analyze", analysis_subgraph) +builder.add_conditional_edges(START, route_task) +builder.add_edge("research", END) +builder.add_edge("analyze", END) + +graph = builder.compile() +``` + + + + +```typescript +import { Component, computed, inject, effect, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource } from 
'@cacheplane/stream-resource'; + +@Component({ + selector: 'app-orchestrator', + templateUrl: './orchestrator.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class OrchestratorComponent { + readonly orchestrator = streamResource({ + assistantId: 'orchestrator', + subagentToolNames: ['research', 'analyze'], + }); + + readonly running = computed(() => this.orchestrator.activeSubagents()); + readonly runningCount = computed(() => this.running().length); + + send(text: string) { + this.orchestrator.submit({ + messages: [{ role: 'user', content: text }], + }); + } +} +``` + + + + ## Tracking subagent execution The `subagents()` signal contains a Map of active subagent streams. Use it to inspect the full set of delegated tasks and their current state. @@ -64,7 +160,7 @@ const pipelineStatus = computed(() => { return { total: entries.length, pending: entries.filter(([, a]) => a.status() === 'pending').length, - running: entries.filter(([, a]) => a.status() === 'streaming').length, + running: entries.filter(([, a]) => a.status() === 'loading').length, done: entries.filter(([, a]) => a.status() === 'complete').length, failed: entries.filter(([, a]) => a.status() === 'error').length, }; @@ -78,21 +174,12 @@ Render live progress for each subagent using the signals above. ```typescript -import { computed } from '@angular/core'; +import { Component, computed, inject, ChangeDetectionStrategy } from '@angular/core'; @Component({ selector: 'app-subagent-progress', - template: ` - @for (entry of subagentEntries(); track entry[0]) { -
    - {{ entry[0] }} - {{ entry[1].status() }} - @if (entry[1].status() === 'error') { - {{ entry[1].error()?.message }} - } -
    - } - `, + templateUrl: './progress-panel.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class SubagentProgressComponent { orchestrator = inject(OrchestratorService).resource; @@ -103,9 +190,8 @@ export class SubagentProgressComponent { } ```
    - + ```html - @for (entry of subagentEntries(); track entry[0]) {
    {{ entry[0] }} @@ -113,7 +199,7 @@ export class SubagentProgressComponent { {{ entry[1].status() }} - @if (entry[1].status() === 'streaming') { + @if (entry[1].status() === 'loading') { } @@ -186,6 +272,9 @@ Use **subagents** when tasks are independent and can run in parallel, when each Understand how streamResource() surfaces tokens, status, and errors in real time. + + Inspect earlier states and replay alternate execution paths with checkpoint history. + Write unit and integration tests for orchestrator graphs and subagent interactions. diff --git a/apps/website/content/docs-v2/guides/time-travel.mdx b/apps/website/content/docs-v2/guides/time-travel.mdx index 743025e54..570c1c084 100644 --- a/apps/website/content/docs-v2/guides/time-travel.mdx +++ b/apps/website/content/docs-v2/guides/time-travel.mdx @@ -6,6 +6,97 @@ Time travel lets you inspect earlier states and replay alternate execution paths Debug agent decisions, explore alternate paths, and build undo/redo experiences for your users. Time travel works with any LangGraph agent that persists checkpoints to a thread. +## How checkpointing works + +Time travel depends on checkpointing on the agent side. LangGraph automatically saves a checkpoint after every node execution when you compile your graph with a checkpointer. 
+ + + + +```python +from langgraph.graph import END, START, MessagesState, StateGraph +from langgraph.checkpoint.memory import MemorySaver +from langchain_openai import ChatOpenAI + +llm = ChatOpenAI(model="gpt-5-mini") + +def call_model(state: MessagesState) -> dict: + response = llm.invoke(state["messages"]) + return {"messages": [response]} + +builder = StateGraph(MessagesState) +builder.add_node("call_model", call_model) +builder.add_edge(START, "call_model") +builder.add_edge("call_model", END) + +# Compile with a checkpointer to enable time travel +checkpointer = MemorySaver() +graph = builder.compile(checkpointer=checkpointer) + +# Run the graph with a thread ID +config = {"configurable": {"thread_id": "user_123"}} +result = graph.invoke( + {"messages": [("user", "What is LangGraph?")]}, + config=config, +) + +# Browse checkpoint history server-side +for state in graph.get_state_history(config): + print(f"Step: {state.metadata.get('step', '?')}") + print(f" Checkpoint: {state.config['configurable']['checkpoint_id']}") + print(f" Messages: {len(state.values.get('messages', []))}") + +# Replay from a specific checkpoint +past_config = { + "configurable": { + "thread_id": "user_123", + "checkpoint_id": "", + } +} +past_state = graph.get_state(past_config) +``` + + + + +```typescript +import { Component, inject, computed, ChangeDetectionStrategy } from '@angular/core'; +import { streamResource } from '@cacheplane/stream-resource'; +import { AgentService } from './agent.service'; + +@Component({ + selector: 'app-history-viewer', + templateUrl: './history-viewer.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class HistoryViewerComponent { + private agentService = inject(AgentService); + readonly agent = this.agentService.agent; + + readonly checkpoints = computed(() => this.agent.history()); + readonly checkpointCount = computed(() => this.agent.history().length); + + readonly activeIndex = computed(() => + 
this.checkpoints().length - 1 + ); + + fork(index: number) { + const checkpoint = this.checkpoints()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: 'Try a different approach' }] }, + { checkpoint: checkpoint.checkpoint } + ); + } + + formatTime(isoString: string): string { + return new Date(isoString).toLocaleTimeString(); + } +} +``` + + + + ## Browsing execution history The `history()` signal contains an array of `ThreadState` checkpoints ordered from oldest to newest. Each checkpoint captures the complete agent state at that point in execution, including messages, intermediate results, and any custom state fields. @@ -79,13 +170,14 @@ Expose checkpoint history directly in your component to let users scrub through ```typescript -import { Component, inject, computed } from '@angular/core'; +import { Component, inject, computed, ChangeDetectionStrategy } from '@angular/core'; import { streamResource } from '@cacheplane/stream-resource'; import { AgentService } from './agent.service'; @Component({ selector: 'app-history-viewer', templateUrl: './history-viewer.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, }) export class HistoryViewerComponent { private agentService = inject(AgentService); @@ -152,6 +244,43 @@ compareCheckpoints(indexA: number, indexB: number) { Use the comparison result to render a diff view, highlight changed fields in your UI, or log what the agent modified during a specific step. +## Replaying with modified input + +Combine forking with new input to explore how the agent would have responded differently. This is the core of the undo/redo experience. 
+ +```typescript +@Component({ + selector: 'app-replay', + templateUrl: './replay.component.html', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class ReplayComponent { + readonly agent = inject(AgentService).agent; + + readonly history = computed(() => this.agent.history()); + readonly canUndo = computed(() => this.history().length > 1); + + undo() { + const history = this.history(); + if (history.length < 2) return; + + // Go back one step + const previousCheckpoint = history[history.length - 2]; + this.agent.submit(undefined, { + checkpoint: previousCheckpoint.checkpoint, + }); + } + + replayWith(index: number, newMessage: string) { + const checkpoint = this.history()[index]; + this.agent.submit( + { messages: [{ role: 'user', content: newMessage }] }, + { checkpoint: checkpoint.checkpoint } + ); + } +} +``` + Time travel is most useful during development. Inspect why an agent chose a particular path by comparing adjacent checkpoints, then fork to test alternatives without restarting the conversation. Combine `history()` with Angular DevTools to watch checkpoint arrays update in real time as the agent streams. @@ -165,6 +294,9 @@ Time travel is most useful during development. Inspect why an agent chose a part Understand how streamResource() surfaces incremental updates and how history integrates with live streaming state. + + Compose multi-agent systems with orchestrators and track subagent execution. + Full reference for streamResource() options, signals, and the submit() API including checkpoint parameters. From 89c2ecf2bcd3d121f7f2f28139ba68cf8047e235 Mon Sep 17 00:00:00 2001 From: Brian Love Date: Sun, 5 Apr 2026 07:15:52 -0700 Subject: [PATCH 011/187] feat(cockpit): complete cockpit application with 14 capability examples (#2) Cockpit is an integrated developer reference surface for @cacheplane/stream-resource. 
It embeds running Angular examples, shows syntax-highlighted source code, renders tutorial documentation, and displays auto-generated API references for each capability. Shared libraries: - @cacheplane/design-tokens: colors, glass, gradient, glow, typography tokens - @cacheplane/ui-react: GlassPanel, GlassButton, Callout, Steps, Tabs, Card, CodeGroup, NavLink - @cacheplane/chat: Angular chat component library (cp-chat, cp-chat-message, cp-chat-input) 14 capability examples (Angular + Python + docs + e2e each): - LangGraph: streaming, persistence, interrupts, memory, durable-execution, subgraphs, time-travel, deployment-runtime - Deep Agents: planning, filesystem, subagents, memory, skills, sandboxes Unified harness: - Capability registry as single source of truth - Serve orchestrator with --capability and --all modes - Per-capability Nx serve targets - CI build job for all Angular apps - LangGraph deployment matrix for all 14 backends - Smoke e2e test suite (28 tests: 14 UI render + 14 send/receive) Light glassmorphism theme matching the website design with frosted glass panels, warm-to-cool gradient backgrounds, and EB Garamond/Inter/JetBrains Mono typography. 
--- .claude/launch.json | 6 + .github/workflows/ci.yml | 14 +- .github/workflows/deploy-langgraph.yml | 67 + .gitignore | 7 + apps/cockpit/e2e/all-examples-smoke.spec.ts | 65 + apps/cockpit/package.json | 8 +- apps/cockpit/postcss.config.mjs | 6 + apps/cockpit/project.json | 161 ++ apps/cockpit/scripts/capability-registry.ts | 38 + .../scripts/generate-combined-langgraph.ts | 13 + apps/cockpit/scripts/serve-example.ts | 41 + apps/cockpit/src/app/[...slug]/page.tsx | 12 +- apps/cockpit/src/app/cockpit.css | 503 +++-- apps/cockpit/src/app/layout.tsx | 13 +- apps/cockpit/src/app/page.tsx | 5 +- .../src/components/api-mode/api-mode.spec.tsx | 64 + .../src/components/api-mode/api-mode.tsx | 161 ++ apps/cockpit/src/components/cockpit-shell.tsx | 162 +- .../components/code-mode/code-mode.spec.tsx | 108 +- .../src/components/code-mode/code-mode.tsx | 132 +- .../src/components/code-pane/code-pane.tsx | 2 +- .../components/docs-mode/docs-mode.spec.tsx | 31 - .../src/components/docs-mode/docs-mode.tsx | 62 - .../src/components/docs-pane/docs-pane.tsx | 24 - .../src/components/language-switcher.spec.tsx | 88 - .../src/components/language-switcher.tsx | 35 - .../components/modes/mode-switcher.spec.tsx | 16 +- .../src/components/modes/mode-switcher.tsx | 69 +- .../narrative-docs/narrative-docs.spec.tsx | 23 + .../narrative-docs/narrative-docs.tsx | 69 + .../components/navigation/navigation-tree.tsx | 31 - .../src/components/pane-rendering.spec.tsx | 43 +- .../prompt-drawer/prompt-drawer.spec.tsx | 69 - .../prompt-drawer/prompt-drawer.tsx | 78 - .../components/prompt-pane/prompt-pane.tsx | 18 - .../src/components/run-mode/run-mode.spec.tsx | 26 +- .../src/components/run-mode/run-mode.tsx | 40 +- .../sidebar/cockpit-sidebar.spec.tsx | 5 +- .../components/sidebar/cockpit-sidebar.tsx | 17 +- .../sidebar/language-picker.spec.tsx | 2 +- .../components/sidebar/language-picker.tsx | 77 +- .../components/sidebar/navigation-groups.tsx | 137 +- apps/cockpit/src/components/ui/tabs.tsx | 55 
+ apps/cockpit/src/lib/cockpit-page.ts | 4 +- apps/cockpit/src/lib/content-bundle.spec.ts | 195 ++ apps/cockpit/src/lib/content-bundle.ts | 162 ++ apps/cockpit/src/lib/extract-docs.spec.ts | 77 + apps/cockpit/src/lib/extract-docs.ts | 161 ++ apps/cockpit/src/lib/render-markdown.spec.ts | 107 + apps/cockpit/src/lib/render-markdown.ts | 187 ++ apps/cockpit/src/lib/route-resolution.spec.ts | 42 +- apps/cockpit/src/lib/route-resolution.ts | 40 +- apps/cockpit/src/lib/utils.ts | 1 + apps/cockpit/tsconfig.json | 10 +- apps/website/src/app/page.tsx | 2 +- .../src/components/docs/ApiDocRenderer.tsx | 2 +- .../src/components/docs/ApiRefTable.tsx | 2 +- .../src/components/docs/CopyPromptButton.tsx | 2 +- .../src/components/docs/DocsBreadcrumb.tsx | 2 +- .../src/components/docs/DocsPrevNext.tsx | 2 +- .../src/components/docs/DocsSearch.tsx | 2 +- .../src/components/docs/DocsSidebarNew.tsx | 2 +- .../src/components/docs/mdx/Callout.tsx | 2 +- apps/website/src/components/docs/mdx/Card.tsx | 2 +- .../src/components/docs/mdx/CodeGroup.tsx | 2 +- .../website/src/components/docs/mdx/Steps.tsx | 2 +- .../src/components/landing/ArchDiagram.tsx | 2 +- .../src/components/landing/CapabilityCard.tsx | 2 +- .../src/components/landing/CockpitCTA.tsx | 2 +- .../src/components/landing/CodeBlock.tsx | 2 +- .../components/landing/DeepAgentsShowcase.tsx | 2 +- .../src/components/landing/FeatureStrip.tsx | 2 +- .../components/landing/GenerativeUIFrame.tsx | 2 +- .../src/components/landing/HeroTwoCol.tsx | 2 +- .../components/landing/LangGraphShowcase.tsx | 2 +- .../src/components/landing/StatsStrip.tsx | 2 +- .../src/components/landing/ValueProps.tsx | 2 +- .../src/components/landing/ValuePropsTabs.tsx | 2 +- .../src/components/pricing/CompareTable.tsx | 2 +- .../src/components/pricing/LeadForm.tsx | 2 +- .../src/components/pricing/PricingGrid.tsx | 2 +- apps/website/src/components/shared/Footer.tsx | 2 +- .../src/components/shared/InstallStrip.tsx | 2 +- 
apps/website/src/components/shared/Nav.tsx | 2 +- apps/website/tsconfig.json | 3 +- .../filesystem/angular/e2e/filesystem.spec.ts | 21 + .../filesystem/angular/package.json | 11 + .../filesystem/angular/project.json | 38 + .../filesystem/angular/proxy.conf.json | 9 + .../filesystem/angular/src/app/app.config.ts | 18 + .../angular/src/app/filesystem.component.ts | 89 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../filesystem/angular/src/index.html | 11 + .../filesystem/angular/src/main.ts | 7 + .../filesystem/angular/src/styles.css | 12 + .../filesystem/angular/tsconfig.app.json | 9 + .../filesystem/angular/tsconfig.json | 16 + .../deep-agents/filesystem/python/.gitignore | 3 + .../filesystem/python/docs/guide.md | 156 ++ .../filesystem/python/langgraph.json | 7 + .../filesystem/python/prompts/filesystem.md | 8 +- .../filesystem/python/pyproject.toml | 21 + .../filesystem/python/src/graph.py | 64 + .../filesystem/python/src/index.ts | 15 +- .../filesystem/python/tsconfig.json | 2 +- cockpit/deep-agents/filesystem/python/uv.lock | 1718 +++++++++++++++++ .../memory/angular/e2e/memory.spec.ts | 21 + .../deep-agents/memory/angular/package.json | 11 + .../deep-agents/memory/angular/project.json | 38 + .../memory/angular/proxy.conf.json | 9 + .../memory/angular/src/app/app.config.ts | 18 + .../angular/src/app/memory.component.ts | 57 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../deep-agents/memory/angular/src/index.html | 11 + .../deep-agents/memory/angular/src/main.ts | 7 + .../deep-agents/memory/angular/src/styles.css | 12 + .../memory/angular/tsconfig.app.json | 9 + .../deep-agents/memory/angular/tsconfig.json | 16 + cockpit/deep-agents/memory/python/.gitignore | 3 + .../deep-agents/memory/python/docs/guide.md | 146 ++ .../deep-agents/memory/python/langgraph.json | 7 + .../memory/python/prompts/memory.md | 12 +- 
.../deep-agents/memory/python/pyproject.toml | 21 + .../deep-agents/memory/python/src/graph.py | 78 + .../deep-agents/memory/python/src/index.ts | 15 +- .../deep-agents/memory/python/tsconfig.json | 2 +- cockpit/deep-agents/memory/python/uv.lock | 1718 +++++++++++++++++ .../planning/angular/e2e/planning.spec.ts | 21 + .../deep-agents/planning/angular/package.json | 11 + .../deep-agents/planning/angular/project.json | 38 + .../planning/angular/proxy.conf.json | 9 + .../planning/angular/src/app/app.config.ts | 18 + .../angular/src/app/planning.component.ts | 65 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../planning/angular/src/index.html | 11 + .../deep-agents/planning/angular/src/main.ts | 7 + .../planning/angular/src/styles.css | 12 + .../planning/angular/tsconfig.app.json | 9 + .../planning/angular/tsconfig.json | 16 + .../deep-agents/planning/python/.gitignore | 3 + .../deep-agents/planning/python/docs/guide.md | 152 ++ .../planning/python/langgraph.json | 7 + .../planning/python/prompts/planning.md | 8 +- .../planning/python/pyproject.toml | 21 + .../deep-agents/planning/python/src/graph.py | 65 + .../deep-agents/planning/python/src/index.ts | 15 +- .../deep-agents/planning/python/tsconfig.json | 2 +- cockpit/deep-agents/planning/python/uv.lock | 1718 +++++++++++++++++ .../sandboxes/angular/e2e/sandboxes.spec.ts | 21 + .../sandboxes/angular/package.json | 11 + .../sandboxes/angular/project.json | 38 + .../sandboxes/angular/proxy.conf.json | 9 + .../sandboxes/angular/src/app/app.config.ts | 18 + .../angular/src/app/sandboxes.component.ts | 96 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../sandboxes/angular/src/index.html | 11 + .../deep-agents/sandboxes/angular/src/main.ts | 7 + .../sandboxes/angular/src/styles.css | 12 + .../sandboxes/angular/tsconfig.app.json | 9 + .../sandboxes/angular/tsconfig.json | 16 + 
.../deep-agents/sandboxes/python/.gitignore | 3 + .../sandboxes/python/docs/guide.md | 171 ++ .../sandboxes/python/langgraph.json | 7 + .../sandboxes/python/prompts/sandboxes.md | 11 +- .../sandboxes/python/pyproject.toml | 21 + .../deep-agents/sandboxes/python/src/graph.py | 102 + .../deep-agents/sandboxes/python/src/index.ts | 15 +- .../sandboxes/python/tsconfig.json | 2 +- cockpit/deep-agents/sandboxes/python/uv.lock | 1718 +++++++++++++++++ .../skills/angular/e2e/skills.spec.ts | 21 + .../deep-agents/skills/angular/package.json | 11 + .../deep-agents/skills/angular/project.json | 38 + .../skills/angular/proxy.conf.json | 9 + .../skills/angular/src/app/app.config.ts | 18 + .../angular/src/app/skills.component.ts | 92 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../deep-agents/skills/angular/src/index.html | 11 + .../deep-agents/skills/angular/src/main.ts | 7 + .../deep-agents/skills/angular/src/styles.css | 12 + .../skills/angular/tsconfig.app.json | 9 + .../deep-agents/skills/angular/tsconfig.json | 16 + cockpit/deep-agents/skills/python/.gitignore | 3 + .../deep-agents/skills/python/docs/guide.md | 167 ++ .../deep-agents/skills/python/langgraph.json | 7 + .../skills/python/prompts/skills.md | 11 +- .../deep-agents/skills/python/pyproject.toml | 21 + .../deep-agents/skills/python/src/graph.py | 96 + .../deep-agents/skills/python/src/index.ts | 15 +- .../deep-agents/skills/python/tsconfig.json | 2 +- cockpit/deep-agents/skills/python/uv.lock | 1718 +++++++++++++++++ .../subagents/angular/e2e/subagents.spec.ts | 21 + .../subagents/angular/package.json | 11 + .../subagents/angular/project.json | 38 + .../subagents/angular/proxy.conf.json | 9 + .../subagents/angular/src/app/app.config.ts | 18 + .../angular/src/app/subagents.component.ts | 67 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../subagents/angular/src/index.html | 11 + 
.../deep-agents/subagents/angular/src/main.ts | 7 + .../subagents/angular/src/styles.css | 12 + .../subagents/angular/tsconfig.app.json | 9 + .../subagents/angular/tsconfig.json | 16 + .../deep-agents/subagents/python/.gitignore | 3 + .../subagents/python/docs/guide.md | 155 ++ .../subagents/python/langgraph.json | 7 + .../subagents/python/prompts/subagents.md | 13 +- .../subagents/python/pyproject.toml | 21 + .../deep-agents/subagents/python/src/graph.py | 115 ++ .../deep-agents/subagents/python/src/index.ts | 15 +- .../subagents/python/tsconfig.json | 2 +- cockpit/deep-agents/subagents/python/uv.lock | 1718 +++++++++++++++++ .../angular/e2e/deployment-runtime.spec.ts | 21 + .../deployment-runtime/angular/package.json | 11 + .../deployment-runtime/angular/project.json | 38 + .../angular/proxy.conf.json | 9 + .../angular/src/app/app.config.ts | 19 + .../src/app/deployment-runtime.component.ts | 90 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../deployment-runtime/angular/src/index.html | 11 + .../deployment-runtime/angular/src/main.ts | 7 + .../deployment-runtime/angular/src/styles.css | 12 + .../angular/tsconfig.app.json | 9 + .../deployment-runtime/angular/tsconfig.json | 16 + .../deployment-runtime/python/.gitignore | 3 + .../deployment-runtime/python/docs/guide.md | 176 ++ .../deployment-runtime/python/langgraph.json | 7 + .../deployment-runtime/python/pyproject.toml | 21 + .../deployment-runtime/python/src/graph.py | 48 + .../deployment-runtime/python/src/index.ts | 15 +- .../deployment-runtime/python/uv.lock | 1718 +++++++++++++++++ .../angular/e2e/durable-execution.spec.ts | 21 + .../durable-execution/angular/package.json | 11 + .../durable-execution/angular/project.json | 38 + .../durable-execution/angular/proxy.conf.json | 9 + .../angular/src/app/app.config.ts | 18 + .../src/app/durable-execution.component.ts | 96 + .../environments/environment.development.ts | 11 + 
.../angular/src/environments/environment.ts | 11 + .../durable-execution/angular/src/index.html | 11 + .../durable-execution/angular/src/main.ts | 7 + .../durable-execution/angular/src/styles.css | 12 + .../angular/tsconfig.app.json | 9 + .../durable-execution/angular/tsconfig.json | 16 + .../durable-execution/python/.gitignore | 3 + .../durable-execution/python/docs/guide.md | 159 ++ .../durable-execution/python/langgraph.json | 7 + .../python/prompts/durable-execution.md | 12 +- .../durable-execution/python/pyproject.toml | 21 + .../durable-execution/python/src/graph.py | 94 + .../durable-execution/python/src/index.ts | 23 +- .../durable-execution/python/uv.lock | 1718 +++++++++++++++++ .../interrupts/angular/e2e/interrupts.spec.ts | 21 + .../langgraph/interrupts/angular/package.json | 11 + .../langgraph/interrupts/angular/project.json | 38 + .../interrupts/angular/proxy.conf.json | 9 + .../interrupts/angular/src/app/app.config.ts | 18 + .../angular/src/app/interrupts.component.ts | 83 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../interrupts/angular/src/index.html | 11 + .../langgraph/interrupts/angular/src/main.ts | 7 + .../interrupts/angular/src/styles.css | 12 + .../interrupts/angular/tsconfig.app.json | 9 + .../interrupts/angular/tsconfig.json | 16 + .../langgraph/interrupts/python/.gitignore | 3 + .../langgraph/interrupts/python/docs/guide.md | 149 ++ .../interrupts/python/langgraph.json | 7 + .../interrupts/python/prompts/interrupts.md | 8 +- .../interrupts/python/pyproject.toml | 21 + .../langgraph/interrupts/python/src/graph.py | 52 + .../langgraph/interrupts/python/src/index.ts | 15 +- cockpit/langgraph/interrupts/python/uv.lock | 1718 +++++++++++++++++ .../memory/angular/e2e/memory.spec.ts | 21 + cockpit/langgraph/memory/angular/package.json | 11 + cockpit/langgraph/memory/angular/project.json | 38 + .../langgraph/memory/angular/proxy.conf.json | 9 + .../memory/angular/src/app/app.config.ts | 
18 + .../angular/src/app/memory.component.ts | 82 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../langgraph/memory/angular/src/index.html | 11 + cockpit/langgraph/memory/angular/src/main.ts | 7 + .../langgraph/memory/angular/src/styles.css | 12 + .../memory/angular/tsconfig.app.json | 9 + .../langgraph/memory/angular/tsconfig.json | 16 + cockpit/langgraph/memory/python/.gitignore | 3 + cockpit/langgraph/memory/python/docs/guide.md | 164 ++ .../langgraph/memory/python/langgraph.json | 7 + .../langgraph/memory/python/prompts/memory.md | 12 +- .../langgraph/memory/python/pyproject.toml | 21 + cockpit/langgraph/memory/python/src/graph.py | 115 ++ cockpit/langgraph/memory/python/src/index.ts | 15 +- cockpit/langgraph/memory/python/uv.lock | 1718 +++++++++++++++++ .../angular/e2e/persistence.spec.ts | 21 + .../persistence/angular/package.json | 11 + .../persistence/angular/project.json | 38 + .../persistence/angular/proxy.conf.json | 9 + .../persistence/angular/src/app/app.config.ts | 18 + .../angular/src/app/persistence.component.ts | 88 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../persistence/angular/src/index.html | 11 + .../langgraph/persistence/angular/src/main.ts | 7 + .../persistence/angular/src/styles.css | 12 + .../persistence/angular/tsconfig.app.json | 9 + .../persistence/angular/tsconfig.json | 16 + .../langgraph/persistence/python/.gitignore | 3 + .../persistence/python/docs/guide.md | 152 ++ .../persistence/python/langgraph.json | 7 + .../persistence/python/prompts/persistence.md | 9 +- .../persistence/python/pyproject.toml | 21 + .../langgraph/persistence/python/src/graph.py | 40 + .../langgraph/persistence/python/src/index.ts | 15 +- cockpit/langgraph/persistence/python/uv.lock | 1718 +++++++++++++++++ .../streaming/angular/e2e/streaming.spec.ts | 31 + .../langgraph/streaming/angular/package.json | 11 + 
.../langgraph/streaming/angular/project.json | 38 + .../streaming/angular/proxy.conf.json | 9 + .../streaming/angular/src/app/app.config.ts | 18 + .../angular/src/app/streaming.component.ts | 47 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../streaming/angular/src/index.html | 11 + .../langgraph/streaming/angular/src/main.ts | 7 + .../streaming/angular/src/styles.css | 12 + .../streaming/angular/tsconfig.app.json | 9 + .../langgraph/streaming/angular/tsconfig.json | 16 + .../langgraph/streaming/angular/vercel.json | 6 + cockpit/langgraph/streaming/python/.gitignore | 3 + .../langgraph/streaming/python/docs/guide.md | 123 ++ .../langgraph/streaming/python/langgraph.json | 7 + .../streaming/python/prompts/streaming.md | 10 +- .../langgraph/streaming/python/pyproject.toml | 21 + .../streaming/python/requirements.txt | 5 + .../langgraph/streaming/python/src/graph.py | 53 + .../langgraph/streaming/python/src/index.ts | 15 +- cockpit/langgraph/streaming/python/uv.lock | 1718 +++++++++++++++++ .../subgraphs/angular/e2e/subgraphs.spec.ts | 21 + .../langgraph/subgraphs/angular/package.json | 11 + .../langgraph/subgraphs/angular/project.json | 38 + .../subgraphs/angular/proxy.conf.json | 9 + .../subgraphs/angular/src/app/app.config.ts | 18 + .../angular/src/app/subgraphs.component.ts | 66 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../subgraphs/angular/src/index.html | 11 + .../langgraph/subgraphs/angular/src/main.ts | 7 + .../subgraphs/angular/src/styles.css | 12 + .../subgraphs/angular/tsconfig.app.json | 9 + .../langgraph/subgraphs/angular/tsconfig.json | 16 + cockpit/langgraph/subgraphs/python/.gitignore | 3 + .../langgraph/subgraphs/python/docs/guide.md | 132 ++ .../langgraph/subgraphs/python/langgraph.json | 7 + .../subgraphs/python/prompts/subgraphs.md | 11 +- .../langgraph/subgraphs/python/pyproject.toml | 21 + 
.../langgraph/subgraphs/python/src/graph.py | 63 + .../langgraph/subgraphs/python/src/index.ts | 14 +- cockpit/langgraph/subgraphs/python/uv.lock | 1718 +++++++++++++++++ .../angular/e2e/time-travel.spec.ts | 21 + .../time-travel/angular/package.json | 11 + .../time-travel/angular/project.json | 38 + .../time-travel/angular/proxy.conf.json | 9 + .../time-travel/angular/src/app/app.config.ts | 18 + .../angular/src/app/time-travel.component.ts | 84 + .../environments/environment.development.ts | 11 + .../angular/src/environments/environment.ts | 11 + .../time-travel/angular/src/index.html | 11 + .../langgraph/time-travel/angular/src/main.ts | 7 + .../time-travel/angular/src/styles.css | 12 + .../time-travel/angular/tsconfig.app.json | 9 + .../time-travel/angular/tsconfig.json | 16 + .../langgraph/time-travel/python/.gitignore | 3 + .../time-travel/python/docs/guide.md | 145 ++ .../time-travel/python/langgraph.json | 7 + .../time-travel/python/prompts/time-travel.md | 16 +- .../time-travel/python/pyproject.toml | 21 + .../langgraph/time-travel/python/src/graph.py | 45 + .../langgraph/time-travel/python/src/index.ts | 15 +- cockpit/langgraph/time-travel/python/uv.lock | 1718 +++++++++++++++++ .../2026-04-03-cockpit-example-harness.md | 1263 ++++++++++++ .../2026-04-03-cockpit-shadcn-refactor.md | 822 ++++++++ .../2026-04-03-cockpit-ui-simplification.md | 497 +++++ .../plans/2026-04-03-content-bundle.md | 1300 +++++++++++++ .../plans/2026-04-03-docs-mode-redesign.md | 883 +++++++++ .../2026-04-03-langsmith-angular-runtime.md | 455 +++++ .../plans/2026-04-03-narrative-docs.md | 596 ++++++ .../2026-04-04-cockpit-code-review-fixes.md | 412 ++++ .../2026-04-04-cockpit-design-alignment.md | 183 ++ .../plans/2026-04-04-cockpit-light-theme.md | 287 +++ .../plans/2026-04-04-deep-agents-examples.md | 115 ++ .../plans/2026-04-04-design-tokens-lib.md | 450 +++++ .../plans/2026-04-04-langgraph-examples.md | 562 ++++++ .../plans/2026-04-04-ui-react-lib.md | 493 +++++ 
.../plans/2026-04-04-unified-harness.md | 400 ++++ .../2026-04-04-website-token-migration.md | 97 + ...26-04-03-cockpit-example-harness-design.md | 174 ++ .../specs/2026-04-03-docs-mode-redesign.md | 160 ++ ...smith-deployment-angular-runtime-design.md | 124 ++ ...2026-04-03-narrative-docs-system-design.md | 126 ++ .../specs/2026-04-04-cockpit-design-polish.md | 54 + .../2026-04-04-design-system-architecture.md | 253 +++ .../2026-04-04-langgraph-examples-design.md | 199 ++ ...26-04-04-unified-example-harness-design.md | 215 +++ libs/chat/ng-package.json | 7 + libs/chat/package.json | 10 + libs/chat/project.json | 16 + libs/chat/src/index.ts | 4 + libs/chat/src/lib/chat-input.component.ts | 42 + libs/chat/src/lib/chat-message.component.ts | 28 + libs/chat/src/lib/chat.component.ts | 73 + libs/chat/src/lib/chat.types.ts | 4 + libs/chat/tsconfig.json | 12 + libs/chat/tsconfig.lib.json | 9 + libs/design-tokens/package.json | 6 + libs/design-tokens/project.json | 27 + libs/design-tokens/src/index.ts | 6 + libs/design-tokens/src/lib/colors.ts | 34 + libs/design-tokens/src/lib/glass.ts | 19 + libs/design-tokens/src/lib/glow.ts | 17 + libs/design-tokens/src/lib/gradients.ts | 18 + libs/design-tokens/src/lib/tokens.spec.ts | 47 + libs/design-tokens/src/lib/tokens.ts | 20 + libs/design-tokens/src/lib/typography.ts | 17 + libs/design-tokens/tsconfig.json | 7 + libs/design-tokens/tsconfig.lib.json | 9 + libs/design-tokens/vite.config.mts | 11 + libs/ui-react/package.json | 10 + libs/ui-react/project.json | 27 + libs/ui-react/src/index.ts | 10 + libs/ui-react/src/lib/.gitkeep | 0 libs/ui-react/src/lib/callout.tsx | 38 + libs/ui-react/src/lib/card.tsx | 32 + libs/ui-react/src/lib/code-group.tsx | 38 + libs/ui-react/src/lib/components.spec.tsx | 64 + libs/ui-react/src/lib/css-vars.ts | 39 + libs/ui-react/src/lib/glass-button.tsx | 48 + libs/ui-react/src/lib/glass-panel.tsx | 28 + libs/ui-react/src/lib/nav-link.tsx | 28 + libs/ui-react/src/lib/steps.tsx | 33 + 
libs/ui-react/src/lib/tabs.tsx | 38 + libs/ui-react/src/lib/utils.ts | 6 + libs/ui-react/tsconfig.json | 10 + libs/ui-react/tsconfig.lib.json | 9 + libs/ui-react/vite.config.mts | 11 + package-lock.json | 311 ++- tsconfig.base.json | 5 +- 453 files changed, 45497 insertions(+), 1096 deletions(-) create mode 100644 .github/workflows/deploy-langgraph.yml create mode 100644 apps/cockpit/e2e/all-examples-smoke.spec.ts create mode 100644 apps/cockpit/postcss.config.mjs create mode 100644 apps/cockpit/scripts/capability-registry.ts create mode 100644 apps/cockpit/scripts/generate-combined-langgraph.ts create mode 100644 apps/cockpit/scripts/serve-example.ts create mode 100644 apps/cockpit/src/components/api-mode/api-mode.spec.tsx create mode 100644 apps/cockpit/src/components/api-mode/api-mode.tsx delete mode 100644 apps/cockpit/src/components/docs-mode/docs-mode.spec.tsx delete mode 100644 apps/cockpit/src/components/docs-mode/docs-mode.tsx delete mode 100644 apps/cockpit/src/components/docs-pane/docs-pane.tsx delete mode 100644 apps/cockpit/src/components/language-switcher.spec.tsx delete mode 100644 apps/cockpit/src/components/language-switcher.tsx create mode 100644 apps/cockpit/src/components/narrative-docs/narrative-docs.spec.tsx create mode 100644 apps/cockpit/src/components/narrative-docs/narrative-docs.tsx delete mode 100644 apps/cockpit/src/components/navigation/navigation-tree.tsx delete mode 100644 apps/cockpit/src/components/prompt-drawer/prompt-drawer.spec.tsx delete mode 100644 apps/cockpit/src/components/prompt-drawer/prompt-drawer.tsx delete mode 100644 apps/cockpit/src/components/prompt-pane/prompt-pane.tsx create mode 100644 apps/cockpit/src/components/ui/tabs.tsx create mode 100644 apps/cockpit/src/lib/content-bundle.spec.ts create mode 100644 apps/cockpit/src/lib/content-bundle.ts create mode 100644 apps/cockpit/src/lib/extract-docs.spec.ts create mode 100644 apps/cockpit/src/lib/extract-docs.ts create mode 100644 
apps/cockpit/src/lib/render-markdown.spec.ts create mode 100644 apps/cockpit/src/lib/render-markdown.ts create mode 100644 apps/cockpit/src/lib/utils.ts create mode 100644 cockpit/deep-agents/filesystem/angular/e2e/filesystem.spec.ts create mode 100644 cockpit/deep-agents/filesystem/angular/package.json create mode 100644 cockpit/deep-agents/filesystem/angular/project.json create mode 100644 cockpit/deep-agents/filesystem/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/filesystem/angular/src/app/app.config.ts create mode 100644 cockpit/deep-agents/filesystem/angular/src/app/filesystem.component.ts create mode 100644 cockpit/deep-agents/filesystem/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/filesystem/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/filesystem/angular/src/index.html create mode 100644 cockpit/deep-agents/filesystem/angular/src/main.ts create mode 100644 cockpit/deep-agents/filesystem/angular/src/styles.css create mode 100644 cockpit/deep-agents/filesystem/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/filesystem/angular/tsconfig.json create mode 100644 cockpit/deep-agents/filesystem/python/.gitignore create mode 100644 cockpit/deep-agents/filesystem/python/docs/guide.md create mode 100644 cockpit/deep-agents/filesystem/python/langgraph.json create mode 100644 cockpit/deep-agents/filesystem/python/pyproject.toml create mode 100644 cockpit/deep-agents/filesystem/python/src/graph.py create mode 100644 cockpit/deep-agents/filesystem/python/uv.lock create mode 100644 cockpit/deep-agents/memory/angular/e2e/memory.spec.ts create mode 100644 cockpit/deep-agents/memory/angular/package.json create mode 100644 cockpit/deep-agents/memory/angular/project.json create mode 100644 cockpit/deep-agents/memory/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/memory/angular/src/app/app.config.ts create mode 100644 
cockpit/deep-agents/memory/angular/src/app/memory.component.ts create mode 100644 cockpit/deep-agents/memory/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/memory/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/memory/angular/src/index.html create mode 100644 cockpit/deep-agents/memory/angular/src/main.ts create mode 100644 cockpit/deep-agents/memory/angular/src/styles.css create mode 100644 cockpit/deep-agents/memory/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/memory/angular/tsconfig.json create mode 100644 cockpit/deep-agents/memory/python/.gitignore create mode 100644 cockpit/deep-agents/memory/python/docs/guide.md create mode 100644 cockpit/deep-agents/memory/python/langgraph.json create mode 100644 cockpit/deep-agents/memory/python/pyproject.toml create mode 100644 cockpit/deep-agents/memory/python/src/graph.py create mode 100644 cockpit/deep-agents/memory/python/uv.lock create mode 100644 cockpit/deep-agents/planning/angular/e2e/planning.spec.ts create mode 100644 cockpit/deep-agents/planning/angular/package.json create mode 100644 cockpit/deep-agents/planning/angular/project.json create mode 100644 cockpit/deep-agents/planning/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/planning/angular/src/app/app.config.ts create mode 100644 cockpit/deep-agents/planning/angular/src/app/planning.component.ts create mode 100644 cockpit/deep-agents/planning/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/planning/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/planning/angular/src/index.html create mode 100644 cockpit/deep-agents/planning/angular/src/main.ts create mode 100644 cockpit/deep-agents/planning/angular/src/styles.css create mode 100644 cockpit/deep-agents/planning/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/planning/angular/tsconfig.json create mode 100644 
cockpit/deep-agents/planning/python/.gitignore create mode 100644 cockpit/deep-agents/planning/python/docs/guide.md create mode 100644 cockpit/deep-agents/planning/python/langgraph.json create mode 100644 cockpit/deep-agents/planning/python/pyproject.toml create mode 100644 cockpit/deep-agents/planning/python/src/graph.py create mode 100644 cockpit/deep-agents/planning/python/uv.lock create mode 100644 cockpit/deep-agents/sandboxes/angular/e2e/sandboxes.spec.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/package.json create mode 100644 cockpit/deep-agents/sandboxes/angular/project.json create mode 100644 cockpit/deep-agents/sandboxes/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/sandboxes/angular/src/app/app.config.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/src/app/sandboxes.component.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/src/index.html create mode 100644 cockpit/deep-agents/sandboxes/angular/src/main.ts create mode 100644 cockpit/deep-agents/sandboxes/angular/src/styles.css create mode 100644 cockpit/deep-agents/sandboxes/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/sandboxes/angular/tsconfig.json create mode 100644 cockpit/deep-agents/sandboxes/python/.gitignore create mode 100644 cockpit/deep-agents/sandboxes/python/docs/guide.md create mode 100644 cockpit/deep-agents/sandboxes/python/langgraph.json create mode 100644 cockpit/deep-agents/sandboxes/python/pyproject.toml create mode 100644 cockpit/deep-agents/sandboxes/python/src/graph.py create mode 100644 cockpit/deep-agents/sandboxes/python/uv.lock create mode 100644 cockpit/deep-agents/skills/angular/e2e/skills.spec.ts create mode 100644 cockpit/deep-agents/skills/angular/package.json create mode 100644 
cockpit/deep-agents/skills/angular/project.json create mode 100644 cockpit/deep-agents/skills/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/skills/angular/src/app/app.config.ts create mode 100644 cockpit/deep-agents/skills/angular/src/app/skills.component.ts create mode 100644 cockpit/deep-agents/skills/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/skills/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/skills/angular/src/index.html create mode 100644 cockpit/deep-agents/skills/angular/src/main.ts create mode 100644 cockpit/deep-agents/skills/angular/src/styles.css create mode 100644 cockpit/deep-agents/skills/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/skills/angular/tsconfig.json create mode 100644 cockpit/deep-agents/skills/python/.gitignore create mode 100644 cockpit/deep-agents/skills/python/docs/guide.md create mode 100644 cockpit/deep-agents/skills/python/langgraph.json create mode 100644 cockpit/deep-agents/skills/python/pyproject.toml create mode 100644 cockpit/deep-agents/skills/python/src/graph.py create mode 100644 cockpit/deep-agents/skills/python/uv.lock create mode 100644 cockpit/deep-agents/subagents/angular/e2e/subagents.spec.ts create mode 100644 cockpit/deep-agents/subagents/angular/package.json create mode 100644 cockpit/deep-agents/subagents/angular/project.json create mode 100644 cockpit/deep-agents/subagents/angular/proxy.conf.json create mode 100644 cockpit/deep-agents/subagents/angular/src/app/app.config.ts create mode 100644 cockpit/deep-agents/subagents/angular/src/app/subagents.component.ts create mode 100644 cockpit/deep-agents/subagents/angular/src/environments/environment.development.ts create mode 100644 cockpit/deep-agents/subagents/angular/src/environments/environment.ts create mode 100644 cockpit/deep-agents/subagents/angular/src/index.html create mode 100644 cockpit/deep-agents/subagents/angular/src/main.ts create mode 
100644 cockpit/deep-agents/subagents/angular/src/styles.css create mode 100644 cockpit/deep-agents/subagents/angular/tsconfig.app.json create mode 100644 cockpit/deep-agents/subagents/angular/tsconfig.json create mode 100644 cockpit/deep-agents/subagents/python/.gitignore create mode 100644 cockpit/deep-agents/subagents/python/docs/guide.md create mode 100644 cockpit/deep-agents/subagents/python/langgraph.json create mode 100644 cockpit/deep-agents/subagents/python/pyproject.toml create mode 100644 cockpit/deep-agents/subagents/python/src/graph.py create mode 100644 cockpit/deep-agents/subagents/python/uv.lock create mode 100644 cockpit/langgraph/deployment-runtime/angular/e2e/deployment-runtime.spec.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/package.json create mode 100644 cockpit/langgraph/deployment-runtime/angular/project.json create mode 100644 cockpit/langgraph/deployment-runtime/angular/proxy.conf.json create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/app/deployment-runtime.component.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/index.html create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/main.ts create mode 100644 cockpit/langgraph/deployment-runtime/angular/src/styles.css create mode 100644 cockpit/langgraph/deployment-runtime/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/deployment-runtime/angular/tsconfig.json create mode 100644 cockpit/langgraph/deployment-runtime/python/.gitignore create mode 100644 cockpit/langgraph/deployment-runtime/python/docs/guide.md create mode 100644 cockpit/langgraph/deployment-runtime/python/langgraph.json create mode 100644 
cockpit/langgraph/deployment-runtime/python/pyproject.toml create mode 100644 cockpit/langgraph/deployment-runtime/python/src/graph.py create mode 100644 cockpit/langgraph/deployment-runtime/python/uv.lock create mode 100644 cockpit/langgraph/durable-execution/angular/e2e/durable-execution.spec.ts create mode 100644 cockpit/langgraph/durable-execution/angular/package.json create mode 100644 cockpit/langgraph/durable-execution/angular/project.json create mode 100644 cockpit/langgraph/durable-execution/angular/proxy.conf.json create mode 100644 cockpit/langgraph/durable-execution/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/durable-execution/angular/src/app/durable-execution.component.ts create mode 100644 cockpit/langgraph/durable-execution/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/durable-execution/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/durable-execution/angular/src/index.html create mode 100644 cockpit/langgraph/durable-execution/angular/src/main.ts create mode 100644 cockpit/langgraph/durable-execution/angular/src/styles.css create mode 100644 cockpit/langgraph/durable-execution/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/durable-execution/angular/tsconfig.json create mode 100644 cockpit/langgraph/durable-execution/python/.gitignore create mode 100644 cockpit/langgraph/durable-execution/python/docs/guide.md create mode 100644 cockpit/langgraph/durable-execution/python/langgraph.json create mode 100644 cockpit/langgraph/durable-execution/python/pyproject.toml create mode 100644 cockpit/langgraph/durable-execution/python/src/graph.py create mode 100644 cockpit/langgraph/durable-execution/python/uv.lock create mode 100644 cockpit/langgraph/interrupts/angular/e2e/interrupts.spec.ts create mode 100644 cockpit/langgraph/interrupts/angular/package.json create mode 100644 cockpit/langgraph/interrupts/angular/project.json create mode 100644 
cockpit/langgraph/interrupts/angular/proxy.conf.json create mode 100644 cockpit/langgraph/interrupts/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/interrupts/angular/src/app/interrupts.component.ts create mode 100644 cockpit/langgraph/interrupts/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/interrupts/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/interrupts/angular/src/index.html create mode 100644 cockpit/langgraph/interrupts/angular/src/main.ts create mode 100644 cockpit/langgraph/interrupts/angular/src/styles.css create mode 100644 cockpit/langgraph/interrupts/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/interrupts/angular/tsconfig.json create mode 100644 cockpit/langgraph/interrupts/python/.gitignore create mode 100644 cockpit/langgraph/interrupts/python/docs/guide.md create mode 100644 cockpit/langgraph/interrupts/python/langgraph.json create mode 100644 cockpit/langgraph/interrupts/python/pyproject.toml create mode 100644 cockpit/langgraph/interrupts/python/src/graph.py create mode 100644 cockpit/langgraph/interrupts/python/uv.lock create mode 100644 cockpit/langgraph/memory/angular/e2e/memory.spec.ts create mode 100644 cockpit/langgraph/memory/angular/package.json create mode 100644 cockpit/langgraph/memory/angular/project.json create mode 100644 cockpit/langgraph/memory/angular/proxy.conf.json create mode 100644 cockpit/langgraph/memory/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/memory/angular/src/app/memory.component.ts create mode 100644 cockpit/langgraph/memory/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/memory/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/memory/angular/src/index.html create mode 100644 cockpit/langgraph/memory/angular/src/main.ts create mode 100644 cockpit/langgraph/memory/angular/src/styles.css create mode 100644 
cockpit/langgraph/memory/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/memory/angular/tsconfig.json create mode 100644 cockpit/langgraph/memory/python/.gitignore create mode 100644 cockpit/langgraph/memory/python/docs/guide.md create mode 100644 cockpit/langgraph/memory/python/langgraph.json create mode 100644 cockpit/langgraph/memory/python/pyproject.toml create mode 100644 cockpit/langgraph/memory/python/src/graph.py create mode 100644 cockpit/langgraph/memory/python/uv.lock create mode 100644 cockpit/langgraph/persistence/angular/e2e/persistence.spec.ts create mode 100644 cockpit/langgraph/persistence/angular/package.json create mode 100644 cockpit/langgraph/persistence/angular/project.json create mode 100644 cockpit/langgraph/persistence/angular/proxy.conf.json create mode 100644 cockpit/langgraph/persistence/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/persistence/angular/src/app/persistence.component.ts create mode 100644 cockpit/langgraph/persistence/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/persistence/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/persistence/angular/src/index.html create mode 100644 cockpit/langgraph/persistence/angular/src/main.ts create mode 100644 cockpit/langgraph/persistence/angular/src/styles.css create mode 100644 cockpit/langgraph/persistence/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/persistence/angular/tsconfig.json create mode 100644 cockpit/langgraph/persistence/python/.gitignore create mode 100644 cockpit/langgraph/persistence/python/docs/guide.md create mode 100644 cockpit/langgraph/persistence/python/langgraph.json create mode 100644 cockpit/langgraph/persistence/python/pyproject.toml create mode 100644 cockpit/langgraph/persistence/python/src/graph.py create mode 100644 cockpit/langgraph/persistence/python/uv.lock create mode 100644 cockpit/langgraph/streaming/angular/e2e/streaming.spec.ts 
create mode 100644 cockpit/langgraph/streaming/angular/package.json create mode 100644 cockpit/langgraph/streaming/angular/project.json create mode 100644 cockpit/langgraph/streaming/angular/proxy.conf.json create mode 100644 cockpit/langgraph/streaming/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/streaming/angular/src/app/streaming.component.ts create mode 100644 cockpit/langgraph/streaming/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/streaming/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/streaming/angular/src/index.html create mode 100644 cockpit/langgraph/streaming/angular/src/main.ts create mode 100644 cockpit/langgraph/streaming/angular/src/styles.css create mode 100644 cockpit/langgraph/streaming/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/streaming/angular/tsconfig.json create mode 100644 cockpit/langgraph/streaming/angular/vercel.json create mode 100644 cockpit/langgraph/streaming/python/.gitignore create mode 100644 cockpit/langgraph/streaming/python/docs/guide.md create mode 100644 cockpit/langgraph/streaming/python/langgraph.json create mode 100644 cockpit/langgraph/streaming/python/pyproject.toml create mode 100644 cockpit/langgraph/streaming/python/requirements.txt create mode 100644 cockpit/langgraph/streaming/python/src/graph.py create mode 100644 cockpit/langgraph/streaming/python/uv.lock create mode 100644 cockpit/langgraph/subgraphs/angular/e2e/subgraphs.spec.ts create mode 100644 cockpit/langgraph/subgraphs/angular/package.json create mode 100644 cockpit/langgraph/subgraphs/angular/project.json create mode 100644 cockpit/langgraph/subgraphs/angular/proxy.conf.json create mode 100644 cockpit/langgraph/subgraphs/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/subgraphs/angular/src/app/subgraphs.component.ts create mode 100644 cockpit/langgraph/subgraphs/angular/src/environments/environment.development.ts create 
mode 100644 cockpit/langgraph/subgraphs/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/subgraphs/angular/src/index.html create mode 100644 cockpit/langgraph/subgraphs/angular/src/main.ts create mode 100644 cockpit/langgraph/subgraphs/angular/src/styles.css create mode 100644 cockpit/langgraph/subgraphs/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/subgraphs/angular/tsconfig.json create mode 100644 cockpit/langgraph/subgraphs/python/.gitignore create mode 100644 cockpit/langgraph/subgraphs/python/docs/guide.md create mode 100644 cockpit/langgraph/subgraphs/python/langgraph.json create mode 100644 cockpit/langgraph/subgraphs/python/pyproject.toml create mode 100644 cockpit/langgraph/subgraphs/python/src/graph.py create mode 100644 cockpit/langgraph/subgraphs/python/uv.lock create mode 100644 cockpit/langgraph/time-travel/angular/e2e/time-travel.spec.ts create mode 100644 cockpit/langgraph/time-travel/angular/package.json create mode 100644 cockpit/langgraph/time-travel/angular/project.json create mode 100644 cockpit/langgraph/time-travel/angular/proxy.conf.json create mode 100644 cockpit/langgraph/time-travel/angular/src/app/app.config.ts create mode 100644 cockpit/langgraph/time-travel/angular/src/app/time-travel.component.ts create mode 100644 cockpit/langgraph/time-travel/angular/src/environments/environment.development.ts create mode 100644 cockpit/langgraph/time-travel/angular/src/environments/environment.ts create mode 100644 cockpit/langgraph/time-travel/angular/src/index.html create mode 100644 cockpit/langgraph/time-travel/angular/src/main.ts create mode 100644 cockpit/langgraph/time-travel/angular/src/styles.css create mode 100644 cockpit/langgraph/time-travel/angular/tsconfig.app.json create mode 100644 cockpit/langgraph/time-travel/angular/tsconfig.json create mode 100644 cockpit/langgraph/time-travel/python/.gitignore create mode 100644 cockpit/langgraph/time-travel/python/docs/guide.md create mode 100644 
cockpit/langgraph/time-travel/python/langgraph.json create mode 100644 cockpit/langgraph/time-travel/python/pyproject.toml create mode 100644 cockpit/langgraph/time-travel/python/src/graph.py create mode 100644 cockpit/langgraph/time-travel/python/uv.lock create mode 100644 docs/superpowers/plans/2026-04-03-cockpit-example-harness.md create mode 100644 docs/superpowers/plans/2026-04-03-cockpit-shadcn-refactor.md create mode 100644 docs/superpowers/plans/2026-04-03-cockpit-ui-simplification.md create mode 100644 docs/superpowers/plans/2026-04-03-content-bundle.md create mode 100644 docs/superpowers/plans/2026-04-03-docs-mode-redesign.md create mode 100644 docs/superpowers/plans/2026-04-03-langsmith-angular-runtime.md create mode 100644 docs/superpowers/plans/2026-04-03-narrative-docs.md create mode 100644 docs/superpowers/plans/2026-04-04-cockpit-code-review-fixes.md create mode 100644 docs/superpowers/plans/2026-04-04-cockpit-design-alignment.md create mode 100644 docs/superpowers/plans/2026-04-04-cockpit-light-theme.md create mode 100644 docs/superpowers/plans/2026-04-04-deep-agents-examples.md create mode 100644 docs/superpowers/plans/2026-04-04-design-tokens-lib.md create mode 100644 docs/superpowers/plans/2026-04-04-langgraph-examples.md create mode 100644 docs/superpowers/plans/2026-04-04-ui-react-lib.md create mode 100644 docs/superpowers/plans/2026-04-04-unified-harness.md create mode 100644 docs/superpowers/plans/2026-04-04-website-token-migration.md create mode 100644 docs/superpowers/specs/2026-04-03-cockpit-example-harness-design.md create mode 100644 docs/superpowers/specs/2026-04-03-docs-mode-redesign.md create mode 100644 docs/superpowers/specs/2026-04-03-langsmith-deployment-angular-runtime-design.md create mode 100644 docs/superpowers/specs/2026-04-03-narrative-docs-system-design.md create mode 100644 docs/superpowers/specs/2026-04-04-cockpit-design-polish.md create mode 100644 docs/superpowers/specs/2026-04-04-design-system-architecture.md create 
mode 100644 docs/superpowers/specs/2026-04-04-langgraph-examples-design.md create mode 100644 docs/superpowers/specs/2026-04-04-unified-example-harness-design.md create mode 100644 libs/chat/ng-package.json create mode 100644 libs/chat/package.json create mode 100644 libs/chat/project.json create mode 100644 libs/chat/src/index.ts create mode 100644 libs/chat/src/lib/chat-input.component.ts create mode 100644 libs/chat/src/lib/chat-message.component.ts create mode 100644 libs/chat/src/lib/chat.component.ts create mode 100644 libs/chat/src/lib/chat.types.ts create mode 100644 libs/chat/tsconfig.json create mode 100644 libs/chat/tsconfig.lib.json create mode 100644 libs/design-tokens/package.json create mode 100644 libs/design-tokens/project.json create mode 100644 libs/design-tokens/src/index.ts create mode 100644 libs/design-tokens/src/lib/colors.ts create mode 100644 libs/design-tokens/src/lib/glass.ts create mode 100644 libs/design-tokens/src/lib/glow.ts create mode 100644 libs/design-tokens/src/lib/gradients.ts create mode 100644 libs/design-tokens/src/lib/tokens.spec.ts create mode 100644 libs/design-tokens/src/lib/tokens.ts create mode 100644 libs/design-tokens/src/lib/typography.ts create mode 100644 libs/design-tokens/tsconfig.json create mode 100644 libs/design-tokens/tsconfig.lib.json create mode 100644 libs/design-tokens/vite.config.mts create mode 100644 libs/ui-react/package.json create mode 100644 libs/ui-react/project.json create mode 100644 libs/ui-react/src/index.ts create mode 100644 libs/ui-react/src/lib/.gitkeep create mode 100644 libs/ui-react/src/lib/callout.tsx create mode 100644 libs/ui-react/src/lib/card.tsx create mode 100644 libs/ui-react/src/lib/code-group.tsx create mode 100644 libs/ui-react/src/lib/components.spec.tsx create mode 100644 libs/ui-react/src/lib/css-vars.ts create mode 100644 libs/ui-react/src/lib/glass-button.tsx create mode 100644 libs/ui-react/src/lib/glass-panel.tsx create mode 100644 libs/ui-react/src/lib/nav-link.tsx 
create mode 100644 libs/ui-react/src/lib/steps.tsx create mode 100644 libs/ui-react/src/lib/tabs.tsx create mode 100644 libs/ui-react/src/lib/utils.ts create mode 100644 libs/ui-react/tsconfig.json create mode 100644 libs/ui-react/tsconfig.lib.json create mode 100644 libs/ui-react/vite.config.mts diff --git a/.claude/launch.json b/.claude/launch.json index 344aa68fb..ff4c563eb 100644 --- a/.claude/launch.json +++ b/.claude/launch.json @@ -6,6 +6,12 @@ "runtimeExecutable": "/bin/bash", "runtimeArgs": ["-c", "export PATH=/Users/blove/.nvm/versions/node/v22.14.0/bin:$PATH && npx nx serve website"], "port": 3000 + }, + { + "name": "cockpit", + "runtimeExecutable": "/bin/bash", + "runtimeArgs": ["-c", "export PATH=/Users/blove/.nvm/versions/node/v22.14.0/bin:$PATH && npx nx serve cockpit --port 4201"], + "port": 4201 } ] } diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c41fa320d..d2f0b0b33 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,6 +49,18 @@ jobs: - run: npx nx build cockpit --skip-nx-cache - run: npx nx test cockpit --skip-nx-cache + cockpit-examples-build: + name: Cockpit — build all examples + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6.0.2 + - uses: actions/setup-node@v6.3.0 + with: + node-version: 22 + cache: npm + - run: npm ci + - run: npx nx run-many -t build --projects='cockpit-*-angular' --skip-nx-cache + cockpit-smoke: name: Cockpit — representative capability smoke runs-on: ubuntu-latest @@ -150,7 +162,7 @@ jobs: deploy: name: Deploy → Vercel - needs: [library, website, cockpit, cockpit-smoke, cockpit-secret-integration, cockpit-deploy-smoke, mcp, chat-agent-smoke, cockpit-e2e, website-e2e] + needs: [library, website, cockpit, cockpit-examples-build, cockpit-smoke, cockpit-secret-integration, cockpit-deploy-smoke, mcp, chat-agent-smoke, cockpit-e2e, website-e2e] runs-on: ubuntu-latest # Only deploy on pushes to main, not on pull requests if: github.ref == 'refs/heads/main' && 
github.event_name == 'push' diff --git a/.github/workflows/deploy-langgraph.yml b/.github/workflows/deploy-langgraph.yml new file mode 100644 index 000000000..d1dfd4a98 --- /dev/null +++ b/.github/workflows/deploy-langgraph.yml @@ -0,0 +1,67 @@ +name: Deploy LangGraph + +on: + push: + branches: [main] + paths: + - 'cockpit/**/python/**' + workflow_dispatch: + inputs: + capability: + description: 'Capability path (e.g., langgraph/streaming)' + required: false + type: string + +jobs: + deploy: + name: Deploy to LangGraph Cloud + runs-on: ubuntu-latest + strategy: + matrix: + include: + - name: langgraph-streaming + path: cockpit/langgraph/streaming/python + - name: langgraph-persistence + path: cockpit/langgraph/persistence/python + - name: langgraph-interrupts + path: cockpit/langgraph/interrupts/python + - name: langgraph-memory + path: cockpit/langgraph/memory/python + - name: langgraph-durable-execution + path: cockpit/langgraph/durable-execution/python + - name: langgraph-subgraphs + path: cockpit/langgraph/subgraphs/python + - name: langgraph-time-travel + path: cockpit/langgraph/time-travel/python + - name: langgraph-deployment-runtime + path: cockpit/langgraph/deployment-runtime/python + - name: deep-agents-planning + path: cockpit/deep-agents/planning/python + - name: deep-agents-filesystem + path: cockpit/deep-agents/filesystem/python + - name: deep-agents-subagents + path: cockpit/deep-agents/subagents/python + - name: deep-agents-memory + path: cockpit/deep-agents/memory/python + - name: deep-agents-skills + path: cockpit/deep-agents/skills/python + - name: deep-agents-sandboxes + path: cockpit/deep-agents/sandboxes/python + steps: + - uses: actions/checkout@v6.0.2 + + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install langgraph-cli + run: pip install langgraph-cli + + - name: Deploy ${{ matrix.name }} + if: | + github.event_name == 'workflow_dispatch' && (inputs.capability == '' || contains(matrix.path, 
inputs.capability)) + || github.event_name == 'push' + working-directory: ${{ matrix.path }} + run: langgraph deploy + env: + LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }} diff --git a/.gitignore b/.gitignore index d0244912e..f6d9a43d9 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,10 @@ apps/website/public/demo/ .next out .vercel + +# LangGraph +.langgraph_api/ +langgraph-combined.json + +# Playwright +test-results/ diff --git a/apps/cockpit/e2e/all-examples-smoke.spec.ts b/apps/cockpit/e2e/all-examples-smoke.spec.ts new file mode 100644 index 000000000..0ee338241 --- /dev/null +++ b/apps/cockpit/e2e/all-examples-smoke.spec.ts @@ -0,0 +1,65 @@ +import { expect, test } from '@playwright/test'; + +/** + * Smoke test that verifies every capability example's Angular app is running + * and can render the chat interface. Requires all 14 Angular apps to be served. + * + * Run with: npx playwright test apps/cockpit/e2e/all-examples-smoke.spec.ts + * + * Prerequisites: + * npx tsx apps/cockpit/scripts/serve-example.ts --all + * OR: nx run cockpit:serve-all + */ + +const EXAMPLES = [ + { name: 'streaming', port: 4300, selector: 'app-streaming' }, + { name: 'persistence', port: 4301, selector: 'app-persistence' }, + { name: 'interrupts', port: 4302, selector: 'app-interrupts' }, + { name: 'memory', port: 4303, selector: 'app-memory' }, + { name: 'durable-execution', port: 4304, selector: 'app-durable-execution' }, + { name: 'subgraphs', port: 4305, selector: 'app-subgraphs' }, + { name: 'time-travel', port: 4306, selector: 'app-time-travel' }, + { name: 'deployment-runtime', port: 4307, selector: 'app-deployment-runtime' }, + { name: 'planning', port: 4310, selector: 'app-planning' }, + { name: 'filesystem', port: 4311, selector: 'app-filesystem' }, + { name: 'da-subagents', port: 4312, selector: 'app-subagents' }, + { name: 'da-memory', port: 4313, selector: 'app-da-memory' }, + { name: 'skills', port: 4314, selector: 'app-skills' }, + { name: 'sandboxes', 
port: 4315, selector: 'app-sandboxes' }, +] as const; + +test.describe('All Examples Smoke Test', () => { + for (const example of EXAMPLES) { + test(`${example.name} (port ${example.port}) renders chat UI`, async ({ page }) => { + await page.goto(`http://localhost:${example.port}`, { timeout: 15000 }); + await page.waitForSelector(example.selector, { state: 'attached', timeout: 10000 }); + + // Verify the chat component renders + await expect(page.locator('cp-chat')).toBeVisible({ timeout: 5000 }); + + // Verify input and send button exist + await expect(page.locator('input[name="prompt"]')).toBeVisible({ timeout: 5000 }); + await expect(page.locator('button[type="submit"]')).toBeVisible({ timeout: 5000 }); + }); + } +}); + +test.describe('All Examples Send Message Test', () => { + // This test requires a running LangGraph backend with OPENAI_API_KEY + test.skip(({ }, testInfo) => !process.env['OPENAI_API_KEY'], 'Requires OPENAI_API_KEY'); + + for (const example of EXAMPLES) { + test(`${example.name} (port ${example.port}) sends and receives a message`, async ({ page }) => { + await page.goto(`http://localhost:${example.port}`, { timeout: 15000 }); + await page.waitForSelector(example.selector, { state: 'attached', timeout: 10000 }); + + // Type and send a message + await page.fill('input[name="prompt"]', 'hello'); + await page.click('button[type="submit"]'); + + // Wait for AI response + await expect(page.locator('.cp-message--ai, [class*="message--ai"]')).toBeVisible({ timeout: 30000 }); + await expect(page.locator('.cp-message--ai .cp-message__content, [class*="message__content"]')).not.toBeEmpty({ timeout: 30000 }); + }); + } +}); diff --git a/apps/cockpit/package.json b/apps/cockpit/package.json index 2611f8aaf..fd8250ec8 100644 --- a/apps/cockpit/package.json +++ b/apps/cockpit/package.json @@ -3,8 +3,14 @@ "version": "0.0.1", "private": true, "dependencies": { + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-tabs": "^1.1.0", + 
"class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "marked": "^15.0.0", "next": "~16.1.6", "react": "^19.0.0", - "react-dom": "^19.0.0" + "react-dom": "^19.0.0", + "tailwind-merge": "^2.5.0" } } diff --git a/apps/cockpit/postcss.config.mjs b/apps/cockpit/postcss.config.mjs new file mode 100644 index 000000000..2e3384d2f --- /dev/null +++ b/apps/cockpit/postcss.config.mjs @@ -0,0 +1,6 @@ +// apps/cockpit/postcss.config.mjs +export default { + plugins: { + '@tailwindcss/postcss': {}, + }, +}; diff --git a/apps/cockpit/project.json b/apps/cockpit/project.json index e7cdf7457..f9272999d 100644 --- a/apps/cockpit/project.json +++ b/apps/cockpit/project.json @@ -51,6 +51,167 @@ "options": { "config": "apps/cockpit/playwright.config.ts" } + }, + "serve-streaming": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-streaming-angular --port 4300", + "cd cockpit/langgraph/streaming/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-persistence": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-persistence-angular --port 4301", + "cd cockpit/langgraph/persistence/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-interrupts": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-interrupts-angular --port 4302", + "cd cockpit/langgraph/interrupts/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-memory": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-memory-angular 
--port 4303", + "cd cockpit/langgraph/memory/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-durable-execution": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-durable-execution-angular --port 4304", + "cd cockpit/langgraph/durable-execution/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-subgraphs": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-subgraphs-angular --port 4305", + "cd cockpit/langgraph/subgraphs/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-time-travel": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-time-travel-angular --port 4306", + "cd cockpit/langgraph/time-travel/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-deployment-runtime": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-langgraph-deployment-runtime-angular --port 4307", + "cd cockpit/langgraph/deployment-runtime/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-planning": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-planning-angular --port 4310", + "cd cockpit/deep-agents/planning/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + 
}, + "serve-filesystem": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-filesystem-angular --port 4311", + "cd cockpit/deep-agents/filesystem/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-da-subagents": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-subagents-angular --port 4312", + "cd cockpit/deep-agents/subagents/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-da-memory": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-memory-angular --port 4313", + "cd cockpit/deep-agents/memory/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-skills": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-skills-angular --port 4314", + "cd cockpit/deep-agents/skills/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-sandboxes": { + "executor": "nx:run-commands", + "options": { + "commands": [ + "npx nx serve cockpit --port 4201", + "npx nx serve cockpit-deep-agents-sandboxes-angular --port 4315", + "cd cockpit/deep-agents/sandboxes/python && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123" + ], + "parallel": true + } + }, + "serve-all": { + "executor": "nx:run-commands", + "options": { + "command": "npx tsx apps/cockpit/scripts/serve-example.ts --all", + "cwd": "." 
+ } } } } diff --git a/apps/cockpit/scripts/capability-registry.ts b/apps/cockpit/scripts/capability-registry.ts new file mode 100644 index 000000000..dfaa13d7a --- /dev/null +++ b/apps/cockpit/scripts/capability-registry.ts @@ -0,0 +1,38 @@ +/** + * Single source of truth for all cockpit capability examples. + * Used by serve, build, test, and deploy scripts. + */ +export interface Capability { + id: string; + product: 'langgraph' | 'deep-agents'; + topic: string; + angularProject: string; + port: number; + pythonDir: string; + graphName: string; +} + +export const capabilities: readonly Capability[] = [ + { id: 'streaming', product: 'langgraph', topic: 'streaming', angularProject: 'cockpit-langgraph-streaming-angular', port: 4300, pythonDir: 'cockpit/langgraph/streaming/python', graphName: 'streaming' }, + { id: 'persistence', product: 'langgraph', topic: 'persistence', angularProject: 'cockpit-langgraph-persistence-angular', port: 4301, pythonDir: 'cockpit/langgraph/persistence/python', graphName: 'persistence' }, + { id: 'interrupts', product: 'langgraph', topic: 'interrupts', angularProject: 'cockpit-langgraph-interrupts-angular', port: 4302, pythonDir: 'cockpit/langgraph/interrupts/python', graphName: 'interrupts' }, + { id: 'memory', product: 'langgraph', topic: 'memory', angularProject: 'cockpit-langgraph-memory-angular', port: 4303, pythonDir: 'cockpit/langgraph/memory/python', graphName: 'memory' }, + { id: 'durable-execution', product: 'langgraph', topic: 'durable-execution', angularProject: 'cockpit-langgraph-durable-execution-angular', port: 4304, pythonDir: 'cockpit/langgraph/durable-execution/python', graphName: 'durable-execution' }, + { id: 'subgraphs', product: 'langgraph', topic: 'subgraphs', angularProject: 'cockpit-langgraph-subgraphs-angular', port: 4305, pythonDir: 'cockpit/langgraph/subgraphs/python', graphName: 'subgraphs' }, + { id: 'time-travel', product: 'langgraph', topic: 'time-travel', angularProject: 
'cockpit-langgraph-time-travel-angular', port: 4306, pythonDir: 'cockpit/langgraph/time-travel/python', graphName: 'time-travel' }, + { id: 'deployment-runtime', product: 'langgraph', topic: 'deployment-runtime', angularProject: 'cockpit-langgraph-deployment-runtime-angular', port: 4307, pythonDir: 'cockpit/langgraph/deployment-runtime/python', graphName: 'deployment-runtime' }, + { id: 'planning', product: 'deep-agents', topic: 'planning', angularProject: 'cockpit-deep-agents-planning-angular', port: 4310, pythonDir: 'cockpit/deep-agents/planning/python', graphName: 'planning' }, + { id: 'filesystem', product: 'deep-agents', topic: 'filesystem', angularProject: 'cockpit-deep-agents-filesystem-angular', port: 4311, pythonDir: 'cockpit/deep-agents/filesystem/python', graphName: 'filesystem' }, + { id: 'da-subagents', product: 'deep-agents', topic: 'subagents', angularProject: 'cockpit-deep-agents-subagents-angular', port: 4312, pythonDir: 'cockpit/deep-agents/subagents/python', graphName: 'da-subagents' }, + { id: 'da-memory', product: 'deep-agents', topic: 'memory', angularProject: 'cockpit-deep-agents-memory-angular', port: 4313, pythonDir: 'cockpit/deep-agents/memory/python', graphName: 'da-memory' }, + { id: 'skills', product: 'deep-agents', topic: 'skills', angularProject: 'cockpit-deep-agents-skills-angular', port: 4314, pythonDir: 'cockpit/deep-agents/skills/python', graphName: 'skills' }, + { id: 'sandboxes', product: 'deep-agents', topic: 'sandboxes', angularProject: 'cockpit-deep-agents-sandboxes-angular', port: 4315, pythonDir: 'cockpit/deep-agents/sandboxes/python', graphName: 'sandboxes' }, +] as const; + +export function findCapability(id: string): Capability | undefined { + return capabilities.find((c) => c.id === id); +} + +export function allAngularProjects(): string[] { + return capabilities.map((c) => c.angularProject); +} diff --git a/apps/cockpit/scripts/generate-combined-langgraph.ts b/apps/cockpit/scripts/generate-combined-langgraph.ts new 
file mode 100644 index 000000000..1c1f44abf --- /dev/null +++ b/apps/cockpit/scripts/generate-combined-langgraph.ts @@ -0,0 +1,13 @@ +import { writeFileSync } from 'fs'; +import { resolve } from 'path'; +import { capabilities } from './capability-registry'; + +const graphs: Record = {}; +for (const c of capabilities) { + graphs[c.graphName] = `./${c.pythonDir}/src/graph.py:graph`; +} + +const config = { graphs, dependencies: capabilities.map((c) => `./${c.pythonDir}/pyproject.toml`), env: '.env' }; +const out = resolve(process.cwd(), 'langgraph-combined.json'); +writeFileSync(out, JSON.stringify(config, null, 2) + '\n'); +console.log(`Generated ${out} with ${Object.keys(graphs).length} graphs`); diff --git a/apps/cockpit/scripts/serve-example.ts b/apps/cockpit/scripts/serve-example.ts new file mode 100644 index 000000000..cca450f9b --- /dev/null +++ b/apps/cockpit/scripts/serve-example.ts @@ -0,0 +1,41 @@ +import { spawn, type ChildProcess } from 'child_process'; +import { capabilities, findCapability } from './capability-registry'; + +const args = process.argv.slice(2); +const capabilityArg = args.find((a) => a.startsWith('--capability='))?.split('=')[1]; +const allMode = args.includes('--all'); + +if (!capabilityArg && !allMode) { + console.log('Usage:'); + console.log(' npx tsx apps/cockpit/scripts/serve-example.ts --capability=streaming'); + console.log(' npx tsx apps/cockpit/scripts/serve-example.ts --all'); + console.log('\nCapabilities:'); + capabilities.forEach((c) => console.log(` ${c.id.padEnd(22)} port ${c.port} ${c.product}/${c.topic}`)); + process.exit(0); +} + +const procs: ChildProcess[] = []; + +function run(label: string, cmd: string, color: string): void { + const proc = spawn('bash', ['-c', cmd], { stdio: ['inherit', 'pipe', 'pipe'], env: { ...process.env } }); + proc.stdout?.on('data', (d) => String(d).split('\n').filter(Boolean).forEach((l) => console.log(`\x1b[${color}m[${label}]\x1b[0m ${l}`))); + proc.stderr?.on('data', (d) => 
String(d).split('\n').filter(Boolean).forEach((l) => console.error(`\x1b[${color}m[${label}]\x1b[0m ${l}`))); + procs.push(proc); +} + +function cleanup() { procs.forEach((p) => p.kill()); process.exit(0); } +process.on('SIGINT', cleanup); +process.on('SIGTERM', cleanup); + +run('cockpit', 'npx nx serve cockpit --port 4201', '36'); + +if (allMode) { + capabilities.forEach((c) => run(c.id, `npx nx serve ${c.angularProject} --port ${c.port}`, '33')); + console.log('\n🚀 Starting cockpit + all 14 examples\n'); +} else { + const cap = findCapability(capabilityArg!); + if (!cap) { console.error(`Unknown: ${capabilityArg}`); process.exit(1); } + run(cap.id, `npx nx serve ${cap.angularProject} --port ${cap.port}`, '33'); + run(`${cap.id}-py`, `cd ${cap.pythonDir} && source $HOME/.local/bin/env 2>/dev/null; uv sync && uv run langgraph dev --port 8123`, '35'); + console.log(`\n🚀 ${cap.id}: cockpit=4201 angular=${cap.port} langgraph=8123\n`); +} diff --git a/apps/cockpit/src/app/[...slug]/page.tsx b/apps/cockpit/src/app/[...slug]/page.tsx index 257a47fea..0f2b14af6 100644 --- a/apps/cockpit/src/app/[...slug]/page.tsx +++ b/apps/cockpit/src/app/[...slug]/page.tsx @@ -1,6 +1,13 @@ import { redirect } from 'next/navigation'; import { CockpitShell } from '../../components/cockpit-shell'; -import { getCockpitPageModel } from '../../lib/cockpit-page'; +import { getContentBundle } from '../../lib/content-bundle'; +import { cockpitManifest, getCockpitPageModel } from '../../lib/cockpit-page'; + +export async function generateStaticParams() { + return cockpitManifest.map((entry) => ({ + slug: [entry.product, entry.section, entry.topic, entry.page, entry.language], + })); +} export default async function CockpitRoutePage({ params, @@ -16,11 +23,14 @@ export default async function CockpitRoutePage({ redirect(canonicalPath); } + const contentBundle = await getContentBundle(presentation); + return ( ); } diff --git a/apps/cockpit/src/app/cockpit.css b/apps/cockpit/src/app/cockpit.css 
index abb520260..875289e17 100644 --- a/apps/cockpit/src/app/cockpit.css +++ b/apps/cockpit/src/app/cockpit.css @@ -1,290 +1,245 @@ -:root { - color-scheme: dark; - --cockpit-bg: #08111f; - --cockpit-panel: #0f1b2d; - --cockpit-panel-border: rgba(138, 170, 214, 0.18); - --cockpit-panel-strong: #14243d; - --cockpit-text: #edf3ff; - --cockpit-text-muted: #96a8c7; - --cockpit-accent: #7dd3fc; - --cockpit-accent-strong: #38bdf8; - --cockpit-shadow: 0 24px 80px rgba(3, 9, 18, 0.45); -} - -* { - box-sizing: border-box; -} - -html, -body { - margin: 0; - min-height: 100%; - background: - radial-gradient(circle at top, rgba(61, 118, 168, 0.18), transparent 30%), - linear-gradient(180deg, #09101a 0%, var(--cockpit-bg) 100%); - color: var(--cockpit-text); - font-family: - Inter, - ui-sans-serif, - system-ui, - -apple-system, - BlinkMacSystemFont, - 'Segoe UI', - sans-serif; +@import "tailwindcss"; + +@theme inline { + --color-background: var(--background); + --color-foreground: var(--foreground); + --color-primary: var(--primary); + --color-primary-foreground: var(--primary-foreground); + --color-card: var(--card); + --color-card-foreground: var(--card-foreground); + --color-muted: var(--muted); + --color-muted-foreground: var(--muted-foreground); + --color-border: var(--border); + --color-input: var(--input); + --color-ring: var(--ring); } -a { - color: inherit; - text-decoration: none; -} - -button { - font: inherit; -} - -.cockpit-app { - min-height: 100vh; -} - -.cockpit-shell { - display: grid; - grid-template-columns: 18rem minmax(0, 1fr); - min-height: 100vh; -} - -.cockpit-sidebar, -.cockpit-shell__workspace, -.cockpit-prompt-drawer { - background: rgba(8, 17, 31, 0.88); - backdrop-filter: blur(14px); -} - -.cockpit-sidebar { - display: grid; - gap: 1.5rem; - padding: 1.75rem 1.25rem; - border-right: 1px solid var(--cockpit-panel-border); +:root { + /* shadcn semantic vars — light palette */ + --background: #f8f9fc; + --foreground: #1a1a2e; + --primary: #004090; + 
--primary-foreground: #ffffff; + --card: rgba(255, 255, 255, 0.45); + --card-foreground: #1a1a2e; + --muted: rgba(0, 64, 144, 0.06); + --muted-foreground: #555770; + --border: rgba(0, 64, 144, 0.15); + --input: rgba(0, 64, 144, 0.15); + --ring: #004090; +} + +/* Shiki code blocks — preserve dark background from theme */ +pre.shiki { + padding: 1rem; + border-radius: 0.5rem; + overflow-x: auto; + font-size: 0.85rem; + line-height: 1.6; +} + +/* ── Doc components ────────────────────────────────────────── */ + +.doc-summary { + background: rgba(0, 64, 144, 0.04); + border: 1px solid rgba(0, 64, 144, 0.12); + border-radius: 0.5rem; + padding: 0.75rem 1rem; + margin-bottom: 1.5rem; + font-size: 0.9rem; + color: var(--ds-text-secondary); + line-height: 1.6; } -.cockpit-sidebar__header, -.cockpit-shell__header { - display: grid; - gap: 0.5rem; +.doc-callout { + border-radius: 0.5rem; + padding: 0.75rem 1rem; + margin: 1.25rem 0; + font-size: 0.85rem; + line-height: 1.6; } - -.cockpit-sidebar h1, -.cockpit-shell__header h2, -.cockpit-run-mode__surface h2, -.cockpit-code-mode__header h2, -.cockpit-prompt-drawer h2, -.cockpit-run-mode__context h3 { - margin: 0; - font-family: - 'Iowan Old Style', - 'Palatino Linotype', - 'Book Antiqua', - Georgia, - serif; +.doc-callout__label { + font-size: 0.7rem; font-weight: 600; - letter-spacing: -0.02em; -} - -.cockpit-shell__workspace { - display: grid; - gap: 1.5rem; - padding: 1.75rem; -} - -.cockpit-shell__actions, -.cockpit-shell__header, -.cockpit-shell__mode-surface, -.cockpit-run-mode, -.cockpit-code-mode, -.cockpit-prompt-drawer__header { - display: grid; - gap: 1rem; -} - -.cockpit-shell__header { - grid-template-columns: minmax(0, 1fr) auto; - align-items: end; -} - -.cockpit-shell__actions { - grid-auto-flow: column; + text-transform: uppercase; + letter-spacing: 0.06em; + margin-bottom: 0.25rem; +} +.doc-callout__content { color: var(--ds-text-secondary); } +.doc-callout--tip { + background: rgba(0, 64, 144, 0.04); + 
border: 1px solid rgba(0, 64, 144, 0.12); +} +.doc-callout--tip .doc-callout__label { color: var(--ds-accent); } +.doc-callout--note { + background: rgba(250, 204, 21, 0.06); + border: 1px solid rgba(250, 204, 21, 0.2); +} +.doc-callout--note .doc-callout__label { color: #b8960f; } +.doc-callout--warning { + background: rgba(255, 107, 107, 0.06); + border: 1px solid rgba(255, 107, 107, 0.2); +} +.doc-callout--warning .doc-callout__label { color: #e04545; } + +.doc-steps { margin: 1.5rem 0; } +.doc-step { display: flex; gap: 0.75rem; } +.doc-step__indicator { + display: flex; + flex-direction: column; align-items: center; -} - -.cockpit-shell__actions button, -.cockpit-prompt-drawer button, -.cockpit-code-mode__tabs button, -[aria-label='Primary modes'] button, -[aria-haspopup='menu'] { - border: 1px solid var(--cockpit-panel-border); - background: rgba(15, 27, 45, 0.9); - color: var(--cockpit-text); - border-radius: 999px; - padding: 0.75rem 1rem; -} - -.cockpit-shell__actions button:last-child, -[aria-label='Primary modes'] button[aria-pressed='true'] { - background: linear-gradient(180deg, rgba(56, 189, 248, 0.28), rgba(56, 189, 248, 0.14)); - border-color: rgba(125, 211, 252, 0.35); -} - -.cockpit-eyebrow, -.cockpit-code-path { - margin: 0; - color: var(--cockpit-text-muted); - font-family: - 'SFMono-Regular', - ui-monospace, - 'Cascadia Code', - 'Source Code Pro', - Menlo, - monospace; - font-size: 0.8rem; -} - -.cockpit-shell__mode-surface, -.cockpit-run-mode__surface, -.cockpit-run-mode__context, -.cockpit-code-mode, -.cockpit-prompt-drawer, -[aria-label='Primary modes'], -[role='menu'], -[aria-label='Prompt copy'], -[aria-label='Docs mode'] { - border: 1px solid var(--cockpit-panel-border); - background: rgba(15, 27, 45, 0.72); - box-shadow: var(--cockpit-shadow); -} - -.cockpit-shell__mode-surface, -[aria-label='Docs mode'], -.cockpit-code-mode { - padding: 1.25rem; -} - -[aria-label='Primary modes'] { - display: inline-flex; - gap: 0.75rem; - width: 
fit-content; - padding: 0.5rem; - border-radius: 999px; -} - -.cockpit-run-mode { - grid-template-columns: minmax(0, 1.8fr) minmax(16rem, 0.95fr); - align-items: start; -} - -.cockpit-run-mode__surface, -.cockpit-run-mode__context { - padding: 1.25rem; - min-height: 100%; -} - -.cockpit-run-mode__viewport { - display: grid; - place-items: center; - min-height: 22rem; - border: 1px dashed rgba(125, 211, 252, 0.24); - background: linear-gradient(180deg, rgba(20, 36, 61, 0.5), rgba(8, 17, 31, 0.3)); -} - -.cockpit-code-mode__tabs, -.cockpit-prompt-drawer__tabs { + flex-shrink: 0; +} +.doc-step__number { + width: 1.5rem; + height: 1.5rem; + border-radius: 50%; + background: var(--ds-accent); + color: #fff; + font-size: 0.7rem; + font-weight: 700; display: flex; - gap: 0.75rem; - flex-wrap: wrap; + align-items: center; + justify-content: center; +} +.doc-step__line { + width: 2px; + flex: 1; + background: rgba(0, 64, 144, 0.15); + margin: 0.375rem 0; + min-height: 1rem; +} +.doc-step:last-child .doc-step__line { display: none; } +.doc-step__body { flex: 1; padding-bottom: 1.5rem; } +.doc-step:last-child .doc-step__body { padding-bottom: 0; } +.doc-step__title { + font-size: 0.95rem; + font-weight: 600; + color: var(--ds-text-primary); + margin-bottom: 0.25rem; } - -.cockpit-code-mode__editor, -.cockpit-prompt-drawer__body, -[aria-label='Docs mode'] section { - border-top: 1px solid var(--cockpit-panel-border); - padding-top: 1rem; +.doc-step__content { + font-size: 0.85rem; + color: var(--ds-text-secondary); + line-height: 1.7; } +.doc-step__content p { margin: 0.5rem 0; } +.doc-step__content pre.shiki { margin: 0.5rem 0; border-radius: 0.5rem; } -.cockpit-prompt-drawer { - position: fixed; - top: 1rem; - right: 1rem; - bottom: 1rem; - width: min(28rem, calc(100vw - 2rem)); - padding: 1.25rem; - border-radius: 1.25rem; - overflow: auto; - z-index: 20; +.doc-codeblock { + border: 1px solid rgba(0, 64, 144, 0.12); + border-radius: 0.5rem; + overflow: hidden; + margin: 
0.75rem 0; } - -[aria-label='Language picker'] { - margin-top: 0.75rem; - display: grid; +.doc-codeblock__header { + display: flex; + align-items: center; gap: 0.5rem; + padding: 0.4rem 0.75rem; + border-bottom: 1px solid rgba(138, 170, 214, 0.12); + background: rgba(26, 27, 38, 0.95); + font-size: 0.7rem; +} +.doc-codeblock__file { color: #a9b1d6; font-family: var(--font-mono); } +.doc-codeblock__lang { + padding: 0.1rem 0.35rem; + border-radius: 0.2rem; + background: rgba(0, 64, 144, 0.15); + color: var(--ds-accent-light); + font-size: 0.6rem; +} +.doc-codeblock__copy { + margin-left: auto; + padding: 0.1rem 0.5rem; + border: 1px solid rgba(255, 255, 255, 0.15); + border-radius: 0.25rem; + background: transparent; + color: #a9b1d6; + font-size: 0.65rem; + cursor: pointer; +} +.doc-codeblock__copy:hover { color: #e0e0e0; } +.doc-codeblock pre.shiki { margin: 0; border-radius: 0; border: none; } +.code-mode-block pre.shiki { margin: 0; border-radius: 0; border: none; } + +.doc-prompt { + background: rgba(168, 85, 247, 0.04); + border: 1px solid rgba(168, 85, 247, 0.2); + border-radius: 0.5rem; + overflow: hidden; + margin: 1.25rem 0; +} +.doc-prompt__header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.5rem 0.75rem; + border-bottom: 1px solid rgba(168, 85, 247, 0.15); + background: rgba(168, 85, 247, 0.06); } - -[aria-label='Cockpit navigation'] { - display: grid; - gap: 1.25rem; -} - -[aria-label='Cockpit navigation'] h2, -[aria-label='Cockpit navigation'] h3 { - margin: 0 0 0.5rem; -} - -[aria-label='Cockpit navigation'] ul { - margin: 0; - padding: 0; - list-style: none; - display: grid; - gap: 0.4rem; -} - -[aria-current='page'] { - color: var(--cockpit-accent); -} - -pre, -code { - font-family: - 'SFMono-Regular', - ui-monospace, - 'Cascadia Code', - 'Source Code Pro', - Menlo, - monospace; -} - -pre { - margin: 0; - white-space: pre-wrap; +.doc-prompt__label { + font-size: 0.7rem; + font-weight: 600; + color: 
#9333ea; + text-transform: uppercase; + letter-spacing: 0.06em; +} +.doc-prompt__copy { + font-size: 0.65rem; + color: #9333ea; + padding: 0.1rem 0.5rem; + border: 1px solid rgba(168, 85, 247, 0.25); + border-radius: 0.25rem; + background: rgba(168, 85, 247, 0.08); + cursor: pointer; +} +.doc-prompt__copy:hover { background: rgba(168, 85, 247, 0.15); } +.doc-prompt__content { + padding: 0.75rem; + font-size: 0.85rem; + color: var(--ds-text-secondary); + line-height: 1.7; +} +.doc-prompt__content code { + background: rgba(168, 85, 247, 0.1); + padding: 0.1rem 0.3rem; + border-radius: 0.2rem; + color: #9333ea; + font-size: 0.8rem; } -@media (max-width: 960px) { - .cockpit-shell { - grid-template-columns: 1fr; - } - - .cockpit-shell__header, - .cockpit-run-mode { - grid-template-columns: 1fr; - } - - .cockpit-shell__actions { - grid-auto-flow: row; - justify-items: start; - } - - .cockpit-prompt-drawer { - top: auto; - left: 1rem; - right: 1rem; - bottom: 1rem; - width: auto; - max-height: 70vh; - } -} +.doc-api-table { margin: 1.25rem 0; } +.doc-api-table table { width: 100%; border-collapse: collapse; font-size: 0.8rem; } +.doc-api-table th { + text-align: left; + padding: 0.5rem 0.75rem; + color: var(--ds-text-muted); + font-weight: 500; + font-size: 0.65rem; + text-transform: uppercase; + letter-spacing: 0.06em; + border-bottom: 1px solid var(--border); +} +.doc-api-table td { + padding: 0.5rem 0.75rem; + border-bottom: 1px solid rgba(0, 64, 144, 0.08); + color: var(--ds-text-secondary); +} +.doc-api-table code { + background: var(--ds-accent-surface); + padding: 0.1rem 0.3rem; + border-radius: 0.2rem; + color: var(--ds-accent); + font-size: 0.75rem; +} + +/* Narrative docs heading alignment with website */ +.docs-article h1, +.docs-article h2, +.docs-article h3 { + font-family: var(--ds-font-serif); +} +.docs-article h1 { font-size: 1.875rem; } +.docs-article h2 { font-size: 1.5rem; } +.docs-article h3 { font-size: 1.25rem; } diff --git 
a/apps/cockpit/src/app/layout.tsx b/apps/cockpit/src/app/layout.tsx index 58ae4634b..e0775a1d7 100644 --- a/apps/cockpit/src/app/layout.tsx +++ b/apps/cockpit/src/app/layout.tsx @@ -1,4 +1,5 @@ import type { ReactNode } from 'react'; +import { cssVars } from '@cacheplane/ui-react'; import './cockpit.css'; export const metadata = { @@ -12,8 +13,16 @@ interface RootLayoutProps { export default function RootLayout({ children }: RootLayoutProps) { return ( - - {children} + + + {children} + ); } diff --git a/apps/cockpit/src/app/page.tsx b/apps/cockpit/src/app/page.tsx index 795c65d5d..c808f2590 100644 --- a/apps/cockpit/src/app/page.tsx +++ b/apps/cockpit/src/app/page.tsx @@ -1,15 +1,18 @@ import React from 'react'; import { CockpitShell } from '../components/cockpit-shell'; +import { getContentBundle } from '../lib/content-bundle'; import { getCockpitPageModel } from '../lib/cockpit-page'; -export default function CockpitHomePage() { +export default async function CockpitHomePage() { const { entry, presentation, navigationTree } = getCockpitPageModel(); + const contentBundle = await getContentBundle(presentation); return ( ); } diff --git a/apps/cockpit/src/components/api-mode/api-mode.spec.tsx b/apps/cockpit/src/components/api-mode/api-mode.spec.tsx new file mode 100644 index 000000000..d88c218c4 --- /dev/null +++ b/apps/cockpit/src/components/api-mode/api-mode.spec.tsx @@ -0,0 +1,64 @@ +import React from 'react'; +import { renderToStaticMarkup } from 'react-dom/server'; +import { describe, expect, it } from 'vitest'; +import { ApiMode } from './api-mode'; + +describe('ApiMode', () => { + it('renders doc sections with signatures, descriptions, params, and returns', () => { + const html = renderToStaticMarkup( + ', + description: 'Streams a response from the backend.', + params: [{ name: 'prompt', description: 'The user message' }], + returns: 'Observable emitting tokens', + sourceFile: 'streaming.service.ts', + language: 'typescript', + }, + { + title: 
'StreamingGraph', + signature: 'class StreamingGraph', + description: 'Streams LLM responses.', + params: [], + returns: null, + sourceFile: 'graph.py', + language: 'python', + }, + ]} + /> + ); + + expect(html).toContain('StreamingComponent'); + expect(html).toContain('export class StreamingComponent'); + expect(html).toContain('Renders a streaming chat UI.'); + expect(html).toContain('streaming.component.ts'); + + expect(html).toContain('stream'); + expect(html).toContain('prompt'); + expect(html).toContain('The user message'); + expect(html).toContain('Observable emitting tokens'); + + expect(html).toContain('StreamingGraph'); + expect(html).toContain('class StreamingGraph'); + expect(html).toContain('graph.py'); + + expect(html).toContain('TypeScript'); + expect(html).toContain('Python'); + }); + + it('renders empty state when no doc sections', () => { + const html = renderToStaticMarkup(); + expect(html).toContain('No API documentation extracted'); + }); +}); diff --git a/apps/cockpit/src/components/api-mode/api-mode.tsx b/apps/cockpit/src/components/api-mode/api-mode.tsx new file mode 100644 index 000000000..342e5758b --- /dev/null +++ b/apps/cockpit/src/components/api-mode/api-mode.tsx @@ -0,0 +1,161 @@ +import React from 'react'; +import type { DocSection } from '../../lib/extract-docs'; + +interface ApiModeProps { + docSections: DocSection[]; + hasCodeFiles?: boolean; +} + +function renderInlineCode(text: string): React.ReactNode[] { + const parts = text.split(/(`[^`]+`)/g); + return parts.map((part, i) => { + if (part.startsWith('`') && part.endsWith('`')) { + return ( + + {part.slice(1, -1)} + + ); + } + return {part}; + }); +} + +function DocArticle({ section }: { section: DocSection }) { + return ( +
    +
    +
    +

    + {section.title} +

    + + {section.sourceFile} + +
    +
    +          {section.signature}
    +        
    +
    + +
    +

    + {renderInlineCode(section.description)} +

    + + {section.params.length > 0 ? ( +
    +
    + Parameters +
    +
    + {section.params.map((param) => ( +
    + + {param.name} + + {renderInlineCode(param.description)} +
    + ))} +
    +
    + ) : null} + + {section.returns ? ( +
    +
    + Returns +
    +

    + {renderInlineCode(section.returns)} +

    +
    + ) : null} +
    +
    + ); +} + +export function ApiMode({ docSections, hasCodeFiles = false }: ApiModeProps) { + if (docSections.length === 0) { + return ( +
    +

    + {hasCodeFiles + ? 'Add JSDoc comments to your TypeScript files or docstrings to your Python files to see API documentation here.' + : 'No API documentation extracted yet — add JSDoc to TypeScript files or docstrings to Python files.'} +

    +
    + ); + } + + const LANGUAGE_LABELS: Record = { + typescript: 'TypeScript', + python: 'Python', + }; + + const tsSections = docSections.filter((s) => s.language === 'typescript'); + const pySections = docSections.filter((s) => s.language === 'python'); + + return ( +
    + {tsSections.length > 0 ? ( +
    +

    + {LANGUAGE_LABELS[tsSections[0]?.language] ?? 'TypeScript'} +

    + {tsSections.map((section) => ( + + ))} +
    + ) : null} + + {pySections.length > 0 ? ( +
    +

    + {LANGUAGE_LABELS[pySections[0]?.language] ?? 'Python'} +

    + {pySections.map((section) => ( + + ))} +
    + ) : null} +
    + ); +} diff --git a/apps/cockpit/src/components/cockpit-shell.tsx b/apps/cockpit/src/components/cockpit-shell.tsx index 7aae1816a..d244638dc 100644 --- a/apps/cockpit/src/components/cockpit-shell.tsx +++ b/apps/cockpit/src/components/cockpit-shell.tsx @@ -1,27 +1,24 @@ 'use client'; -import React, { useEffect, useMemo, useState } from 'react'; -import { cockpitManifest } from '../../../../libs/cockpit-registry/src/index'; +import React, { useEffect, useState } from 'react'; +import { cockpitManifest } from '@cacheplane/cockpit-registry'; +import type { ContentBundle } from '../lib/content-bundle'; import type { CapabilityPresentation, NavigationProduct } from '../lib/route-resolution'; import { CodeMode } from './code-mode/code-mode'; -import { DocsMode } from './docs-mode/docs-mode'; +import { ApiMode } from './api-mode/api-mode'; +import { NarrativeDocs } from './narrative-docs/narrative-docs'; import { ModeSwitcher } from './modes/mode-switcher'; -import { PromptDrawer } from './prompt-drawer/prompt-drawer'; import { RunMode } from './run-mode/run-mode'; import { CockpitSidebar } from './sidebar/cockpit-sidebar'; -const PRIMARY_MODES = ['Run', 'Code', 'Docs'] as const; +const PRIMARY_MODES = ['Run', 'Code', 'Docs', 'API'] as const; type PrimaryMode = (typeof PRIMARY_MODES)[number]; -const DEFAULT_FRONTEND_ASSET_PATHS = [ - 'apps/cockpit/src/app/page.tsx', - 'apps/cockpit/src/components/cockpit-shell.tsx', -] as const; - interface CockpitShellProps { navigationTree: NavigationProduct[]; presentation: CapabilityPresentation; entryTitle: string; + contentBundle: ContentBundle; } const toLabel = (value: string) => @@ -30,40 +27,28 @@ const toLabel = (value: string) => .map((part) => part.charAt(0).toUpperCase() + part.slice(1)) .join(' '); +function MenuIcon() { + return ( + + ); +} + export function CockpitShell({ navigationTree, presentation, entryTitle, + contentBundle, }: CockpitShellProps) { const [isHydrated, setIsHydrated] = useState(false); const 
[activeMode, setActiveMode] = useState('Run'); - const [isPromptDrawerOpen, setIsPromptDrawerOpen] = useState(false); + const [isSidebarOpen, setIsSidebarOpen] = useState(false); const isCapability = presentation.kind === 'capability'; - const codeAssetPaths = useMemo( - () => - isCapability - ? Array.from(new Set([...DEFAULT_FRONTEND_ASSET_PATHS, ...presentation.codeAssetPaths])) - : [...DEFAULT_FRONTEND_ASSET_PATHS], - [isCapability, presentation] - ); - const promptAssetPaths = isCapability ? presentation.promptAssetPaths : []; + const codeAssetPaths = isCapability ? presentation.codeAssetPaths : []; + const backendAssetPaths = isCapability ? (presentation.backendAssetPaths ?? []) : []; const entry = presentation.entry; const contextLabel = `${toLabel(entry.product)} / ${toLabel(entry.section)} / ${entry.topic}`; - const docsSections = [ - { - title: 'Start from the runnable surface', - body: `Run ${entryTitle} first, then switch to Code to inspect the frontend shell and capability module paths that power it.`, - code: codeAssetPaths[0] ?? presentation.docsPath, - }, - { - title: 'Keep prompts close', - body: - promptAssetPaths.length > 0 - ? 'Use the prompt drawer when you want the prompt path without losing the current workspace mode.' - : 'Prompt assets are not available for this entry, so the guide stays focused on the runnable surface and implementation files.', - code: promptAssetPaths[0], - }, - ].filter((section) => Boolean(section.code || section.body)); useEffect(() => { setIsHydrated(true); @@ -72,69 +57,92 @@ export function CockpitShell({ return (
    - + {/* Desktop sidebar — hidden on mobile */} +
    + +
    -
    -
    -
    -

    {contextLabel}

    -

    {entryTitle}

    -

    - Start in Run, then move into the implementation files or guided docs as - needed. -

    + {/* Mobile sidebar overlay */} + {isSidebarOpen && ( + <> +
    setIsSidebarOpen(false)} + /> +
    +
    + + )} -
    - - {isCapability ? : null} +

    {contextLabel}

    + | +

    {entryTitle}

    +
    +
    +
    - - -
    +
    {activeMode === 'Run' ? ( ) : null} {activeMode === 'Code' ? ( - - ) : null} - {activeMode === 'Docs' ? ( - ) : null} + {activeMode === 'Docs' ? ( + + ) : null} + {activeMode === 'API' ? ( + + ) : null}
    - - setIsPromptDrawerOpen(false)} - />
    ); } diff --git a/apps/cockpit/src/components/code-mode/code-mode.spec.tsx b/apps/cockpit/src/components/code-mode/code-mode.spec.tsx index f138254d3..53dc0a0d4 100644 --- a/apps/cockpit/src/components/code-mode/code-mode.spec.tsx +++ b/apps/cockpit/src/components/code-mode/code-mode.spec.tsx @@ -2,63 +2,113 @@ import React from 'react'; import { act } from 'react'; import { createRoot } from 'react-dom/client'; -import { JSDOM } from 'jsdom'; import { afterEach, describe, expect, it } from 'vitest'; import { CodeMode } from './code-mode'; describe('CodeMode', () => { + let container: HTMLDivElement | undefined; + let root: ReturnType | undefined; + afterEach(() => { - globalThis.document?.body.replaceChildren(); + act(() => { + root?.unmount(); + }); + container?.remove(); }); - it('renders file tabs across the top with a single active file and no side file column', () => { - const dom = new JSDOM(''); - const { window } = dom; - - globalThis.window = window as unknown as Window & typeof globalThis; - globalThis.document = window.document; - globalThis.HTMLElement = window.HTMLElement; - globalThis.Node = window.Node; - globalThis.MouseEvent = window.MouseEvent; - - const container = document.createElement('div'); + it('renders Shiki-highlighted HTML for the active file', () => { + container = document.createElement('div'); document.body.appendChild(container); - const root = createRoot(container); + root = createRoot(container); + + const codeFiles: Record = { + 'apps/cockpit/src/app/page.tsx': '
    export default function Page() {}
    ', + 'cockpit/langgraph/streaming/python/src/index.ts': '
    const x = 1;
    ', + }; act(() => { - root.render( + root!.render( ); }); - const tabs = Array.from(container.querySelectorAll('[role="tab"]')); + expect(container.querySelector('.shiki')).not.toBeNull(); + expect(container.textContent).toContain('export default function Page() {}'); - expect(container.querySelector('[aria-label="File column"]')).toBeNull(); - expect(container.textContent).toContain('apps/cockpit/src/app/page.tsx'); - expect(container.textContent).not.toContain( - 'cockpit/langgraph/streaming/python/src/index.ts' - ); + const tabs = Array.from(container.querySelectorAll('[role="tab"]')); expect(tabs.map((tab) => tab.textContent)).toEqual(['page.tsx', 'index.ts']); - expect(tabs[0].getAttribute('aria-selected')).toBe('true'); act(() => { - (tabs[1] as HTMLElement).click(); + (tabs[1] as HTMLElement).dispatchEvent( + new MouseEvent('mousedown', { bubbles: true, cancelable: true, button: 0 }) + ); }); - expect(container.textContent).toContain( - 'cockpit/langgraph/streaming/python/src/index.ts' - ); - expect(tabs[1].getAttribute('aria-selected')).toBe('true'); - expect(container.textContent).not.toContain('apps/cockpit/src/app/page.tsx'); + expect(container.textContent).toContain('const x = 1;'); + }); + + it('renders a fallback message when codeFiles has no entry for a path', () => { + container = document.createElement('div'); + document.body.appendChild(container); + root = createRoot(container); act(() => { - root.unmount(); + root!.render( + + ); }); + + expect(container.textContent).toContain('No source available'); + }); + + it('renders prompt files as tabs after a separator', () => { + container = document.createElement('div'); + document.body.appendChild(container); + root = createRoot(container); + + const promptFiles: Record = { + 'prompts/system.md': 'You are a helpful assistant.', + }; + + act(() => { + root!.render( + const app = true;' }} + promptFiles={promptFiles} + /> + ); + }); + + const tabs = 
Array.from(container.querySelectorAll('[role="tab"]')); + const tabLabels = tabs.map((tab) => tab.textContent); + expect(tabLabels).toContain('app.tsx'); + expect(tabLabels).toContain('system.md'); + + act(() => { + const promptTab = tabs.find((tab) => tab.textContent === 'system.md') as HTMLElement; + promptTab.dispatchEvent( + new MouseEvent('mousedown', { bubbles: true, cancelable: true, button: 0 }) + ); + }); + + expect(container.textContent).toContain('You are a helpful assistant.'); }); }); diff --git a/apps/cockpit/src/components/code-mode/code-mode.tsx b/apps/cockpit/src/components/code-mode/code-mode.tsx index 6b4fd96bf..fb42db482 100644 --- a/apps/cockpit/src/components/code-mode/code-mode.tsx +++ b/apps/cockpit/src/components/code-mode/code-mode.tsx @@ -1,67 +1,115 @@ -import React, { useEffect, useState } from 'react'; +'use client'; + +import React from 'react'; +import { Tabs, TabsContent, TabsList, TabsTrigger } from '@/components/ui/tabs'; interface CodeModeProps { entryTitle: string; codeAssetPaths: readonly string[]; + backendAssetPaths: readonly string[]; + codeFiles: Record; + promptFiles: Record; } const getTabLabel = (path: string): string => path.split('/').pop() ?? path; -export function CodeMode({ entryTitle, codeAssetPaths }: CodeModeProps) { - const [activePath, setActivePath] = useState(codeAssetPaths[0] ?? ''); +function CodeFileContent({ path, content }: { path: string; content: string | undefined }) { + if (!content) { + return

    No source available for {getTabLabel(path)}

    ; + } + + return ( +
    +
    + {path} + +
    +
    +
    + ); +} - useEffect(() => { - if (!codeAssetPaths.includes(activePath)) { - setActivePath(codeAssetPaths[0] ?? ''); - } - }, [activePath, codeAssetPaths]); +export function CodeMode({ entryTitle, codeAssetPaths, backendAssetPaths, codeFiles, promptFiles }: CodeModeProps) { + const promptPaths = Object.keys(promptFiles); + const allPaths = [...codeAssetPaths, ...backendAssetPaths, ...promptPaths]; - if (codeAssetPaths.length === 0) { + if (allPaths.length === 0) { return ( -
    -

    Code

    -

    No code files are available for {entryTitle}.

    +
    +

    No files available for {entryTitle}.

    ); } - const activeIndex = codeAssetPaths.indexOf(activePath); - const resolvedActivePath = activeIndex >= 0 ? activePath : codeAssetPaths[0]; + const defaultPath = codeAssetPaths[0] ?? backendAssetPaths[0] ?? promptPaths[0]; return ( -
    -
    -

    Code

    -

    {entryTitle}

    -

    {resolvedActivePath}

    -
    +
    + + + {codeAssetPaths.map((path) => ( + + {getTabLabel(path)} + + ))} + {backendAssetPaths.map((path) => ( + + {getTabLabel(path)} + + ))} + {promptPaths.map((path) => ( + + {getTabLabel(path)} + + ))} + -
    - {codeAssetPaths.map((path) => { - const isActive = path === resolvedActivePath; + {[...codeAssetPaths, ...backendAssetPaths].map((path) => ( + + + + ))} + {promptPaths.map((path) => { + const content = promptFiles[path]; return ( - + + {content ? ( +
    {content}
    + ) : ( +

    No content for {getTabLabel(path)}

    + )} +
    ); })} -
    - -
    -

    Viewing {resolvedActivePath}

    -
    {`Source preview for ${getTabLabel(resolvedActivePath)}`}
    -
    +
    ); } diff --git a/apps/cockpit/src/components/code-pane/code-pane.tsx b/apps/cockpit/src/components/code-pane/code-pane.tsx index 5fe724068..ef1f2b1ca 100644 --- a/apps/cockpit/src/components/code-pane/code-pane.tsx +++ b/apps/cockpit/src/components/code-pane/code-pane.tsx @@ -6,5 +6,5 @@ interface CodePaneProps { } export function CodePane({ paths }: CodePaneProps) { - return ; + return ; } diff --git a/apps/cockpit/src/components/docs-mode/docs-mode.spec.tsx b/apps/cockpit/src/components/docs-mode/docs-mode.spec.tsx deleted file mode 100644 index b59b9a696..000000000 --- a/apps/cockpit/src/components/docs-mode/docs-mode.spec.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import React from 'react'; -import { renderToStaticMarkup } from 'react-dom/server'; -import { describe, expect, it } from 'vitest'; -import { DocsMode } from './docs-mode'; - -describe('DocsMode', () => { - it('renders a documentation-style guide with title, body, code, and prompt copy affordances', () => { - const html = renderToStaticMarkup( - - ); - - expect(html).toContain('

    LangGraph Streaming

    '); - expect(html).toContain('Use the stream surface to inspect one runnable example end to end.'); - expect(html).toContain('Run'); - expect(html).toContain('npm run cockpit -- --mode=run'); - expect(html).toContain('Open prompt assets'); - expect(html).toContain('Start from the live surface'); - }); -}); diff --git a/apps/cockpit/src/components/docs-mode/docs-mode.tsx b/apps/cockpit/src/components/docs-mode/docs-mode.tsx deleted file mode 100644 index 621b148a2..000000000 --- a/apps/cockpit/src/components/docs-mode/docs-mode.tsx +++ /dev/null @@ -1,62 +0,0 @@ -import React from 'react'; - -interface DocsModeSection { - title: string; - body: string; - code?: string; -} - -interface DocsModeProps { - entryTitle: string; - docsPath: string; - summary: string; - sections: DocsModeSection[]; - promptCopy: string; -} - -function toSectionId(title: string) { - return title.toLowerCase().replace(/[^a-z0-9]+/g, '-').replace(/^-|-$/g, ''); -} - -export function DocsMode({ - entryTitle, - docsPath, - summary, - sections, - promptCopy, -}: DocsModeProps) { - return ( -
    -
    -

    Docs

    -

    {entryTitle}

    -

    {summary}

    -

    {docsPath}

    -

    - Use Run for the live surface, then switch to Code when - you need implementation detail. -

    -
    - -
    - {sections.map((section) => ( -
    -

    {section.title}

    -

    {section.body}

    - {section.code ? ( -
    -                {section.code}
    -              
    - ) : null} -
    - ))} -
    - -
    -

    Prompt assets

    -

    Keep the prompt close while you read the implementation guide.

    - -
    -
    - ); -} diff --git a/apps/cockpit/src/components/docs-pane/docs-pane.tsx b/apps/cockpit/src/components/docs-pane/docs-pane.tsx deleted file mode 100644 index b6bca0ebc..000000000 --- a/apps/cockpit/src/components/docs-pane/docs-pane.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import React from 'react'; -import { DocsMode } from '../docs-mode/docs-mode'; - -interface DocsPaneProps { - path: string; -} - -export function DocsPane({ path }: DocsPaneProps) { - return ( - - ); -} diff --git a/apps/cockpit/src/components/language-switcher.spec.tsx b/apps/cockpit/src/components/language-switcher.spec.tsx deleted file mode 100644 index 1683eec79..000000000 --- a/apps/cockpit/src/components/language-switcher.spec.tsx +++ /dev/null @@ -1,88 +0,0 @@ -import React from 'react'; -import { renderToStaticMarkup } from 'react-dom/server'; -import { describe, expect, it } from 'vitest'; -import { cockpitManifest } from '../../../../libs/cockpit-registry/src/index'; -import { LanguageSwitcher } from './language-switcher'; - -describe('LanguageSwitcher', () => { - it('links to an equivalent page when that language exists', () => { - const manifest = [ - { - ...cockpitManifest.find( - (entry) => - entry.product === 'langgraph' && - entry.topic === 'streaming' && - entry.language === 'python' - )!, - supportedLanguages: ['python', 'typescript'], - equivalentPages: { - python: { - product: 'langgraph', - section: 'core-capabilities', - topic: 'streaming', - page: 'overview', - language: 'python', - }, - typescript: { - product: 'langgraph', - section: 'core-capabilities', - topic: 'streaming', - page: 'overview', - language: 'typescript', - }, - }, - }, - { - ...cockpitManifest.find( - (entry) => - entry.product === 'langgraph' && - entry.topic === 'streaming' && - entry.language === 'python' - )!, - language: 'typescript', - supportedLanguages: ['python', 'typescript'], - equivalentPages: { - python: { - product: 'langgraph', - section: 'core-capabilities', - topic: 'streaming', - page: 
'overview', - language: 'python', - }, - typescript: { - product: 'langgraph', - section: 'core-capabilities', - topic: 'streaming', - page: 'overview', - language: 'typescript', - }, - }, - }, - ]; - - const html = renderToStaticMarkup( - - ); - - expect(html).toContain('/langgraph/core-capabilities/streaming/overview/typescript'); - }); - - it('falls back to product overview when no equivalent page exists', () => { - const html = renderToStaticMarkup( - - entry.product === 'langgraph' && - entry.topic === 'streaming' && - entry.language === 'python' - )!} - /> - ); - - expect(html).toContain('/langgraph/getting-started/overview/overview/python'); - }); -}); diff --git a/apps/cockpit/src/components/language-switcher.tsx b/apps/cockpit/src/components/language-switcher.tsx deleted file mode 100644 index efd4e7a4c..000000000 --- a/apps/cockpit/src/components/language-switcher.tsx +++ /dev/null @@ -1,35 +0,0 @@ -import React from 'react'; -import type { CockpitManifestEntry } from '../../../../libs/cockpit-registry/src/index'; -import { resolveManifestLanguage } from '../../../../libs/cockpit-registry/src/index'; -import { toCockpitPath } from '../lib/route-resolution'; - -const COCKPIT_LANGUAGES = ['python', 'typescript'] as const; - -interface LanguageSwitcherProps { - manifest: CockpitManifestEntry[]; - entry: CockpitManifestEntry; -} - -export function LanguageSwitcher({ manifest, entry }: LanguageSwitcherProps) { - return ( -
    - {COCKPIT_LANGUAGES.map((language) => { - const resolvedEntry = resolveManifestLanguage({ - manifest, - entry, - language, - }); - - return ( - - {language} - - ); - })} -
    - ); -} diff --git a/apps/cockpit/src/components/modes/mode-switcher.spec.tsx b/apps/cockpit/src/components/modes/mode-switcher.spec.tsx index 2479b09e7..0da2fc6ea 100644 --- a/apps/cockpit/src/components/modes/mode-switcher.spec.tsx +++ b/apps/cockpit/src/components/modes/mode-switcher.spec.tsx @@ -5,7 +5,7 @@ import { act } from 'react'; import { afterEach, describe, expect, it } from 'vitest'; import { ModeSwitcher } from './mode-switcher'; -const MODES = ['Run', 'Code', 'Docs'] as const; +const MODES = ['Run', 'Code'] as const; function ModeSwitcherHarness() { const [activeMode, setActiveMode] = useState<(typeof MODES)[number]>('Run'); @@ -33,7 +33,7 @@ describe('ModeSwitcher', () => { container?.remove(); }); - it('shows only Run, Code, and Docs with Run active by default', () => { + it('shows mode buttons with Run active by default', () => { container = document.createElement('div'); document.body.append(container); root = createRoot(container); @@ -42,10 +42,9 @@ describe('ModeSwitcher', () => { root.render(); }); - const buttons = Array.from(container.querySelectorAll('button')); + const buttons = Array.from(container.querySelectorAll('[data-mode-btn]')); - expect(buttons.map((button) => button.textContent)).toEqual(['Run', 'Code', 'Docs']); - expect(buttons[0].getAttribute('aria-pressed')).toBe('true'); + expect(buttons.map((b) => b.textContent)).toEqual(['Run', 'Code']); expect(container.textContent).toContain('Run content'); }); @@ -58,18 +57,17 @@ describe('ModeSwitcher', () => { root.render(); }); - const codeButton = Array.from(container.querySelectorAll('button')).find( - (button) => button.textContent === 'Code' + const codeButton = Array.from(container.querySelectorAll('[data-mode-btn]')).find( + (b) => b.textContent === 'Code' ); expect(codeButton).toBeDefined(); act(() => { - codeButton?.click(); + (codeButton as HTMLElement).click(); }); expect(container.textContent).toContain('Code content'); expect(container.textContent).not.toContain('Run 
content'); - expect(codeButton?.getAttribute('aria-pressed')).toBe('true'); }); }); diff --git a/apps/cockpit/src/components/modes/mode-switcher.tsx b/apps/cockpit/src/components/modes/mode-switcher.tsx index 9f48b3306..2e15c2771 100644 --- a/apps/cockpit/src/components/modes/mode-switcher.tsx +++ b/apps/cockpit/src/components/modes/mode-switcher.tsx @@ -1,4 +1,6 @@ -import React from 'react'; +'use client'; + +import React, { useRef, useEffect, useState } from 'react'; interface ModeSwitcherProps { modes: readonly T[]; @@ -11,17 +13,76 @@ export function ModeSwitcher({ activeMode, onChange, }: ModeSwitcherProps) { + const containerRef = useRef(null); + const [indicatorStyle, setIndicatorStyle] = useState({}); + + useEffect(() => { + const container = containerRef.current; + if (!container) return; + const activeIndex = modes.indexOf(activeMode); + const buttons = container.querySelectorAll('[data-mode-btn]'); + const btn = buttons[activeIndex]; + if (btn) { + setIndicatorStyle({ + left: btn.offsetLeft, + width: btn.offsetWidth, + }); + } + }, [activeMode, modes]); + return ( -
    +
    + {/* Sliding indicator */} +
    + {modes.map((mode) => { const isActive = mode === activeMode; - return ( diff --git a/apps/cockpit/src/components/narrative-docs/narrative-docs.spec.tsx b/apps/cockpit/src/components/narrative-docs/narrative-docs.spec.tsx new file mode 100644 index 000000000..d7392856b --- /dev/null +++ b/apps/cockpit/src/components/narrative-docs/narrative-docs.spec.tsx @@ -0,0 +1,23 @@ +import React from 'react'; +import { renderToStaticMarkup } from 'react-dom/server'; +import { describe, expect, it } from 'vitest'; +import { NarrativeDocs } from './narrative-docs'; + +describe('NarrativeDocs', () => { + it('renders narrative HTML content', () => { + const html = renderToStaticMarkup( + Streaming Guide

    Learn to stream.

    ', sourceFile: 'guide.md' }, + ]} + /> + ); + expect(html).toContain('Streaming Guide'); + expect(html).toContain('Learn to stream.'); + }); + + it('renders empty state when no docs', () => { + const html = renderToStaticMarkup(); + expect(html).toContain('No documentation available'); + }); +}); diff --git a/apps/cockpit/src/components/narrative-docs/narrative-docs.tsx b/apps/cockpit/src/components/narrative-docs/narrative-docs.tsx new file mode 100644 index 000000000..d6f8ce8d7 --- /dev/null +++ b/apps/cockpit/src/components/narrative-docs/narrative-docs.tsx @@ -0,0 +1,69 @@ +'use client'; + +import React, { useCallback } from 'react'; + +interface NarrativeDoc { + title: string; + html: string; + sourceFile: string; +} + +interface NarrativeDocsProps { + narrativeDocs: NarrativeDoc[]; +} + +export function NarrativeDocs({ narrativeDocs }: NarrativeDocsProps) { + const handleClick = useCallback((e: React.MouseEvent) => { + const target = e.target as HTMLElement; + + const copyCodeBtn = target.closest('[data-copy-code]') as HTMLElement | null; + if (copyCodeBtn) { + const codeBlock = copyCodeBtn.closest('.doc-codeblock'); + const code = codeBlock?.querySelector('pre code')?.textContent ?? ''; + navigator.clipboard.writeText(code); + copyCodeBtn.textContent = 'Copied!'; + setTimeout(() => { copyCodeBtn.textContent = 'Copy'; }, 1500); + return; + } + + const copyPromptBtn = target.closest('[data-copy-prompt]') as HTMLElement | null; + if (copyPromptBtn) { + const promptBlock = copyPromptBtn.closest('.doc-prompt'); + const text = promptBlock?.querySelector('.doc-prompt__content')?.textContent ?? ''; + navigator.clipboard.writeText(text); + copyPromptBtn.textContent = 'Copied!'; + setTimeout(() => { copyPromptBtn.textContent = 'Copy prompt'; }, 1500); + return; + } + }, []); + + if (narrativeDocs.length === 0) { + return ( +
    +

    No documentation available for this capability.

    +
    + ); + } + + return ( +
    + {narrativeDocs.map((doc) => ( +
    + ))} +
    + ); +} diff --git a/apps/cockpit/src/components/navigation/navigation-tree.tsx b/apps/cockpit/src/components/navigation/navigation-tree.tsx deleted file mode 100644 index cbc4de08a..000000000 --- a/apps/cockpit/src/components/navigation/navigation-tree.tsx +++ /dev/null @@ -1,31 +0,0 @@ -import React from 'react'; -import type { NavigationProduct } from '../../lib/route-resolution'; -import { toCockpitPath } from '../../lib/route-resolution'; - -interface NavigationTreeProps { - tree: NavigationProduct[]; -} - -export function NavigationTree({ tree }: NavigationTreeProps) { - return ( - - ); -} diff --git a/apps/cockpit/src/components/pane-rendering.spec.tsx b/apps/cockpit/src/components/pane-rendering.spec.tsx index 3ee10f1e7..8f56a0b0c 100644 --- a/apps/cockpit/src/components/pane-rendering.spec.tsx +++ b/apps/cockpit/src/components/pane-rendering.spec.tsx @@ -4,55 +4,48 @@ import { describe, expect, it } from 'vitest'; import { CodeMode } from './code-mode/code-mode'; import { CodePane } from './code-pane/code-pane'; import { CockpitShell } from './cockpit-shell'; -import { DocsPane } from './docs-pane/docs-pane'; -import { PromptPane } from './prompt-pane/prompt-pane'; -import { PromptDrawer } from './prompt-drawer/prompt-drawer'; + import { getCockpitPageModel } from '../lib/cockpit-page'; describe('metadata-driven panes', () => { - it('renders code, prompt, and docs panes from metadata values', () => { + it('renders code pane from metadata values', () => { const html = renderToStaticMarkup(
    - -
    ); expect(html).toContain('cockpit/langgraph/streaming/python/src/index.ts'); - expect(html).toContain('cockpit/langgraph/streaming/python/prompts/streaming.md'); - expect(html).toContain('/docs/langgraph/streaming'); }); }); describe('cockpit shell contract', () => { - it('renders a stable shell with a persistent sidebar, run-first modes, and no inline prompt pane by default', () => { + it('renders a simplified shell with sidebar, run/code modes, and compact header', () => { const model = getCockpitPageModel(); const html = renderToStaticMarkup( ); expect(html).toContain('Cockpit'); - expect(html).toContain('Explore the example surface'); expect(html).toContain('Deep Agents'); expect(html).toContain('LangGraph'); expect(html).toContain('Run'); expect(html).toContain('Code'); expect(html).toContain('Docs'); - expect(html).toContain('Run example'); - expect(html).toContain('Open prompt assets'); - expect(html).toContain('Interactive example'); - expect(html).not.toContain('

    Prompts

    '); + expect(html).not.toContain('Explore the example surface'); + expect(html).not.toContain('Run example'); + expect(html).not.toContain('Open prompt assets'); expect(html).not.toContain('aria-label="Prompt drawer"'); }); }); describe('refreshed shell structure', () => { - it('renders the key class-based structure for the full-height shell, top file tabs, and prompt slide-over', () => { + it('renders the key structure for the full-height shell and file tabs', () => { const model = getCockpitPageModel(); const html = renderToStaticMarkup(
    @@ -60,6 +53,7 @@ describe('refreshed shell structure', () => { navigationTree={model.navigationTree} presentation={model.presentation} entryTitle={model.entry.title} + contentBundle={{ codeFiles: {}, promptFiles: {}, runtimeUrl: null, docSections: [], narrativeDocs: [] }} /> { 'apps/cockpit/src/app/page.tsx', 'cockpit/langgraph/streaming/python/src/index.ts', ]} - /> - undefined} + backendAssetPaths={[]} + codeFiles={{}} + promptFiles={{}} />
    ); - expect(html).toContain('cockpit-shell'); - expect(html).toContain('cockpit-shell__workspace'); - expect(html).toContain('cockpit-sidebar'); - expect(html).toContain('cockpit-code-mode__tabs'); - expect(html).toContain('cockpit-prompt-drawer'); + expect(html).toContain('aria-label="Cockpit shell"'); + expect(html).toContain('aria-label="Cockpit sidebar"'); + expect(html).toContain('aria-label="Code mode"'); + expect(html).toContain('page.tsx'); + expect(html).toContain('index.ts'); }); }); diff --git a/apps/cockpit/src/components/prompt-drawer/prompt-drawer.spec.tsx b/apps/cockpit/src/components/prompt-drawer/prompt-drawer.spec.tsx deleted file mode 100644 index 01301d947..000000000 --- a/apps/cockpit/src/components/prompt-drawer/prompt-drawer.spec.tsx +++ /dev/null @@ -1,69 +0,0 @@ -/** @vitest-environment jsdom */ -import React from 'react'; -import { act } from 'react'; -import { createRoot } from 'react-dom/client'; -import { afterEach, describe, expect, it } from 'vitest'; -import { CockpitShell } from '../cockpit-shell'; -import { getCockpitPageModel } from '../../lib/cockpit-page'; - -describe('prompt drawer shell behavior', () => { - let container: HTMLDivElement | undefined; - let root: ReturnType | undefined; - - afterEach(() => { - act(() => { - root?.unmount(); - }); - container?.remove(); - }); - - it('opens prompt assets in a secondary slide-over and preserves the active mode when it closes', () => { - const model = getCockpitPageModel(); - container = document.createElement('div'); - document.body.append(container); - root = createRoot(container); - - act(() => { - root.render( - - ); - }); - - const codeButton = Array.from(container.querySelectorAll('button')).find( - (button) => button.textContent === 'Code' - ); - - act(() => { - codeButton?.click(); - }); - - expect(container.textContent).toContain('Code'); - expect(container.textContent).toContain('page.tsx'); - - const openPromptButton = 
Array.from(container.querySelectorAll('button')).find( - (button) => button.textContent === 'Open prompt assets' - ); - - act(() => { - openPromptButton?.click(); - }); - - expect(container.querySelector('[aria-label="Prompt drawer"]')).not.toBeNull(); - expect(container.textContent).toContain('streaming.md'); - - const closeButton = Array.from(container.querySelectorAll('button')).find( - (button) => button.textContent === 'Close' - ); - - act(() => { - closeButton?.click(); - }); - - expect(container.querySelector('[aria-label="Prompt drawer"]')).toBeNull(); - expect(container.textContent).toContain('page.tsx'); - }); -}); diff --git a/apps/cockpit/src/components/prompt-drawer/prompt-drawer.tsx b/apps/cockpit/src/components/prompt-drawer/prompt-drawer.tsx deleted file mode 100644 index e4bc27039..000000000 --- a/apps/cockpit/src/components/prompt-drawer/prompt-drawer.tsx +++ /dev/null @@ -1,78 +0,0 @@ -'use client'; - -import React, { useEffect, useState } from 'react'; - -interface PromptDrawerProps { - isOpen: boolean; - entryTitle: string; - paths: readonly string[]; - onClose: () => void; -} - -const getPromptLabel = (path: string): string => path.split('/').pop() ?? path; - -export function PromptDrawer({ - isOpen, - entryTitle, - paths, - onClose, -}: PromptDrawerProps) { - const [activePath, setActivePath] = useState(paths[0] ?? ''); - - useEffect(() => { - if (!paths.includes(activePath)) { - setActivePath(paths[0] ?? ''); - } - }, [activePath, paths]); - - if (!isOpen) { - return null; - } - - const resolvedActivePath = paths.includes(activePath) ? activePath : (paths[0] ?? 
''); - - return ( - - ); -} diff --git a/apps/cockpit/src/components/prompt-pane/prompt-pane.tsx b/apps/cockpit/src/components/prompt-pane/prompt-pane.tsx deleted file mode 100644 index 79fcf94f6..000000000 --- a/apps/cockpit/src/components/prompt-pane/prompt-pane.tsx +++ /dev/null @@ -1,18 +0,0 @@ -import React from 'react'; - -interface PromptPaneProps { - paths: string[]; -} - -export function PromptPane({ paths }: PromptPaneProps) { - return ( -
    -

    Prompts

    -
      - {paths.map((path) => ( -
    • {path}
    • - ))} -
    -
    - ); -} diff --git a/apps/cockpit/src/components/run-mode/run-mode.spec.tsx b/apps/cockpit/src/components/run-mode/run-mode.spec.tsx index 8e4ea7680..4189a4cb7 100644 --- a/apps/cockpit/src/components/run-mode/run-mode.spec.tsx +++ b/apps/cockpit/src/components/run-mode/run-mode.spec.tsx @@ -4,22 +4,20 @@ import { describe, expect, it } from 'vitest'; import { RunMode } from './run-mode'; describe('RunMode', () => { - it('renders the live example surface and supporting implementation context', () => { + it('renders an iframe when runtimeUrl is provided', () => { const html = renderToStaticMarkup( - + ); + expect(html).toContain(' { + const html = renderToStaticMarkup( + + ); + expect(html).not.toContain(' -
    -

    Run

    -

    Interactive example

    -

    Open the working surface first, then move into code or docs as you need detail.

    -
    -

    {entryTitle}

    -

    Live example surface ready.

    -
    -
    +export function RunMode({ entryTitle, runtimeUrl }: RunModeProps) { + if (!runtimeUrl) { + return ( +
    +

    No runtime available. Start the local dev server to preview.

    +
    + ); + } - + return ( +
    +