@for (key of childKeys(); track key) {
}
@@ -18,4 +18,11 @@ import { RenderElementComponent } from '@cacheplane/render';
export class ContainerComponent {
readonly childKeys = input<string[]>([]);
readonly spec = input.required<Spec>();
+ readonly direction = input<'row' | 'column'>('column');
+
+ readonly layoutClass = computed(() =>
+ this.direction() === 'row'
+ ? 'flex flex-row flex-wrap gap-3'
+ : 'flex flex-col gap-3'
+ );
}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/dashboard-grid.component.ts b/cockpit/chat/generative-ui/angular/src/app/views/dashboard-grid.component.ts
new file mode 100644
index 000000000..d6b8c92c0
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/dashboard-grid.component.ts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0
+import { Component, input } from '@angular/core';
+import type { Spec } from '@json-render/core';
+import { RenderElementComponent } from '@cacheplane/render';
+
+@Component({
+ selector: 'app-dashboard-grid',
+ standalone: true,
+ imports: [RenderElementComponent],
+ template: `
+
+ @for (key of childKeys(); track key) {
+
+ }
+
+ `,
+})
+export class DashboardGridComponent {
+ readonly childKeys = input<string[]>([]);
+ readonly spec = input.required<Spec>();
+}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.spec.ts b/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.spec.ts
new file mode 100644
index 000000000..e43382127
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.spec.ts
@@ -0,0 +1,54 @@
+import { ComponentFixture, TestBed } from '@angular/core/testing';
+import { DataGridComponent } from './data-grid.component';
+
+describe('DataGridComponent', () => {
+ let fixture: ComponentFixture<DataGridComponent>;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [DataGridComponent],
+ }).compileComponents();
+ fixture = TestBed.createComponent(DataGridComponent);
+ });
+
+ it('renders skeleton rows when rows is null', () => {
+ fixture.componentRef.setInput('title', 'Churned');
+ fixture.componentRef.setInput('rows', null);
+ fixture.componentRef.setInput('columns', ['name', 'plan']);
+ fixture.detectChanges();
+ const el = fixture.nativeElement as HTMLElement;
+ const skeletonRows = el.querySelectorAll('.skeleton-row');
+ expect(skeletonRows.length).toBeGreaterThanOrEqual(3);
+ });
+
+ it('renders correct number of data rows', () => {
+ const rows = [
+ { name: 'Acme', plan: 'pro', mrr_lost: 450 },
+ { name: 'Widget', plan: 'starter', mrr_lost: 120 },
+ ];
+ fixture.componentRef.setInput('title', 'Churned');
+ fixture.componentRef.setInput('rows', rows);
+ fixture.componentRef.setInput('columns', ['name', 'plan', 'mrr_lost']);
+ fixture.detectChanges();
+ const tbody = fixture.nativeElement.querySelector('tbody');
+ expect(tbody.querySelectorAll('tr').length).toBe(2);
+ });
+
+ it('renders title-cased column headers', () => {
+ fixture.componentRef.setInput('title', 'Churned');
+ fixture.componentRef.setInput('rows', [{ name: 'Acme', mrr_lost: 450 }]);
+ fixture.componentRef.setInput('columns', ['name', 'mrr_lost']);
+ fixture.detectChanges();
+ const headers = fixture.nativeElement.querySelectorAll('th');
+ expect(headers[0].textContent.trim()).toBe('Name');
+ expect(headers[1].textContent.trim()).toBe('MRR Lost');
+ });
+
+ it('renders title', () => {
+ fixture.componentRef.setInput('title', 'Recently Churned');
+ fixture.componentRef.setInput('rows', []);
+ fixture.componentRef.setInput('columns', ['name']);
+ fixture.detectChanges();
+ expect(fixture.nativeElement.textContent).toContain('Recently Churned');
+ });
+});
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.ts b/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.ts
new file mode 100644
index 000000000..af8222c9b
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/data-grid.component.ts
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0
+import { Component, computed, input } from '@angular/core';
+
+@Component({
+ selector: 'app-data-grid',
+ standalone: true,
+ template: `
+
+
{{ title() }}
+ @if (isSkeleton()) {
+ @for (i of skeletonRows; track i) {
+
+ }
+ } @else {
+
+
+
+ @for (col of formattedColumns(); track col.key) {
+ | {{ col.label }} |
+ }
+
+
+
+ @for (row of rows(); track $index) {
+
+ @for (col of formattedColumns(); track col.key) {
+ | {{ row[col.key] }} |
+ }
+
+ }
+
+
+ }
+
+ `,
+ styleUrls: ['./skeleton.css'],
+})
+export class DataGridComponent {
+ readonly title = input('');
+ readonly rows = input<Record<string, unknown>[] | null>(null);
+ readonly columns = input<string[]>([]);
+
+ readonly skeletonRows = [0, 1, 2, 3];
+
+ readonly isSkeleton = computed(() => this.rows() == null);
+
+ readonly formattedColumns = computed(() =>
+ this.columns().map(key => ({
+ key,
+ label: key
+ .split('_')
+ .map(word =>
+ word.length <= 3
+ ? word.toUpperCase()
+ : word.charAt(0).toUpperCase() + word.slice(1)
+ )
+ .join(' '),
+ }))
+ );
+}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.spec.ts b/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.spec.ts
new file mode 100644
index 000000000..c2e844039
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.spec.ts
@@ -0,0 +1,50 @@
+import { ComponentFixture, TestBed } from '@angular/core/testing';
+import { LineChartComponent } from './line-chart.component';
+
+describe('LineChartComponent', () => {
+ let fixture: ComponentFixture<LineChartComponent>;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [LineChartComponent],
+ }).compileComponents();
+ fixture = TestBed.createComponent(LineChartComponent);
+ });
+
+ it('renders skeleton when data is null', () => {
+ fixture.componentRef.setInput('title', 'MRR Trend');
+ fixture.componentRef.setInput('data', null);
+ fixture.componentRef.setInput('xKey', 'month');
+ fixture.componentRef.setInput('yKey', 'mrr');
+ fixture.detectChanges();
+ const el = fixture.nativeElement as HTMLElement;
+ expect(el.querySelector('.skeleton-chart')).toBeTruthy();
+ expect(el.querySelector('svg')).toBeFalsy();
+ });
+
+ it('renders SVG with correct number of data points', () => {
+ const data = [
+ { month: '2026-01', mrr: 37000 },
+ { month: '2026-02', mrr: 38500 },
+ { month: '2026-03', mrr: 40200 },
+ ];
+ fixture.componentRef.setInput('title', 'MRR Trend');
+ fixture.componentRef.setInput('data', data);
+ fixture.componentRef.setInput('xKey', 'month');
+ fixture.componentRef.setInput('yKey', 'mrr');
+ fixture.detectChanges();
+ const svg = fixture.nativeElement.querySelector('svg');
+ expect(svg).toBeTruthy();
+ const circles = svg.querySelectorAll('circle');
+ expect(circles.length).toBe(3);
+ });
+
+ it('renders title', () => {
+ fixture.componentRef.setInput('title', 'MRR Trend');
+ fixture.componentRef.setInput('data', [{ month: '2026-01', mrr: 37000 }]);
+ fixture.componentRef.setInput('xKey', 'month');
+ fixture.componentRef.setInput('yKey', 'mrr');
+ fixture.detectChanges();
+ expect(fixture.nativeElement.textContent).toContain('MRR Trend');
+ });
+});
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.ts b/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.ts
new file mode 100644
index 000000000..93127b6e2
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/line-chart.component.ts
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0
+import { Component, computed, input } from '@angular/core';
+
+@Component({
+ selector: 'app-line-chart',
+ standalone: true,
+ template: `
+
+
{{ title() }}
+ @if (isSkeleton()) {
+
+ } @else {
+
+ }
+
+ `,
+ styleUrls: ['./skeleton.css'],
+})
+export class LineChartComponent {
+ readonly title = input('');
+ readonly data = input<Record<string, unknown>[] | null>(null);
+ readonly xKey = input('');
+ readonly yKey = input('');
+
+ readonly width = 400;
+ readonly height = 200;
+ readonly padding = { top: 20, right: 20, bottom: 30, left: 50 };
+
+ readonly isSkeleton = computed(() => this.data() == null);
+
+ readonly points = computed(() => {
+ const d = this.data();
+ if (!d || d.length === 0) return [];
+ const xk = this.xKey();
+ const yk = this.yKey();
+ const values = d.map(item => Number(item[yk]) || 0);
+ const yMin = Math.min(...values);
+ const yMax = Math.max(...values);
+ const yRange = yMax - yMin || 1;
+ const plotW = this.width - this.padding.left - this.padding.right;
+ const plotH = this.height - this.padding.top - this.padding.bottom;
+
+ return d.map((item, i) => ({
+ x: this.padding.left + (d.length > 1 ? (i / (d.length - 1)) * plotW : plotW / 2),
+ y: this.padding.top + plotH - ((Number(item[yk]) || 0) - yMin) / yRange * plotH,
+ label: String(item[xk] ?? ''),
+ }));
+ });
+
+ readonly polylinePoints = computed(() =>
+ this.points().map(p => `${p.x},${p.y}`).join(' ')
+ );
+
+ readonly xLabels = computed(() => {
+ const pts = this.points();
+ if (pts.length <= 6) return pts;
+ const step = Math.ceil(pts.length / 6);
+ return pts.filter((_, i) => i % step === 0 || i === pts.length - 1);
+ });
+
+ readonly yGridLines = computed(() => {
+ const d = this.data();
+ if (!d || d.length === 0) return [];
+ const yk = this.yKey();
+ const values = d.map(item => Number(item[yk]) || 0);
+ const yMin = Math.min(...values);
+ const yMax = Math.max(...values);
+ const plotH = this.height - this.padding.top - this.padding.bottom;
+ const mid = (yMin + yMax) / 2;
+ return [
+ { value: yMax, y: this.padding.top, label: this.formatNumber(yMax) },
+ { value: mid, y: this.padding.top + plotH / 2, label: this.formatNumber(mid) },
+ { value: yMin, y: this.padding.top + plotH, label: this.formatNumber(yMin) },
+ ];
+ });
+
+ private formatNumber(n: number): string {
+ if (n >= 1000) return `${(n / 1000).toFixed(0)}k`;
+ return n.toFixed(0);
+ }
+}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/skeleton.css b/cockpit/chat/generative-ui/angular/src/app/views/skeleton.css
new file mode 100644
index 000000000..1bac950da
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/skeleton.css
@@ -0,0 +1,32 @@
+.skeleton {
+ background: linear-gradient(90deg, rgba(255,255,255,0.05) 25%, rgba(255,255,255,0.1) 50%, rgba(255,255,255,0.05) 75%);
+ background-size: 200% 100%;
+ animation: shimmer 1.5s infinite;
+ border-radius: 4px;
+}
+
+@keyframes shimmer {
+ 0% { background-position: 200% 0; }
+ 100% { background-position: -200% 0; }
+}
+
+.skeleton-text {
+ height: 1em;
+ width: 60%;
+}
+
+.skeleton-value {
+ height: 1.5em;
+ width: 40%;
+}
+
+.skeleton-chart {
+ height: 200px;
+ width: 100%;
+}
+
+.skeleton-row {
+ height: 2em;
+ width: 100%;
+ margin-bottom: 4px;
+}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.spec.ts b/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.spec.ts
new file mode 100644
index 000000000..021e5667d
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.spec.ts
@@ -0,0 +1,51 @@
+import { ComponentFixture, TestBed } from '@angular/core/testing';
+import { StatCardComponent } from './stat-card.component';
+
+describe('StatCardComponent', () => {
+ let fixture: ComponentFixture<StatCardComponent>;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [StatCardComponent],
+ }).compileComponents();
+ fixture = TestBed.createComponent(StatCardComponent);
+ });
+
+ it('renders skeleton when value is null', () => {
+ fixture.componentRef.setInput('label', 'MRR');
+ fixture.componentRef.setInput('value', null);
+ fixture.detectChanges();
+ const el = fixture.nativeElement as HTMLElement;
+ expect(el.querySelector('.skeleton')).toBeTruthy();
+ expect(el.textContent).toContain('MRR');
+ });
+
+ it('renders value and delta when provided', () => {
+ fixture.componentRef.setInput('label', 'MRR');
+ fixture.componentRef.setInput('value', 42000);
+ fixture.componentRef.setInput('delta', '+8.2%');
+ fixture.detectChanges();
+ const el = fixture.nativeElement as HTMLElement;
+ expect(el.querySelector('.skeleton')).toBeFalsy();
+ expect(el.textContent).toContain('42,000');
+ expect(el.textContent).toContain('+8.2%');
+ });
+
+ it('applies positive color to positive delta', () => {
+ fixture.componentRef.setInput('label', 'MRR');
+ fixture.componentRef.setInput('value', 42000);
+ fixture.componentRef.setInput('delta', '+8.2%');
+ fixture.detectChanges();
+ const deltaEl = fixture.nativeElement.querySelector('[data-testid="delta"]');
+ expect(deltaEl?.classList.contains('text-emerald-400')).toBe(true);
+ });
+
+ it('applies negative color to negative delta', () => {
+ fixture.componentRef.setInput('label', 'Churn');
+ fixture.componentRef.setInput('value', '3.2%');
+ fixture.componentRef.setInput('delta', '-0.4%');
+ fixture.detectChanges();
+ const deltaEl = fixture.nativeElement.querySelector('[data-testid="delta"]');
+ expect(deltaEl?.classList.contains('text-red-400')).toBe(true);
+ });
+});
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.ts b/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.ts
index 0d5407cb9..e865fc33b 100644
--- a/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.ts
+++ b/cockpit/chat/generative-ui/angular/src/app/views/stat-card.component.ts
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0
-import { Component, input } from '@angular/core';
+import { Component, computed, input } from '@angular/core';
@Component({
selector: 'app-stat-card',
@@ -7,11 +7,38 @@ import { Component, input } from '@angular/core';
template: `
{{ label() }}
-
{{ value() }}
+ @if (isSkeleton()) {
+
+
+ } @else {
+
{{ formattedValue() }}
+ @if (delta()) {
+
{{ delta() }}
+ }
+ }
`,
+ styleUrls: ['./skeleton.css'],
})
export class StatCardComponent {
readonly label = input('');
- readonly value = input('');
+ readonly value = input<string | number | null>(null);
+ readonly delta = input<string | null>(null);
+
+ readonly isSkeleton = computed(() => this.value() == null);
+
+ readonly formattedValue = computed(() => {
+ const v = this.value();
+ if (v == null) return '';
+ if (typeof v === 'number') return v.toLocaleString();
+ return String(v);
+ });
+
+ readonly deltaColor = computed(() => {
+ const d = this.delta();
+ if (!d) return '';
+ if (d.startsWith('+')) return 'text-emerald-400';
+ if (d.startsWith('-')) return 'text-red-400';
+ return 'text-white/60';
+ });
}
diff --git a/cockpit/chat/generative-ui/angular/src/app/views/weather-card.component.ts b/cockpit/chat/generative-ui/angular/src/app/views/weather-card.component.ts
deleted file mode 100644
index 9114640ec..000000000
--- a/cockpit/chat/generative-ui/angular/src/app/views/weather-card.component.ts
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: PolyForm-Noncommercial-1.0.0
-import { Component, input } from '@angular/core';
-
-@Component({
- selector: 'app-weather-card',
- standalone: true,
- template: `
-
-
-
{{ city() }}
- {{ weatherEmoji() }}
-
-
{{ temperature() }}°F
-
{{ condition() }}
-
- `,
-})
-export class WeatherCardComponent {
- readonly city = input('');
- readonly temperature = input(0);
- readonly condition = input('');
-
- weatherEmoji(): string {
- const c = this.condition().toLowerCase();
- if (c.includes('sun') || c.includes('clear')) return '☀️';
- if (c.includes('cloud') || c.includes('overcast')) return '☁️';
- if (c.includes('rain')) return '🌧️';
- if (c.includes('snow')) return '❄️';
- if (c.includes('storm') || c.includes('thunder')) return '⛈️';
- return '🌤️';
- }
-}
diff --git a/cockpit/chat/generative-ui/angular/tsconfig.app.json b/cockpit/chat/generative-ui/angular/tsconfig.app.json
index 72c01e364..05072383f 100644
--- a/cockpit/chat/generative-ui/angular/tsconfig.app.json
+++ b/cockpit/chat/generative-ui/angular/tsconfig.app.json
@@ -5,5 +5,6 @@
"types": []
},
"files": ["src/main.ts"],
- "include": ["src/**/*.d.ts", "src/**/*.ts"]
+ "include": ["src/**/*.d.ts", "src/**/*.ts"],
+ "exclude": ["src/**/*.spec.ts"]
}
diff --git a/cockpit/chat/generative-ui/angular/tsconfig.json b/cockpit/chat/generative-ui/angular/tsconfig.json
index 3fd970371..af65cf0a6 100644
--- a/cockpit/chat/generative-ui/angular/tsconfig.json
+++ b/cockpit/chat/generative-ui/angular/tsconfig.json
@@ -19,6 +19,7 @@
"files": [],
"include": [],
"references": [
- { "path": "./tsconfig.app.json" }
+ { "path": "./tsconfig.app.json" },
+ { "path": "./tsconfig.spec.json" }
]
}
diff --git a/cockpit/chat/generative-ui/angular/tsconfig.spec.json b/cockpit/chat/generative-ui/angular/tsconfig.spec.json
new file mode 100644
index 000000000..676863a53
--- /dev/null
+++ b/cockpit/chat/generative-ui/angular/tsconfig.spec.json
@@ -0,0 +1,8 @@
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "outDir": "../../../../dist/out-tsc",
+ "types": ["vitest/globals"]
+ },
+ "include": ["src/**/*.ts", "src/**/*.d.ts"]
+}
diff --git a/cockpit/chat/generative-ui/python/prompts/dashboard.md b/cockpit/chat/generative-ui/python/prompts/dashboard.md
new file mode 100644
index 000000000..288b521f2
--- /dev/null
+++ b/cockpit/chat/generative-ui/python/prompts/dashboard.md
@@ -0,0 +1,74 @@
+# SaaS Metrics Dashboard Agent
+
+You are a dashboard agent that builds interactive SaaS metrics dashboards using a JSON render spec format. You have access to tools that query SaaS metrics data.
+
+## Your Behavior
+
+### First message (no existing dashboard)
+
+1. Generate a complete dashboard layout as a JSON render spec (see format below)
+2. Call ALL four data tools to populate the dashboard
+3. After the tools return, provide a brief conversational summary
+
+### Follow-up messages (dashboard already exists)
+
+Categorize the user's request:
+
+- **Data change** (e.g., "show last 6 months", "filter to enterprise only"): Call only the relevant tool(s) with updated parameters. Do NOT regenerate the spec. Just respond conversationally confirming the update.
+- **Structural change** (e.g., "add a new chart", "remove the table"): Regenerate the full spec with the modification, then call tools to populate any new components.
+- **Question about data** (e.g., "why did churn spike?"): Respond conversationally in plain text. Do NOT output JSON or call tools.
+
+## JSON Render Spec Format
+
+Your spec response MUST be raw JSON only — no markdown, no code fences, no surrounding text.
+
+```
+{
+ "elements": { [key: string]: Element },
+ "root": string
+}
+```
+
+An Element has:
+```
+{
+ "type": string,
+ "props": { ... },
+ "children?": string[]
+}
+```
+
+### Props with State Bindings
+
+Use `{ "$state": "/json/pointer/path" }` for props that will be populated by tool results. The dashboard renders skeleton placeholders until the data arrives.
+
+Example: `"value": { "$state": "/mrr/value" }` — this prop will be populated when the `/mrr/value` state path receives data.
+
+## Available Component Types
+
+| Type | Props | Children | Description |
+|------|-------|----------|-------------|
+| `dashboard_grid` | *(none)* | Yes | Top-level vertical layout with section spacing |
+| `container` | `direction` ("row" or "column") | Yes | Flex layout container |
+| `stat_card` | `label` (string), `value` ($state), `delta` ($state) | No | Metric summary card |
+| `line_chart` | `title` (string), `data` ($state array), `xKey` (string), `yKey` (string) | No | SVG line chart |
+| `bar_chart` | `title` (string), `data` ($state array), `labelKey` (string), `valueKey` (string) | No | SVG bar chart |
+| `data_grid` | `title` (string), `rows` ($state array), `columns` (string[]) | No | Data table |
+
+## State Path Conventions
+
+Use these state paths to match what the tools populate:
+
+- `/mrr/value`, `/mrr/delta`, `/mrr/period` — from query_mrr
+- `/subscribers/total`, `/subscribers/delta` — from query_mrr
+- `/churn/rate`, `/churn/delta` — from query_mrr
+- `/arpu/value`, `/arpu/delta` — from query_mrr
+- `/mrr_trend` — array from query_mrr_trend
+- `/subscribers_by_plan` — array from query_subscribers_by_plan
+- `/churned_accounts` — array from query_churned_accounts
+
+## Example Spec
+
+For "show me the dashboard":
+
+{"elements":{"root":{"type":"dashboard_grid","children":["stats_row","charts_row","table_section"]},"stats_row":{"type":"container","props":{"direction":"row"},"children":["mrr_card","subscribers_card","churn_card","arpu_card"]},"mrr_card":{"type":"stat_card","props":{"label":"MRR","value":{"$state":"/mrr/value"},"delta":{"$state":"/mrr/delta"}}},"subscribers_card":{"type":"stat_card","props":{"label":"Active Subscribers","value":{"$state":"/subscribers/total"},"delta":{"$state":"/subscribers/delta"}}},"churn_card":{"type":"stat_card","props":{"label":"Churn Rate","value":{"$state":"/churn/rate"},"delta":{"$state":"/churn/delta"}}},"arpu_card":{"type":"stat_card","props":{"label":"ARPU","value":{"$state":"/arpu/value"},"delta":{"$state":"/arpu/delta"}}},"charts_row":{"type":"container","props":{"direction":"row"},"children":["trend_chart","plan_chart"]},"trend_chart":{"type":"line_chart","props":{"title":"MRR Trend","data":{"$state":"/mrr_trend"},"xKey":"month","yKey":"mrr"}},"plan_chart":{"type":"bar_chart","props":{"title":"Subscribers by Plan","data":{"$state":"/subscribers_by_plan"},"labelKey":"plan","valueKey":"count"}},"table_section":{"type":"data_grid","props":{"title":"Recently Churned","rows":{"$state":"/churned_accounts"},"columns":["name","plan","mrr_lost","date"]}}},"root":"root"}
diff --git a/cockpit/chat/generative-ui/python/prompts/generative-ui.md b/cockpit/chat/generative-ui/python/prompts/generative-ui.md
deleted file mode 100644
index 61afba57f..000000000
--- a/cockpit/chat/generative-ui/python/prompts/generative-ui.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Generative UI Assistant
-
-You are a generative-UI assistant. You MUST respond with **raw JSON only** — no markdown, no code fences, no explanation text. Your entire response must be a single valid JSON object following the Spec format below.
-
-## Spec Schema
-
-A **Spec** is a JSON object with two required top-level keys:
-
-```
-{
- "elements": { [key: string]: Element },
- "root": string
-}
-```
-
-An **Element** has:
-
-```
-{
- "type": string, // component type name
- "props": { ... }, // component-specific properties
- "children?": string[] // ordered list of element keys (references into `elements`)
-}
-```
-
-## Available Component Types
-
-| Type | Props | Children |
-|-----------------|--------------------------------------------------------------|----------|
-| `container` | *(none)* | Yes |
-| `weather_card` | `city` (string), `temperature` (number), `condition` (string)| No |
-| `stat_card` | `label` (string), `value` (string) | No |
-
-## Rules
-
-1. Respond ONLY with valid JSON. No markdown. No code fences. No surrounding text.
-2. Every element referenced in a `children` array must exist as a key in `elements`.
-3. `root` must reference a key that exists in `elements`.
-4. Use `container` to group multiple cards together.
-5. Choose component types that best match the user's request.
-
-## Example Response
-
-If the user asks "What's the weather in Chicago and New York?", respond exactly like:
-
-{"elements":{"root":{"type":"container","props":{},"children":["chicago","nyc"]},"chicago":{"type":"weather_card","props":{"city":"Chicago","temperature":45,"condition":"Partly Cloudy"}},"nyc":{"type":"weather_card","props":{"city":"New York","temperature":52,"condition":"Sunny"}}},"root":"root"}
diff --git a/cockpit/chat/generative-ui/python/src/dashboard_tools.py b/cockpit/chat/generative-ui/python/src/dashboard_tools.py
new file mode 100644
index 000000000..827b0c46e
--- /dev/null
+++ b/cockpit/chat/generative-ui/python/src/dashboard_tools.py
@@ -0,0 +1,97 @@
+"""Mock SaaS metrics data tools for the generative-ui dashboard example."""
+
+from langchain_core.tools import tool
+
+# ── Hardcoded SaaS dataset ──────────────────────────────────────────────────
+
+_MRR_TREND = [
+ {"month": "2025-05", "mrr": 28000},
+ {"month": "2025-06", "mrr": 29500},
+ {"month": "2025-07", "mrr": 30200},
+ {"month": "2025-08", "mrr": 31800},
+ {"month": "2025-09", "mrr": 32500},
+ {"month": "2025-10", "mrr": 33000},
+ {"month": "2025-11", "mrr": 34200},
+ {"month": "2025-12", "mrr": 35800},
+ {"month": "2026-01", "mrr": 37000},
+ {"month": "2026-02", "mrr": 38500},
+ {"month": "2026-03", "mrr": 40200},
+ {"month": "2026-04", "mrr": 42000},
+]
+
+_SUBSCRIBERS_BY_PLAN = [
+ {"plan": "free", "count": 1200},
+ {"plan": "starter", "count": 850},
+ {"plan": "pro", "count": 420},
+ {"plan": "enterprise", "count": 95},
+]
+
+_CHURNED_ACCOUNTS = [
+ {"name": "Acme Corp", "plan": "pro", "mrr_lost": 450, "date": "2026-04-01"},
+ {"name": "Widgetly", "plan": "starter", "mrr_lost": 120, "date": "2026-03-28"},
+ {"name": "DataPipe Inc", "plan": "enterprise", "mrr_lost": 2400, "date": "2026-03-25"},
+ {"name": "NovaTech", "plan": "pro", "mrr_lost": 450, "date": "2026-03-20"},
+ {"name": "CloudSync", "plan": "starter", "mrr_lost": 120, "date": "2026-03-15"},
+ {"name": "ByteForge", "plan": "pro", "mrr_lost": 450, "date": "2026-03-10"},
+ {"name": "Quantum Labs", "plan": "enterprise", "mrr_lost": 2400, "date": "2026-03-05"},
+ {"name": "FlowState", "plan": "starter", "mrr_lost": 120, "date": "2026-02-28"},
+ {"name": "CipherNet", "plan": "pro", "mrr_lost": 450, "date": "2026-02-20"},
+ {"name": "Luminary AI", "plan": "starter", "mrr_lost": 120, "date": "2026-02-15"},
+]
+
+
+@tool
+def query_mrr() -> dict:
+ """Get current Monthly Recurring Revenue (MRR) with month-over-month delta."""
+ current = _MRR_TREND[-1]["mrr"]
+ previous = _MRR_TREND[-2]["mrr"]
+ delta_pct = ((current - previous) / previous) * 100
+ total_subs = sum(p["count"] for p in _SUBSCRIBERS_BY_PLAN)
+ arpu = round(current / total_subs, 2)
+ return {
+ "mrr": {"value": current, "delta": f"+{delta_pct:.1f}%", "period": "month"},
+ "subscribers": {"total": total_subs, "delta": "+42"},
+ "churn": {"rate": "3.2%", "delta": "-0.4%"},
+ "arpu": {"value": f"${arpu:.2f}", "delta": "+$1.20"},
+ }
+
+
+@tool
+def query_subscribers_by_plan(plans: list[str] | None = None) -> list[dict]:
+ """Get subscriber counts broken down by plan tier.
+
+ Args:
+ plans: Optional list of plan names to filter by (e.g., ["pro", "enterprise"]).
+ Returns all plans if not specified.
+ """
+ if plans:
+ return [p for p in _SUBSCRIBERS_BY_PLAN if p["plan"] in plans]
+ return _SUBSCRIBERS_BY_PLAN
+
+
+@tool
+def query_mrr_trend(months: int = 12) -> list[dict]:
+ """Get MRR trend over time.
+
+ Args:
+ months: Number of months to return (default 12). Valid values: 3, 6, 12, 24.
+ """
+ months = min(months, len(_MRR_TREND))
+ return _MRR_TREND[-months:]
+
+
+@tool
+def query_churned_accounts(limit: int = 5, plan: str | None = None) -> list[dict]:
+ """Get recently churned accounts.
+
+ Args:
+ limit: Maximum number of accounts to return (default 5).
+ plan: Optional plan name to filter by (e.g., "enterprise").
+ """
+ filtered = _CHURNED_ACCOUNTS
+ if plan:
+ filtered = [a for a in filtered if a["plan"] == plan]
+ return filtered[:limit]
+
+
+ALL_TOOLS = [query_mrr, query_subscribers_by_plan, query_mrr_trend, query_churned_accounts]
diff --git a/cockpit/chat/generative-ui/python/src/graph.py b/cockpit/chat/generative-ui/python/src/graph.py
index bdeedce3d..e5e6725e3 100644
--- a/cockpit/chat/generative-ui/python/src/graph.py
+++ b/cockpit/chat/generative-ui/python/src/graph.py
@@ -1,38 +1,135 @@
-"""
-Chat Generative UI Graph
+"""Multi-node LangGraph graph for the SaaS metrics dashboard.
-A LangGraph StateGraph that generates responses containing JSON render
-spec objects. The Angular frontend detects these specs in chat messages
-and renders them as live UI components using ChatGenerativeUiComponent.
+Flow:
+ router → generate_shell (first turn) or plan_tools (follow-up)
+ → call_tools → emit_state → respond
"""
+import json
from pathlib import Path
-from langgraph.graph import StateGraph, MessagesState, END
-from langchain_openai import ChatOpenAI
+from typing import Literal
+
from langchain_core.messages import SystemMessage
+from langchain_openai import ChatOpenAI
+from langgraph.graph import StateGraph, MessagesState, END
+from langgraph.prebuilt import ToolNode
+from langgraph.types import Command
+
+from src.dashboard_tools import ALL_TOOLS
+
+_PROMPT = (Path(__file__).parent.parent / "prompts" / "dashboard.md").read_text()
+
+_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, streaming=True)
+_llm_with_tools = _llm.bind_tools(ALL_TOOLS)
+
+
+class DashboardState(MessagesState):
+ """Extended state that persists the dashboard spec across turns."""
+ dashboard_spec: str | None
+
+
+def router(state: DashboardState) -> Command[Literal["generate_shell", "plan_tools"]]:
+ """Route based on whether a dashboard spec already exists."""
+ if state.get("dashboard_spec") is None:
+ return Command(goto="generate_shell")
+ return Command(goto="plan_tools")
+
+
+async def generate_shell(state: DashboardState) -> DashboardState:
+ """Generate the dashboard shell spec on first turn."""
+ messages = [SystemMessage(content=_PROMPT)] + state["messages"]
+ response = await _llm.ainvoke(messages)
+ spec_text = response.content if isinstance(response.content, str) else ""
+ return {
+ "messages": [response],
+ "dashboard_spec": spec_text,
+ }
+
+
+async def plan_tools(state: DashboardState) -> DashboardState:
+ """On follow-up turns, let the LLM decide which tools to call."""
+ context = (
+ f"The current dashboard spec is:\n{state['dashboard_spec']}\n\n"
+ "Based on the user's message, decide which tools to call to update the dashboard data. "
+ "If the user asks a question about the data that doesn't need fresh data, just respond conversationally."
+ )
+ messages = [SystemMessage(content=_PROMPT + "\n\n" + context)] + state["messages"]
+ response = await _llm_with_tools.ainvoke(messages)
+ return {"messages": [response]}
+
+
+def should_call_tools(state: DashboardState) -> Literal["call_tools", "respond"]:
+ """Check if the last message has tool calls."""
+ last = state["messages"][-1]
+ if hasattr(last, "tool_calls") and last.tool_calls:
+ return "call_tools"
+ return "respond"
+
+
+async def emit_state(state: DashboardState) -> DashboardState:
+ """Emit state_update custom events from tool results."""
+ from langchain_core.callbacks import adispatch_custom_event
+
+ tool_results = {}
+ for msg in reversed(state["messages"]):
+ if msg.type == "tool":
+ try:
+ data = json.loads(msg.content) if isinstance(msg.content, str) else msg.content
+ except (json.JSONDecodeError, TypeError):
+ continue
+
+ if msg.name == "query_mrr":
+ for section_key, section_val in data.items():
+ if isinstance(section_val, dict):
+ for k, v in section_val.items():
+ tool_results[f"/{section_key}/{k}"] = v
+ elif msg.name == "query_subscribers_by_plan":
+ tool_results["/subscribers_by_plan"] = data
+ elif msg.name == "query_mrr_trend":
+ tool_results["/mrr_trend"] = data
+ elif msg.name == "query_churned_accounts":
+ tool_results["/churned_accounts"] = data
+ elif msg.type == "ai":
+ break
+
+ if tool_results:
+ await adispatch_custom_event("state_update", {"updates": tool_results})
+
+ return state
+
+
+async def respond(state: DashboardState) -> DashboardState:
+ """Generate a brief conversational summary after tools have run."""
+ last = state["messages"][-1]
+ if last.type == "ai" and not (hasattr(last, "tool_calls") and last.tool_calls):
+ return state
-PROMPTS_DIR = Path(__file__).parent.parent / "prompts"
+ messages = [
+ SystemMessage(content="Provide a brief (1-2 sentence) conversational summary of what you just did. Do NOT output JSON.")
+ ] + state["messages"]
+ response = await _llm.ainvoke(messages)
+ return {"messages": [response]}
-def build_generative_ui_graph():
- """
- Constructs an agent that includes JSON render specs in its responses,
- enabling dynamic UI generation within chat messages.
- """
- llm = ChatOpenAI(model="gpt-5-mini", streaming=True)
+_builder = StateGraph(DashboardState)
+_builder.add_node("router", router)
+_builder.add_node("generate_shell", generate_shell)
+_builder.add_node("plan_tools", plan_tools)
+_builder.add_node("call_tools", ToolNode(ALL_TOOLS))
+_builder.add_node("emit_state", emit_state)
+_builder.add_node("respond", respond)
- async def generate(state: MessagesState) -> dict:
- system_prompt = (PROMPTS_DIR / "generative-ui.md").read_text()
- messages = [SystemMessage(content=system_prompt)] + state["messages"]
- response = await llm.ainvoke(messages)
- return {"messages": [response]}
+_builder.set_entry_point("router")
- graph = StateGraph(MessagesState)
- graph.add_node("generate", generate)
- graph.set_entry_point("generate")
- graph.add_edge("generate", END)
+# After shell generation, go to plan_tools to call all data tools
+_builder.add_edge("generate_shell", "plan_tools")
- return graph.compile()
+# After plan_tools, check if we need to call tools
+_builder.add_conditional_edges("plan_tools", should_call_tools)
+# Tool calling flow
+_builder.add_edge("call_tools", "emit_state")
+_builder.add_edge("emit_state", "respond")
+_builder.add_edge("respond", END)
-graph = build_generative_ui_graph()
+graph = _builder.compile()
diff --git a/cockpit/langgraph/streaming/python/prompts/dashboard.md b/cockpit/langgraph/streaming/python/prompts/dashboard.md
new file mode 100644
index 000000000..288b521f2
--- /dev/null
+++ b/cockpit/langgraph/streaming/python/prompts/dashboard.md
@@ -0,0 +1,74 @@
+# SaaS Metrics Dashboard Agent
+
+You are a dashboard agent that builds interactive SaaS metrics dashboards using a JSON render spec format. You have access to tools that query SaaS metrics data.
+
+## Your Behavior
+
+### First message (no existing dashboard)
+
+1. Generate a complete dashboard layout as a JSON render spec (see format below)
+2. Call ALL four data tools to populate the dashboard
+3. After the tools return, provide a brief conversational summary
+
+### Follow-up messages (dashboard already exists)
+
+Categorize the user's request:
+
+- **Data change** (e.g., "show last 6 months", "filter to enterprise only"): Call only the relevant tool(s) with updated parameters. Do NOT regenerate the spec. Just respond conversationally confirming the update.
+- **Structural change** (e.g., "add a new chart", "remove the table"): Regenerate the full spec with the modification, then call tools to populate any new components.
+- **Question about data** (e.g., "why did churn spike?"): Respond conversationally in plain text. Do NOT output JSON or call tools.
+
+## JSON Render Spec Format
+
+Your spec response MUST be raw JSON only — no markdown, no code fences, no surrounding text.
+
+```
+{
+ "elements": { [key: string]: Element },
+ "root": string
+}
+```
+
+An Element has:
+```
+{
+ "type": string,
+ "props": { ... },
+ "children?": string[]
+}
+```
+
+### Props with State Bindings
+
+Use `{ "$state": "/json/pointer/path" }` for props that will be populated by tool results. The dashboard renders skeleton placeholders until the data arrives.
+
+Example: `"value": { "$state": "/mrr/value" }` — this prop will be populated when the `/mrr/value` state path receives data.
+
+## Available Component Types
+
+| Type | Props | Children | Description |
+|------|-------|----------|-------------|
+| `dashboard_grid` | *(none)* | Yes | Top-level vertical layout with section spacing |
+| `container` | `direction` ("row" or "column") | Yes | Flex layout container |
+| `stat_card` | `label` (string), `value` ($state), `delta` ($state) | No | Metric summary card |
+| `line_chart` | `title` (string), `data` ($state array), `xKey` (string), `yKey` (string) | No | SVG line chart |
+| `bar_chart` | `title` (string), `data` ($state array), `labelKey` (string), `valueKey` (string) | No | SVG bar chart |
+| `data_grid` | `title` (string), `rows` ($state array), `columns` (string[]) | No | Data table |
+
+## State Path Conventions
+
+Use these state paths to match what the tools populate:
+
+- `/mrr/value`, `/mrr/delta`, `/mrr/period` — from query_mrr
+- `/subscribers/total`, `/subscribers/delta` — from query_mrr
+- `/churn/rate`, `/churn/delta` — from query_mrr
+- `/arpu/value`, `/arpu/delta` — from query_mrr
+- `/mrr_trend` — array from query_mrr_trend
+- `/subscribers_by_plan` — array from query_subscribers_by_plan
+- `/churned_accounts` — array from query_churned_accounts
+
+## Example Spec
+
+For "show me the dashboard":
+
+{"elements":{"root":{"type":"dashboard_grid","children":["stats_row","charts_row","table_section"]},"stats_row":{"type":"container","props":{"direction":"row"},"children":["mrr_card","subscribers_card","churn_card","arpu_card"]},"mrr_card":{"type":"stat_card","props":{"label":"MRR","value":{"$state":"/mrr/value"},"delta":{"$state":"/mrr/delta"}}},"subscribers_card":{"type":"stat_card","props":{"label":"Active Subscribers","value":{"$state":"/subscribers/total"},"delta":{"$state":"/subscribers/delta"}}},"churn_card":{"type":"stat_card","props":{"label":"Churn Rate","value":{"$state":"/churn/rate"},"delta":{"$state":"/churn/delta"}}},"arpu_card":{"type":"stat_card","props":{"label":"ARPU","value":{"$state":"/arpu/value"},"delta":{"$state":"/arpu/delta"}}},"charts_row":{"type":"container","props":{"direction":"row"},"children":["trend_chart","plan_chart"]},"trend_chart":{"type":"line_chart","props":{"title":"MRR Trend","data":{"$state":"/mrr_trend"},"xKey":"month","yKey":"mrr"}},"plan_chart":{"type":"bar_chart","props":{"title":"Subscribers by Plan","data":{"$state":"/subscribers_by_plan"},"labelKey":"plan","valueKey":"count"}},"table_section":{"type":"data_grid","props":{"title":"Recently Churned","rows":{"$state":"/churned_accounts"},"columns":["name","plan","mrr_lost","date"]}}},"root":"root"}
diff --git a/cockpit/langgraph/streaming/python/src/chat_graphs.py b/cockpit/langgraph/streaming/python/src/chat_graphs.py
index 2b2ee7300..8dc482171 100644
--- a/cockpit/langgraph/streaming/python/src/chat_graphs.py
+++ b/cockpit/langgraph/streaming/python/src/chat_graphs.py
@@ -44,4 +44,4 @@ async def generate(state: MessagesState) -> dict:
c_timeline = _build_prompt_graph("timeline.md")
c_tool_calls = _build_prompt_graph("tool-calls.md")
c_subagents = _build_prompt_graph("subagents.md")
-generative_ui = _build_prompt_graph("generative-ui.md")
+from src.dashboard_graph import graph as generative_ui
diff --git a/cockpit/langgraph/streaming/python/src/dashboard_graph.py b/cockpit/langgraph/streaming/python/src/dashboard_graph.py
new file mode 100644
index 000000000..8294e6695
--- /dev/null
+++ b/cockpit/langgraph/streaming/python/src/dashboard_graph.py
@@ -0,0 +1,135 @@
+"""Multi-node LangGraph graph for the SaaS metrics dashboard.
+
+Flow:
+    router → generate_shell → plan_tools (first turn), or router → plan_tools (follow-up)
+    plan_tools → call_tools → emit_state → respond (or directly → respond when no tools are needed)
+"""
+
+import json
+from pathlib import Path
+from typing import Annotated, Literal
+
+from langchain_core.messages import AIMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+from langgraph.graph import StateGraph, MessagesState, END
+from langgraph.prebuilt import ToolNode
+from langgraph.types import Command
+
+from src.dashboard_tools import ALL_TOOLS
+
+_PROMPT = (Path(__file__).parent.parent / "prompts" / "dashboard.md").read_text()
+
+_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0, streaming=True)
+_llm_with_tools = _llm.bind_tools(ALL_TOOLS)
+
+
+class DashboardState(MessagesState):
+ """Extended state that persists the dashboard spec across turns."""
+ dashboard_spec: str | None
+
+
+def router(state: DashboardState) -> Command[Literal["generate_shell", "plan_tools"]]:
+ """Route based on whether a dashboard spec already exists."""
+ if state.get("dashboard_spec") is None:
+ return Command(goto="generate_shell")
+ return Command(goto="plan_tools")
+
+
+async def generate_shell(state: DashboardState) -> DashboardState:
+ """Generate the dashboard shell spec on first turn."""
+ messages = [SystemMessage(content=_PROMPT)] + state["messages"]
+ response = await _llm.ainvoke(messages)
+ spec_text = response.content if isinstance(response.content, str) else ""
+ return {
+ "messages": [response],
+ "dashboard_spec": spec_text,
+ }
+
+
+async def plan_tools(state: DashboardState) -> DashboardState:
+ """On follow-up turns, let the LLM decide which tools to call."""
+ context = (
+ f"The current dashboard spec is:\n{state['dashboard_spec']}\n\n"
+ "Based on the user's message, decide which tools to call to update the dashboard data. "
+ "If the user asks a question about the data that doesn't need fresh data, just respond conversationally."
+ )
+ messages = [SystemMessage(content=_PROMPT + "\n\n" + context)] + state["messages"]
+ response = await _llm_with_tools.ainvoke(messages)
+ return {"messages": [response]}
+
+
+def should_call_tools(state: DashboardState) -> Literal["call_tools", "respond"]:
+ """Check if the last message has tool calls."""
+ last = state["messages"][-1]
+ if hasattr(last, "tool_calls") and last.tool_calls:
+ return "call_tools"
+ return "respond"
+
+
+async def emit_state(state: DashboardState) -> DashboardState:
+ """Emit state_update custom events from tool results."""
+ from langchain_core.callbacks import adispatch_custom_event
+
+ tool_results = {}
+ for msg in reversed(state["messages"]):
+ if msg.type == "tool":
+ try:
+ data = json.loads(msg.content) if isinstance(msg.content, str) else msg.content
+ except (json.JSONDecodeError, TypeError):
+ continue
+
+ if msg.name == "query_mrr":
+ for section_key, section_val in data.items():
+ if isinstance(section_val, dict):
+ for k, v in section_val.items():
+ tool_results[f"/{section_key}/{k}"] = v
+ elif msg.name == "query_subscribers_by_plan":
+ tool_results["/subscribers_by_plan"] = data
+ elif msg.name == "query_mrr_trend":
+ tool_results["/mrr_trend"] = data
+ elif msg.name == "query_churned_accounts":
+ tool_results["/churned_accounts"] = data
+ elif msg.type == "ai":
+ break
+
+ if tool_results:
+ await adispatch_custom_event("state_update", {"updates": tool_results})
+
+ return state
+
+
+async def respond(state: DashboardState) -> DashboardState:
+ """Generate a brief conversational summary after tools have run."""
+ last = state["messages"][-1]
+ if last.type == "ai" and not (hasattr(last, "tool_calls") and last.tool_calls):
+ return state
+
+ messages = [
+ SystemMessage(content="Provide a brief (1-2 sentence) conversational summary of what you just did. Do NOT output JSON.")
+ ] + state["messages"]
+ response = await _llm.ainvoke(messages)
+ return {"messages": [response]}
+
+
+_builder = StateGraph(DashboardState)
+_builder.add_node("router", router)
+_builder.add_node("generate_shell", generate_shell)
+_builder.add_node("plan_tools", plan_tools)
+_builder.add_node("call_tools", ToolNode(ALL_TOOLS))
+_builder.add_node("emit_state", emit_state)
+_builder.add_node("respond", respond)
+
+_builder.set_entry_point("router")
+
+# After shell generation, go to plan_tools to call all data tools
+_builder.add_edge("generate_shell", "plan_tools")
+
+# After plan_tools, check if we need to call tools
+_builder.add_conditional_edges("plan_tools", should_call_tools)
+
+# Tool calling flow
+_builder.add_edge("call_tools", "emit_state")
+_builder.add_edge("emit_state", "respond")
+_builder.add_edge("respond", END)
+
+graph = _builder.compile()
diff --git a/cockpit/langgraph/streaming/python/src/dashboard_tools.py b/cockpit/langgraph/streaming/python/src/dashboard_tools.py
new file mode 100644
index 000000000..827b0c46e
--- /dev/null
+++ b/cockpit/langgraph/streaming/python/src/dashboard_tools.py
@@ -0,0 +1,97 @@
+"""Mock SaaS metrics data tools for the generative-ui dashboard example."""
+
+from langchain_core.tools import tool
+
+# ── Hardcoded SaaS dataset ──────────────────────────────────────────────────
+
+_MRR_TREND = [
+ {"month": "2025-05", "mrr": 28000},
+ {"month": "2025-06", "mrr": 29500},
+ {"month": "2025-07", "mrr": 30200},
+ {"month": "2025-08", "mrr": 31800},
+ {"month": "2025-09", "mrr": 32500},
+ {"month": "2025-10", "mrr": 33000},
+ {"month": "2025-11", "mrr": 34200},
+ {"month": "2025-12", "mrr": 35800},
+ {"month": "2026-01", "mrr": 37000},
+ {"month": "2026-02", "mrr": 38500},
+ {"month": "2026-03", "mrr": 40200},
+ {"month": "2026-04", "mrr": 42000},
+]
+
+_SUBSCRIBERS_BY_PLAN = [
+ {"plan": "free", "count": 1200},
+ {"plan": "starter", "count": 850},
+ {"plan": "pro", "count": 420},
+ {"plan": "enterprise", "count": 95},
+]
+
+_CHURNED_ACCOUNTS = [
+ {"name": "Acme Corp", "plan": "pro", "mrr_lost": 450, "date": "2026-04-01"},
+ {"name": "Widgetly", "plan": "starter", "mrr_lost": 120, "date": "2026-03-28"},
+ {"name": "DataPipe Inc", "plan": "enterprise", "mrr_lost": 2400, "date": "2026-03-25"},
+ {"name": "NovaTech", "plan": "pro", "mrr_lost": 450, "date": "2026-03-20"},
+ {"name": "CloudSync", "plan": "starter", "mrr_lost": 120, "date": "2026-03-15"},
+ {"name": "ByteForge", "plan": "pro", "mrr_lost": 450, "date": "2026-03-10"},
+ {"name": "Quantum Labs", "plan": "enterprise", "mrr_lost": 2400, "date": "2026-03-05"},
+ {"name": "FlowState", "plan": "starter", "mrr_lost": 120, "date": "2026-02-28"},
+ {"name": "CipherNet", "plan": "pro", "mrr_lost": 450, "date": "2026-02-20"},
+ {"name": "Luminary AI", "plan": "starter", "mrr_lost": 120, "date": "2026-02-15"},
+]
+
+
+@tool
+def query_mrr() -> dict:
+ """Get current Monthly Recurring Revenue (MRR) with month-over-month delta."""
+ current = _MRR_TREND[-1]["mrr"]
+ previous = _MRR_TREND[-2]["mrr"]
+ delta_pct = ((current - previous) / previous) * 100
+ total_subs = sum(p["count"] for p in _SUBSCRIBERS_BY_PLAN)
+ arpu = round(current / total_subs, 2)
+ return {
+ "mrr": {"value": current, "delta": f"+{delta_pct:.1f}%", "period": "month"},
+ "subscribers": {"total": total_subs, "delta": "+42"},
+ "churn": {"rate": "3.2%", "delta": "-0.4%"},
+ "arpu": {"value": f"${arpu:.2f}", "delta": "+$1.20"},
+ }
+
+
+@tool
+def query_subscribers_by_plan(plans: list[str] | None = None) -> list[dict]:
+ """Get subscriber counts broken down by plan tier.
+
+ Args:
+ plans: Optional list of plan names to filter by (e.g., ["pro", "enterprise"]).
+ Returns all plans if not specified.
+ """
+ if plans:
+ return [p for p in _SUBSCRIBERS_BY_PLAN if p["plan"] in plans]
+ return _SUBSCRIBERS_BY_PLAN
+
+
+@tool
+def query_mrr_trend(months: int = 12) -> list[dict]:
+ """Get MRR trend over time.
+
+ Args:
+        months: Number of months to return (default 12); values larger than the available history are clamped to the full 12-month dataset.
+ """
+ months = min(months, len(_MRR_TREND))
+ return _MRR_TREND[-months:]
+
+
+@tool
+def query_churned_accounts(limit: int = 5, plan: str | None = None) -> list[dict]:
+ """Get recently churned accounts.
+
+ Args:
+ limit: Maximum number of accounts to return (default 5).
+ plan: Optional plan name to filter by (e.g., "enterprise").
+ """
+ filtered = _CHURNED_ACCOUNTS
+ if plan:
+ filtered = [a for a in filtered if a["plan"] == plan]
+ return filtered[:limit]
+
+
+ALL_TOOLS = [query_mrr, query_subscribers_by_plan, query_mrr_trend, query_churned_accounts]
diff --git a/libs/agent/src/lib/agent.fn.ts b/libs/agent/src/lib/agent.fn.ts
index a5b496b73..33704a4c3 100644
--- a/libs/agent/src/lib/agent.fn.ts
+++ b/libs/agent/src/lib/agent.fn.ts
@@ -17,6 +17,7 @@ import type { BagTemplate, InferBag } from '@langchain/langgraph-sdk';
import {
AgentOptions,
AgentRef,
+ CustomStreamEvent,
StreamSubjects,
SubagentStreamRef,
ResourceStatus,
@@ -81,6 +82,7 @@ export function agent<
const toolProgress$ = new BehaviorSubject([]);
const toolCalls$ = new BehaviorSubject([]);
const subagents$ = new BehaviorSubject