diff --git a/backend/application/chat/utilities/notification_utils.py b/backend/application/chat/utilities/notification_utils.py index 9ff19c9..916e08c 100644 --- a/backend/application/chat/utilities/notification_utils.py +++ b/backend/application/chat/utilities/notification_utils.py @@ -191,11 +191,33 @@ async def notify_tool_progress( Send tool progress notification. Emits an event shaped for the UI to render progress bars/messages. + + Enhanced to support structured progress updates: + - If message starts with "MCP_UPDATE:", parse as JSON for special updates + - Supports canvas updates, system messages, and file artifacts during execution """ if not update_callback: return try: + # Check for structured progress updates + if message and message.startswith("MCP_UPDATE:"): + try: + structured_data = json.loads(message[11:]) # Remove "MCP_UPDATE:" prefix + await _handle_structured_progress_update( + tool_call_id=tool_call_id, + tool_name=tool_name, + progress=progress, + total=total, + structured_data=structured_data, + update_callback=update_callback + ) + return + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse structured progress update: {e}") + # Fall through to regular progress handling + + # Regular progress notification pct: Optional[float] = None if total is not None and total != 0: try: @@ -216,6 +238,86 @@ async def notify_tool_progress( logger.warning(f"Failed to emit tool_progress: {e}") +async def _handle_structured_progress_update( + tool_call_id: str, + tool_name: str, + progress: float, + total: Optional[float], + structured_data: Dict[str, Any], + update_callback: UpdateCallback +) -> None: + """ + Handle structured progress updates from MCP servers. + + Supports: + - canvas_update: Display content in canvas during tool execution + - system_message: Add rich system messages to chat history + - artifacts: Send file artifacts during execution + """ + update_type = structured_data.get("type") + + if update_type == "canvas_update": + # Display content in canvas + content = structured_data.get("content") + if content: + await safe_notify(update_callback, { + "type": "canvas_content", + "content": content + }) + logger.info(f"Tool {tool_name} sent canvas update during execution") + + elif update_type == "system_message": + # Send rich system message to chat + msg_content = structured_data.get("message", "") + msg_subtype = structured_data.get("subtype", "info") + await safe_notify(update_callback, { + "type": "intermediate_update", + "update_type": "system_message", + "data": { + "message": msg_content, + "subtype": msg_subtype, + "tool_call_id": tool_call_id, + "tool_name": tool_name + } + }) + logger.info(f"Tool {tool_name} sent system message during execution") + + elif update_type == "artifacts": + # Send file artifacts during execution + artifacts = structured_data.get("artifacts", []) + display_config = structured_data.get("display") + if artifacts: + await safe_notify(update_callback, { + "type": "intermediate_update", + "update_type": "progress_artifacts", + "data": { + "artifacts": artifacts, + "display": display_config, + "tool_call_id": tool_call_id, + "tool_name": tool_name + } + }) + logger.info(f"Tool {tool_name} sent {len(artifacts)} artifact(s) during execution") + + # Still send progress info along with the structured update + pct: Optional[float] = None + if total is not None and total != 0: + try: + pct = (float(progress) / float(total)) * 100.0 + except Exception: + pct = None + + await safe_notify(update_callback, { + "type": 
"tool_progress", + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "progress": progress, + "total": total, + "percentage": pct, + "message": structured_data.get("progress_message", "Processing..."), + }) + + async def notify_canvas_content( parsed_args: Dict[str, Any], update_callback: UpdateCallback diff --git a/backend/mcp/progress_updates_demo/QUICKSTART.md b/backend/mcp/progress_updates_demo/QUICKSTART.md new file mode 100644 index 0000000..897927a --- /dev/null +++ b/backend/mcp/progress_updates_demo/QUICKSTART.md @@ -0,0 +1,273 @@ +# MCP Progress Updates - Quick Start Guide + +This guide shows how to use the enhanced MCP progress reporting capabilities to send viewable updates to the frontend during tool execution. + +## Overview + +MCP servers can now send three types of intermediate updates: + +1. **Canvas Updates**: Display HTML visualizations in real-time +2. **System Messages**: Add rich status messages to chat history +3. **Progressive Artifacts**: Send files as they're generated + +## Basic Setup + +### 1. Enable the Demo Server + +Add to `config/overrides/mcp.json`: + +```json +{ + "servers": { + "progress_updates_demo": { + "command": ["python", "mcp/progress_updates_demo/main.py"], + "cwd": "backend", + "groups": ["users"], + "description": "Demo server showing enhanced progress updates" + } + } +} +``` + +### 2. Restart Backend + +```bash +# Stop the backend if running +# Then start it again +cd /path/to/atlas-ui-3 +cd backend +python main.py +``` + +### 3. Try It Out + +Open the Atlas UI and try these prompts: + +``` +Show me a task with canvas updates +Run task_with_system_messages +Generate artifacts progressively +``` + +## Creating Your Own Progress Updates + +### Example 1: Canvas Updates + +```python +from fastmcp import FastMCP, Context +import asyncio +import json + +mcp = FastMCP("MyServer") + +@mcp.tool +async def visualize_progress( + steps: int = 5, + ctx: Context | None = None +) -> dict: + """Shows visual progress in canvas.""" + + for step in range(1, steps + 1): + # Create HTML visualization + html = f""" + + +

+ <div style="font-family: sans-serif; padding: 16px;">
+ <h3>Processing Step {step}/{steps}</h3>
+ <div style="background: #eee; border-radius: 6px; height: 20px;">
+ <div style="background: #4caf50; height: 20px; border-radius: 6px; width: {int(step / steps * 100)}%;"></div>
+ </div>
+ </div>
+ + + """ + + # Send canvas update + if ctx: + update_payload = { + "type": "canvas_update", + "content": html, + "progress_message": f"Step {step}/{steps}" + } + await ctx.report_progress( + progress=step, + total=steps, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" + ) + + await asyncio.sleep(1) + + return {"results": {"status": "completed"}} + +if __name__ == "__main__": + mcp.run() +``` + +### Example 2: System Messages + +```python +@mcp.tool +async def process_with_updates( + stages: list[str] = ["Init", "Process", "Finalize"], + ctx: Context | None = None +) -> dict: + """Shows status updates in chat.""" + + for i, stage in enumerate(stages, 1): + # Do work... + await asyncio.sleep(1) + + # Send system message + if ctx: + update_payload = { + "type": "system_message", + "message": f"**{stage}** - Completed successfully ✓", + "subtype": "success", + "progress_message": f"Completed {stage}" + } + await ctx.report_progress( + progress=i, + total=len(stages), + message=f"MCP_UPDATE:{json.dumps(update_payload)}" + ) + + return {"results": {"stages_completed": len(stages)}} +``` + +### Example 3: Progressive Artifacts + +```python +import base64 + +@mcp.tool +async def generate_reports( + count: int = 3, + ctx: Context | None = None +) -> dict: + """Generates and displays files progressively.""" + + for i in range(1, count + 1): + # Generate content + html_content = f""" + + +

+ <div style="font-family: sans-serif; padding: 16px;">
+ <h2>Report {i}</h2>
+ <p>Generated at step {i} of {count}</p>
+ </div>
+ + + """ + + # Send artifact + if ctx: + artifact_data = { + "type": "artifacts", + "artifacts": [ + { + "name": f"report_{i}.html", + "b64": base64.b64encode(html_content.encode()).decode(), + "mime": "text/html", + "size": len(html_content), + "description": f"Report {i}", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": f"report_{i}.html" + }, + "progress_message": f"Generated report {i}" + } + await ctx.report_progress( + progress=i, + total=count, + message=f"MCP_UPDATE:{json.dumps(artifact_data)}" + ) + + await asyncio.sleep(1) + + return {"results": {"reports_generated": count}} +``` + +## Update Types Reference + +### Canvas Update + +```python +{ + "type": "canvas_update", + "content": "...", # HTML string to display + "progress_message": "Optional progress text" +} +``` + +### System Message + +```python +{ + "type": "system_message", + "message": "Status message text", + "subtype": "info", # or "success", "warning", "error" + "progress_message": "Optional progress text" +} +``` + +### Artifacts + +```python +{ + "type": "artifacts", + "artifacts": [ + { + "name": "filename.ext", + "b64": "base64_encoded_content", + "mime": "mime/type", + "size": 12345, + "description": "File description", + "viewer": "html" # or "image", "pdf", etc. + } + ], + "display": { + "open_canvas": True, + "primary_file": "filename.ext", + "mode": "replace" + }, + "progress_message": "Optional progress text" +} +``` + +## Tips + +- **Always include progress_message**: This shows in the progress bar +- **Test with short intervals**: Start with 1-2 second delays for testing +- **HTML is powerful**: Use any HTML/CSS for canvas visualizations +- **Artifacts are stored**: Files sent as artifacts are saved to S3 +- **Updates are async**: UI updates without blocking your tool + +## Troubleshooting + +### Updates not showing? + +1. Check the backend logs for errors +2. Verify JSON is valid: `json.dumps(payload)` +3. Ensure `ctx` parameter is not None +4. Check message format: must start with `"MCP_UPDATE:"` + +### Canvas not updating? + +- Verify content is valid HTML +- Check browser console for errors +- Try a simple HTML first: `"

<h1>Test</h1>
"` + +### Artifacts not displaying? + +- Ensure base64 encoding is correct +- Check MIME type matches content +- Verify viewer hint is supported: html, image, pdf, etc. + +## More Examples + +See `/backend/mcp/progress_updates_demo/main.py` for complete working examples. + +## Documentation + +Full documentation: [Developer Guide - Progress Updates](../docs/03_developer_guide.md#progress-updates-and-intermediate-results) diff --git a/backend/mcp/progress_updates_demo/README.md b/backend/mcp/progress_updates_demo/README.md new file mode 100644 index 0000000..301eb07 --- /dev/null +++ b/backend/mcp/progress_updates_demo/README.md @@ -0,0 +1,120 @@ +# Progress Updates Demo MCP Server + +This MCP server demonstrates the enhanced progress reporting capabilities that allow MCP servers to send viewable updates to the frontend during tool execution. + +## Features + +This demo shows three types of enhanced progress updates: + +1. **Canvas Updates**: Display HTML visualizations in the canvas panel during execution +2. **System Messages**: Send rich messages that appear in chat history +3. **Progress Artifacts**: Share file artifacts progressively as they're generated + +## Tools + +### `task_with_canvas_updates` + +Demonstrates sending HTML progress visualizations to the canvas during execution. + +**Parameters:** +- `task_name` (str): Name of the task (default: "demo") +- `steps` (int): Number of steps to process (default: 5) +- `interval_seconds` (int): Delay between steps (default: 2) + +### `task_with_system_messages` + +Demonstrates sending rich system messages to chat history during execution. + +**Parameters:** +- `task_name` (str): Name of the analysis task (default: "analysis") +- `stages` (int): Number of stages to process (default: 4) +- `interval_seconds` (int): Delay between stages (default: 2) + +### `task_with_artifacts` + +Demonstrates sending file artifacts progressively during execution. + +**Parameters:** +- `task_name` (str): Name of the processing task (default: "data_processing") +- `files_to_generate` (int): Number of intermediate files (default: 3) +- `interval_seconds` (int): Delay between file generation (default: 2) + +## How It Works + +MCP servers can send structured progress updates by encoding JSON data in the progress message field with the prefix `"MCP_UPDATE:"`. + +### Supported Update Types + +#### 1. Canvas Update +```python +update_payload = { + "type": "canvas_update", + "content": "...", # HTML content to display + "progress_message": "Processing..." # Optional progress text +} +await ctx.report_progress( + progress=step, + total=total_steps, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" +) +``` + +#### 2. System Message +```python +update_payload = { + "type": "system_message", + "message": "Data validation complete!", + "subtype": "success", # or "info", "warning", "error" + "progress_message": "Validating data..." +} +await ctx.report_progress( + progress=step, + total=total_steps, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" +) +``` + +#### 3. Artifacts +```python +update_payload = { + "type": "artifacts", + "artifacts": [ + { + "name": "result.html", + "b64": base64_encoded_content, + "mime": "text/html", + "size": content_size, + "description": "Intermediate result", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": "result.html", + "mode": "replace" + }, + "progress_message": "Generated result..." 
+} +await ctx.report_progress( + progress=step, + total=total_steps, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" +) +``` + +## Usage + +Try these example prompts: + +``` +Show me a task with canvas updates +Run task_with_system_messages +Generate artifacts progressively +``` + +## Benefits + +- **Better UX**: Users see real-time progress with visual feedback +- **Reduced perceived latency**: Long-running tasks feel more responsive +- **More informative**: Rich context about what's happening at each stage +- **Flexible**: Can display any HTML content, images, or file artifacts diff --git a/backend/mcp/progress_updates_demo/main.py b/backend/mcp/progress_updates_demo/main.py new file mode 100755 index 0000000..03c94a5 --- /dev/null +++ b/backend/mcp/progress_updates_demo/main.py @@ -0,0 +1,498 @@ +#!/usr/bin/env python3 +""" +Progress Updates Demo MCP Server using FastMCP. + +This server demonstrates the enhanced progress reporting capabilities +that allow MCP servers to send viewable updates to the frontend during +tool execution, including: +- Canvas updates (plots, HTML, images) +- Rich system messages +- Progress artifacts + +To use these features from an MCP server, send special formatted messages +via ctx.report_progress() with the message field containing: + "MCP_UPDATE:{json_payload}" + +Supported update types: +- canvas_update: Display HTML/images in canvas during execution +- system_message: Add rich messages to chat history +- artifacts: Send file artifacts during execution +""" + +from __future__ import annotations + +import asyncio +import json +import base64 +from typing import Any, Dict + +from fastmcp import FastMCP, Context + + +# Initialize the MCP server +mcp = FastMCP("Progress Updates Demo") + + +def create_progress_html(step: int, total: int, message: str) -> str: + """Create an HTML progress visualization.""" + percentage = int((step / total) * 100) + return f""" + + + + + + +
+ <div style="font-family: Arial, sans-serif; padding: 20px;">
+ <h2>Task Progress</h2>
+ <p>Step {step} of {total}</p>
+ <div style="background: #e0e0e0; border-radius: 6px; height: 24px;">
+ <div style="background: #4caf50; border-radius: 6px; height: 24px; width: {percentage}%; color: white; text-align: center; line-height: 24px;">
+ {percentage}%
+ </div>
+ </div>
+ <p>{message}</p>
+ </div>
+ + + """ + + +def create_chart_html(data: Dict[str, int]) -> str: + """Create a simple bar chart HTML.""" + max_value = max(data.values()) if data else 1 + bars = "" + for label, value in data.items(): + percentage = int((value / max_value) * 100) + bars += f""" +
+ <div style="margin: 10px 0;">
+ <div style="font-weight: bold; margin-bottom: 4px;">{label}</div>
+ <div style="background: #e0e0e0; border-radius: 4px;">
+ <div style="background: #2196f3; color: white; padding: 4px 8px; border-radius: 4px; width: {percentage}%;">
+ {value}
+ </div>
+ </div>
+ </div>
+ """ + + return f""" + + + + + + +
+ <div style="font-family: Arial, sans-serif; padding: 20px;">
+ <h2>Processing Results</h2>
+ {bars}
+ </div>
+ + + """ + + +@mcp.tool +async def task_with_canvas_updates( + task_name: str = "demo", + steps: int = 5, + interval_seconds: int = 2, + ctx: Context | None = None, +) -> Dict[str, Any]: + """ + Execute a long-running task with visual progress updates in the canvas. + + This tool demonstrates how MCP servers can send canvas updates during + execution, allowing users to see real-time visual progress indicators. + + Args: + task_name: Name of the task to execute + steps: Number of steps to process (default: 5) + interval_seconds: Delay between steps (default: 2) + ctx: MCP context for progress reporting + + Returns: + Task completion summary with final results + """ + total = max(1, int(steps)) + interval = max(1, int(interval_seconds)) + + # Initial progress + if ctx: + await ctx.report_progress( + progress=0, + total=total, + message=f"Starting {task_name}..." + ) + + # Process each step and send visual updates as artifacts + for step in range(1, total + 1): + await asyncio.sleep(interval) + + # Create progress visualization HTML + html_content = create_progress_html(step, total, f"Processing {task_name}: Step {step}") + + # Send progress HTML as an artifact so it uses the HTML viewer + if ctx: + artifact_html = base64.b64encode(html_content.encode("utf-8")).decode("utf-8") + update_payload = { + "type": "artifacts", + "artifacts": [ + { + "name": f"progress_step_{step}.html", + "b64": artifact_html, + "mime": "text/html", + "size": len(html_content), + "description": f"Progress for {task_name} step {step}/{total}", + "viewer": "html", + } + ], + "display": { + "open_canvas": True, + "primary_file": f"progress_step_{step}.html", + "mode": "replace", + }, + "progress_message": f"{task_name}: Step {step}/{total}", + } + await ctx.report_progress( + progress=step, + total=total, + message=f"MCP_UPDATE:{json.dumps(update_payload)}", + ) + + # Final result with chart + result_data = { + "Items Processed": total * 10, + "Errors Found": 2, + "Warnings": 5, + "Success Rate": 95 + } + + chart_html = create_chart_html(result_data) + + return { + "results": { + "task": task_name, + "status": "completed", + "steps_completed": total, + "summary": result_data + }, + "artifacts": [ + { + "name": "final_results.html", + "b64": base64.b64encode(chart_html.encode('utf-8')).decode('utf-8'), + "mime": "text/html", + "size": len(chart_html), + "description": "Final processing results chart", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": "final_results.html", + "mode": "replace", + "viewer_hint": "html" + } + } + + +@mcp.tool +async def task_with_system_messages( + task_name: str = "analysis", + stages: int = 4, + interval_seconds: int = 2, + ctx: Context | None = None, +) -> Dict[str, Any]: + """ + Execute a task with rich system messages displayed in chat history. + + This tool demonstrates how MCP servers can send rich system messages + that appear as new items in the chat history during tool execution. + + Args: + task_name: Name of the analysis task + stages: Number of stages to process (default: 4) + interval_seconds: Delay between stages (default: 2) + ctx: MCP context for progress reporting + + Returns: + Analysis completion summary + """ + stage_names = [ + "Data Collection", + "Data Validation", + "Analysis", + "Report Generation" + ][:stages] + + total = len(stage_names) + + # Initial progress + if ctx: + await ctx.report_progress( + progress=0, + total=total, + message=f"Starting {task_name}..." 
+ ) + + # Process each stage and send system messages + for i, stage in enumerate(stage_names, 1): + await asyncio.sleep(interval_seconds) + + # Send system message + if ctx: + update_payload = { + "type": "system_message", + "message": f"**{stage}** - Stage {i}/{total} completed successfully", + "subtype": "success", + "progress_message": f"{task_name}: {stage}" + } + await ctx.report_progress( + progress=i, + total=total, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" + ) + + return { + "results": { + "task": task_name, + "status": "completed", + "stages_completed": total, + "completion_message": f"All {total} stages completed successfully" + } + } + + +@mcp.tool +async def task_with_artifacts( + task_name: str = "data_processing", + files_to_generate: int = 3, + interval_seconds: int = 2, + ctx: Context | None = None, +) -> Dict[str, Any]: + """ + Execute a task that generates and displays artifacts progressively. + + This tool demonstrates how MCP servers can send file artifacts during + execution, allowing users to see intermediate results as they're generated. + + Args: + task_name: Name of the processing task + files_to_generate: Number of intermediate files to create (default: 3) + interval_seconds: Delay between file generation (default: 2) + ctx: MCP context for progress reporting + + Returns: + Processing completion summary + """ + total = max(1, int(files_to_generate)) + interval = max(1, int(interval_seconds)) + + # Initial progress + if ctx: + await ctx.report_progress( + progress=0, + total=total, + message=f"Starting {task_name}..." + ) + + # Generate intermediate files + for file_num in range(1, total + 1): + await asyncio.sleep(interval) + + # Create intermediate result HTML + intermediate_html = f""" + + + + + + +
+ <div style="font-family: Arial, sans-serif; padding: 20px;">
+ <h2>Intermediate Result {file_num}</h2>
+ <p>Generated at step {file_num} of {total}</p>
+ <p>Processing status: In Progress</p>
+ </div>
+ + + """ + + # Send artifact via structured progress message + if ctx: + artifact_data = { + "type": "artifacts", + "artifacts": [ + { + "name": f"intermediate_result_{file_num}.html", + "b64": base64.b64encode(intermediate_html.encode('utf-8')).decode('utf-8'), + "mime": "text/html", + "size": len(intermediate_html), + "description": f"Intermediate result {file_num}", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": f"intermediate_result_{file_num}.html", + "mode": "replace" + }, + "progress_message": f"Generated file {file_num}/{total}" + } + await ctx.report_progress( + progress=file_num, + total=total, + message=f"MCP_UPDATE:{json.dumps(artifact_data)}" + ) + + # Final result + final_html = """ + + + + + + +
+ <div style="font-family: Arial, sans-serif; padding: 20px; text-align: center;">
+ <h2>Processing Complete!</h2>
+ <p>All files have been generated successfully.</p>
+ </div>
+ + + """ + + return { + "results": { + "task": task_name, + "status": "completed", + "files_generated": total + }, + "artifacts": [ + { + "name": "final_result.html", + "b64": base64.b64encode(final_html.encode('utf-8')).decode('utf-8'), + "mime": "text/html", + "size": len(final_html), + "description": "Final processing result", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": "final_result.html", + "mode": "replace", + "viewer_hint": "html" + } + } + + +if __name__ == "__main__": + mcp.run() diff --git a/backend/tests/test_mcp_progress_updates.py b/backend/tests/test_mcp_progress_updates.py new file mode 100644 index 0000000..bf0ff2e --- /dev/null +++ b/backend/tests/test_mcp_progress_updates.py @@ -0,0 +1,315 @@ +"""Tests for enhanced MCP progress update notifications.""" + +import pytest +import json +from unittest.mock import AsyncMock + +from application.chat.utilities.notification_utils import ( + notify_tool_progress, + _handle_structured_progress_update +) + + +@pytest.mark.asyncio +async def test_notify_tool_progress_regular(): + """Test regular progress notification without structured updates.""" + callback = AsyncMock() + + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=5, + total=10, + message="Processing...", + update_callback=callback + ) + + callback.assert_called_once() + call_args = callback.call_args[0][0] + + assert call_args["type"] == "tool_progress" + assert call_args["tool_call_id"] == "test-123" + assert call_args["tool_name"] == "test_tool" + assert call_args["progress"] == 5 + assert call_args["total"] == 10 + assert call_args["percentage"] == 50.0 + assert call_args["message"] == "Processing..." + + +@pytest.mark.asyncio +async def test_notify_tool_progress_canvas_update(): + """Test canvas update via structured progress message.""" + callback = AsyncMock() + + update_payload = { + "type": "canvas_update", + "content": "Test", + "progress_message": "Updating canvas" + } + + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=5, + message=f"MCP_UPDATE:{json.dumps(update_payload)}", + update_callback=callback + ) + + # Should be called twice: once for canvas_content, once for tool_progress + assert callback.call_count == 2 + + # Check canvas_content message + canvas_call = callback.call_args_list[0][0][0] + assert canvas_call["type"] == "canvas_content" + assert canvas_call["content"] == "Test" + + # Check progress message + progress_call = callback.call_args_list[1][0][0] + assert progress_call["type"] == "tool_progress" + assert progress_call["message"] == "Updating canvas" + + +@pytest.mark.asyncio +async def test_notify_tool_progress_system_message(): + """Test system message via structured progress message.""" + callback = AsyncMock() + + update_payload = { + "type": "system_message", + "message": "Stage 1 completed", + "subtype": "success", + "progress_message": "Completed stage 1" + } + + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=3, + message=f"MCP_UPDATE:{json.dumps(update_payload)}", + update_callback=callback + ) + + # Should be called twice: once for intermediate_update, once for tool_progress + assert callback.call_count == 2 + + # Check system message + system_call = callback.call_args_list[0][0][0] + assert system_call["type"] == "intermediate_update" + assert system_call["update_type"] == "system_message" + assert system_call["data"]["message"] == "Stage 1 
completed" + assert system_call["data"]["subtype"] == "success" + assert system_call["data"]["tool_call_id"] == "test-123" + assert system_call["data"]["tool_name"] == "test_tool" + + +@pytest.mark.asyncio +async def test_notify_tool_progress_artifacts(): + """Test artifacts via structured progress message.""" + callback = AsyncMock() + + update_payload = { + "type": "artifacts", + "artifacts": [ + { + "name": "result.html", + "b64": "PGh0bWw+PC9odG1sPg==", + "mime": "text/html", + "size": 100, + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": "result.html" + }, + "progress_message": "Generated result" + } + + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=2, + total=3, + message=f"MCP_UPDATE:{json.dumps(update_payload)}", + update_callback=callback + ) + + # Should be called twice: once for intermediate_update, once for tool_progress + assert callback.call_count == 2 + + # Check artifacts message + artifacts_call = callback.call_args_list[0][0][0] + assert artifacts_call["type"] == "intermediate_update" + assert artifacts_call["update_type"] == "progress_artifacts" + assert len(artifacts_call["data"]["artifacts"]) == 1 + assert artifacts_call["data"]["artifacts"][0]["name"] == "result.html" + assert artifacts_call["data"]["display"]["open_canvas"] is True + + +@pytest.mark.asyncio +async def test_notify_tool_progress_artifacts_inline_shape(): + """Progress artifacts should preserve inline-friendly fields for frontend rendering.""" + callback = AsyncMock() + + update_payload = { + "type": "artifacts", + "artifacts": [ + { + "name": "progress_step_1.html", + "b64": "PGgxPkhlbGxvPC9oMT4=", + "mime": "text/html", + "size": 42, + "description": "Step 1", + "viewer": "html", + } + ], + "display": { + "open_canvas": True, + "primary_file": "progress_step_1.html", + "mode": "replace", + }, + "progress_message": "demo: Step 1/3", + } + + await notify_tool_progress( + tool_call_id="call-1", + tool_name="progress_tool", + progress=1, + total=3, + message=f"MCP_UPDATE:{json.dumps(update_payload)}", + update_callback=callback, + ) + + # First callback should carry the raw artifact fields through untouched + artifacts_call = callback.call_args_list[0][0][0] + assert artifacts_call["type"] == "intermediate_update" + assert artifacts_call["update_type"] == "progress_artifacts" + + data = artifacts_call["data"] + assert data["tool_call_id"] == "call-1" + assert data["tool_name"] == "progress_tool" + + assert isinstance(data["artifacts"], list) + art = data["artifacts"][0] + # These fields are required for inline rendering on the frontend + assert art["name"] == "progress_step_1.html" + assert art["b64"] == "PGgxPkhlbGxvPC9oMT4=" + assert art["mime"] == "text/html" + assert art["viewer"] == "html" + assert art["size"] == 42 + assert art["description"] == "Step 1" + + display = data["display"] + assert display["open_canvas"] is True + assert display["primary_file"] == "progress_step_1.html" + assert display["mode"] == "replace" + + +@pytest.mark.asyncio +async def test_notify_tool_progress_invalid_json(): + """Test that invalid JSON in MCP_UPDATE falls back to regular progress.""" + callback = AsyncMock() + + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=5, + message="MCP_UPDATE:{invalid json}", + update_callback=callback + ) + + # Should fall back to regular progress notification + callback.assert_called_once() + call_args = callback.call_args[0][0] + assert 
call_args["type"] == "tool_progress" + assert "invalid json" in call_args["message"] + + +@pytest.mark.asyncio +async def test_notify_tool_progress_no_callback(): + """Test that progress with no callback doesn't raise errors.""" + # Should not raise any exceptions + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=5, + message="Test", + update_callback=None + ) + + +@pytest.mark.asyncio +async def test_handle_structured_progress_update_canvas(): + """Test _handle_structured_progress_update for canvas updates.""" + callback = AsyncMock() + + structured_data = { + "type": "canvas_update", + "content": "Test", + "progress_message": "Updating" + } + + await _handle_structured_progress_update( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=5, + structured_data=structured_data, + update_callback=callback + ) + + # Should send canvas_content and tool_progress + assert callback.call_count == 2 + assert callback.call_args_list[0][0][0]["type"] == "canvas_content" + assert callback.call_args_list[1][0][0]["type"] == "tool_progress" + + +@pytest.mark.asyncio +async def test_percentage_calculation(): + """Test percentage calculation in progress notifications.""" + callback = AsyncMock() + + # Test with valid total + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=3, + total=4, + message="Test", + update_callback=callback + ) + + call_args = callback.call_args[0][0] + assert call_args["percentage"] == 75.0 + + # Test with zero total + callback.reset_mock() + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=0, + message="Test", + update_callback=callback + ) + + call_args = callback.call_args[0][0] + assert call_args["percentage"] is None + + # Test with None total (indeterminate progress) + callback.reset_mock() + await notify_tool_progress( + tool_call_id="test-123", + tool_name="test_tool", + progress=1, + total=None, + message="Test", + update_callback=callback + ) + + call_args = callback.call_args[0][0] + assert call_args["percentage"] is None diff --git a/config/overrides/progress-report-mcp copy.json b/config/overrides/progress-report-mcp copy.json new file mode 100644 index 0000000..7e035f4 --- /dev/null +++ b/config/overrides/progress-report-mcp copy.json @@ -0,0 +1,18 @@ +{ + "progress_updates_demo": { + "transport": "stdio", + "command": [ + "python", + "mcp/progress_updates_demo/main.py" + ], + "cwd": "backend", + "groups": [ + "users" + ], + "description": "Demo MCP server showing progress updates with canvas, system messages, and artifacts", + "author": "Atlas UI Team", + "short_description": "Progress updates demo", + "help_email": "support@chatui.example.com", + "compliance_level": "Public" + } +} diff --git a/docs/03_developer_guide.md b/docs/03_developer_guide.md index f118bdb..2dabd22 100644 --- a/docs/03_developer_guide.md +++ b/docs/03_developer_guide.md @@ -188,6 +188,201 @@ if __name__ == "__main__": This architecture ensures that your tool does not need to handle any S3 credentials, making the system more secure and easier to develop for. +## Progress Updates and Intermediate Results + +Long-running MCP tools can now send intermediate updates to the frontend during execution, providing users with real-time feedback. 
This includes: + +- **Canvas Updates**: Display HTML visualizations, plots, or images in the canvas panel as the tool progresses +- **System Messages**: Add rich, formatted messages to the chat history to show what's happening at each stage +- **Progressive Artifacts**: Send file artifacts as they're generated, rather than only at the end + +### Basic Progress Reporting + +FastMCP provides a `Context` object that tools can use to report progress: + +```python +from fastmcp import FastMCP, Context + +mcp = FastMCP("MyServer") + +@mcp.tool +async def long_task( + steps: int = 5, + ctx: Context | None = None +) -> dict: + """A tool that reports progress.""" + + for i in range(steps): + # Standard progress reporting + if ctx: + await ctx.report_progress( + progress=i, + total=steps, + message=f"Processing step {i+1} of {steps}" + ) + + # Do work... + await asyncio.sleep(1) + + return {"results": {"status": "completed", "steps": steps}} +``` + +This shows a progress bar in the UI with percentage and message updates. + +### Enhanced Progress Updates + +To send richer updates (canvas content, system messages, or artifacts), encode structured data in the progress message with the `MCP_UPDATE:` prefix: + +#### 1. Canvas Updates + +Display HTML content in the canvas panel during execution: + +```python +import json + +@mcp.tool +async def task_with_visualization( + steps: int = 5, + ctx: Context | None = None +) -> dict: + """Shows visual progress in the canvas.""" + + for step in range(1, steps + 1): + # Create HTML visualization + html_content = f""" + + +

+ <div style="font-family: sans-serif; padding: 16px;">
+ <h3>Processing Step {step}/{steps}</h3>
+ <div style="background: #eee; border-radius: 6px; height: 20px;">
+ <div style="background: #4caf50; height: 20px; border-radius: 6px; width: {int(step / steps * 100)}%;"></div>
+ </div>
+ </div>
+ + + """ + + # Send canvas update + if ctx: + update_payload = { + "type": "canvas_update", + "content": html_content, + "progress_message": f"Step {step}/{steps}" + } + await ctx.report_progress( + progress=step, + total=steps, + message=f"MCP_UPDATE:{json.dumps(update_payload)}" + ) + + return {"results": {"status": "completed"}} +``` + +#### 2. System Messages + +Add informative messages to the chat history: + +```python +@mcp.tool +async def task_with_status_updates( + stages: list[str], + ctx: Context | None = None +) -> dict: + """Reports status updates as chat messages.""" + + for i, stage in enumerate(stages, 1): + # Do work for this stage... + await process_stage(stage) + + # Send system message + if ctx: + update_payload = { + "type": "system_message", + "message": f"**{stage}** completed successfully", + "subtype": "success", # or "info", "warning", "error" + "progress_message": f"Completed {stage}" + } + await ctx.report_progress( + progress=i, + total=len(stages), + message=f"MCP_UPDATE:{json.dumps(update_payload)}" + ) + + return {"results": {"status": "completed", "stages": len(stages)}} +``` + +#### 3. Progressive Artifacts + +Send file artifacts as they're generated: + +```python +import base64 + +@mcp.tool +async def task_with_intermediate_files( + files_to_generate: int = 3, + ctx: Context | None = None +) -> dict: + """Generates and displays files progressively.""" + + for file_num in range(1, files_to_generate + 1): + # Generate file content + html_content = f"

<h2>Result {file_num}</h2>
" + + # Send artifact + if ctx: + artifact_data = { + "type": "artifacts", + "artifacts": [ + { + "name": f"result_{file_num}.html", + "b64": base64.b64encode(html_content.encode()).decode(), + "mime": "text/html", + "size": len(html_content), + "description": f"Intermediate result {file_num}", + "viewer": "html" + } + ], + "display": { + "open_canvas": True, + "primary_file": f"result_{file_num}.html", + "mode": "replace" + }, + "progress_message": f"Generated file {file_num}" + } + await ctx.report_progress( + progress=file_num, + total=files_to_generate, + message=f"MCP_UPDATE:{json.dumps(artifact_data)}" + ) + + return {"results": {"files_generated": files_to_generate}} +``` + +### Update Types Reference + +| Type | Fields | Description | +|------|--------|-------------| +| `canvas_update` | `content` (HTML string), `progress_message` (optional) | Displays HTML content in the canvas panel | +| `system_message` | `message` (string), `subtype` (info/success/warning/error), `progress_message` (optional) | Adds a formatted message to chat history | +| `artifacts` | `artifacts` (list), `display` (object), `progress_message` (optional) | Sends file artifacts with display hints | + +### Example: Complete Demo Server + +See `/backend/mcp/progress_updates_demo/` for a complete working example with three tools demonstrating all update types. To try it: + +1. Add the server to your `config/overrides/mcp.json`: +```json +{ + "progress_updates_demo": { + "command": ["python", "mcp/progress_updates_demo/main.py"], + "cwd": "backend", + "groups": ["users"], + "description": "Demo server showing enhanced progress updates" + } +} +``` + +2. Restart the backend and ask: "Show me a task with canvas updates" + ## Adding Custom Canvas Renderers The canvas panel displays tool-generated files (PDFs, images, HTML). 
To add support for new file types (e.g., `.stl`, `.obj`, `.ipynb`): diff --git a/frontend/src/components/CanvasPanel.jsx b/frontend/src/components/CanvasPanel.jsx index a1837d7..6ff5d86 100644 --- a/frontend/src/components/CanvasPanel.jsx +++ b/frontend/src/components/CanvasPanel.jsx @@ -111,6 +111,43 @@ const CanvasPanel = ({ isOpen, onClose, onWidthChange }) => { setFileError(null); try { + // Inline files (e.g., progress artifacts) are rendered from base64 content + if (currentFile.isInline && currentFile.content_base64) { + try { + if (currentFile.type === 'image') { + const byteCharacters = atob(currentFile.content_base64); + const byteNumbers = new Array(byteCharacters.length); + for (let i = 0; i < byteCharacters.length; i++) { + byteNumbers[i] = byteCharacters.charCodeAt(i); + } + const byteArray = new Uint8Array(byteNumbers); + const blob = new Blob([byteArray], { type: currentFile.mime_type || 'application/octet-stream' }); + const imageUrl = URL.createObjectURL(blob); + setCurrentFileContent({ type: 'image', url: imageUrl, file: currentFile }); + } else if (currentFile.type === 'pdf') { + const byteCharacters = atob(currentFile.content_base64); + const byteNumbers = new Array(byteCharacters.length); + for (let i = 0; i < byteCharacters.length; i++) { + byteNumbers[i] = byteCharacters.charCodeAt(i); + } + const byteArray = new Uint8Array(byteNumbers); + const blob = new Blob([byteArray], { type: currentFile.mime_type || 'application/pdf' }); + const pdfUrl = URL.createObjectURL(blob); + setCurrentFileContent({ type: 'pdf', url: pdfUrl, file: currentFile }); + } else { + const decoded = atob(currentFile.content_base64); + setCurrentFileContent({ type: currentFile.type, content: decoded, file: currentFile }); + } + } catch (error) { + console.error('Error decoding inline canvas file:', error); + setFileError('Failed to decode inline file content'); + setCurrentFileContent(null); + } finally { + setIsLoadingFile(false); + } + return; + } + // Fetch file content from the backend const response = await fetch(`/api/files/download/${currentFile.s3_key}`, { method: 'GET', @@ -164,7 +201,8 @@ const CanvasPanel = ({ isOpen, onClose, onWidthChange }) => { const handleDownload = () => { const currentFile = canvasFiles[currentCanvasFileIndex]; - if (currentFile && downloadFile) { + // Inline-only files are not downloadable via backend + if (currentFile && !currentFile.isInline && downloadFile) { downloadFile(currentFile.filename); } }; diff --git a/frontend/src/handlers/chat/websocketHandlers.js b/frontend/src/handlers/chat/websocketHandlers.js index 2f99ad5..a07952b 100644 --- a/frontend/src/handlers/chat/websocketHandlers.js +++ b/frontend/src/handlers/chat/websocketHandlers.js @@ -111,6 +111,53 @@ export function createWebSocketHandler(deps) { case 'tool_result': mapMessages(prev => prev.map(msg => msg.tool_call_id && msg.tool_call_id === updateData.tool_call_id ? { ...msg, content: `**Tool: ${updateData.tool_name}** - ${updateData.success ? 'Success' : 'Failed'}`, status: updateData.success ? 
'completed' : 'failed', result: updateData.result || updateData.error || null } : msg)) break + case 'system_message': + // Rich system message from MCP server during tool execution + if (updateData && updateData.message) { + addMessage({ + role: 'system', + content: updateData.message, + type: 'system', + subtype: updateData.subtype || 'info', + tool_call_id: updateData.tool_call_id, + tool_name: updateData.tool_name, + timestamp: new Date().toISOString() + }) + } + break + case 'progress_artifacts': + // Handle artifacts sent during tool execution as inline canvas content + if (updateData && updateData.artifacts) { + const artifacts = updateData.artifacts + const display = updateData.display || {} + + const canvasFiles = artifacts + .filter(art => art.b64 && art.mime && art.viewer) + .map(art => ({ + filename: art.name, + content_base64: art.b64, + mime_type: art.mime, + type: art.viewer, + description: art.description || art.name, + // Inline artifacts are rendered from base64; no download key + isInline: true, + })) + + if (canvasFiles.length > 0) { + setCanvasFiles(canvasFiles) + if (display.primary_file) { + const idx = canvasFiles.findIndex(f => f.filename === display.primary_file) + setCurrentCanvasFileIndex(idx >= 0 ? idx : 0) + } else { + setCurrentCanvasFileIndex(0) + } + if (display.open_canvas) { + setCanvasContent('') + setCustomUIContent(null) + } + } + } + break case 'canvas_content': if (updateData && updateData.content) { setCanvasContent(typeof updateData.content === 'string' ? updateData.content : String(updateData.content || '')) diff --git a/frontend/src/handlers/chat/websocketHandlers.test.js b/frontend/src/handlers/chat/websocketHandlers.test.js new file mode 100644 index 0000000..fabdd28 --- /dev/null +++ b/frontend/src/handlers/chat/websocketHandlers.test.js @@ -0,0 +1,108 @@ +import { describe, it, expect, vi } from 'vitest' +import { createWebSocketHandler } from './websocketHandlers' + +const makeDeps = () => { + return { + addMessage: vi.fn(), + mapMessages: vi.fn(fn => { + // simple helper to let tests inspect mapping function behavior + const sample = [{ tool_call_id: 'call-1', status: 'calling' }] + fn(sample) + }), + setIsThinking: vi.fn(), + setCurrentAgentStep: vi.fn(), + setAgentPendingQuestion: vi.fn(), + setCanvasContent: vi.fn(), + setCanvasFiles: vi.fn(), + setCurrentCanvasFileIndex: vi.fn(), + setCustomUIContent: vi.fn(), + setSessionFiles: vi.fn(), + getFileType: vi.fn(), + triggerFileDownload: vi.fn(), + addAttachment: vi.fn(), + resolvePendingFileEvent: vi.fn(), + } +} + +describe('createWebSocketHandler – intermediate updates', () => { + it('adds a rich system message for system_message updates', () => { + const deps = makeDeps() + const handler = createWebSocketHandler(deps) + + const payload = { + type: 'intermediate_update', + update_type: 'system_message', + data: { + message: 'Stage 1 complete', + subtype: 'success', + tool_call_id: 'tool-123', + tool_name: 'progress_tool', + }, + } + + handler(payload) + + expect(deps.addMessage).toHaveBeenCalledTimes(1) + const msg = deps.addMessage.mock.calls[0][0] + expect(msg).toMatchObject({ + role: 'system', + content: 'Stage 1 complete', + type: 'system', + subtype: 'success', + tool_call_id: 'tool-123', + tool_name: 'progress_tool', + }) + expect(typeof msg.timestamp).toBe('string') + }) + + it('updates canvas files and respects display hints for progress_artifacts', () => { + const deps = makeDeps() + const handler = createWebSocketHandler(deps) + + const payload = { + type: 
'intermediate_update', + update_type: 'progress_artifacts', + data: { + artifacts: [ + { + name: 'ignore.txt', + b64: 'AAA', + mime: 'text/plain', + }, + { + name: 'chart.html', + b64: 'BASE64', + mime: 'text/html', + viewer: 'html', + description: 'Chart artifact', + }, + ], + display: { + open_canvas: true, + primary_file: 'chart.html', + }, + }, + } + + handler(payload) + + // Should convert artifacts with viewer hints into canvas files + expect(deps.setCanvasFiles).toHaveBeenCalledTimes(1) + const canvasFiles = deps.setCanvasFiles.mock.calls[0][0] + expect(canvasFiles).toEqual([ + { + filename: 'chart.html', + content_base64: 'BASE64', + mime_type: 'text/html', + type: 'html', + description: 'Chart artifact', + isInline: true, + }, + ]) + + // Should select primary file and clear text/cutom UI when open_canvas + expect(deps.setCurrentCanvasFileIndex).toHaveBeenCalledWith(0) + expect(deps.setCanvasContent).toHaveBeenCalledWith('') + expect(deps.setCustomUIContent).toHaveBeenCalledWith(null) + }) +})