diff --git a/examples/new-api/textual_markdown_demo.py b/examples/new-api/textual_markdown_demo.py
index c3dde2b3a..5f7d33eaa 100644
--- a/examples/new-api/textual_markdown_demo.py
+++ b/examples/new-api/textual_markdown_demo.py
@@ -362,6 +362,7 @@ def show_tool_call(
         name: str | None = None,
         highlight_index: int | None = None,
         max_item_length: int | None = None,
+        metadata: dict | None = None,
     ) -> None:
         self._app.handle_display_tool_call(
             agent_name=name,
@@ -370,6 +371,7 @@ def show_tool_call(
             bottom_items=bottom_items,
             highlight_index=highlight_index,
             max_item_length=max_item_length,
+            metadata=metadata,
         )

     def show_tool_result(
@@ -680,23 +682,63 @@ def handle_display_tool_call(
         bottom_items: list[str] | None,
         highlight_index: int | None,
         max_item_length: int | None,
+        metadata: dict | None,
     ) -> None:
-        if tool_args:
-            try:
-                args_text = json.dumps(tool_args, indent=2, sort_keys=True)
-            except TypeError:  # pragma: no cover - fallback for unserializable args
-                args_text = str(tool_args)
-            content = f"```json\n{args_text}\n```"
+        metadata = metadata or {}
+
+        if metadata.get("variant") == "shell":
+            command = metadata.get("command") or (tool_args or {}).get("command")
+            command_display = command if isinstance(command, str) and command.strip() else None
+            if command_display:
+                content = f"```shell\n$ {command_display}\n```"
+            else:
+                content = "_No shell command provided._"
+
+            details: list[str] = []
+            shell_name = metadata.get("shell_name")
+            shell_path = metadata.get("shell_path")
+            if shell_name or shell_path:
+                if shell_name and shell_path and shell_path != shell_name:
+                    details.append(f"shell: {shell_name} ({shell_path})")
+                elif shell_path:
+                    details.append(f"shell: {shell_path}")
+                elif shell_name:
+                    details.append(f"shell: {shell_name}")
+            working_dir = metadata.get("working_dir_display") or metadata.get("working_dir")
+            if working_dir:
+                details.append(f"cwd: {working_dir}")
+
+            capability_bits: list[str] = []
+            if metadata.get("streams_output"):
+                capability_bits.append("streams stdout/stderr")
+            if metadata.get("returns_exit_code"):
+                capability_bits.append("reports exit code")
+
+            if capability_bits:
+                details.append("; ".join(capability_bits))
+
+            if details:
+                bullet_points = "\n".join(f"- {line}" for line in details)
+                content = f"{content}\n\n{bullet_points}"
         else:
-            content = "_No arguments provided._"
+            if tool_args:
+                try:
+                    args_text = json.dumps(tool_args, indent=2, sort_keys=True)
+                except TypeError:  # pragma: no cover - fallback for unserializable args
+                    args_text = str(tool_args)
+                content = f"```json\n{args_text}\n```"
+            else:
+                content = "_No arguments provided._"

         self._active_assistant_message = None

+        right_info = "shell command" if metadata.get("variant") == "shell" else f"tool request - {tool_name}"
+
         message = ChatMessage(
             role="tool_call",
             content=content,
             name=agent_name or "Tool",
-            right_info=f"tool request - {tool_name}",
+            right_info=right_info,
             bottom_metadata=bottom_items,
             highlight_index=highlight_index,
             max_item_length=max_item_length,
diff --git a/examples/openapi/agent.py b/examples/openapi/agent.py
index a1389e65e..12efaff54 100644
--- a/examples/openapi/agent.py
+++ b/examples/openapi/agent.py
@@ -1,16 +1,13 @@
 import asyncio

 from fast_agent import FastAgent
+from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION

 # Create the application
 fast = FastAgent("fast-agent example")

-default_instruction = """You are a helpful AI Agent.
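Reviewer note: the new `metadata` pathway in `handle_display_tool_call` above is driven entirely by dictionary keys. A minimal sketch of the shape it consumes — the key names come from the hunk above, but the exact values `ShellRuntime.metadata()` emits are an assumption:

```python
# Hypothetical metadata dict, matching the keys read in handle_display_tool_call.
metadata = {
    "variant": "shell",              # selects the shell rendering branch
    "command": "ls -la",             # falls back to tool_args["command"] when absent
    "shell_name": "bash",
    "shell_path": "/bin/bash",
    "working_dir_display": "~/project",
    "streams_output": True,          # rendered as "streams stdout/stderr"
    "returns_exit_code": True,       # rendered as "reports exit code"
}
# The branch renders the command in a fenced shell block, followed by bullets:
#   - shell: bash (/bin/bash)
#   - cwd: ~/project
#   - streams stdout/stderr; reports exit code
```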
-
-{{serverInstructions}}
-
-The current date is {{currentDate}}."""
+default_instruction = DEFAULT_AGENT_INSTRUCTION

 # Define the agent
diff --git a/examples/setup/agent.py b/examples/setup/agent.py
index 1cba33e03..bdae58cec 100644
--- a/examples/setup/agent.py
+++ b/examples/setup/agent.py
@@ -10,6 +10,8 @@

 {{serverInstructions}}

+{{agentSkills}}
+
 The current date is {{currentDate}}."""

diff --git a/examples/setup/fastagent.config.yaml b/examples/setup/fastagent.config.yaml
index e807a3a97..0d91c3e73 100644
--- a/examples/setup/fastagent.config.yaml
+++ b/examples/setup/fastagent.config.yaml
@@ -20,6 +20,12 @@ mcp_timeline:
   steps: 20 # number of timeline buckets to render
   step_seconds: 15 # seconds per bucket (accepts values like "45s", "2m")

+# shell_execution:
+#   # length of time before terminating subprocess
+#   timeout_seconds: 20
+#   # warning interval if no output is seen
+#   warning_interval_seconds: 5
+
 # Logging and Console Configuration:
 logger:
   # level: "debug" | "info" | "warning" | "error"
diff --git a/pyproject.toml b/pyproject.toml
index d9e30e85f..c605d4563 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,7 +15,7 @@ classifiers = [
 requires-python = ">=3.13.5,<3.14"
 dependencies = [
     "fastapi>=0.115.6",
-    "mcp==1.18.0",
+    "mcp==1.19.0",
     "opentelemetry-distro>=0.55b0",
     "opentelemetry-exporter-otlp-proto-http>=1.7.0",
     "pydantic-settings>=2.7.0",
@@ -23,24 +23,25 @@ dependencies = [
     "pyyaml>=6.0.2",
     "rich>=14.1.0",
     "typer>=0.15.1",
-    "anthropic>=0.69.0",
-    "openai>=2.3.0",
+    "anthropic>=0.71.0",
+    "openai[aiohttp]>=2.6.1",
     "azure-identity>=1.14.0",
     "boto3>=1.35.0",
     "prompt-toolkit>=3.0.52",
-    "aiohttp>=3.11.13",
+    "aiohttp>=3.13.1",
     "opentelemetry-instrumentation-openai>=0.43.1; python_version >= '3.10' and python_version < '4.0'",
     "opentelemetry-instrumentation-anthropic>=0.43.1; python_version >= '3.10' and python_version < '4.0'",
     "opentelemetry-instrumentation-mcp>=0.43.1; python_version >= '3.10' and python_version < '4.0'",
-    "google-genai>=1.33.0",
+    "google-genai>=1.46.0",
     "opentelemetry-instrumentation-google-genai>=0.3b0",
     "tensorzero>=2025.7.5",
     "deprecated>=1.2.18",
-    "a2a-sdk>=0.3.6",
+    "a2a-sdk>=0.3.10",
     "email-validator>=2.2.0",
     "pyperclip>=1.9.0",
     "keyring>=24.3.1",
     "textual>=6.2.1",
+    "python-frontmatter>=1.1.0",
 ]

 # For Azure OpenAI with DefaultAzureCredential support, install with: pip install fast-agent-mcp[azure]
diff --git a/src/fast_agent/__init__.py b/src/fast_agent/__init__.py
index 19ac92d18..dc528a4e5 100644
--- a/src/fast_agent/__init__.py
+++ b/src/fast_agent/__init__.py
@@ -23,6 +23,7 @@
     OpenRouterSettings,
     OpenTelemetrySettings,
     Settings,
+    SkillsSettings,
     TensorZeroSettings,
     XAISettings,
 )
@@ -126,6 +127,7 @@ def __getattr__(name: str):
     "BedrockSettings",
     "HuggingFaceSettings",
     "LoggerSettings",
+    "SkillsSettings",
     # Progress and event tracking (lazy loaded)
     "ProgressAction",
     "ProgressEvent",
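Reviewer note: the commented `shell_execution` block above pairs with the `ShellSettings` model added to `src/fast_agent/config.py` later in this diff; both accept plain integers or duration strings. A small sketch, assuming `_parse_duration` handles the `"45s"`/`"2m"` forms the timeline comment already documents:

```python
from fast_agent.config import ShellSettings

# Integers and duration strings both coerce to seconds via the
# field validators added below ("2m" -> 120, "30s" -> 30).
settings = ShellSettings(timeout_seconds="2m", warning_interval_seconds="30s")
assert settings.timeout_seconds == 120
assert settings.warning_interval_seconds == 30
```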
diff --git a/src/fast_agent/agents/agent_types.py b/src/fast_agent/agents/agent_types.py
index d2b368119..4ff2c06c6 100644
--- a/src/fast_agent/agents/agent_types.py
+++ b/src/fast_agent/agents/agent_types.py
@@ -4,10 +4,13 @@

 from dataclasses import dataclass, field
 from enum import StrEnum, auto
+from pathlib import Path
 from typing import Dict, List, Optional

 from mcp.client.session import ElicitationFnT

+from fast_agent.skills import SkillManifest, SkillRegistry
+
 # Forward imports to avoid circular dependencies
 from fast_agent.types import RequestParams

@@ -36,6 +39,8 @@ class AgentConfig:
     tools: Optional[Dict[str, List[str]]] = None
     resources: Optional[Dict[str, List[str]]] = None
     prompts: Optional[Dict[str, List[str]]] = None
+    skills: SkillManifest | SkillRegistry | Path | str | None = None
+    skill_manifests: List[SkillManifest] = field(default_factory=list, repr=False)
     model: str | None = None
     use_history: bool = True
     default_request_params: RequestParams | None = None
diff --git a/src/fast_agent/agents/llm_agent.py b/src/fast_agent/agents/llm_agent.py
index ca17dda64..acfd46e7f 100644
--- a/src/fast_agent/agents/llm_agent.py
+++ b/src/fast_agent/agents/llm_agent.py
@@ -246,6 +246,7 @@ async def generate_impl(
         display_model = self.llm.model_name if self._llm else None

         remove_listener: Callable[[], None] | None = None
+        remove_tool_listener: Callable[[], None] | None = None

         with self.display.streaming_assistant_message(
             name=display_name,
@@ -253,8 +254,12 @@ async def generate_impl(
         ) as stream_handle:
             try:
                 remove_listener = self.llm.add_stream_listener(stream_handle.update)
+                remove_tool_listener = self.llm.add_tool_stream_listener(
+                    stream_handle.handle_tool_event
+                )
             except Exception:
                 remove_listener = None
+                remove_tool_listener = None

             try:
                 result, summary = await self._generate_with_summary(
@@ -263,6 +268,8 @@ async def generate_impl(
             finally:
                 if remove_listener:
                     remove_listener()
+                if remove_tool_listener:
+                    remove_tool_listener()

         if summary:
             summary_text = Text(f"\n\n{summary.message}", style="dim red italic")
diff --git a/src/fast_agent/agents/llm_decorator.py b/src/fast_agent/agents/llm_decorator.py
index 9fc7e0c8d..5f354b30a 100644
--- a/src/fast_agent/agents/llm_decorator.py
+++ b/src/fast_agent/agents/llm_decorator.py
@@ -718,6 +718,12 @@ def message_history(self) -> List[PromptMessageExtended]:
             return self._llm.message_history
         return []

+    def pop_last_message(self) -> PromptMessageExtended | None:
+        """Remove and return the most recent message from the conversation history."""
+        if self._llm:
+            return self._llm.pop_last_message()
+        return None
+
     @property
     def usage_accumulator(self) -> UsageAccumulator | None:
         """
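Reviewer note: the widened `AgentConfig` accepts several spellings for `skills`, while `skill_manifests` stays a derived field that `FastAgent` fills in (see `_apply_skills_to_agent_configs` further down). A hedged sketch — field defaults other than those shown are assumed:

```python
from pathlib import Path

from fast_agent.agents.agent_types import AgentConfig

# Any of these forms should resolve to a list of SkillManifest objects at
# startup; the resolution itself happens in FastAgent, not in the dataclass.
config = AgentConfig(
    name="researcher",
    skills=Path(".fast-agent/skills"),  # also accepts str, SkillManifest, SkillRegistry
)
assert config.skill_manifests == []  # populated later by _apply_skills_to_agent_configs
```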
diff --git a/src/fast_agent/agents/mcp_agent.py b/src/fast_agent/agents/mcp_agent.py
index 91b03ebbf..bdd8cd14d 100644
--- a/src/fast_agent/agents/mcp_agent.py
+++ b/src/fast_agent/agents/mcp_agent.py
@@ -42,12 +42,15 @@
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import FastAgentLLMProtocol
 from fast_agent.mcp.mcp_aggregator import MCPAggregator, ServerStatus
+from fast_agent.skills.registry import format_skills_for_prompt
 from fast_agent.tools.elicitation import (
     get_elicitation_tool,
     run_elicitation_form,
     set_elicitation_input_callback,
 )
+from fast_agent.tools.shell_runtime import ShellRuntime
 from fast_agent.types import PromptMessageExtended, RequestParams
+from fast_agent.ui import console

 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
@@ -59,6 +62,7 @@
     from fast_agent.context import Context
     from fast_agent.llm.usage_tracking import UsageAccumulator
+    from fast_agent.skills import SkillManifest


 class McpAgent(ABC, ToolAgent):
@@ -73,7 +77,6 @@ def __init__(
         self,
         config: AgentConfig,
         connection_persistence: bool = True,
-        # legacy human_input_callback removed
         context: "Context | None" = None,
         **kwargs,
     ) -> None:
@@ -96,6 +99,69 @@ def __init__(
         self.instruction = self.config.instruction
         self.executor = context.executor if context else None
         self.logger = get_logger(f"{__name__}.{self._name}")
+        manifests: List[SkillManifest] = list(getattr(self.config, "skill_manifests", []) or [])
+        if not manifests and context and getattr(context, "skill_registry", None):
+            try:
+                manifests = list(context.skill_registry.load_manifests())  # type: ignore[assignment]
+            except Exception:
+                manifests = []
+
+        self._skill_manifests = list(manifests)
+        self._skill_map: Dict[str, SkillManifest] = {
+            manifest.name: manifest for manifest in manifests
+        }
+        self._agent_skills_warning_shown = False
+        shell_flag_requested = bool(context and getattr(context, "shell_runtime", False))
+        skills_configured = bool(self._skill_manifests)
+        self._shell_runtime_activation_reason: str | None = None
+
+        if shell_flag_requested and skills_configured:
+            self._shell_runtime_activation_reason = (
+                "via --shell flag and agent skills configuration"
+            )
+        elif shell_flag_requested:
+            self._shell_runtime_activation_reason = "via --shell flag"
+        elif skills_configured:
+            self._shell_runtime_activation_reason = "because agent skills are configured"
+
+        # Get timeout configuration from context
+        timeout_seconds = 90  # default
+        warning_interval_seconds = 30  # default
+        if context and context.config:
+            shell_config = getattr(context.config, "shell_execution", None)
+            if shell_config:
+                timeout_seconds = getattr(shell_config, "timeout_seconds", 90)
+                warning_interval_seconds = getattr(shell_config, "warning_interval_seconds", 30)
+
+        # Derive skills directory from this agent's manifests (respects per-agent config)
+        skills_directory = None
+        if self._skill_manifests:
+            # Get the skills directory from the first manifest's path
+            # Path structure: .fast-agent/skills/skill-name/SKILL.md
+            # So we need parent.parent of the manifest path
+            first_manifest = self._skill_manifests[0]
+            if first_manifest.path:
+                skills_directory = first_manifest.path.parent.parent
+
+        self._shell_runtime = ShellRuntime(
+            self._shell_runtime_activation_reason,
+            self.logger,
+            timeout_seconds=timeout_seconds,
+            warning_interval_seconds=warning_interval_seconds,
+            skills_directory=skills_directory,
+        )
+        self._shell_runtime_enabled = self._shell_runtime.enabled
+        self._shell_access_modes: tuple[str, ...] = ()
+        if self._shell_runtime_enabled:
+            modes: list[str] = ["[red]direct[/red]"]
+            if skills_configured:
+                modes.append("skills")
+            if shell_flag_requested:
+                modes.append("command switch")
+            self._shell_access_modes = tuple(modes)
+        self._bash_tool = self._shell_runtime.tool
+        if self._shell_runtime_enabled:
+            self._shell_runtime.announce()

         # Store the default request params from config
         self._default_request_params = self.config.default_request_params
@@ -207,6 +273,24 @@ async def _apply_instruction_templates(self) -> None:
                 "{{serverInstructions}}", server_instructions
             )

+        skills_placeholder_present = "{{agentSkills}}" in self.instruction
+
+        if skills_placeholder_present:
+            agent_skills = format_skills_for_prompt(self._skill_manifests)
+            self.instruction = self.instruction.replace("{{agentSkills}}", agent_skills)
+            self._agent_skills_warning_shown = True
+        elif self._skill_manifests and not self._agent_skills_warning_shown:
+            warning_message = (
+                "Agent skills are configured but the system prompt does not include {{agentSkills}}. "
+                "Skill descriptions will not be added to the system prompt."
+            )
+            self.logger.warning(warning_message)
+            try:
+                console.console.print(f"[yellow]{warning_message}[/yellow]")
+            except Exception:  # pragma: no cover - console fallback
+                pass
+            self._agent_skills_warning_shown = True
+
         # Update default request params to match
         if self._default_request_params:
             self._default_request_params.systemPrompt = self.instruction
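Reviewer note: the substitution above mirrors the existing `{{serverInstructions}}` handling. A sketch of the expected flow — the rendered text format is whatever `format_skills_for_prompt` produces, which is not shown in this diff:

```python
from fast_agent.skills.registry import format_skills_for_prompt

manifests: list = []  # SkillManifest objects, loaded elsewhere

instruction = """You are a helpful AI Agent.

{{agentSkills}}

The current date is {{currentDate}}."""

# Same replacement the hunk above performs; if the placeholder is missing
# while skills are configured, a one-time warning is logged instead.
instruction = instruction.replace("{{agentSkills}}", format_skills_for_prompt(manifests))
```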
@@ -315,11 +399,12 @@ async def list_tools(self) -> ListToolsResult:
         """
         # Get all tools from the aggregator
         result = await self._aggregator.list_tools()
+        aggregator_tools = list(result.tools)

         # Apply filtering if tools are specified in config
         if self.config.tools is not None:
             filtered_tools = []
-            for tool in result.tools:
+            for tool in aggregator_tools:
                 # Extract server name from tool name, handling server names with hyphens
                 server_name = None
                 for configured_server in self.config.tools.keys():
@@ -334,7 +419,12 @@ async def list_tools(self) -> ListToolsResult:
                     if self._matches_pattern(tool.name, pattern, server_name):
                         filtered_tools.append(tool)
                         break
-            result.tools = filtered_tools
+            aggregator_tools = filtered_tools
+
+        result.tools = aggregator_tools
+
+        if self._bash_tool and all(tool.name != self._bash_tool.name for tool in result.tools):
+            result.tools.append(self._bash_tool)

         # Append human input tool if enabled and available
         if self.config.human_input and getattr(self, "_human_input_tool", None):
@@ -353,6 +443,9 @@ async def call_tool(self, name: str, arguments: Dict[str, Any] | None = None) ->
         Returns:
             Result of the tool call
         """
+        if self._shell_runtime.tool and name == self._shell_runtime.tool.name:
+            return await self._shell_runtime.execute(arguments)
+
         if name == HUMAN_INPUT_TOOL_NAME:
             # Call the elicitation-backed human input tool
             return await self._call_human_input_tool(arguments)
@@ -615,6 +708,10 @@ async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtend
             namespaced_tool.tool.name
             for namespaced_tool in self._aggregator._namespaced_tool_map.values()
         ]
+        if self._shell_runtime.tool:
+            available_tools.append(self._shell_runtime.tool.name)
+
+        available_tools = list(dict.fromkeys(available_tools))

         # Process each tool call using our aggregator
         for correlation_id, tool_request in request.tool_calls.items():
@@ -628,6 +725,8 @@ async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtend
             tool_available = False
             if tool_name == HUMAN_INPUT_TOOL_NAME:
                 tool_available = True
+            elif self._bash_tool and tool_name == self._bash_tool.name:
+                tool_available = True
             elif namespaced_tool:
                 tool_available = True
             else:
@@ -654,6 +753,14 @@ async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtend
                     # Tool not found in list, no highlighting
                     pass

+            metadata: dict[str, Any] | None = None
+            if (
+                self._shell_runtime_enabled
+                and self._shell_runtime.tool
+                and display_tool_name == self._shell_runtime.tool.name
+            ):
+                metadata = self._shell_runtime.metadata(tool_args.get("command"))
+
             self.display.show_tool_call(
                 name=self._name,
                 tool_args=tool_args,
@@ -661,10 +768,11 @@ async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtend
                 tool_name=display_tool_name,
                 highlight_index=highlight_index,
                 max_item_length=12,
+                metadata=metadata,
             )

             try:
-                # Use our aggregator to call the MCP tool
+                # Use the appropriate handler for this tool
                 result = await self.call_tool(tool_name, tool_args)
                 tool_results[correlation_id] = result

@@ -675,12 +783,13 @@ async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtend
                         namespaced_tool.server_name
                     )

-                self.display.show_tool_result(
-                    name=self._name,
-                    result=result,
-                    tool_name=display_tool_name,
-                    skybridge_config=skybridge_config,
-                )
+                if not getattr(result, "_suppress_display", False):
+                    self.display.show_tool_result(
+                        name=self._name,
+                        result=result,
+                        tool_name=display_tool_name,
+                        skybridge_config=skybridge_config,
+                    )

                 self.logger.debug(f"MCP tool {display_tool_name} executed successfully")
             except Exception as e:
@@ -873,6 +982,9 @@ async def list_mcp_tools(

             result[special_server_name].append(self._human_input_tool)

+        # if self._skill_lookup_tool:
+        #     result.setdefault("__skills__", []).append(self._skill_lookup_tool)
+
         return result

     @property
@@ -985,6 +1097,18 @@ async def convert(self, tool: Tool) -> AgentSkill:
         Convert a Tool to an AgentSkill.
         """
+        if tool.name in self._skill_map:
+            manifest = self._skill_map[tool.name]
+            return AgentSkill(
+                id=f"skill:{manifest.name}",
+                name=manifest.name,
+                description=manifest.description or "",
+                tags=["skill"],
+                examples=None,
+                input_modes=None,
+                output_modes=None,
+            )
+
         _, tool_without_namespace = await self._parse_resource_name(tool.name, "tool")
         return AgentSkill(
             id=tool.name,
diff --git a/src/fast_agent/cli/__main__.py b/src/fast_agent/cli/__main__.py
index bf7e39ada..43e6dd4d5 100644
--- a/src/fast_agent/cli/__main__.py
+++ b/src/fast_agent/cli/__main__.py
@@ -1,3 +1,5 @@
+import asyncio
+import json
 import sys

 from fast_agent.cli.constants import GO_SPECIFIC_OPTIONS, KNOWN_SUBCOMMANDS
@@ -8,6 +10,39 @@

 def main():
     """Main entry point that handles auto-routing to 'go' command."""
+    try:
+        loop = asyncio.get_event_loop()
+
+        def _log_asyncio_exception(loop: asyncio.AbstractEventLoop, context: dict) -> None:
+            import logging
+
+            logger = logging.getLogger("fast_agent.asyncio")
+
+            message = context.get("message", "(no message)")
+            task = context.get("task")
+            future = context.get("future")
+            handle = context.get("handle")
+            source_traceback = context.get("source_traceback")
+            exception = context.get("exception")
+
+            details = {
+                "message": message,
+                "task": repr(task) if task else None,
+                "future": repr(future) if future else None,
+                "handle": repr(handle) if handle else None,
+                "source_traceback": [str(frame) for frame in source_traceback] if source_traceback else None,
+            }
+
+            logger.error("Unhandled asyncio error: %s", message)
+            logger.error("Asyncio context: %s", json.dumps(details, indent=2))
+
+            if exception:
+                logger.exception("Asyncio exception", exc_info=exception)
+
+        loop.set_exception_handler(_log_asyncio_exception)
+    except RuntimeError:
+        # No running loop yet (rare for sync entry), safe to ignore
+        pass

     # Check if we should auto-route to 'go'
     if len(sys.argv) > 1:
         # Check if first arg is not already a subcommand
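Reviewer note: both this CLI entry point and `go.py` further down install the same kind of handler; the core pattern is stock asyncio. A minimal self-contained version for reference:

```python
import asyncio
import logging


def install_handler(loop: asyncio.AbstractEventLoop) -> None:
    logger = logging.getLogger("fast_agent.asyncio")

    def _handler(_loop: asyncio.AbstractEventLoop, context: dict) -> None:
        # context may carry "exception", "task", "future", "handle", ...
        logger.error("Unhandled asyncio error: %s", context.get("message", "(no message)"))
        if context.get("exception"):
            logger.exception("Asyncio exception", exc_info=context["exception"])

    loop.set_exception_handler(_handler)


loop = asyncio.new_event_loop()
install_handler(loop)
```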
result["mcp_servers"].append(server_info) + # Skills directory override + skills_cfg = config.get("skills") if isinstance(config, dict) else None + if isinstance(skills_cfg, dict): + directory_value = skills_cfg.get("directory") + if isinstance(directory_value, str) and directory_value.strip(): + result["skills_directory"] = directory_value.strip() + except Exception as e: # File exists but has parse errors result["status"] = "error" @@ -388,6 +397,18 @@ def _print_section_header(title: str, color: str = "blue") -> None: console.print(env_table) + def _relative_path(path: Path) -> str: + try: + return str(path.relative_to(cwd)) + except ValueError: + return str(path) + + skills_override = config_summary.get("skills_directory") + override_directory = Path(skills_override).expanduser() if skills_override else None + skills_registry = SkillRegistry(base_dir=cwd, override_directory=override_directory) + skills_dir = skills_registry.directory + skills_manifests, skill_errors = skills_registry.load_manifests_with_errors() + # Logger Settings panel with two-column layout logger = config_summary.get("logger", {}) logger_table = Table(show_header=True, box=None) @@ -613,6 +634,70 @@ def format_provider_row(provider, status): _print_section_header("MCP Servers", color="blue") console.print(servers_table) + _print_section_header("Agent Skills", color="blue") + if skills_dir: + console.print(f"Directory: [green]{_relative_path(skills_dir)}[/green]") + + if skills_manifests or skill_errors: + skills_table = Table(show_header=True, box=None) + skills_table.add_column("Name", style="cyan", header_style="bold bright_white") + skills_table.add_column("Description", style="white", header_style="bold bright_white") + skills_table.add_column("Source", style="dim", header_style="bold bright_white") + skills_table.add_column("Status", style="green", header_style="bold bright_white") + + def _truncate(text: str, length: int = 70) -> str: + if len(text) <= length: + return text + return text[: length - 3] + "..." + + for manifest in skills_manifests: + try: + relative_source = manifest.path.parent.relative_to(skills_dir) + source_display = str(relative_source) if relative_source != Path(".") else "." + except ValueError: + source_display = _relative_path(manifest.path.parent) + + skills_table.add_row( + manifest.name, + _truncate(manifest.description or ""), + source_display, + "[green]ok[/green]", + ) + + for error in skill_errors: + error_path_str = error.get("path", "") + source_display = "[dim]n/a[/dim]" + if error_path_str: + error_path = Path(error_path_str) + try: + relative_error = error_path.parent.relative_to(skills_dir) + source_display = str(relative_error) if relative_error != Path(".") else "." + except ValueError: + source_display = _relative_path(error_path.parent) + message = error.get("error", "Failed to parse skill manifest") + skills_table.add_row( + "[red]—[/red]", + "[red]n/a[/red]", + source_display, + f"[red]{_truncate(message, 60)}[/red]", + ) + + console.print(skills_table) + else: + console.print("[yellow]No skills found in the directory[/yellow]") + else: + if skills_registry.override_failed and override_directory: + console.print( + f"[red]Override directory not found:[/red] {_relative_path(override_directory)}" + ) + console.print( + "[yellow]Default folders were not loaded because the override failed[/yellow]" + ) + else: + console.print( + "[dim]Agent Skills not configured. 
Go to https://fast-agent.ai/agents/skills/[/dim]" + ) + # Show help tips if config_status == "not_found" or secrets_status == "not_found": console.print("\n[bold]Setup Tips:[/bold]") diff --git a/src/fast_agent/cli/commands/go.py b/src/fast_agent/cli/commands/go.py index 04d88690e..367a2d32b 100644 --- a/src/fast_agent/cli/commands/go.py +++ b/src/fast_agent/cli/commands/go.py @@ -1,9 +1,10 @@ """Run an interactive agent directly from the command line.""" import asyncio +import logging import shlex import sys -from typing import Dict, List, Optional +from pathlib import Path import typer @@ -11,6 +12,7 @@ from fast_agent.agents.llm_agent import LlmAgent from fast_agent.cli.commands.server_helpers import add_servers_to_config, generate_server_name from fast_agent.cli.commands.url_parser import generate_server_configs, parse_server_urls +from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION from fast_agent.ui.console_display import ConsoleDisplay app = typer.Typer( @@ -18,28 +20,60 @@ context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, ) -default_instruction = """You are a helpful AI Agent. +default_instruction = DEFAULT_AGENT_INSTRUCTION -{{serverInstructions}} -The current date is {{currentDate}}.""" +def _set_asyncio_exception_handler(loop: asyncio.AbstractEventLoop) -> None: + """Attach a detailed exception handler to the provided event loop.""" + + logger = logging.getLogger("fast_agent.asyncio") + + def _handler(_loop: asyncio.AbstractEventLoop, context: dict) -> None: + message = context.get("message", "(no message)") + task = context.get("task") + future = context.get("future") + handle = context.get("handle") + source_traceback = context.get("source_traceback") + exception = context.get("exception") + + details = { + "message": message, + "task": repr(task) if task else None, + "future": repr(future) if future else None, + "handle": repr(handle) if handle else None, + "source_traceback": [str(frame) for frame in source_traceback] + if source_traceback + else None, + } + + logger.error("Unhandled asyncio error: %s", message) + logger.error("Asyncio context: %s", details) + + if exception: + logger.exception("Asyncio exception", exc_info=exception) + + try: + loop.set_exception_handler(_handler) + except Exception: + logger = logging.getLogger("fast_agent.asyncio") + logger.exception("Failed to set asyncio exception handler") async def _run_agent( name: str = "fast-agent cli", instruction: str = default_instruction, - config_path: Optional[str] = None, - server_list: Optional[List[str]] = None, - model: Optional[str] = None, - message: Optional[str] = None, - prompt_file: Optional[str] = None, - url_servers: Optional[Dict[str, Dict[str, str]]] = None, - stdio_servers: Optional[Dict[str, Dict[str, str]]] = None, - agent_name: Optional[str] = "agent", + config_path: str | None = None, + server_list: list[str] | None = None, + model: str | None = None, + message: str | None = None, + prompt_file: str | None = None, + url_servers: dict[str, dict[str, str]] | None = None, + stdio_servers: dict[str, dict[str, str]] | None = None, + agent_name: str | None = "agent", + skills_directory: Path | None = None, + shell_runtime: bool = False, ) -> None: """Async implementation to run an interactive agent.""" - from pathlib import Path - from fast_agent.mcp.prompts.prompt_load import load_prompt # Create the FastAgent instance @@ -50,9 +84,15 @@ async def _run_agent( "ignore_unknown_args": True, "parse_cli_args": False, # Don't parse CLI args, we're handling it 
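Reviewer note: `check` drives the new skills panel entirely through `SkillRegistry`. A sketch of that API as used above — the method names come from this diff, their exact return shapes are assumptions:

```python
from pathlib import Path

from fast_agent.skills import SkillRegistry

registry = SkillRegistry(base_dir=Path.cwd(), override_directory=None)
manifests, errors = registry.load_manifests_with_errors()
for manifest in manifests:
    print(manifest.name, manifest.description, manifest.path)
for error in errors:  # dicts with "path" and "error" keys, per the table code above
    print(error.get("path"), error.get("error"))
```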
diff --git a/src/fast_agent/cli/commands/go.py b/src/fast_agent/cli/commands/go.py
index 04d88690e..367a2d32b 100644
--- a/src/fast_agent/cli/commands/go.py
+++ b/src/fast_agent/cli/commands/go.py
@@ -1,9 +1,10 @@
 """Run an interactive agent directly from the command line."""

 import asyncio
+import logging
 import shlex
 import sys
-from typing import Dict, List, Optional
+from pathlib import Path

 import typer

@@ -11,6 +12,7 @@
 from fast_agent.agents.llm_agent import LlmAgent
 from fast_agent.cli.commands.server_helpers import add_servers_to_config, generate_server_name
 from fast_agent.cli.commands.url_parser import generate_server_configs, parse_server_urls
+from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION
 from fast_agent.ui.console_display import ConsoleDisplay

 app = typer.Typer(
@@ -18,28 +20,60 @@
     context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
 )

-default_instruction = """You are a helpful AI Agent.
+default_instruction = DEFAULT_AGENT_INSTRUCTION

-{{serverInstructions}}
-
-The current date is {{currentDate}}."""
+
+def _set_asyncio_exception_handler(loop: asyncio.AbstractEventLoop) -> None:
+    """Attach a detailed exception handler to the provided event loop."""
+
+    logger = logging.getLogger("fast_agent.asyncio")
+
+    def _handler(_loop: asyncio.AbstractEventLoop, context: dict) -> None:
+        message = context.get("message", "(no message)")
+        task = context.get("task")
+        future = context.get("future")
+        handle = context.get("handle")
+        source_traceback = context.get("source_traceback")
+        exception = context.get("exception")
+
+        details = {
+            "message": message,
+            "task": repr(task) if task else None,
+            "future": repr(future) if future else None,
+            "handle": repr(handle) if handle else None,
+            "source_traceback": [str(frame) for frame in source_traceback]
+            if source_traceback
+            else None,
+        }
+
+        logger.error("Unhandled asyncio error: %s", message)
+        logger.error("Asyncio context: %s", details)
+
+        if exception:
+            logger.exception("Asyncio exception", exc_info=exception)
+
+    try:
+        loop.set_exception_handler(_handler)
+    except Exception:
+        logger = logging.getLogger("fast_agent.asyncio")
+        logger.exception("Failed to set asyncio exception handler")


 async def _run_agent(
     name: str = "fast-agent cli",
     instruction: str = default_instruction,
-    config_path: Optional[str] = None,
-    server_list: Optional[List[str]] = None,
-    model: Optional[str] = None,
-    message: Optional[str] = None,
-    prompt_file: Optional[str] = None,
-    url_servers: Optional[Dict[str, Dict[str, str]]] = None,
-    stdio_servers: Optional[Dict[str, Dict[str, str]]] = None,
-    agent_name: Optional[str] = "agent",
+    config_path: str | None = None,
+    server_list: list[str] | None = None,
+    model: str | None = None,
+    message: str | None = None,
+    prompt_file: str | None = None,
+    url_servers: dict[str, dict[str, str]] | None = None,
+    stdio_servers: dict[str, dict[str, str]] | None = None,
+    agent_name: str | None = "agent",
+    skills_directory: Path | None = None,
+    shell_runtime: bool = False,
 ) -> None:
     """Async implementation to run an interactive agent."""
-    from pathlib import Path
-
     from fast_agent.mcp.prompts.prompt_load import load_prompt

     # Create the FastAgent instance
@@ -50,9 +84,15 @@ async def _run_agent(
         "ignore_unknown_args": True,
         "parse_cli_args": False,  # Don't parse CLI args, we're handling it ourselves
     }
+    if skills_directory is not None:
+        fast_kwargs["skills_directory"] = skills_directory

     fast = FastAgent(**fast_kwargs)

+    if shell_runtime:
+        await fast.app.initialize()
+        setattr(fast.app.context, "shell_runtime", True)
+
     # Add all dynamic servers to the configuration
     await add_servers_to_config(fast, url_servers)
     await add_servers_to_config(fast, stdio_servers)
@@ -149,15 +189,17 @@ async def cli_agent():
 def run_async_agent(
     name: str,
     instruction: str,
-    config_path: Optional[str] = None,
-    servers: Optional[str] = None,
-    urls: Optional[str] = None,
-    auth: Optional[str] = None,
-    model: Optional[str] = None,
-    message: Optional[str] = None,
-    prompt_file: Optional[str] = None,
-    stdio_commands: Optional[List[str]] = None,
-    agent_name: Optional[str] = None,
+    config_path: str | None = None,
+    servers: str | None = None,
+    urls: str | None = None,
+    auth: str | None = None,
+    model: str | None = None,
+    message: str | None = None,
+    prompt_file: str | None = None,
+    stdio_commands: list[str] | None = None,
+    agent_name: str | None = None,
+    skills_directory: Path | None = None,
+    shell_enabled: bool = False,
 ):
     """Run the async agent function with proper loop handling."""
     server_list = servers.split(",") if servers else None
@@ -240,10 +282,12 @@ def run_async_agent(
             # Instead, create a new loop
             loop = asyncio.new_event_loop()
             asyncio.set_event_loop(loop)
+            _set_asyncio_exception_handler(loop)
     except RuntimeError:
         # No event loop exists, so we'll create one
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
+        _set_asyncio_exception_handler(loop)

     try:
         loop.run_until_complete(
@@ -258,6 +302,8 @@ def run_async_agent(
                 url_servers=url_servers,
                 stdio_servers=stdio_servers,
                 agent_name=agent_name,
+                skills_directory=skills_directory,
+                shell_runtime=shell_enabled,
             )
         )
     finally:
@@ -280,39 +326,49 @@ def run_async_agent(
 def go(
     ctx: typer.Context,
     name: str = typer.Option("fast-agent", "--name", help="Name for the agent"),
-    instruction: Optional[str] = typer.Option(
+    instruction: str | None = typer.Option(
         None, "--instruction", "-i", help="Path to file or URL containing instruction for the agent"
     ),
-    config_path: Optional[str] = typer.Option(
-        None, "--config-path", "-c", help="Path to config file"
-    ),
-    servers: Optional[str] = typer.Option(
+    config_path: str | None = typer.Option(None, "--config-path", "-c", help="Path to config file"),
+    servers: str | None = typer.Option(
         None, "--servers", help="Comma-separated list of server names to enable from config"
     ),
-    urls: Optional[str] = typer.Option(
+    urls: str | None = typer.Option(
         None, "--url", help="Comma-separated list of HTTP/SSE URLs to connect to"
     ),
-    auth: Optional[str] = typer.Option(
+    auth: str | None = typer.Option(
         None, "--auth", help="Bearer token for authorization with URL-based servers"
     ),
-    model: Optional[str] = typer.Option(
+    model: str | None = typer.Option(
         None, "--model", "--models", help="Override the default model (e.g., haiku, sonnet, gpt-4)"
     ),
-    message: Optional[str] = typer.Option(
+    message: str | None = typer.Option(
         None, "--message", "-m", help="Message to send to the agent (skips interactive mode)"
     ),
-    prompt_file: Optional[str] = typer.Option(
+    prompt_file: str | None = typer.Option(
         None, "--prompt-file", "-p", help="Path to a prompt file to use (either text or JSON)"
     ),
-    npx: Optional[str] = typer.Option(
+    skills_dir: Path | None = typer.Option(
+        None,
+        "--skills-dir",
+        "--skills",
+        help="Override the default skills directory",
+    ),
+    npx: str | None = typer.Option(
         None, "--npx", help="NPX package and args to run as MCP server (quoted)"
     ),
-    uvx: Optional[str] = typer.Option(
+    uvx: str | None = typer.Option(
         None, "--uvx", help="UVX package and args to run as MCP server (quoted)"
     ),
-    stdio: Optional[str] = typer.Option(
+    stdio: str | None = typer.Option(
         None, "--stdio", help="Command to run as STDIO MCP server (quoted)"
     ),
+    shell: bool = typer.Option(
+        False,
+        "--shell",
+        "-x",
+        help="Enable a local shell runtime and expose the execute tool (bash or pwsh).",
+    ),
 ) -> None:
     """
     Run an interactive agent directly from the command line.
@@ -328,6 +384,7 @@ def go(
         fast-agent go --uvx "mcp-server-fetch --verbose"
         fast-agent go --stdio "python my_server.py --debug"
         fast-agent go --stdio "uv run server.py --config=settings.json"
+        fast-agent go --skills /path/to/myskills -x

     This will start an interactive session with the agent, using the specified model
     and instruction. It will use the default configuration from fastagent.config.yaml
@@ -341,12 +398,15 @@ def go(
         --auth          Bearer token for authorization with URL-based servers
         --message, -m   Send a single message and exit
         --prompt-file, -p Use a prompt file instead of interactive mode
+        --skills        Override the default skills folder
+        --shell, -x     Enable local shell runtime
         --npx           NPX package and args to run as MCP server (quoted)
         --uvx           UVX package and args to run as MCP server (quoted)
         --stdio         Command to run as STDIO MCP server (quoted)
     """
     # Collect all stdio commands from convenience options
     stdio_commands = []
+    shell_enabled = shell

     if npx:
         stdio_commands.append(f"npx {npx}")
@@ -357,6 +417,8 @@ def go(
     if stdio:
         stdio_commands.append(stdio)

+    # When shell is enabled we don't add an MCP stdio server; handled inside the agent
+
     # Resolve instruction from file/URL or use default
     resolved_instruction = default_instruction  # Default
     agent_name = "agent"
@@ -396,4 +458,6 @@ def go(
         prompt_file=prompt_file,
         stdio_commands=stdio_commands,
         agent_name=agent_name,
+        skills_directory=skills_dir,
+        shell_enabled=shell_enabled,
     )
diff --git a/src/fast_agent/cli/constants.py b/src/fast_agent/cli/constants.py
index 7825d2583..076a8f642 100644
--- a/src/fast_agent/cli/constants.py
+++ b/src/fast_agent/cli/constants.py
@@ -19,7 +19,19 @@
     "--name",
     "--config-path",
     "-c",
+    "--shell",
+    "-x",
 }

 # Known subcommands that should not trigger auto-routing
-KNOWN_SUBCOMMANDS = {"go", "setup", "check", "auth", "bootstrap", "quickstart", "--help", "-h", "--version"}
+KNOWN_SUBCOMMANDS = {
+    "go",
+    "setup",
+    "check",
+    "auth",
+    "bootstrap",
+    "quickstart",
+    "--help",
+    "-h",
+    "--version",
+}
diff --git a/src/fast_agent/cli/main.py b/src/fast_agent/cli/main.py
index d63e8f9d0..5db0a1dd7 100644
--- a/src/fast_agent/cli/main.py
+++ b/src/fast_agent/cli/main.py
@@ -62,6 +62,7 @@ def _print_section_header(title: str, color: str = "blue") -> None:
     table.add_column("Description", header_style="bold bright_white")

     table.add_row("[bold]go[/bold]", "Start an interactive session")
+    table.add_row("go --shell", "Start an interactive session with a local shell tool")
     table.add_row("check", "Show current configuration")
     table.add_row("auth", "Manage OAuth tokens and keyring")
     table.add_row("setup", "Create agent template and configuration")
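Reviewer note: putting the new `go` options together, the flags map one-to-one onto `run_async_agent` parameters, so `fast-agent go --skills ./myskills -x --model haiku` is roughly equivalent to the call below (illustrative values only; other parameters keep their defaults):

```python
from pathlib import Path

from fast_agent.cli.commands.go import run_async_agent
from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION

run_async_agent(
    name="fast-agent",
    instruction=DEFAULT_AGENT_INSTRUCTION,
    model="haiku",
    skills_directory=Path("./myskills"),  # --skills ./myskills
    shell_enabled=True,                   # -x / --shell
)
```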
return value

-    @field_validator("step_seconds", mode="before")
+
+class SkillsSettings(BaseModel):
+    """Configuration for the skills directory override."""
+
+    directory: str | None = None
+
+    model_config = ConfigDict(extra="ignore")
+
+
+class ShellSettings(BaseModel):
+    """Configuration for shell execution behavior."""
+
+    timeout_seconds: int = 90
+    """Maximum seconds to wait for command output before terminating (default: 90s)"""
+
+    warning_interval_seconds: int = 30
+    """Show timeout warnings every N seconds (default: 30s)"""
+
+    model_config = ConfigDict(extra="ignore")
+
+    @field_validator("timeout_seconds", mode="before")
     @classmethod
-    def _coerce_step_seconds(cls, value: Any) -> int:
+    def _coerce_timeout(cls, value: Any) -> int:
+        """Support duration strings like '90s', '2m', '1h'"""
         if isinstance(value, str):
-            value = cls._parse_duration(value)
-        elif isinstance(value, (int, float)):
-            value = int(value)
-        else:
-            raise TypeError("Timeline step duration must be a number of seconds.")
-        if value <= 0:
-            raise ValueError("Timeline step duration must be greater than zero.")
-        return value
+            return MCPTimelineSettings._parse_duration(value)
+        return int(value)
+
+    @field_validator("warning_interval_seconds", mode="before")
+    @classmethod
+    def _coerce_warning_interval(cls, value: Any) -> int:
+        """Support duration strings like '30s', '1m'"""
+        if isinstance(value, str):
+            return MCPTimelineSettings._parse_duration(value)
+        return int(value)


 class MCPRootSettings(BaseModel):
@@ -591,6 +614,12 @@ class Settings(BaseSettings):
     mcp_timeline: MCPTimelineSettings = MCPTimelineSettings()
     """Display settings for MCP activity timelines."""

+    skills: SkillsSettings = SkillsSettings()
+    """Local skills discovery and selection settings."""
+
+    shell_execution: ShellSettings = ShellSettings()
+    """Shell execution timeout and warning settings."""
+
     @classmethod
     def find_config(cls) -> Path | None:
         """Find the config file in the current directory or parent directories."""
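Reviewer note: since both new models set `extra="ignore"` and hang off `Settings`, the YAML keys from the example config map straight onto them. A hedged sketch (assumes all other `Settings` fields have usable defaults):

```python
from fast_agent.config import Settings

settings = Settings(
    skills={"directory": "~/my-skills"},  # SkillsSettings
    shell_execution={"timeout_seconds": 20, "warning_interval_seconds": 5},  # ShellSettings
)
assert settings.shell_execution.timeout_seconds == 20
assert settings.skills.directory == "~/my-skills"
```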
diff --git a/src/fast_agent/constants.py b/src/fast_agent/constants.py
index 71015aeae..49f47641d 100644
--- a/src/fast_agent/constants.py
+++ b/src/fast_agent/constants.py
@@ -11,3 +11,11 @@
 # should we have MAX_TOOL_CALLS instead to constrain by number of tools rather than turns...?
 DEFAULT_MAX_ITERATIONS = 20
 """Maximum number of User/Assistant turns to take"""
+
+DEFAULT_AGENT_INSTRUCTION = """You are a helpful AI Agent.
+
+{{serverInstructions}}
+
+{{agentSkills}}
+
+The current date is {{currentDate}}."""
diff --git a/src/fast_agent/context.py b/src/fast_agent/context.py
index 08f2497dc..e23694d19 100644
--- a/src/fast_agent/context.py
+++ b/src/fast_agent/context.py
@@ -26,6 +26,7 @@
 from fast_agent.core.logging.logger import LoggingConfig, get_logger
 from fast_agent.core.logging.transport import create_transport
 from fast_agent.mcp_server_registry import ServerRegistry
+from fast_agent.skills import SkillRegistry

 if TYPE_CHECKING:
     from fast_agent.core.executor.workflow_signal import SignalWaitCallback
@@ -56,6 +57,7 @@ class Context(BaseModel):
     # Registries
     server_registry: Optional[ServerRegistry] = None
     task_registry: Optional[ActivityRegistry] = None
+    skill_registry: Optional[SkillRegistry] = None

     tracer: trace.Tracer | None = None
     _connection_manager: "MCPConnectionManager | None" = None
@@ -145,28 +147,26 @@ async def configure_logger(config: "Settings") -> None:
     python_logger.setLevel(settings.level.upper())
     python_logger.propagate = False

-    handler: logging.Handler
+    transport = None
     if settings.type == "console":
+        # Console mode: use the Python logger to emit to stdout and skip additional transport output
         handler = logging.StreamHandler()
-    elif settings.type == "file":
-        log_path = Path(settings.path)
-        if log_path.parent:
-            log_path.parent.mkdir(parents=True, exist_ok=True)
-        handler = logging.FileHandler(log_path)
-    elif settings.type == "none":
-        handler = logging.NullHandler()
+        handler.setLevel(settings.level.upper())
+        handler.setFormatter(logging.Formatter("%(message)s"))
+        python_logger.addHandler(handler)
     else:
-        # For transports that handle output elsewhere (e.g., HTTP), suppress console output.
-        handler = logging.NullHandler()
-
-    handler.setLevel(settings.level.upper())
-    handler.setFormatter(logging.Formatter("%(message)s"))
-    python_logger.addHandler(handler)
+        # For all other modes, rely on transports (file/http/none) and keep the Python logger quiet
+        python_logger.addHandler(logging.NullHandler())

     # Use StreamingExclusionFilter to prevent streaming events from flooding logs
     event_filter: EventFilter = StreamingExclusionFilter(min_level=settings.level)
     logger.info(f"Configuring logger with level: {settings.level}")
-    transport = create_transport(settings=settings, event_filter=event_filter)
+    if settings.type == "console":
+        from fast_agent.core.logging.transport import NoOpTransport
+
+        transport = NoOpTransport(event_filter=event_filter)
+    else:
+        transport = create_transport(settings=settings, event_filter=event_filter)
     await LoggingConfig.configure(
         event_filter=event_filter,
         transport=transport,
@@ -206,6 +206,15 @@ async def initialize_context(
     context.config = config
     context.server_registry = ServerRegistry(config=config)

+    skills_settings = getattr(config, "skills", None)
+    override_directory = None
+    if skills_settings and getattr(skills_settings, "directory", None):
+        override_directory = Path(skills_settings.directory).expanduser()
+    context.skill_registry = SkillRegistry(
+        base_dir=Path.cwd(),
+        override_directory=override_directory,
+    )
+
     # Configure logging and telemetry
     await configure_otel(config)
     await configure_logger(config)
diff --git a/src/fast_agent/core/direct_decorators.py b/src/fast_agent/core/direct_decorators.py
index cacae9873..585633fb0 100644
--- a/src/fast_agent/core/direct_decorators.py
+++ b/src/fast_agent/core/direct_decorators.py
@@ -27,6 +27,7 @@
 from fast_agent.agents.workflow.router_agent import (
     ROUTING_SYSTEM_INSTRUCTION,
 )
+from fast_agent.skills import SkillManifest, SkillRegistry
 from fast_agent.types import RequestParams

 # Type variables for the decorated function
@@ -182,6 +183,9 @@ def _decorator_impl(
     tools: Optional[Dict[str, List[str]]] = None,
     resources: Optional[Dict[str, List[str]]] = None,
     prompts: Optional[Dict[str, List[str]]] = None,
+    skills: SkillManifest | SkillRegistry | Path | str | List[
+        SkillManifest | SkillRegistry | Path | str | None
+    ] | None = None,
     **extra_kwargs,
 ) -> Callable[[Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]]:
     """
@@ -209,6 +213,7 @@ def decorator(func: Callable[P, Coroutine[Any, Any, R]]) -> Callable[P, Coroutin
             tools=tools,
             resources=resources,
             prompts=prompts,
+            skills=skills,
             model=model,
             use_history=use_history,
             human_input=human_input,
@@ -256,6 +261,7 @@ def agent(
     tools: Optional[Dict[str, List[str]]] = None,
     resources: Optional[Dict[str, List[str]]] = None,
     prompts: Optional[Dict[str, List[str]]] = None,
+    skills: SkillManifest | SkillRegistry | Path | str | None = None,
     model: Optional[str] = None,
     use_history: bool = True,
     request_params: RequestParams | None = None,
@@ -306,6 +312,7 @@ def agent(
         tools=tools,
         resources=resources,
         prompts=prompts,
+        skills=skills,
         api_key=api_key,
     )

@@ -321,6 +328,7 @@ def custom(
     tools: Optional[Dict[str, List[str]]] = None,
     resources: Optional[Dict[str, List[str]]] = None,
     prompts: Optional[Dict[str, List[str]]] = None,
+    skills: SkillManifest | SkillRegistry | Path | str | None = None,
     model: Optional[str] = None,
     use_history: bool = True,
     request_params: RequestParams | None = None,
@@ -368,6 +376,7 @@ def custom(
         tools=tools,
         resources=resources,
         prompts=prompts,
+        skills=skills,
     )
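Reviewer note: end-to-end, the new `skills` parameter threads from these decorators into `AgentConfig`. A hedged usage sketch:

```python
from fast_agent import FastAgent

fast = FastAgent("skills example")


@fast.agent(
    name="helper",
    skills="./skills",  # str | Path | SkillManifest | SkillRegistry, resolved at startup
)
async def main() -> None:
    async with fast.run() as app:
        await app.interactive()
```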
diff --git a/src/fast_agent/core/fastagent.py b/src/fast_agent/core/fastagent.py
index 7c22dc57e..0237c385c 100644
--- a/src/fast_agent/core/fastagent.py
+++ b/src/fast_agent/core/fastagent.py
@@ -6,6 +6,7 @@

 import argparse
 import asyncio
+import pathlib
 import sys
 from contextlib import asynccontextmanager
 from importlib.metadata import version as get_version
@@ -76,12 +77,14 @@
     validate_workflow_references,
 )
+from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION
 from fast_agent.mcp.prompts.prompt_load import load_prompt
+from fast_agent.skills import SkillManifest, SkillRegistry
 from fast_agent.ui.usage_display import display_usage_report

 if TYPE_CHECKING:
     from mcp.client.session import ElicitationFnT
     from pydantic import AnyUrl

     from fast_agent.interfaces import AgentProtocol
     from fast_agent.types import PromptMessageExtended
@@ -102,6 +105,7 @@ def __init__(
         ignore_unknown_args: bool = False,
         parse_cli_args: bool = True,
         quiet: bool = False,  # Add quiet parameter
+        skills_directory: str | pathlib.Path | None = None,
         **kwargs,
     ) -> None:
         """
@@ -119,6 +123,10 @@ def __init__(
         """
         self.args = argparse.Namespace()  # Initialize args always
         self._programmatic_quiet = quiet  # Store the programmatic quiet setting
+        self._skills_directory_override = (
+            Path(skills_directory).expanduser() if skills_directory else None
+        )
+        self._default_skill_manifests: List[SkillManifest] = []

         # --- Wrap argument parsing logic ---
         if parse_cli_args:
@@ -173,6 +181,10 @@ def __init__(
                 default="0.0.0.0",
                 help="Host address to bind to when running as a server with SSE transport",
             )
+            parser.add_argument(
+                "--skills",
+                help="Path to skills directory to use instead of default .claude/skills",
+            )

             if ignore_unknown_args:
                 known_args, _ = parser.parse_known_args()
@@ -200,6 +212,14 @@ def __init__(
         if self._programmatic_quiet:
             self.args.quiet = True

+        # Apply CLI skills directory if not already set programmatically
+        if (
+            self._skills_directory_override is None
+            and hasattr(self.args, "skills")
+            and self.args.skills
+        ):
+            self._skills_directory_override = Path(self.args.skills).expanduser()
+
         self.name = name
         self.config_path = config_path

@@ -271,6 +291,7 @@ def context(self) -> Context:
     from collections.abc import Coroutine
     from pathlib import Path

+    from fast_agent.skills import SkillManifest, SkillRegistry
     from fast_agent.types import RequestParams

     P = ParamSpec("P")
@@ -281,11 +302,12 @@ def agent(
         name: str = "default",
         instruction_or_kwarg: Optional[str | Path | AnyUrl] = None,
         *,
-        instruction: str | Path | AnyUrl = "You are a helpful agent.",
+        instruction: str | Path | AnyUrl = DEFAULT_AGENT_INSTRUCTION,
         servers: List[str] = [],
         tools: Optional[Dict[str, List[str]]] = None,
         resources: Optional[Dict[str, List[str]]] = None,
         prompts: Optional[Dict[str, List[str]]] = None,
+        skills: Optional[List[SkillManifest | SkillRegistry | Path | str | None]] = None,
         model: Optional[str] = None,
         use_history: bool = True,
         request_params: RequestParams | None = None,
@@ -430,6 +452,21 @@ async def run(self) -> AsyncIterator["AgentApp"]:
         with tracer.start_as_current_span(self.name):
             try:
                 async with self.app.run():
+                    registry = getattr(self.context, "skill_registry", None)
+                    if self._skills_directory_override is not None:
+                        override_registry = SkillRegistry(
+                            base_dir=Path.cwd(),
+                            override_directory=self._skills_directory_override,
+                        )
+                        self.context.skill_registry = override_registry
+                        registry = override_registry
+
+                    default_skills: List[SkillManifest] = []
+                    if registry:
+                        default_skills = registry.load_manifests()
+
+                    self._apply_skills_to_agent_configs(default_skills)
+
                     # Apply quiet mode if requested
                     if quiet_mode:
                         cfg = self.app.context.config
@@ -621,6 +658,69 @@ def model_factory_func(model=None, request_params=None):
         except Exception:
             pass

+    def _apply_skills_to_agent_configs(self, default_skills: List[SkillManifest]) -> None:
+        self._default_skill_manifests = list(default_skills)
+
+        for agent_data in self.agents.values():
+            config_obj = agent_data.get("config")
+            if not config_obj:
+                continue
+
+            resolved = self._resolve_skills(config_obj.skills)
+            if not resolved:
+                resolved = list(default_skills)
+            else:
+                resolved = self._deduplicate_skills(resolved)
+
+            config_obj.skill_manifests = resolved
+
+    def _resolve_skills(
+        self,
+        entry: SkillManifest
+        | SkillRegistry
+        | Path
+        | str
+        | List[SkillManifest | SkillRegistry | Path | str | None]
+        | None,
+    ) -> List[SkillManifest]:
+        if entry is None:
+            return []
+        if isinstance(entry, list):
+            manifests: List[SkillManifest] = []
+            for item in entry:
+                manifests.extend(self._resolve_skills(item))
+            return manifests
+        if isinstance(entry, SkillManifest):
+            return [entry]
+        if isinstance(entry, SkillRegistry):
+            try:
+                return entry.load_manifests()
+            except Exception:
+                logger.debug(
+                    "Failed to load skills from registry",
+                    data={"registry": type(entry).__name__},
+                )
+                return []
+        if isinstance(entry, Path):
+            return SkillRegistry.load_directory(entry.expanduser().resolve())
+        if isinstance(entry, str):
+            return SkillRegistry.load_directory(Path(entry).expanduser().resolve())
+
+        logger.debug(
+            "Unsupported skill entry type",
+            data={"type": type(entry).__name__},
+        )
+        return []
+
+    @staticmethod
+    def _deduplicate_skills(manifests: List[SkillManifest]) -> List[SkillManifest]:
+        unique: Dict[str, SkillManifest] = {}
+        for manifest in manifests:
+            key = manifest.name.lower()
+            if key not in unique:
+                unique[key] = manifest
+        return list(unique.values())
+
     def _handle_error(self, e: Exception, error_type: Optional[str] = None) -> None:
         """
         Handle errors with consistent formatting and messaging.
diff --git a/src/fast_agent/core/logging/listeners.py b/src/fast_agent/core/logging/listeners.py
index 9d077149f..b486abc01 100644
--- a/src/fast_agent/core/logging/listeners.py
+++ b/src/fast_agent/core/logging/listeners.py
@@ -64,6 +64,14 @@ def convert_log_event(event: Event) -> "ProgressEvent | None":
             chat_turn = event_data.get("chat_turn")
             if chat_turn is not None:
                 details = f"{model} turn {chat_turn}"
+
+            tool_name = event_data.get("tool_name")
+            tool_event = event_data.get("tool_event")
+            if tool_name:
+                tool_suffix = tool_name
+                if tool_event:
+                    tool_suffix = f"{tool_suffix} ({tool_event})"
+                details = f"{details} • {tool_suffix}".strip()
         else:
             if not target:
                 target = event_data.get("target", "unknown")
diff --git a/src/fast_agent/interfaces.py b/src/fast_agent/interfaces.py
index bb4d34d39..58fb40560 100644
--- a/src/fast_agent/interfaces.py
+++ b/src/fast_agent/interfaces.py
@@ -87,9 +87,15 @@ def get_request_params(

     def add_stream_listener(self, listener: Callable[[str], None]) -> Callable[[], None]: ...

+    def add_tool_stream_listener(
+        self, listener: Callable[[str, Dict[str, Any] | None], None]
+    ) -> Callable[[], None]: ...
+
     @property
     def message_history(self) -> List[PromptMessageExtended]: ...

+    def pop_last_message(self) -> PromptMessageExtended | None: ...
+
     @property
     def usage_accumulator(self) -> UsageAccumulator | None: ...

@@ -123,6 +129,8 @@ async def shutdown(self) -> None: ...

     def clear(self, *, clear_prompts: bool = False) -> None: ...

+    def pop_last_message(self) -> PromptMessageExtended | None: ...
+

 class AgentProtocol(LlmAgentProtocol, Protocol):
     """Standard agent interface with flexible input types."""
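Reviewer note: the `add_tool_stream_listener` protocol addition is implemented in `FastAgentLLM` below; listeners receive `"start"`, `"delta"`, `"stop"`, and `"text"` events with the payload keys the providers attach. A minimal consumer, assuming `llm` is any object satisfying the protocol:

```python
from typing import Any


def on_tool_event(event_type: str, info: dict[str, Any] | None) -> None:
    info = info or {}
    if event_type == "start":
        print(f"\n[tool] {info.get('tool_name')} ({info.get('tool_use_id')})")
    elif event_type == "delta" and info.get("streams_arguments"):
        print(info.get("chunk", ""), end="")  # argument JSON chunks, OpenAI only
    elif event_type == "stop":
        print("\n[tool done]")


remove = llm.add_tool_stream_listener(on_tool_event)  # llm: FastAgentLLMProtocol
# ... run a turn ...
remove()  # detach, mirroring how LlmAgent.generate_impl cleans up
```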
+ """ + + self._tool_stream_listeners.add(listener) + + def remove() -> None: + self._tool_stream_listeners.discard(listener) + + return remove + + def _notify_tool_stream_listeners( + self, event_type: str, payload: Dict[str, Any] | None = None + ) -> None: + """Notify listeners about tool streaming lifecycle events.""" + + data = payload or {} + for listener in list(self._tool_stream_listeners): + try: + listener(event_type, data) + except Exception: + self.logger.exception("Tool stream listener raised an exception") + def _log_chat_finished(self, model: Optional[str] = None) -> None: """Log a chat finished event""" data = { @@ -643,6 +675,19 @@ def message_history(self) -> List[PromptMessageExtended]: """ return self._message_history + def pop_last_message(self) -> PromptMessageExtended | None: + """Remove and return the most recent message from the conversation history.""" + if not self._message_history: + return None + + removed = self._message_history.pop() + try: + self.history.pop() + except Exception: + # If provider-specific memory isn't available, ignore to avoid crashing UX + pass + return removed + def clear(self, *, clear_prompts: bool = False) -> None: """Reset stored message history while optionally retaining prompt templates.""" diff --git a/src/fast_agent/llm/memory.py b/src/fast_agent/llm/memory.py index 9c22f7218..1e7f190be 100644 --- a/src/fast_agent/llm/memory.py +++ b/src/fast_agent/llm/memory.py @@ -1,4 +1,4 @@ -from typing import Generic, List, Protocol, TypeVar +from typing import Generic, List, Optional, Protocol, TypeVar # Define our own type variable for implementation use MessageParamT = TypeVar("MessageParamT") @@ -23,6 +23,8 @@ def get(self, include_completion_history: bool = True) -> List[MessageParamT]: . def clear(self, clear_prompts: bool = False) -> None: ... + def pop(self, *, from_prompts: bool = False) -> Optional[MessageParamT]: ... + class SimpleMemory(Memory, Generic[MessageParamT]): """ @@ -108,6 +110,29 @@ def clear(self, clear_prompts: bool = False) -> None: if clear_prompts: self.prompt_messages = [] + def pop(self, *, from_prompts: bool = False) -> Optional[MessageParamT]: + """ + Remove and return the most recent message from history or prompt messages. + + Args: + from_prompts: If True, pop from prompt_messages instead of history + + Returns: + The removed message if available, otherwise None + """ + if from_prompts: + if not self.prompt_messages: + return None + return self.prompt_messages.pop() + + if not self.history: + return None + + removed = self.history.pop() + # Recalculate cache positions now that the history shrank + self.conversation_cache_positions = self._calculate_cache_positions(len(self.history)) + return removed + def should_apply_conversation_cache(self) -> bool: """ Determine if conversation caching should be applied based on walking algorithm. 
diff --git a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
index 608669726..25bb6e188 100644
--- a/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
+++ b/src/fast_agent/llm/provider/anthropic/llm_anthropic.py
@@ -244,10 +244,114 @@ async def _process_stream(self, stream: AsyncMessageStream, model: str) -> Messa
         """Process the streaming response and display real-time token usage."""
         # Track estimated output tokens by counting text chunks
         estimated_tokens = 0
+        tool_streams: dict[int, dict[str, Any]] = {}

         try:
             # Process the raw event stream to get token counts
             async for event in stream:
+                if (
+                    event.type == "content_block_start"
+                    and hasattr(event, "content_block")
+                    and getattr(event.content_block, "type", None) == "tool_use"
+                ):
+                    content_block = event.content_block
+                    tool_streams[event.index] = {
+                        "name": content_block.name,
+                        "id": content_block.id,
+                        "buffer": [],
+                    }
+                    self._notify_tool_stream_listeners(
+                        "start",
+                        {
+                            "tool_name": content_block.name,
+                            "tool_use_id": content_block.id,
+                            "index": event.index,
+                            "streams_arguments": False,  # Anthropic doesn't stream arguments
+                        },
+                    )
+                    self.logger.info(
+                        "Model started streaming tool input",
+                        data={
+                            "progress_action": ProgressAction.CALLING_TOOL,
+                            "agent_name": self.name,
+                            "model": model,
+                            "tool_name": content_block.name,
+                            "tool_use_id": content_block.id,
+                            "tool_event": "start",
+                        },
+                    )
+                    continue
+
+                if (
+                    event.type == "content_block_delta"
+                    and hasattr(event, "delta")
+                    and event.delta.type == "input_json_delta"
+                ):
+                    info = tool_streams.get(event.index)
+                    if info is not None:
+                        chunk = event.delta.partial_json or ""
+                        info["buffer"].append(chunk)
+                        preview = chunk if len(chunk) <= 80 else chunk[:77] + "..."
+                        self._notify_tool_stream_listeners(
+                            "delta",
+                            {
+                                "tool_name": info.get("name"),
+                                "tool_use_id": info.get("id"),
+                                "index": event.index,
+                                "chunk": chunk,
+                                "streams_arguments": False,
+                            },
+                        )
+                        self.logger.debug(
+                            "Streaming tool input delta",
+                            data={
+                                "tool_name": info.get("name"),
+                                "tool_use_id": info.get("id"),
+                                "chunk": preview,
+                            },
+                        )
+                    continue
+
+                if (
+                    event.type == "content_block_stop"
+                    and event.index in tool_streams
+                ):
+                    info = tool_streams.pop(event.index)
+                    preview_raw = "".join(info.get("buffer", []))
+                    if preview_raw:
+                        preview = (
+                            preview_raw if len(preview_raw) <= 120 else preview_raw[:117] + "..."
+                        )
+                        self.logger.debug(
+                            "Completed tool input stream",
+                            data={
+                                "tool_name": info.get("name"),
+                                "tool_use_id": info.get("id"),
+                                "input_preview": preview,
+                            },
+                        )
+                    self._notify_tool_stream_listeners(
+                        "stop",
+                        {
+                            "tool_name": info.get("name"),
+                            "tool_use_id": info.get("id"),
+                            "index": event.index,
+                            "streams_arguments": False,
+                        },
+                    )
+                    self.logger.info(
+                        "Model finished streaming tool input",
+                        data={
+                            "progress_action": ProgressAction.CALLING_TOOL,
+                            "agent_name": self.name,
+                            "model": model,
+                            "tool_name": info.get("name"),
+                            "tool_use_id": info.get("id"),
+                            "tool_event": "stop",
+                        },
+                    )
+                    continue
+
                 # Count tokens in real-time from content_block_delta events
                 if (
                     event.type == "content_block_delta"
@@ -258,6 +362,14 @@ async def _process_stream(self, stream: AsyncMessageStream, model: str) -> Messa
                     estimated_tokens = self._update_streaming_progress(
                         event.delta.text, model, estimated_tokens
                     )
+                    self._notify_tool_stream_listeners(
+                        "text",
+                        {
+                            "chunk": event.delta.text,
+                            "index": event.index,
+                            "streams_arguments": False,
+                        },
+                    )

                 # Also check for final message_delta events with actual usage info
                 elif (
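Reviewer note: the `openai[aiohttp]` extra added to `pyproject.toml` enables the SDK's aiohttp transport used in the next hunk; the upstream pattern is to close the client via an async context manager, which is why the docstring below warns about session cleanup:

```python
from openai import AsyncOpenAI, DefaultAioHttpClient


async def demo() -> None:
    # Context-manage the client so the underlying aiohttp session is closed.
    async with AsyncOpenAI(api_key="sk-...", http_client=DefaultAioHttpClient()) as client:
        response = await client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "ping"}],
        )
        print(response.choices[0].message.content)
```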
+ """ + try: + return AsyncOpenAI( + api_key=self._api_key(), + base_url=self._base_url(), + http_client=DefaultAioHttpClient(), + ) except AuthenticationError as e: raise ProviderKeyError( "Invalid OpenAI API key", @@ -119,16 +129,93 @@ async def _process_stream(self, stream, model: str): # Use ChatCompletionStreamState helper for accumulation (OpenAI only) state = ChatCompletionStreamState() + # Track tool call state for stream events + tool_call_started = {} # Maps index -> bool for tracking start events + # Process the stream chunks async for chunk in stream: # Handle chunk accumulation state.handle_chunk(chunk) - # Count tokens in real-time from content deltas - if chunk.choices and chunk.choices[0].delta.content: - content = chunk.choices[0].delta.content - # Use base class method for token estimation and progress emission - estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens) + # Process streaming events for tool calls + if chunk.choices: + choice = chunk.choices[0] + delta = choice.delta + + # Handle tool call streaming + if delta.tool_calls: + for tool_call in delta.tool_calls: + index = tool_call.index + + # Fire "start" event on first chunk for this tool call + if index not in tool_call_started and tool_call.id and tool_call.function and tool_call.function.name: + tool_call_started[index] = True + self._notify_tool_stream_listeners( + "start", + { + "tool_name": tool_call.function.name, + "tool_use_id": tool_call.id, + "index": index, + "streams_arguments": True, # OpenAI streams arguments! + }, + ) + self.logger.info( + "Model started streaming tool call", + data={ + "progress_action": ProgressAction.CALLING_TOOL, + "agent_name": self.name, + "model": model, + "tool_name": tool_call.function.name, + "tool_use_id": tool_call.id, + "tool_event": "start", + }, + ) + + # Fire "delta" event for argument chunks + if tool_call.function and tool_call.function.arguments: + self._notify_tool_stream_listeners( + "delta", + { + "tool_name": tool_call.function.name if tool_call.function.name else None, + "tool_use_id": tool_call.id, + "index": index, + "chunk": tool_call.function.arguments, + "streams_arguments": True, + }, + ) + + # Handle text content streaming + if delta.content: + content = delta.content + # Use base class method for token estimation and progress emission + estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens) + self._notify_tool_stream_listeners( + "text", + { + "chunk": content, + "streams_arguments": True, + }, + ) + + # Fire "stop" event when tool calls complete + if choice.finish_reason == "tool_calls": + for index in tool_call_started.keys(): + self._notify_tool_stream_listeners( + "stop", + { + "index": index, + "streams_arguments": True, + }, + ) + self.logger.info( + "Model finished streaming tool call", + data={ + "progress_action": ProgressAction.CALLING_TOOL, + "agent_name": self.name, + "model": model, + "tool_event": "stop", + }, + ) # Check if we hit the length limit to avoid LengthFinishReasonError current_snapshot = state.current_completion_snapshot @@ -176,14 +263,92 @@ async def _process_stream_manual(self, stream, model: str): finish_reason = None usage_data = None + # Track tool call state for stream events + tool_call_started = {} # Maps index -> bool for tracking start events + # Process the stream chunks manually async for chunk in stream: - # Count tokens in real-time from content deltas - if chunk.choices and chunk.choices[0].delta.content: - content = chunk.choices[0].delta.content 
-                accumulated_content += content
-                # Use base class method for token estimation and progress emission
-                estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens)
+            # Process streaming events for tool calls
+            if chunk.choices:
+                choice = chunk.choices[0]
+                delta = choice.delta
+
+                # Handle tool call streaming
+                if delta.tool_calls:
+                    for tool_call in delta.tool_calls:
+                        if tool_call.index is not None:
+                            index = tool_call.index
+
+                            # Fire "start" event on first chunk for this tool call
+                            if index not in tool_call_started and tool_call.id and tool_call.function and tool_call.function.name:
+                                tool_call_started[index] = True
+                                self._notify_tool_stream_listeners(
+                                    "start",
+                                    {
+                                        "tool_name": tool_call.function.name,
+                                        "tool_use_id": tool_call.id,
+                                        "index": index,
+                                        "streams_arguments": True,  # OpenAI-compatible providers stream arguments
+                                    },
+                                )
+                                self.logger.info(
+                                    "Model started streaming tool call",
+                                    data={
+                                        "progress_action": ProgressAction.CALLING_TOOL,
+                                        "agent_name": self.name,
+                                        "model": model,
+                                        "tool_name": tool_call.function.name,
+                                        "tool_use_id": tool_call.id,
+                                        "tool_event": "start",
+                                    },
+                                )
+
+                            # Fire "delta" event for argument chunks
+                            if tool_call.function and tool_call.function.arguments:
+                                self._notify_tool_stream_listeners(
+                                    "delta",
+                                    {
+                                        "tool_name": tool_call.function.name if tool_call.function.name else None,
+                                        "tool_use_id": tool_call.id,
+                                        "index": index,
+                                        "chunk": tool_call.function.arguments,
+                                        "streams_arguments": True,
+                                    },
+                                )
+
+                # Handle text content streaming
+                if delta.content:
+                    content = delta.content
+                    accumulated_content += content
+                    # Use base class method for token estimation and progress emission
+                    estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens)
+                    self._notify_tool_stream_listeners(
+                        "text",
+                        {
+                            "chunk": content,
+                            "streams_arguments": True,
+                        },
+                    )
+
+                # Fire "stop" event when tool calls complete
+                if choice.finish_reason == "tool_calls":
+                    for index in tool_call_started.keys():
+                        self._notify_tool_stream_listeners(
+                            "stop",
+                            {
+                                "index": index,
+                                "streams_arguments": True,
+                            },
+                        )
+                    self.logger.info(
+                        "Model finished streaming tool call",
+                        data={
+                            "progress_action": ProgressAction.CALLING_TOOL,
+                            "agent_name": self.name,
+                            "model": model,
+                            "tool_event": "stop",
+                        },
+                    )
 
             # Extract other fields from the chunk
             if chunk.choices:
@@ -343,11 +508,12 @@ async def _openai_completion(
         self._log_chat_progress(self.chat_turn(), model=self.default_request_params.model)
         model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
 
-        # Use basic streaming API
+        # Use basic streaming API with context manager to properly close aiohttp session
        try:
-            stream = await self._openai_client().chat.completions.create(**arguments)
-            # Process the stream
-            response = await self._process_stream(stream, model_name)
+            async with self._openai_client() as client:
+                stream = await client.chat.completions.create(**arguments)
+                # Process the stream
+                response = await self._process_stream(stream, model_name)
         except APIError as error:
             self.logger.error("APIError during OpenAI completion", exc_info=error)
             return self._stream_failure_response(error, model_name)
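# Illustrative sketch (not part of the diff): both providers above emit tool-stream
# events through _notify_tool_stream_listeners with the payload keys shown in this
# diff. A minimal consumer might look like this; the registration call at the end
# is hypothetical (the diff only shows the notify side).

def on_tool_stream(event_type: str, info: dict) -> None:
    if event_type == "start":
        print(f"tool {info['tool_name']} ({info['tool_use_id']}) starting")
    elif event_type == "delta" and info.get("streams_arguments"):
        print(info["chunk"], end="")  # JSON argument fragments (OpenAI-style)
    elif event_type == "text":
        print(info["chunk"], end="")  # assistant text chunks
    elif event_type == "stop":
        print("\ntool call input complete")

# llm.add_tool_stream_listener(on_tool_stream)  # hypothetical registration API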
diff --git a/src/fast_agent/llm/provider/openai/responses.py b/src/fast_agent/llm/provider/openai/responses.py
new file mode 100644
index 000000000..ddc83b794
--- /dev/null
+++ b/src/fast_agent/llm/provider/openai/responses.py
@@ -0,0 +1,133 @@
+# from openai.types.beta.chat import
+from typing import List
+
+from mcp import Tool
+from mcp.types import ContentBlock, TextContent
+from openai import AsyncOpenAI
+from openai.types.chat import (
+    ChatCompletionMessage,
+    ChatCompletionMessageParam,
+)
+from openai.types.responses import (
+    ResponseReasoningItem,
+    ResponseReasoningSummaryTextDeltaEvent,
+    ResponseTextDeltaEvent,
+)
+
+from fast_agent.constants import REASONING
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.event_progress import ProgressAction
+from fast_agent.llm.fastagent_llm import FastAgentLLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.request_params import RequestParams
+from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
+from fast_agent.types.llm_stop_reason import LlmStopReason
+
+_logger = get_logger(__name__)
+
+DEFAULT_RESPONSES_MODEL = "gpt-5-mini"
+DEFAULT_REASONING_EFFORT = "medium"
+
+
+# TODO: model selection
+# TODO: system prompt
+# TODO: usage info
+# TODO: reasoning/thinking display and summary
+# TODO: encrypted tokens
+
+
+class ResponsesLLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage]):
+    """LLM implementation for OpenAI's Responses models."""
+
+    # OpenAI-specific parameter exclusions
+
+    def __init__(self, provider=Provider.RESPONSES, *args, **kwargs):
+        super().__init__(*args, provider=provider, **kwargs)
+
+    async def _responses_client(self) -> AsyncOpenAI:
+        return AsyncOpenAI(api_key=self._api_key())
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List[PromptMessageExtended],
+        request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
+        is_template: bool = False,
+    ) -> PromptMessageExtended:
+        responses_client = await self._responses_client()
+
+        async with responses_client.responses.stream(
+            model=DEFAULT_RESPONSES_MODEL,
+            instructions="You are a helpful assistant.",
+            input=multipart_messages[-1].all_text(),
+            reasoning={"summary": "auto", "effort": DEFAULT_REASONING_EFFORT},
+        ) as stream:
+            reasoning_chars: int = 0
+            text_chars: int = 0
+
+            async for event in stream:
+                if isinstance(event, ResponseReasoningSummaryTextDeltaEvent):
+                    reasoning_chars += len(event.delta)
+                    await self._emit_streaming_progress(
+                        model=f"{DEFAULT_RESPONSES_MODEL} (thinking)",
+                        new_total=reasoning_chars,
+                        type=ProgressAction.THINKING,
+                    )
+                if isinstance(event, ResponseTextDeltaEvent):
+                    # Notify stream listeners with the delta text
+                    self._notify_stream_listeners(event.delta)
+                    text_chars += len(event.delta)
+                    await self._emit_streaming_progress(
+                        model=DEFAULT_RESPONSES_MODEL,
+                        new_total=text_chars,
+                    )
+
+            final_response = await stream.get_final_response()
+            reasoning_content: List[ContentBlock] = []
+            for output_item in final_response.output:
+                if isinstance(output_item, ResponseReasoningItem):
+                    summary_text = "\n".join(part.text for part in output_item.summary if part.text)
+                    # reasoning text is not supplied by openai - leaving for future use with other providers
+                    reasoning_text = "".join(
+                        chunk.text
+                        for chunk in (output_item.content or [])
+                        if chunk.type == "reasoning_text"
+                    )
+                    if summary_text.strip():
+                        reasoning_content.append(TextContent(type="text", text=summary_text.strip()))
+                    if reasoning_text.strip():
+                        reasoning_content.append(
+                            TextContent(type="text", text=reasoning_text.strip())
+                        )
+            channels = {REASONING: reasoning_content} if reasoning_content else None
+
+            return PromptMessageExtended(
+                role="assistant",
+                channels=channels,
+                content=[TextContent(type="text", text=final_response.output_text)],
+                stop_reason=LlmStopReason.END_TURN,
+            )
+
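# Illustrative sketch (not part of the diff): shape of the message returned above
# when one reasoning-summary part was captured. Names mirror the code in this file;
# the text values are made up.
#
#   PromptMessageExtended(
#       role="assistant",
#       channels={REASONING: [TextContent(type="text", text="Summarised reasoning ...")]},
#       content=[TextContent(type="text", text="Final answer text ...")],
#       stop_reason=LlmStopReason.END_TURN,
#   )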
+    async def _emit_streaming_progress(
+        self,
+        model: str,
+        new_total: int,
+        type: ProgressAction = ProgressAction.STREAMING,
+    ) -> None:
+        """Emit a streaming progress event.
+
+        Args:
+            model: The model being used.
+            new_total: The new cumulative character count used for the display.
+            type: The progress action to emit (defaults to STREAMING).
+        """
+        token_str = str(new_total).rjust(5)
+
+        # Emit progress event
+        data = {
+            "progress_action": type,
+            "model": model,
+            "agent_name": self.name,
+            "chat_turn": self.chat_turn(),
+            "details": token_str.strip(),  # Character count goes in details for STREAMING action
+        }
+        self.logger.info("Streaming progress", data=data)
diff --git a/src/fast_agent/skills/__init__.py b/src/fast_agent/skills/__init__.py
new file mode 100644
index 000000000..4647b784c
--- /dev/null
+++ b/src/fast_agent/skills/__init__.py
@@ -0,0 +1,9 @@
+"""Skill discovery utilities."""
+
+from .registry import SkillManifest, SkillRegistry, format_skills_for_prompt
+
+__all__ = [
+    "SkillManifest",
+    "SkillRegistry",
+    "format_skills_for_prompt",
+]
diff --git a/src/fast_agent/skills/registry.py b/src/fast_agent/skills/registry.py
new file mode 100644
index 000000000..89a734059
--- /dev/null
+++ b/src/fast_agent/skills/registry.py
@@ -0,0 +1,200 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, replace
+from pathlib import Path
+from typing import List, Sequence
+
+import frontmatter
+
+from fast_agent.core.logging.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+@dataclass(frozen=True)
+class SkillManifest:
+    """Represents a single skill description loaded from SKILL.md."""
+
+    name: str
+    description: str
+    body: str
+    path: Path
+    relative_path: Path | None = None
+
+
+class SkillRegistry:
+    """Simple registry that resolves a single skills directory and parses manifests."""
+
+    DEFAULT_CANDIDATES = (Path(".fast-agent/skills"), Path(".claude/skills"))
+
+    def __init__(
+        self, *, base_dir: Path | None = None, override_directory: Path | None = None
+    ) -> None:
+        self._base_dir = base_dir or Path.cwd()
+        self._directory: Path | None = None
+        self._override_failed: bool = False
+        self._errors: List[dict[str, str]] = []
+        if override_directory:
+            resolved = self._resolve_directory(override_directory)
+            if resolved and resolved.exists() and resolved.is_dir():
+                self._directory = resolved
+            else:
+                logger.warning(
+                    "Skills directory override not found",
+                    data={"directory": str(resolved)},
+                )
+                self._override_failed = True
+        if self._directory is None and not self._override_failed:
+            self._directory = self._find_default_directory()
+
+    @property
+    def directory(self) -> Path | None:
+        return self._directory
+
+    @property
+    def override_failed(self) -> bool:
+        return self._override_failed
+
+    def load_manifests(self) -> List[SkillManifest]:
+        self._errors = []
+        if not self._directory:
+            return []
+        return self._load_directory(self._directory, self._errors)
+
+    def load_manifests_with_errors(self) -> tuple[List[SkillManifest], List[dict[str, str]]]:
+        manifests = self.load_manifests()
+        return manifests, list(self._errors)
+
+    @property
+    def errors(self) -> List[dict[str, str]]:
+        return list(self._errors)
+
+    def _find_default_directory(self) -> Path | None:
+        for candidate in self.DEFAULT_CANDIDATES:
+            resolved = self._resolve_directory(candidate)
+            if resolved and resolved.exists() and resolved.is_dir():
+                return resolved
+        return None
+
+    def _resolve_directory(self, directory: Path) -> Path:
+        if directory.is_absolute():
+            return directory
+        return (self._base_dir / directory).resolve()
+
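# Illustrative usage sketch (not part of the diff): resolving and loading skills with
# the registry above. With no override, resolution falls back to .fast-agent/skills
# and then .claude/skills under base_dir (the current working directory by default).
#
#   registry = SkillRegistry(base_dir=Path("/my/project"))
#   manifests, errors = registry.load_manifests_with_errors()
#   for manifest in manifests:
#       print(manifest.name, "-", manifest.description)
#   for problem in errors:
#       print("failed:", problem["path"], problem["error"])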
+    @classmethod
+    def load_directory(cls, directory: Path) -> List[SkillManifest]:
+        if not directory.exists() or not directory.is_dir():
+            logger.debug(
+                "Skills directory not found",
+                data={"directory": str(directory)},
+            )
+            return []
+        return cls._load_directory(directory)
+
+    @classmethod
+    def load_directory_with_errors(
+        cls, directory: Path
+    ) -> tuple[List[SkillManifest], List[dict[str, str]]]:
+        errors: List[dict[str, str]] = []
+        manifests = cls._load_directory(directory, errors)
+        return manifests, errors
+
+    @classmethod
+    def _load_directory(
+        cls,
+        directory: Path,
+        errors: List[dict[str, str]] | None = None,
+    ) -> List[SkillManifest]:
+        manifests: List[SkillManifest] = []
+        cwd = Path.cwd()
+        for entry in sorted(directory.iterdir()):
+            if not entry.is_dir():
+                continue
+            manifest_path = entry / "SKILL.md"
+            if not manifest_path.exists():
+                continue
+            manifest, error = cls._parse_manifest(manifest_path)
+            if manifest:
+                relative_path: Path | None = None
+                for base in (cwd, directory):
+                    try:
+                        relative_path = manifest_path.relative_to(base)
+                        break
+                    except ValueError:
+                        continue
+                manifest = replace(manifest, relative_path=relative_path)
+                manifests.append(manifest)
+            elif errors is not None:
+                errors.append(
+                    {
+                        "path": str(manifest_path),
+                        "error": error or "Failed to parse skill manifest",
+                    }
+                )
+        return manifests
+
+    @classmethod
+    def _parse_manifest(cls, manifest_path: Path) -> tuple[SkillManifest | None, str | None]:
+        try:
+            post = frontmatter.loads(manifest_path.read_text(encoding="utf-8"))
+        except Exception as exc:  # noqa: BLE001
+            logger.warning(
+                "Failed to parse skill manifest",
+                data={"path": str(manifest_path), "error": str(exc)},
+            )
+            return None, str(exc)
+
+        metadata = post.metadata or {}
+        name = metadata.get("name")
+        description = metadata.get("description")
+
+        if not isinstance(name, str) or not name.strip():
+            logger.warning("Skill manifest missing name", data={"path": str(manifest_path)})
+            return None, "Missing 'name' field"
+        if not isinstance(description, str) or not description.strip():
+            logger.warning("Skill manifest missing description", data={"path": str(manifest_path)})
+            return None, "Missing 'description' field"
+
+        body_text = (post.content or "").strip()
+
+        return SkillManifest(
+            name=name.strip(),
+            description=description.strip(),
+            body=body_text,
+            path=manifest_path,
+        ), None
+
+
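# Illustrative sketch (not part of the diff): example system-prompt block produced by
# format_skills_for_prompt below for a single manifest, assuming the reconstructed
# <skill> markup. The skill name and path are made up; the preamble text is elided.
#
#   (preamble text) ...
#
#   <skills>
#   <skill name="summarise-pdf" path=".fast-agent/skills/summarise-pdf/SKILL.md">
#   <description>Summarise PDF documents found in the workspace.</description>
#   </skill>
#   </skills>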
\n" + "The 'execute' tool gives you shell access to the current working directory (agent workspace) " + "and outputted files are visible to the User.\n" + "To use a Skill you must first read the SKILL.md file (use 'execute' tool).\n " + "Only use skills listed in below.\n\n" + ) + formatted_parts: List[str] = [] + + for manifest in manifests: + description = (manifest.description or "").strip() + relative_path = manifest.relative_path + path_attr = f' path="{relative_path}"' if relative_path is not None else "" + if relative_path is None and manifest.path: + path_attr = f' path="{manifest.path}"' + + block_lines: List[str] = [f''] + if description: + block_lines.append(f"{description}") + block_lines.append("") + formatted_parts.append("\n".join(block_lines)) + + return "".join( + (f"{preamble}\n", "\n".join(formatted_parts), "\n") + ) diff --git a/src/fast_agent/tools/shell_runtime.py b/src/fast_agent/tools/shell_runtime.py new file mode 100644 index 000000000..e23609e2a --- /dev/null +++ b/src/fast_agent/tools/shell_runtime.py @@ -0,0 +1,404 @@ +from __future__ import annotations + +import asyncio +import os +import platform +import shutil +import signal +import subprocess +import time +from pathlib import Path +from typing import Any, Dict, Optional + +from mcp.types import CallToolResult, TextContent, Tool + +from fast_agent.ui import console +from fast_agent.ui.progress_display import progress_display + + +class ShellRuntime: + """Helper for managing the optional local shell execute tool.""" + + def __init__( + self, + activation_reason: str | None, + logger, + timeout_seconds: int = 90, + warning_interval_seconds: int = 30, + skills_directory: Path | None = None, + ) -> None: + self._activation_reason = activation_reason + self._logger = logger + self._timeout_seconds = timeout_seconds + self._warning_interval_seconds = warning_interval_seconds + self._skills_directory = skills_directory + self.enabled: bool = activation_reason is not None + self._tool: Tool | None = None + + if self.enabled: + # Detect the shell early so we can include it in the tool description + runtime_info = self.runtime_info() + shell_name = runtime_info.get("name", "shell") + + self._tool = Tool( + name="execute", + description=f"Run a shell command ({shell_name}) inside the agent workspace and return its output.", + inputSchema={ + "type": "object", + "properties": { + "command": { + "type": "string", + "description": "Shell command to execute (e.g. 'cat README.md').", + } + }, + "required": ["command"], + "additionalProperties": False, + }, + ) + + @property + def tool(self) -> Tool | None: + return self._tool + + def announce(self) -> None: + """Inform the user why the local shell tool is active.""" + if not self.enabled or not self._activation_reason: + return + + message = f"Local shell execute tool enabled {self._activation_reason}." + self._logger.info(message) + + def working_directory(self) -> Path: + """Return the working directory used for shell execution.""" + # TODO -- reinstate when we provide duplication/isolation of skill workspaces + # if self._skills_directory and self._skills_directory.exists(): + # return self._skills_directory + return Path.cwd() + + def runtime_info(self) -> Dict[str, str | None]: + """Best-effort detection of the shell runtime used for local execution. + + Uses modern Python APIs (platform.system(), shutil.which()) to detect + and prefer modern shells like pwsh (PowerShell 7+) and bash. 
+ """ + system = platform.system() + + if system == "Windows": + # Preference order: pwsh > powershell > cmd + for shell_name in ["pwsh", "powershell", "cmd"]: + shell_path = shutil.which(shell_name) + if shell_path: + return {"name": shell_name, "path": shell_path} + + # Fallback to COMSPEC if nothing found in PATH + comspec = os.environ.get("COMSPEC", "cmd.exe") + return {"name": Path(comspec).name, "path": comspec} + else: + # Unix-like: check SHELL env, then search for common shells + shell_env = os.environ.get("SHELL") + if shell_env and Path(shell_env).exists(): + return {"name": Path(shell_env).name, "path": shell_env} + + # Preference order: bash > zsh > sh + for shell_name in ["bash", "zsh", "sh"]: + shell_path = shutil.which(shell_name) + if shell_path: + return {"name": shell_name, "path": shell_path} + + # Fallback to generic sh + return {"name": "sh", "path": None} + + def metadata(self, command: Optional[str]) -> Dict[str, Any]: + """Build metadata for display when the shell tool is invoked.""" + info = self.runtime_info() + working_dir = self.working_directory() + try: + working_dir_display = str(working_dir.relative_to(Path.cwd())) + except ValueError: + working_dir_display = str(working_dir) + + return { + "variant": "shell", + "command": command, + "shell_name": info.get("name"), + "shell_path": info.get("path"), + "working_dir": str(working_dir), + "working_dir_display": working_dir_display, + "timeout_seconds": self._timeout_seconds, + "warning_interval_seconds": self._warning_interval_seconds, + "streams_output": True, + "returns_exit_code": True, + } + + async def execute(self, arguments: Dict[str, Any] | None = None) -> CallToolResult: + """Execute a shell command and stream output to the console with timeout detection.""" + command_value = (arguments or {}).get("command") if arguments else None + if not isinstance(command_value, str) or not command_value.strip(): + return CallToolResult( + isError=True, + content=[ + TextContent( + type="text", + text="The execute tool requires a 'command' string argument.", + ) + ], + ) + + command = command_value.strip() + self._logger.debug( + f"Executing command with timeout={self._timeout_seconds}s, warning_interval={self._warning_interval_seconds}s" + ) + + # Pause progress display during shell execution to avoid overlaying output + with progress_display.paused(): + try: + working_dir = self.working_directory() + runtime_details = self.runtime_info() + shell_name = (runtime_details.get("name") or "").lower() + shell_path = runtime_details.get("path") + + # Detect platform for process group handling + is_windows = platform.system() == "Windows" + + # Shared process kwargs + process_kwargs: dict[str, Any] = { + "stdout": asyncio.subprocess.PIPE, + "stderr": asyncio.subprocess.PIPE, + "cwd": working_dir, + } + + if is_windows: + # Windows: CREATE_NEW_PROCESS_GROUP allows killing process tree + process_kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP + else: + # Unix: start_new_session creates new process group + process_kwargs["start_new_session"] = True + + # Create the subprocess, preferring PowerShell on Windows when available + if is_windows and shell_path and shell_name in {"pwsh", "powershell"}: + process = await asyncio.create_subprocess_exec( + shell_path, + "-NoLogo", + "-NoProfile", + "-Command", + command, + **process_kwargs, + ) + else: + if shell_path: + process_kwargs["executable"] = shell_path + process = await asyncio.create_subprocess_shell( + command, + **process_kwargs, + ) + + output_segments: 
list[str] = [] + # Track last output time in a mutable container for sharing across coroutines + last_output_time = [time.time()] + timeout_occurred = [False] + watchdog_task = None + + async def stream_output( + stream, style: Optional[str], is_stderr: bool = False + ) -> None: + if not stream: + return + while True: + line = await stream.readline() + if not line: + break + text = line.decode(errors="replace") + output_segments.append(text if not is_stderr else f"[stderr] {text}") + console.console.print( + text.rstrip("\n"), + style=style, + markup=False, + ) + # Update last output time whenever we receive a line + last_output_time[0] = time.time() + + async def watchdog() -> None: + """Monitor output timeout and emit warnings.""" + last_warning_time = 0.0 + self._logger.debug( + f"Watchdog started: timeout={self._timeout_seconds}s, warning_interval={self._warning_interval_seconds}s" + ) + + while True: + await asyncio.sleep(1) # Check every second + + # Check if process has exited + if process.returncode is not None: + self._logger.debug("Watchdog: process exited normally") + break + + elapsed = time.time() - last_output_time[0] + remaining = self._timeout_seconds - elapsed + + # Emit warnings every warning_interval_seconds throughout execution + time_since_warning = elapsed - last_warning_time + if time_since_warning >= self._warning_interval_seconds and remaining > 0: + self._logger.debug(f"Watchdog: warning at {int(remaining)}s remaining") + console.console.print( + f"▶ No output detected - terminating in {int(remaining)}s", + style="black on red", + ) + last_warning_time = elapsed + + # Timeout exceeded + if elapsed >= self._timeout_seconds: + timeout_occurred[0] = True + self._logger.debug( + "Watchdog: timeout exceeded, terminating process group" + ) + console.console.print( + "▶ Timeout exceeded - terminating process", style="black on red" + ) + try: + if is_windows: + # Windows: try to signal the entire process group before terminating + try: + process.send_signal(signal.CTRL_BREAK_EVENT) + await asyncio.sleep(2) + except AttributeError: + # Older Python/asyncio may not support send_signal on Windows + self._logger.debug( + "Watchdog: CTRL_BREAK_EVENT unsupported, skipping" + ) + except ValueError: + # Raised when no console is attached; fall back to terminate + self._logger.debug( + "Watchdog: no console attached for CTRL_BREAK_EVENT" + ) + except ProcessLookupError: + pass # Process already exited + + if process.returncode is None: + process.terminate() + await asyncio.sleep(2) + if process.returncode is None: + process.kill() + else: + # Unix: kill entire process group for clean cleanup + os.killpg(process.pid, signal.SIGTERM) + await asyncio.sleep(2) + if process.returncode is None: + os.killpg(process.pid, signal.SIGKILL) + except (ProcessLookupError, OSError): + pass # Process already terminated + except Exception as e: + self._logger.debug(f"Error terminating process: {e}") + # Fallback: kill just the main process + try: + process.kill() + except Exception: + pass + break + + stdout_task = asyncio.create_task(stream_output(process.stdout, None)) + stderr_task = asyncio.create_task(stream_output(process.stderr, "red", True)) + watchdog_task = asyncio.create_task(watchdog()) + + # Wait for streams to complete + await asyncio.gather(stdout_task, stderr_task, return_exceptions=True) + + # Cancel watchdog if still running + if watchdog_task and not watchdog_task.done(): + watchdog_task.cancel() + try: + await watchdog_task + except asyncio.CancelledError: + pass + + # Wait for 
process to finish + try: + return_code = await asyncio.wait_for(process.wait(), timeout=2.0) + except asyncio.TimeoutError: + # Process didn't exit, force kill + try: + if is_windows: + # Windows: force kill main process + process.kill() + else: + # Unix: SIGKILL to process group + os.killpg(process.pid, signal.SIGKILL) + return_code = await process.wait() + except Exception: + return_code = -1 + + # Build result based on timeout or normal completion + if timeout_occurred[0]: + combined_output = "".join(output_segments) + if combined_output and not combined_output.endswith("\n"): + combined_output += "\n" + combined_output += ( + f"(timeout after {self._timeout_seconds}s - process terminated)" + ) + + result = CallToolResult( + isError=True, + content=[ + TextContent( + type="text", + text=combined_output, + ) + ], + ) + else: + combined_output = "".join(output_segments) + # Add explicit exit code message for the LLM + if combined_output and not combined_output.endswith("\n"): + combined_output += "\n" + combined_output += f"process exit code was {return_code}" + + result = CallToolResult( + isError=return_code != 0, + content=[ + TextContent( + type="text", + text=combined_output, + ) + ], + ) + + # Display bottom separator with exit code + try: + from rich.text import Text + except Exception: # pragma: no cover + Text = None # type: ignore[assignment] + + if Text: + # Build bottom separator matching the style: ─| exit code 0 |───────── + width = console.console.size.width + exit_code_style = "red" if return_code != 0 else "dim" + exit_code_text = f"exit code {return_code}" + + prefix = Text("─| ") + prefix.stylize("dim") + exit_text = Text(exit_code_text, style=exit_code_style) + suffix = Text(" |") + suffix.stylize("dim") + + separator = Text() + separator.append_text(prefix) + separator.append_text(exit_text) + separator.append_text(suffix) + remaining = width - separator.cell_len + if remaining > 0: + separator.append("─" * remaining, style="dim") + + console.console.print() + console.console.print(separator) + else: + console.console.print(f"exit code {return_code}", style="dim") + + setattr(result, "_suppress_display", True) + setattr(result, "exit_code", return_code) + return result + + except Exception as exc: + self._logger.error(f"Execute tool failed: {exc}") + return CallToolResult( + isError=True, + content=[TextContent(type="text", text=f"Command failed to start: {exc}")], + ) diff --git a/src/fast_agent/ui/console_display.py b/src/fast_agent/ui/console_display.py index 75a9c1ab6..9c4e76384 100644 --- a/src/fast_agent/ui/console_display.py +++ b/src/fast_agent/ui/console_display.py @@ -1,4 +1,6 @@ +import asyncio import math +import time from contextlib import contextmanager from enum import Enum from json import JSONDecodeError @@ -12,6 +14,7 @@ from fast_agent.config import Settings from fast_agent.constants import REASONING +from fast_agent.core.logging.logger import get_logger from fast_agent.ui import console from fast_agent.ui.markdown_truncator import MarkdownTruncator from fast_agent.ui.mcp_ui_utils import UILink @@ -27,6 +30,8 @@ from fast_agent.mcp.prompt_message_extended import PromptMessageExtended from fast_agent.mcp.skybridge import SkybridgeServerConfig +logger = get_logger(__name__) + CODE_STYLE = "native" MARKDOWN_STREAM_TARGET_RATIO = 0.7 @@ -884,10 +889,11 @@ def show_tool_call( self, tool_name: str, tool_args: Dict[str, Any] | None, - bottom_items: List[str] | None = None, + bottom_items: list[str] | None = None, highlight_index: int | None = None, 
max_item_length: int | None = None, name: str | None = None, + metadata: Dict[str, Any] | None = None, ) -> None: """Display a tool call in the new visual style. @@ -898,23 +904,77 @@ def show_tool_call( highlight_index: Index of item to highlight in the bottom separator (0-based), or None max_item_length: Optional max length for bottom items (with ellipsis) name: Optional agent name + metadata: Optional dictionary of metadata about the tool call """ if not self.config or not self.config.logger.show_tools: return - # Build right info + tool_args = tool_args or {} + metadata = metadata or {} + # Build right info and specialised content for known variants right_info = f"[dim]tool request - {tool_name}[/dim]" + content: Any = tool_args + pre_content: Text | None = None + truncate_content = True + + if metadata.get("variant") == "shell": + bottom_items = list() + max_item_length = 50 + command = metadata.get("command") or tool_args.get("command") + + command_text = Text() + if command and isinstance(command, str): + # Only prepend $ to the first line, not continuation lines + command_text.append("$ ", style="magenta") + command_text.append(command, style="white") + else: + command_text.append("$ ", style="magenta") + command_text.append("(no shell command provided)", style="dim") + + content = command_text + + # Include shell name and path in the header, with timeout + shell_name = metadata.get("shell_name") or "shell" + shell_path = metadata.get("shell_path") + if shell_path: + bottom_items.append(str(shell_path)) + # Build header right info with shell and timeout + right_parts = [] + if shell_path and shell_path != shell_name: + right_parts.append(f"{shell_name} ({shell_path})") + elif shell_name: + right_parts.append(shell_name) + + right_info = f"[dim]{' | '.join(right_parts)}[/dim]" if right_parts else "" + truncate_content = False + + # Build compact metadata summary - just working directory now + metadata_text = Text() + working_dir_display = metadata.get("working_dir_display") or metadata.get("working_dir") + if working_dir_display: + bottom_items.append(f"cwd: {working_dir_display}") + + timeout_seconds = metadata.get("timeout_seconds") + warning_interval = metadata.get("warning_interval_seconds") + + if timeout_seconds and warning_interval: + bottom_items.append( + f"timeout: {timeout_seconds}s, warning every {warning_interval}s" + ) + + pre_content = metadata_text # Display using unified method self.display_message( - content=tool_args, + content=content, message_type=MessageType.TOOL_CALL, name=name, + pre_content=pre_content, right_info=right_info, bottom_metadata=bottom_items, highlight_index=highlight_index, max_item_length=max_item_length, - truncate_content=True, + truncate_content=truncate_content, ) async def show_tool_update(self, updated_server: str, agent_name: str | None = None) -> None: @@ -1604,6 +1664,16 @@ def __init__( if self._use_plain_text else MARKDOWN_STREAM_REFRESH_PER_SECOND ) + self._min_render_interval = 1.0 / refresh_rate if refresh_rate else None + self._last_render_time = 0.0 + try: + self._loop: asyncio.AbstractEventLoop | None = asyncio.get_running_loop() + except RuntimeError: + self._loop = None + self._async_mode = self._loop is not None + self._queue: asyncio.Queue[object] | None = asyncio.Queue() if self._async_mode else None + self._stop_sentinel: object = object() + self._worker_task: asyncio.Task[None] | None = None self._live: Live | None = Live( initial_renderable, console=console.console, @@ -1626,120 +1696,19 @@ def __init__( ) 
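# Illustrative note (not part of the diff): the streaming display now works as a
# producer/consumer pair. update() enqueues chunks (via call_soon_threadsafe when
# called from another thread or loop) and the _render_worker task created below
# drains the queue, coalescing pending chunks and throttling renders to
# _min_render_interval. Roughly:
#
#   display.update("hello ")   # producer: queue.put_nowait("hello ")
#   display.update("world")    # producer: queue.put_nowait("world")
#   display.close()            # producer: queue.put_nowait(stop_sentinel)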
self._max_render_height = 0 + if self._async_mode and self._loop and self._queue is not None: + self._worker_task = self._loop.create_task(self._render_worker()) + def update(self, chunk: str) -> None: if not self._active or not chunk: return - self._ensure_started() - - if self._use_plain_text: - chunk = self._wrap_plain_chunk(chunk) - if self._pending_table_row: - self._buffer.append(self._pending_table_row) - self._pending_table_row = "" - else: - # Detect if we're streaming table content - # Tables have rows starting with '|' and we want to batch updates until we get a complete row - text_so_far = "".join(self._buffer) - - # Check if we're currently in a table (last non-empty line starts with |) - lines = text_so_far.strip().split("\n") - last_line = lines[-1] if lines else "" - currently_in_table = last_line.strip().startswith("|") - - # If we're in a table and the chunk doesn't contain a newline, accumulate it - if currently_in_table and "\n" not in chunk: - self._pending_table_row += chunk - # Don't update display yet - wait for complete row - return - - # If we have a pending table row, flush it now - if self._pending_table_row: - self._buffer.append(self._pending_table_row) - self._pending_table_row = "" - - self._buffer.append(chunk) - - text = "".join(self._buffer) - - if self._use_plain_text: - trimmed = self._trim_to_displayable(text) - if trimmed != text: - text = trimmed - self._buffer = [trimmed] - - # Guard against single logical paragraphs that would expand far wider than expected. - trailing_paragraph = self._extract_trailing_paragraph(text) - if trailing_paragraph and "\n" not in trailing_paragraph: - width = max(1, console.console.size.width) - target_ratio = ( - PLAIN_STREAM_TARGET_RATIO if self._use_plain_text else MARKDOWN_STREAM_TARGET_RATIO - ) - target_rows = max( - 1, - int(console.console.size.height * target_ratio) - 1, - ) - estimated_rows = math.ceil(len(trailing_paragraph.expandtabs()) / width) - if estimated_rows > target_rows: - trimmed_text = self._trim_to_displayable(text) - if trimmed_text != text: - text = trimmed_text - self._buffer = [trimmed_text] - - # Trim buffer periodically to avoid unbounded growth - # Keep only what can fit in ~1.5x terminal height - if len(self._buffer) > 10: - text = self._trim_to_displayable(text) - self._buffer = [text] - - if self._live: - # Build the header bar - header = self._build_header() - - # Build the content renderable - max_allowed_height = max(1, console.console.size.height - 2) - self._max_render_height = min(self._max_render_height, max_allowed_height) - - if self._use_plain_text: - # Plain text rendering - no markdown processing - content_height = self._estimate_plain_render_height(text) - budget_height = min(content_height + PLAIN_STREAM_HEIGHT_FUDGE, max_allowed_height) - - if budget_height > self._max_render_height: - self._max_render_height = budget_height - - padding_lines = max(0, self._max_render_height - content_height) - display_text = text + ("\n" * padding_lines if padding_lines else "") - content = Text(display_text) - else: - # Markdown rendering with XML escaping - prepared = _prepare_markdown_content(text, self._display._escape_xml) - prepared_for_display = self._close_incomplete_code_blocks(prepared) - - content_height = self._truncator.measure_rendered_height( - prepared_for_display, console.console, CODE_STYLE - ) - budget_height = min( - content_height + MARKDOWN_STREAM_HEIGHT_FUDGE, max_allowed_height - ) - - if budget_height > self._max_render_height: - self._max_render_height = 
budget_height - - padding_lines = max(0, self._max_render_height - content_height) - if padding_lines: - prepared_for_display = prepared_for_display + ("\n" * padding_lines) - - content = Markdown(prepared_for_display, code_theme=CODE_STYLE) - - # Combine header and content using Group - from rich.console import Group + if self._async_mode and self._queue is not None: + self._enqueue_chunk(chunk) + return - header_with_spacing = header.copy() - header_with_spacing.append("\n", style="default") - - combined = Group(header_with_spacing, content) - self._live.update(combined) + if self._handle_chunk(chunk): + self._render_current_buffer() def _build_header(self) -> Text: """Build the header bar as a Text renderable. @@ -1776,11 +1745,7 @@ def _build_header(self) -> Text: return combined - def _ensure_started(self) -> None: - """Start live rendering and pause progress display if needed.""" - if self._live_started: - return - + def _pause_progress_display(self) -> None: if self._progress_display and not self._progress_paused: try: self._progress_display.pause() @@ -1788,6 +1753,25 @@ def _ensure_started(self) -> None: except Exception: self._progress_paused = False + def _resume_progress_display(self) -> None: + if self._progress_display and self._progress_paused: + try: + self._progress_display.resume() + except Exception: + pass + finally: + self._progress_paused = False + + def _ensure_started(self) -> None: + """Start live rendering and pause progress display if needed.""" + if not self._live: + return + + if self._live_started: + return + + self._pause_progress_display() + if self._live and not self._live_started: self._live.__enter__() self._live_started = True @@ -1859,6 +1843,16 @@ def _trim_to_displayable(self, text: str) -> str: prefer_recent=True, # Streaming mode ) + def _switch_to_plain_text(self) -> None: + """Switch from markdown to plain text rendering for tool arguments.""" + if not self._use_plain_text: + self._use_plain_text = True + # Initialize plain truncator if needed + if not self._plain_truncator: + self._plain_truncator = PlainTextTruncator( + target_height_ratio=PLAIN_STREAM_TARGET_RATIO + ) + def finalize(self, _message: "PromptMessageExtended | str") -> None: if not self._active or self._finalized: return @@ -1867,18 +1861,41 @@ def finalize(self, _message: "PromptMessageExtended | str") -> None: self.close() def close(self) -> None: - if self._live and self._live_started: - self._live.__exit__(None, None, None) - self._live = None - self._live_started = False - if self._progress_display and self._progress_paused: - try: - self._progress_display.resume() - except Exception: - pass - finally: - self._progress_paused = False + if not self._active: + return + self._active = False + if self._async_mode: + if self._queue and self._loop: + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + current_loop = None + + # Send stop sentinel to queue + try: + if current_loop is self._loop: + self._queue.put_nowait(self._stop_sentinel) + else: + # Use call_soon_threadsafe from different thread/loop + self._loop.call_soon_threadsafe(self._queue.put_nowait, self._stop_sentinel) + except RuntimeError as e: + # Expected during event loop shutdown - log at debug level + logger.debug( + "RuntimeError while closing streaming display (expected during shutdown)", + data={"error": str(e)}, + ) + except Exception as e: + # Unexpected exception - log at warning level + logger.warning( + "Unexpected error while closing streaming display", + exc_info=True, + 
data={"error": str(e)}, + ) + if self._worker_task: + self._worker_task.cancel() + self._worker_task = None + self._shutdown_live_resources() self._max_render_height = 0 def _extract_trailing_paragraph(self, text: str) -> str: @@ -1958,3 +1975,253 @@ def _estimate_plain_render_height(self, text: str) -> int: expanded_len = len(line.expandtabs()) total += max(1, math.ceil(expanded_len / width)) if expanded_len else 1 return total + + def _enqueue_chunk(self, chunk: str) -> None: + if not self._queue or not self._loop: + return + + try: + current_loop = asyncio.get_running_loop() + except RuntimeError: + current_loop = None + + if current_loop is self._loop: + try: + self._queue.put_nowait(chunk) + except asyncio.QueueFull: + # Shouldn't happen with default unlimited queue, but fail safe + pass + else: + try: + self._loop.call_soon_threadsafe(self._queue.put_nowait, chunk) + except RuntimeError as e: + # Expected during event loop shutdown - log at debug level + logger.debug( + "RuntimeError while enqueuing chunk (expected during shutdown)", + data={"error": str(e), "chunk_length": len(chunk)}, + ) + except Exception as e: + # Unexpected exception - log at warning level + logger.warning( + "Unexpected error while enqueuing chunk", + exc_info=True, + data={"error": str(e), "chunk_length": len(chunk)}, + ) + + def _handle_chunk(self, chunk: str) -> bool: + """ + Process an incoming chunk and determine whether rendering is needed. + + Returns: + True if the display should be updated, False otherwise. + """ + if not chunk: + return False + + if self._use_plain_text: + chunk = self._wrap_plain_chunk(chunk) + if self._pending_table_row: + self._buffer.append(self._pending_table_row) + self._pending_table_row = "" + else: + text_so_far = "".join(self._buffer) + lines = text_so_far.strip().split("\n") + last_line = lines[-1] if lines else "" + currently_in_table = last_line.strip().startswith("|") + + if currently_in_table and "\n" not in chunk: + self._pending_table_row += chunk + return False + + if self._pending_table_row: + self._buffer.append(self._pending_table_row) + self._pending_table_row = "" + + self._buffer.append(chunk) + return True + + def _render_current_buffer(self) -> None: + if not self._buffer: + return + + self._ensure_started() + + if not self._live: + return + + text = "".join(self._buffer) + + if self._use_plain_text: + trimmed = self._trim_to_displayable(text) + if trimmed != text: + text = trimmed + self._buffer = [trimmed] + trailing_paragraph = self._extract_trailing_paragraph(text) + if trailing_paragraph and "\n" not in trailing_paragraph: + width = max(1, console.console.size.width) + target_ratio = ( + PLAIN_STREAM_TARGET_RATIO if self._use_plain_text else MARKDOWN_STREAM_TARGET_RATIO + ) + target_rows = max(1, int(console.console.size.height * target_ratio) - 1) + estimated_rows = math.ceil(len(trailing_paragraph.expandtabs()) / width) + if estimated_rows > target_rows: + trimmed_text = self._trim_to_displayable(text) + if trimmed_text != text: + text = trimmed_text + self._buffer = [trimmed_text] + + if len(self._buffer) > 10: + text = self._trim_to_displayable(text) + self._buffer = [text] + + # Build the header bar + header = self._build_header() + + # Build the content renderable + max_allowed_height = max(1, console.console.size.height - 2) + self._max_render_height = min(self._max_render_height, max_allowed_height) + + if self._use_plain_text: + content_height = self._estimate_plain_render_height(text) + budget_height = min(content_height + 
PLAIN_STREAM_HEIGHT_FUDGE, max_allowed_height) + + if budget_height > self._max_render_height: + self._max_render_height = budget_height + + padding_lines = max(0, self._max_render_height - content_height) + display_text = text + ("\n" * padding_lines if padding_lines else "") + content = Text(display_text) + else: + prepared = _prepare_markdown_content(text, self._display._escape_xml) + prepared_for_display = self._close_incomplete_code_blocks(prepared) + + content_height = self._truncator.measure_rendered_height( + prepared_for_display, console.console, CODE_STYLE + ) + budget_height = min(content_height + MARKDOWN_STREAM_HEIGHT_FUDGE, max_allowed_height) + + if budget_height > self._max_render_height: + self._max_render_height = budget_height + + padding_lines = max(0, self._max_render_height - content_height) + if padding_lines: + prepared_for_display = prepared_for_display + ("\n" * padding_lines) + + content = Markdown(prepared_for_display, code_theme=CODE_STYLE) + + from rich.console import Group + + header_with_spacing = header.copy() + header_with_spacing.append("\n", style="default") + + combined = Group(header_with_spacing, content) + try: + self._live.update(combined) + self._last_render_time = time.monotonic() + except Exception: + # Avoid crashing streaming on renderer errors + pass + + async def _render_worker(self) -> None: + assert self._queue is not None + try: + while True: + try: + item = await self._queue.get() + except asyncio.CancelledError: + break + + if item is self._stop_sentinel: + break + + stop_requested = False + chunks = [item] + while True: + try: + next_item = self._queue.get_nowait() + except asyncio.QueueEmpty: + break + if next_item is self._stop_sentinel: + stop_requested = True + break + chunks.append(next_item) + + should_render = False + for chunk in chunks: + if isinstance(chunk, str): + should_render = self._handle_chunk(chunk) or should_render + + if should_render: + self._render_current_buffer() + if self._min_render_interval: + try: + await asyncio.sleep(self._min_render_interval) + except asyncio.CancelledError: + break + + if stop_requested: + break + except asyncio.CancelledError: + pass + finally: + self._shutdown_live_resources() + + def _shutdown_live_resources(self) -> None: + if self._live and self._live_started: + try: + self._live.__exit__(None, None, None) + except Exception: + pass + self._live = None + self._live_started = False + + self._resume_progress_display() + self._active = False + + def handle_tool_event(self, event_type: str, info: Dict[str, Any] | None = None) -> None: + """Handle tool streaming events with comprehensive error handling. + + This is called from listener callbacks during async streaming, so we need + to be defensive about any errors to prevent crashes in the event loop. 
+ """ + try: + if not self._active: + return + + # Check if this provider streams tool arguments + streams_arguments = info.get("streams_arguments", False) if info else False + + if event_type == "start": + if streams_arguments: + # OpenAI: Switch to plain text and show tool call header + self._switch_to_plain_text() + tool_name = info.get("tool_name", "unknown") if info else "unknown" + self.update(f"\n→ Calling {tool_name}\n") + else: + # Anthropic: Close streaming display immediately + self.close() + return + elif event_type == "delta": + if streams_arguments and info and "chunk" in info: + # Stream the tool argument chunks as plain text + self.update(info["chunk"]) + elif event_type == "text": + self._pause_progress_display() + elif event_type == "stop": + if streams_arguments: + # Close the streaming display + self.update("\n") + self.close() + else: + self._resume_progress_display() + except Exception as e: + # Log but don't crash - streaming display is "nice to have" + logger.warning( + "Error handling tool event", + exc_info=True, + data={ + "event_type": event_type, + "streams_arguments": info.get("streams_arguments") if info else None, + "error": str(e), + }, + ) diff --git a/src/fast_agent/ui/enhanced_prompt.py b/src/fast_agent/ui/enhanced_prompt.py index 8b91c929b..0e16581d7 100644 --- a/src/fast_agent/ui/enhanced_prompt.py +++ b/src/fast_agent/ui/enhanced_prompt.py @@ -121,6 +121,14 @@ async def _display_agent_info_helper(agent_name: str, agent_provider: "AgentApp prompts_dict = await agent.list_prompts() prompt_count = sum(len(prompts) for prompts in prompts_dict.values()) if prompts_dict else 0 + skill_count = 0 + skill_manifests = getattr(agent, "_skill_manifests", None) + if skill_manifests: + try: + skill_count = len(list(skill_manifests)) + except TypeError: + skill_count = 0 + # Handle different agent types if agent.agent_type == AgentType.PARALLEL: # Count child agents for parallel agents @@ -149,36 +157,38 @@ async def _display_agent_info_helper(agent_name: str, agent_provider: "AgentApp f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]:[/dim] {child_count:,}[dim] {child_word}[/dim]" ) else: - # For regular agents, only display if they have MCP servers attached - if server_count > 0: - # Build display parts in order: tools, prompts, resources (omit if count is 0) - display_parts = [] + content_parts = [] + if server_count > 0: + sub_parts = [] if tool_count > 0: tool_word = "tool" if tool_count == 1 else "tools" - display_parts.append(f"{tool_count:,}[dim] {tool_word}[/dim]") - + sub_parts.append(f"{tool_count:,}[dim] {tool_word}[/dim]") if prompt_count > 0: prompt_word = "prompt" if prompt_count == 1 else "prompts" - display_parts.append(f"{prompt_count:,}[dim] {prompt_word}[/dim]") - + sub_parts.append(f"{prompt_count:,}[dim] {prompt_word}[/dim]") if resource_count > 0: resource_word = "resource" if resource_count == 1 else "resources" - display_parts.append(f"{resource_count:,}[dim] {resource_word}[/dim]") + sub_parts.append(f"{resource_count:,}[dim] {resource_word}[/dim]") - # Always show server count server_word = "Server" if server_count == 1 else "Servers" server_text = f"{server_count:,}[dim] MCP {server_word}[/dim]" - - if display_parts: - content = ( - f"{server_text}[dim], [/dim]" - + "[dim], [/dim]".join(display_parts) - + "[dim] available[/dim]" + if sub_parts: + server_text = ( + f"{server_text}[dim] ([/dim]" + + "[dim], [/dim]".join(sub_parts) + + "[dim])[/dim]" ) - else: - content = f"{server_text}[dim] available[/dim]" + 
+            content_parts.append(server_text)
+
+        if skill_count > 0:
+            skill_word = "skill" if skill_count == 1 else "skills"
+            content_parts.append(
+                f"{skill_count:,}[dim] {skill_word} available[/dim]"
+            )
+
+        if content_parts:
+            content = "[dim]. [/dim]".join(content_parts)
             rich_print(f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]:[/dim] {content}")
 
     # await _render_mcp_status(agent)
@@ -351,9 +361,11 @@ def __init__(
         self.commands = {
             "mcp": "Show MCP server status",
             "history": "Show conversation history overview (optionally another agent)",
-            "tools": "List available MCP tools",
+            "tools": "List available MCP Tools",
+            "skills": "List available Agent Skills",
             "prompt": "List and choose MCP prompts, or apply specific prompt (/prompt <name>)",
             "clear": "Clear history",
+            "clear last": "Remove the most recent message from history",
             "agents": "List available agents",
             "system": "Show the current system prompt",
             "usage": "Show current usage statistics",
@@ -854,8 +866,34 @@ def _style_flag(letter: str, supported: bool) -> str:
         )
         session.app.key_bindings = bindings
 
+    shell_agent = None
+    shell_enabled = False
+    shell_access_modes: tuple[str, ...] = ()
+    shell_name: str | None = None
+    if agent_provider:
+        try:
+            shell_agent = agent_provider._agent(agent_name)
+        except Exception:
+            shell_agent = None
+
+        if shell_agent:
+            shell_enabled = bool(getattr(shell_agent, "_shell_runtime_enabled", False))
+            modes_attr = getattr(shell_agent, "_shell_access_modes", ())
+            if isinstance(modes_attr, (list, tuple)):
+                shell_access_modes = tuple(str(mode) for mode in modes_attr)
+            elif modes_attr:
+                shell_access_modes = (str(modes_attr),)
+
+            # Get the detected shell name from the runtime
+            if shell_enabled:
+                shell_runtime = getattr(shell_agent, "_shell_runtime", None)
+                if shell_runtime:
+                    runtime_info = shell_runtime.runtime_info()
+                    shell_name = runtime_info.get("name")
+
     # Create formatted prompt text
-    prompt_text = f"{agent_name} ❯ "
+    arrow_segment = "" if shell_enabled else "❯"
+    prompt_text = f"{agent_name} {arrow_segment} "
 
     # Add default value display if requested
     if show_default and default and default != "STOP":
@@ -887,8 +925,10 @@ def _style_flag(letter: str, supported: bool) -> str:
     # Get logger settings from the agent's context (not agent_provider)
     logger_settings = None
     try:
-        agent = agent_provider._agent(agent_name)
-        agent_context = agent._context or agent.context
+        active_agent = shell_agent
+        if active_agent is None:
+            active_agent = agent_provider._agent(agent_name)
+        agent_context = active_agent._context or active_agent.context
         logger_settings = agent_context.config.logger
     except Exception:
         # If we can't get the agent or its context, logger_settings stays None
@@ -922,6 +962,11 @@ def _style_flag(letter: str, supported: bool) -> str:
                 f"[dim]Experimental: Streaming Enabled - {streaming_mode} mode[/dim]"
             )
 
+        if shell_enabled:
+            modes_display = ", ".join(shell_access_modes or ("direct",))
+            shell_display = f"{modes_display}, {shell_name}" if shell_name else modes_display
+            rich_print(f"[yellow]Shell Access ({shell_display})[/yellow]")
+
         rich_print()
         help_message_shown = True
@@ -953,9 +998,16 @@ def pre_process_input(text):
         elif cmd == "clear":
             target_agent = None
             if len(cmd_parts) > 1:
-                candidate = cmd_parts[1].strip()
-                if candidate:
-                    target_agent = candidate
+                remainder = cmd_parts[1].strip()
+                if remainder:
+                    tokens = remainder.split(maxsplit=1)
+                    if tokens and tokens[0].lower() == "last":
+                        if len(tokens) > 1:
+                            candidate = tokens[1].strip()
+                            if candidate:
+                                target_agent = candidate
+ return {"clear_last": {"agent": target_agent}} + target_agent = remainder return {"clear_history": {"agent": target_agent}} elif cmd == "markdown": return "MARKDOWN" @@ -984,6 +1036,8 @@ def pre_process_input(text): elif cmd == "tools": # Return a dictionary with list_tools action return {"list_tools": True} + elif cmd == "skills": + return {"list_skills": True} elif cmd == "exit": return "EXIT" elif cmd.lower() == "stop": @@ -1147,8 +1201,10 @@ async def handle_special_commands( rich_print(" /system - Show the current system prompt") rich_print(" /prompt - Apply a specific prompt by name") rich_print(" /usage - Show current usage statistics") + rich_print(" /skills - List local skills for the active agent") rich_print(" /history [agent_name] - Show chat history overview") rich_print(" /clear [agent_name] - Clear conversation history (keeps templates)") + rich_print(" /clear last [agent_name] - Remove the most recent message from history") rich_print(" /markdown - Show last assistant message without markdown formatting") rich_print(" /mcpstatus - Show MCP server status summary for the active agent") rich_print(" /save_history - Save current chat history to a file") diff --git a/src/fast_agent/ui/history_display.py b/src/fast_agent/ui/history_display.py index bfc4fb669..28a1bd914 100644 --- a/src/fast_agent/ui/history_display.py +++ b/src/fast_agent/ui/history_display.py @@ -35,6 +35,7 @@ class Colours: USER = "blue" ASSISTANT = "green" TOOL = "magenta" + TOOL_ERROR = "red" HEADER = USER TIMELINE_EMPTY = "dim default" CONTEXT_SAFE = "green" @@ -249,6 +250,7 @@ def _build_history_rows(history: Sequence[PromptMessageExtended]) -> list[dict]: result_rows: list[dict] = [] tool_result_total_chars = 0 tool_result_has_non_text = False + tool_result_has_error = False if tool_calls: names: list[str] = [] @@ -273,6 +275,8 @@ def _build_history_rows(history: Sequence[PromptMessageExtended]) -> list[dict]: tool_result_total_chars += result_chars tool_result_has_non_text = tool_result_has_non_text or result_non_text detail = _format_tool_detail("result→", [tool_name]) + is_error = getattr(result, "isError", False) + tool_result_has_error = tool_result_has_error or is_error result_rows.append( { "role": "tool", @@ -284,6 +288,7 @@ def _build_history_rows(history: Sequence[PromptMessageExtended]) -> list[dict]: "has_tool_request": False, "hide_summary": False, "include_in_timeline": False, + "is_error": is_error, } ) if role == "user": @@ -308,6 +313,7 @@ def _build_history_rows(history: Sequence[PromptMessageExtended]) -> list[dict]: if timeline_role == "tool" and tool_result_total_chars > 0: row_chars = tool_result_total_chars row_non_text = row_non_text or tool_result_has_non_text + row_is_error = tool_result_has_error rows.append( { @@ -320,6 +326,7 @@ def _build_history_rows(history: Sequence[PromptMessageExtended]) -> list[dict]: "has_tool_request": has_tool_request, "hide_summary": hide_in_summary, "include_in_timeline": include_in_timeline, + "is_error": row_is_error, } ) rows.extend(result_rows) @@ -333,12 +340,23 @@ def _aggregate_timeline_entries(rows: Sequence[dict]) -> list[dict]: "role": row.get("timeline_role", row["role"]), "chars": row["chars"], "non_text": row["non_text"], + "is_error": row.get("is_error", False), } for row in rows if row.get("include_in_timeline", True) ] +def _get_role_color(role: str, *, is_error: bool = False) -> str: + """Get the display color for a role, accounting for error states.""" + color_map = {"user": Colours.USER, "assistant": Colours.ASSISTANT, "tool": 
Colours.TOOL} + + if role == "tool" and is_error: + return Colours.TOOL_ERROR + + return color_map.get(role, "white") + + def _shade_block(chars: int, *, non_text: bool, color: str) -> Text: if non_text: return Text(NON_TEXT_MARKER, style=f"bold {color}") @@ -356,12 +374,10 @@ def _shade_block(chars: int, *, non_text: bool, color: str) -> Text: def _build_history_bar(entries: Sequence[dict], width: int = TIMELINE_WIDTH) -> tuple[Text, Text]: - color_map = {"user": Colours.USER, "assistant": Colours.ASSISTANT, "tool": Colours.TOOL} - recent = list(entries[-width:]) bar = Text(" history |", style="dim") for entry in recent: - color = color_map.get(entry["role"], "ansiwhite") + color = _get_role_color(entry["role"], is_error=entry.get("is_error", False)) bar.append_text( _shade_block(entry["chars"], non_text=entry.get("non_text", False), color=color) ) @@ -507,7 +523,6 @@ def display_history_overview( start_index = len(summary_candidates) - len(summary_rows) + 1 role_arrows = {"user": "▶", "assistant": "◀", "tool": "▶"} - role_styles = {"user": Colours.USER, "assistant": Colours.ASSISTANT, "tool": Colours.TOOL} role_labels = {"user": "user", "assistant": "assistant", "tool": "tool result"} try: @@ -517,7 +532,7 @@ def display_history_overview( for offset, row in enumerate(summary_rows): role = row["role"] - color = role_styles.get(role, "white") + color = _get_role_color(role, is_error=row.get("is_error", False)) arrow = role_arrows.get(role, "▶") label = role_labels.get(role, role) if role == "assistant" and row.get("has_tool_request"): diff --git a/src/fast_agent/ui/interactive_prompt.py b/src/fast_agent/ui/interactive_prompt.py index ffc7d9a96..94cf5f208 100644 --- a/src/fast_agent/ui/interactive_prompt.py +++ b/src/fast_agent/ui/interactive_prompt.py @@ -14,6 +14,7 @@ ) """ +from pathlib import Path from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Union, cast if TYPE_CHECKING: @@ -169,6 +170,9 @@ async def prompt_loop( # Handle tools list display await self._list_tools(prompt_provider, agent) continue + elif "list_skills" in command_dict: + await self._list_skills(prompt_provider, agent) + continue elif "show_usage" in command_dict: # Handle usage display await self._show_usage(prompt_provider, agent) @@ -189,6 +193,41 @@ async def prompt_loop( usage = getattr(agent_obj, "usage_accumulator", None) display_history_overview(target_agent, history, usage) continue + elif "clear_last" in command_dict: + clear_info = command_dict.get("clear_last") + clear_agent = ( + clear_info.get("agent") if isinstance(clear_info, dict) else None + ) + target_agent = clear_agent or agent + try: + agent_obj = prompt_provider._agent(target_agent) + except Exception: + rich_print(f"[red]Unable to load agent '{target_agent}'[/red]") + continue + + removed_message = None + pop_callable = getattr(agent_obj, "pop_last_message", None) + if callable(pop_callable): + removed_message = pop_callable() + else: + history = getattr(agent_obj, "message_history", []) + if history: + try: + removed_message = history.pop() + except Exception: + removed_message = None + + if removed_message: + role = getattr(removed_message, "role", "message") + role_display = role.capitalize() if isinstance(role, str) else "Message" + rich_print( + f"[green]Removed last {role_display} for agent '{target_agent}'.[/green]" + ) + else: + rich_print( + f"[yellow]No messages to remove for agent '{target_agent}'.[/yellow]" + ) + continue elif "clear_history" in command_dict: clear_info = 
command_dict.get("clear_history") clear_agent = ( @@ -857,19 +896,21 @@ async def _list_tools(self, prompt_provider: "AgentApp", agent_name: str) -> Non rich_print() # Display tools using clean compact format - for i, tool in enumerate(tools_result.tools, 1): + index = 1 + for tool in tools_result.tools: # Main line: [ 1] tool_name Title from rich.text import Text + meta = getattr(tool, "meta", {}) or {} + tool_line = Text() - tool_line.append(f"[{i:2}] ", style="dim cyan") + tool_line.append(f"[{index:2}] ", style="dim cyan") tool_line.append(tool.name, style="bright_blue bold") # Add title if available if tool.title and tool.title.strip(): tool_line.append(f" {tool.title}", style="default") - meta = getattr(tool, "meta", {}) or {} if meta.get("openai/skybridgeEnabled"): tool_line.append(" (skybridge)", style="cyan") @@ -932,13 +973,77 @@ async def _list_tools(self, prompt_provider: "AgentApp", agent_name: str) -> Non rich_print(f" [dim magenta]template:[/dim magenta] {template}") rich_print() # Space between tools + index += 1 + if index == 1: + rich_print("[yellow]No MCP tools available for this agent[/yellow]") except Exception as e: import traceback rich_print(f"[red]Error listing tools: {e}[/red]") rich_print(f"[dim]{traceback.format_exc()}[/dim]") + async def _list_skills(self, prompt_provider: "AgentApp", agent_name: str) -> None: + """List available local skills for an agent.""" + + try: + assert hasattr(prompt_provider, "_agent"), ( + "Interactive prompt expects an AgentApp with _agent()" + ) + agent = prompt_provider._agent(agent_name) + + rich_print(f"\n[bold]Skills for agent [cyan]{agent_name}[/cyan]:[/bold]") + + skill_manifests = getattr(agent, "_skill_manifests", None) + manifests = list(skill_manifests) if skill_manifests else [] + + if not manifests: + rich_print("[yellow]No skills available for this agent[/yellow]") + return + + rich_print() + + for index, manifest in enumerate(manifests, 1): + from rich.text import Text + + name = getattr(manifest, "name", "") + description = getattr(manifest, "description", "") + path = Path(getattr(manifest, "path", Path())) + + tool_line = Text() + tool_line.append(f"[{index:2}] ", style="dim cyan") + tool_line.append(name, style="bright_blue bold") + rich_print(tool_line) + + if description: + import textwrap + + wrapped_lines = textwrap.wrap( + description.strip(), width=72, subsequent_indent=" " + ) + for line in wrapped_lines: + if line.startswith(" "): + rich_print(f" [white]{line[5:]}[/white]") + else: + rich_print(f" [white]{line}[/white]") + + source_path = path if path else Path(".") + if source_path.is_file(): + source_path = source_path.parent + try: + display_path = source_path.relative_to(Path.cwd()) + except ValueError: + display_path = source_path + + rich_print(f" [dim green]source:[/dim green] {display_path}") + rich_print() + + except Exception as exc: # noqa: BLE001 + import traceback + + rich_print(f"[red]Error listing skills: {exc}[/red]") + rich_print(f"[dim]{traceback.format_exc()}[/dim]") + async def _show_usage(self, prompt_provider: "AgentApp", agent_name: str) -> None: """ Show usage statistics for the current agent(s) in a colorful table format. 
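
The skills plumbing that the `/skills` listing above relies on is easiest to see end to end. Below is a minimal sketch, not part of the diff, assuming only the API that the new unit tests later in this patch exercise: a skill is a directory containing a `SKILL.md` whose YAML front matter (parsed with the newly added python-frontmatter dependency) supplies `name` and `description`, and `SkillRegistry` resolves `.fast-agent/skills` first, falling back to `.claude/skills`. The "summarize" skill here is hypothetical.

```python
# Sketch only: how the skill pieces introduced in this diff fit together.
# SkillRegistry, load_manifests_with_errors(), and the manifest fields are
# taken from the new tests; the "summarize" skill is a made-up example.
from pathlib import Path

from fast_agent.skills.registry import SkillRegistry

base = Path.cwd()
skill_dir = base / ".fast-agent" / "skills" / "summarize"
skill_dir.mkdir(parents=True, exist_ok=True)
(skill_dir / "SKILL.md").write_text(
    "---\nname: summarize\ndescription: Summarise long documents\n---\nBody shown to the model.\n",
    encoding="utf-8",
)

registry = SkillRegistry(base_dir=base)  # prefers .fast-agent/skills, then .claude/skills
manifests, errors = registry.load_manifests_with_errors()
for manifest in manifests:
    print(manifest.name, "-", manifest.description)
for error in errors:
    print("failed to load:", error["path"])  # error entries carry the offending SKILL.md path
```

The same manifests back the `{{agentSkills}}` placeholder substitution checked by `test_agent_skills_template_substitution` further down in this patch.
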
diff --git a/src/fast_agent/ui/markdown_truncator.py b/src/fast_agent/ui/markdown_truncator.py index 7d98d7a75..ec1aab3b3 100644 --- a/src/fast_agent/ui/markdown_truncator.py +++ b/src/fast_agent/ui/markdown_truncator.py @@ -876,7 +876,7 @@ def _fix_incomplete_structures(self, original_text: str, truncated_text: str) -> return truncated_text # Find where the truncated text starts in the original - truncation_pos = original_text.find(truncated_text) + truncation_pos = original_text.rfind(truncated_text) if truncation_pos == -1: # Can't find it, return as-is return truncated_text diff --git a/tests/e2e/multimodal/image_server.py b/tests/e2e/multimodal/image_server.py index fcd65f584..7c883a6e6 100644 --- a/tests/e2e/multimodal/image_server.py +++ b/tests/e2e/multimodal/image_server.py @@ -22,8 +22,14 @@ image_path = "image.png" -@app.tool(name="get_image", description="Returns the sample image with some descriptive text") -async def get_image(image_name: str = "default", ctx: Context = None) -> list[TextContent | ImageContent]: +@app.tool( + name="get_image", + description="Returns the sample image with some descriptive text", + structured_output=False, +) +async def get_image( + image_name: str = "default", ctx: Context = None +) -> list[TextContent | ImageContent]: try: # Use the global image path return [ @@ -38,6 +44,7 @@ async def get_image(image_name: str = "default", ctx: Context = None) -> list[Te @app.tool( name="get_pdf", description="Returns 'sample.pdf' - use when the User requests a sample PDF file", + structured_output=False, ) async def get_pdf() -> list[TextContent | EmbeddedResource]: try: diff --git a/tests/unit/fast_agent/agents/test_mcp_agent_skills.py b/tests/unit/fast_agent/agents/test_mcp_agent_skills.py new file mode 100644 index 000000000..5aa61c2ae --- /dev/null +++ b/tests/unit/fast_agent/agents/test_mcp_agent_skills.py @@ -0,0 +1,91 @@ +from pathlib import Path +from unittest.mock import patch + +import pytest + +from fast_agent.agents.agent_types import AgentConfig +from fast_agent.agents.mcp_agent import McpAgent +from fast_agent.context import Context +from fast_agent.skills.registry import SkillRegistry + + +def create_skill(directory: Path, name: str, description: str = "desc", body: str = "Body") -> None: + skill_dir = directory / name + skill_dir.mkdir(parents=True, exist_ok=True) + manifest = skill_dir / "SKILL.md" + manifest.write_text( + f"""---\nname: {name}\ndescription: {description}\n---\n{body}\n""", + encoding="utf-8", + ) + + +@pytest.mark.asyncio +async def test_mcp_agent_exposes_skill_tools(tmp_path: Path) -> None: + skills_root = tmp_path / "skills" + create_skill(skills_root, "alpha", body="Alpha body") + + manifests = SkillRegistry.load_directory(skills_root) + context = Context() + + config = AgentConfig(name="test", instruction="Instruction", servers=[], skills=skills_root) + config.skill_manifests = manifests + + agent = McpAgent(config=config, context=context) + + tools_result = await agent.list_tools() + tool_names = {tool.name for tool in tools_result.tools} + assert "alpha" not in tool_names + assert manifests[0].relative_path == Path("alpha/SKILL.md") + + +@pytest.mark.asyncio +async def test_agent_skills_template_substitution(tmp_path: Path) -> None: + skills_root = tmp_path / "skills" + create_skill(skills_root, "beta", description="Beta desc", body="Beta body") + + manifests = SkillRegistry.load_directory(skills_root) + context = Context() + + config = AgentConfig( + name="test", + 
instruction="Instructions:\n\n{{agentSkills}}\nEnd.", + servers=[], + skills=skills_root, + ) + config.skill_manifests = manifests + + agent = McpAgent(config=config, context=context) + await agent._apply_instruction_templates() + + assert "{{agentSkills}}" not in agent.instruction + assert '" not in agent.instruction + assert "Beta body" not in agent.instruction + + +@pytest.mark.asyncio +async def test_agent_skills_missing_placeholder_warns(tmp_path: Path) -> None: + skills_root = tmp_path / "skills" + create_skill(skills_root, "gamma") + + manifests = SkillRegistry.load_directory(skills_root) + context = Context() + + config = AgentConfig( + name="test", + instruction="Instruction without placeholder.", + servers=[], + skills=skills_root, + ) + config.skill_manifests = manifests + + agent = McpAgent(config=config, context=context) + + with patch.object(agent.logger, "warning") as mock_warning: + await agent._apply_instruction_templates() + await agent._apply_instruction_templates() + + mock_warning.assert_called_once() + assert "system prompt does not include {{agentSkills}}" in mock_warning.call_args[0][0] diff --git a/tests/unit/fast_agent/skills/test_registry.py b/tests/unit/fast_agent/skills/test_registry.py new file mode 100644 index 000000000..3e889a08b --- /dev/null +++ b/tests/unit/fast_agent/skills/test_registry.py @@ -0,0 +1,91 @@ +from pathlib import Path + +from fast_agent.skills.registry import SkillRegistry + + +def write_skill(directory: Path, name: str, description: str = "desc", body: str = "Body") -> Path: + skill_dir = directory / name + skill_dir.mkdir(parents=True, exist_ok=True) + manifest = skill_dir / "SKILL.md" + manifest.write_text( + f"""--- +name: {name} +description: {description} +--- +{body} +""", + encoding="utf-8", + ) + return manifest + + +def test_default_directory_prefers_fast_agent(tmp_path: Path) -> None: + default_dir = tmp_path / ".fast-agent" / "skills" + write_skill(default_dir, "alpha", body="Alpha body") + write_skill(tmp_path / "claude" / "skills", "beta", body="Beta body") + + registry = SkillRegistry(base_dir=tmp_path) + assert registry.directory == default_dir.resolve() + + manifests = registry.load_manifests() + assert [manifest.name for manifest in manifests] == ["alpha"] + assert manifests[0].body == "Alpha body" + + +def test_default_directory_falls_back_to_claude(tmp_path: Path) -> None: + claude_dir = tmp_path / ".claude" / "skills" + write_skill(claude_dir, "alpha", body="Alpha body") + + registry = SkillRegistry(base_dir=tmp_path) + assert registry.directory == claude_dir.resolve() + manifests = registry.load_manifests() + assert len(manifests) == 1 and manifests[0].name == "alpha" + + +def test_override_directory(tmp_path: Path) -> None: + override_dir = tmp_path / "custom" + write_skill(override_dir, "override", body="Override body") + + registry = SkillRegistry(base_dir=tmp_path, override_directory=override_dir) + assert registry.directory == override_dir.resolve() + + manifests = registry.load_manifests() + assert len(manifests) == 1 + assert manifests[0].name == "override" + assert manifests[0].body == "Override body" + + +def test_load_directory_helper(tmp_path: Path) -> None: + skills_dir = tmp_path / "skills" + write_skill(skills_dir, "alpha") + write_skill(skills_dir, "beta") + + manifests = SkillRegistry.load_directory(skills_dir) + assert {manifest.name for manifest in manifests} == {"alpha", "beta"} + + +def test_no_default_directory(tmp_path: Path) -> None: + registry = SkillRegistry(base_dir=tmp_path) + assert 
registry.directory is None + assert registry.load_manifests() == [] + + +def test_registry_reports_errors(tmp_path: Path) -> None: + invalid_dir = tmp_path / ".fast-agent" / "skills" / "invalid" + invalid_dir.mkdir(parents=True) + (invalid_dir / "SKILL.md").write_text("invalid front matter", encoding="utf-8") + + registry = SkillRegistry(base_dir=tmp_path) + manifests, errors = registry.load_manifests_with_errors() + assert manifests == [] + assert errors + assert "invalid" in errors[0]["path"] + + +def test_override_missing_directory(tmp_path: Path) -> None: + override_dir = tmp_path / "missing" / "skills" + registry = SkillRegistry(base_dir=tmp_path, override_directory=override_dir) + manifests = registry.load_manifests() + assert manifests == [] + assert registry.override_failed is True + assert registry.directory is None diff --git a/tests/unit/fast_agent/ui/test_console_display.py b/tests/unit/fast_agent/ui/test_console_display.py deleted file mode 100644 index bb1b26cab..000000000 --- a/tests/unit/fast_agent/ui/test_console_display.py +++ /dev/null @@ -1,368 +0,0 @@ -"""Unit tests for console_display module, specifically _prepare_markdown_content.""" - -from fast_agent.ui.console_display import _prepare_markdown_content - - -class TestPrepareMarkdownContent: - """Test the _prepare_markdown_content function.""" - - def test_none_input(self): - """Test that None input returns None unchanged.""" - result = _prepare_markdown_content(None) - assert result is None - - def test_none_input_with_escape_false(self): - """Test that None input returns None even when escape_xml is False.""" - result = _prepare_markdown_content(None, escape_xml=False) - assert result is None - - def test_empty_string(self): - """Test that empty string doesn't crash and returns empty string.""" - result = _prepare_markdown_content("") - assert result == "" - - def test_empty_string_with_escape_false(self): - """Test that empty string returns empty when escape_xml is False.""" - result = _prepare_markdown_content("", escape_xml=False) - assert result == "" - - def test_escape_xml_false_returns_unchanged(self): - """Test that escape_xml=False returns content unchanged.""" - content = "<tag>content</tag> & 'quotes' \"double\"" - result = _prepare_markdown_content(content, escape_xml=False) - assert result == content - - def test_non_string_input(self): - """Test that non-string inputs are returned unchanged.""" - # Test with integer - result = _prepare_markdown_content(123) - assert result == 123 - - # Test with list - result = _prepare_markdown_content([1, 2, 3]) - assert result == [1, 2, 3] - - # Test with dict - test_dict = {"key": "value"} - result = _prepare_markdown_content(test_dict) - assert result == test_dict - - def test_basic_html_escaping(self): - """Test that HTML characters are properly escaped outside code blocks.""" - content = "This has <tag> and & and > and < and \" and ' characters" - result = _prepare_markdown_content(content) - expected = ( - "This has &lt;tag&gt; and &amp; and &gt; and &lt; and &quot; and &#x27; characters" - ) - assert result == expected - - def test_preserves_fenced_code_blocks(self): - """Test that content inside fenced code blocks is not escaped.""" - content = """Before code -```python -def func(): - return "<tag>" & 'value' -``` -After code with <tag>""" - result = _prepare_markdown_content(content) - - # Check that code block content is preserved - assert "def func():" in result - assert "return \"<tag>\" & 'value'" in result - - # Check that content outside code blocks is escaped - assert "After code with &lt;tag&gt;" in result - - 
def test_preserves_inline_code(self): - """Test that content inside inline code is not escaped.""" - content = "Use `<tag>` and `x & y` in code, but escape <tag> outside" - result = _prepare_markdown_content(content) - - # Inline code should be preserved - assert "`<tag>`" in result - assert "`x & y`" in result - - # Outside content should be escaped - assert "but escape &lt;tag&gt; outside" in result - - def test_multiple_code_blocks(self): - """Test handling of multiple code blocks in the same content.""" - content = """First <tag> -``` -<tag> & "quotes" -``` -Middle <tag> -``` -<tag> & 'quotes' -``` -End <tag>""" - result = _prepare_markdown_content(content) - - # Code blocks should be preserved - assert '<tag> & "quotes"' in result - assert "<tag> & 'quotes'" in result - - # Outside content should be escaped - assert "First &lt;tag&gt;" in result - assert "Middle &lt;tag&gt;" in result - assert "End &lt;tag&gt;" in result - - def test_mixed_inline_and_fenced_code(self): - """Test content with both inline and fenced code blocks.""" - content = """Use `<tag>` here -``` -<tag> & "code" -``` -And `<tag>` inline with <tag> outside""" - result = _prepare_markdown_content(content) - - # Both types of code should be preserved - assert "`<tag>`" in result - assert '<tag> & "code"' in result - assert "`<tag>`" in result - - # Outside content should be escaped - assert "with &lt;tag&gt; outside" in result - - def test_empty_code_blocks(self): - """Test that empty code blocks don't cause issues.""" - content = """Before -``` -``` -After <tag>""" - result = _prepare_markdown_content(content) - assert "After &lt;tag&gt;" in result - - def test_nested_backticks_not_treated_as_inline_code(self): - """Test that triple backticks are not treated as inline code.""" - content = "This ```is not``` inline code <tag>" - result = _prepare_markdown_content(content) - # The content between triple backticks should be escaped - assert "```is not``` inline code &lt;tag&gt;" in result - - def test_single_backtick_not_treated_as_code(self): - """Test that single backtick without closing is not treated as code.""" - content = "This ` is not code <tag>" - result = _prepare_markdown_content(content) - assert "This ` is not code &lt;tag&gt;" in result - - def test_all_escape_characters(self): - """Test that all defined escape characters are properly replaced.""" - content = "& < > \" '" - result = _prepare_markdown_content(content) - assert result == "&amp; &lt; &gt; &quot; &#x27;" - - def test_preserve_newlines_and_whitespace(self): - """Test that newlines and whitespace are preserved.""" - content = "Line 1\n Line 2 with spaces\n\tLine 3 with tab" - result = _prepare_markdown_content(content) - assert "Line 1\n Line 2 with spaces\n\tLine 3 with tab" == result - - def test_code_block_at_start(self): - """Test code block at the very start of content.""" - content = """``` -<tag> -``` -After <tag>""" - result = _prepare_markdown_content(content) - assert "<tag>" in result - assert "After &lt;tag&gt;" in result - - def test_code_block_at_end(self): - """Test code block at the very end of content.""" - content = """Before <tag> -``` -<tag> -```""" - result = _prepare_markdown_content(content) - assert "Before &lt;tag&gt;" in result - assert "<tag>" in result - - def test_adjacent_inline_code(self): - """Test adjacent inline code blocks. - - With the markdown-it parser approach, adjacent inline code is properly - identified and preserved (unlike the old regex approach). 
- """ - # Test with space between inline code blocks (works correctly) - content = "`<tag>` `<tag>` and <tag>" - result = _prepare_markdown_content(content) - assert "`<tag>`" in result - assert "`<tag>`" in result - assert "and &lt;tag&gt;" in result - - # Adjacent without space - markdown-it parser handles this correctly - content_adjacent = "`<tag>``<tag>` and <tag>" - result_adjacent = _prepare_markdown_content(content_adjacent) - # With parser-based approach, inline code is properly identified and preserved - assert "`<tag>`" in result_adjacent - assert "`<tag>`" in result_adjacent - assert "and &lt;tag&gt;" in result_adjacent - - def test_realistic_xml_content(self): - """Test with realistic XML content that should be escaped.""" - content = """Here's an XML example: -<root> - <child attr="value">Content & more</child> -</root> - -But in code it's preserved: -```xml -<root> - <child attr="value">Content & more</child> -</root> -```""" - result = _prepare_markdown_content(content) - - # Outside code should be escaped - assert "&lt;root&gt;" in result - assert "&lt;child attr=&quot;value&quot;&gt;" in result - - # Inside code should be preserved - assert ' <child attr="value">Content & more</child>' in result - - def test_multiple_code_blocks_with_language_tags(self): - """Test handling of multiple code blocks with language specifiers (e.g., ```html, ```typescript).""" - content = """I'll create a compact, self-contained TypeScript game (~200 lines) that runs in the browser. -It's a mini "Dodge-Faller" arcade: you move left/right with arrow keys, avoid falling blocks, and survive as long as possible. Everything (HTML, CSS, TS) is in one snippet so you can copy-paste it into a single `.ts` file, compile with `tsc`, and open the resulting `.html`. - -```html - - - - - Dodge-Faller - - - - - - - -``` - -```typescript -// game.ts -const canvas = document.getElementById("c") as HTMLCanvasElement; -const ctx = canvas.getContext("2d")!; -const W = canvas.width; -const H = canvas.height; - -const PLAYER_W = 40; -const PLAYER_H = 20; -const PLAYER_SPEED = 6; -const BLOCK_W = 30; -const BLOCK_H = 30; -const BLOCK_SPEED = 3; -const BLOCK_SPAWN_EVERY = 45; // frames - -let playerX = W / 2 - PLAYER_W / 2; -let playerY = H - PLAYER_H - 10; -let blocks: { x: number; y: number }[] = []; -let frame = 0; -let running = true; -let score = 0; - -const keys: Record<string, boolean> = {}; -window.addEventListener("keydown", e => keys[e.key] = true); -window.addEventListener("keyup", e => keys[e.key] = false); - -function rand(min: number, max: number) { - return Math.random() * (max - min) + min; -} - -function collides(ax: number, ay: number, aw: number, ah: number, - bx: number, by: number, bw: number, bh: number) { - return ax < bx + bw && ax + aw > bx && ay < by + bh && ay + ah > by; -} - -function update() { - if (!running) return; - frame++; - - // move player - if (keys["ArrowLeft"]) playerX = Math.max(0, playerX - PLAYER_SPEED); - if (keys["ArrowRight"]) playerX = Math.min(W - PLAYER_W, playerX + PLAYER_SPEED); - - // spawn blocks - if (frame % BLOCK_SPAWN_EVERY === 0) { - blocks.push({ x: rand(0, W - BLOCK_W), y: -BLOCK_H }); - } - - // move blocks - for (const b of blocks) b.y += BLOCK_SPEED; - - // remove off-screen blocks - blocks = blocks.filter(b => b.y < H + BLOCK_H); - - // collisions - for (const b of blocks) { - if (collides(playerX, playerY, PLAYER_W, PLAYER_H, b.x, b.y, BLOCK_W, BLOCK_H)) { - running = false; - } - } - - score = Math.max(score, Math.floor(frame / 10)); -} - -function draw() { - ctx.clearRect(0, 0, W, H); - - // player - ctx.fillStyle = "#0af"; - ctx.fillRect(playerX, playerY, PLAYER_W, PLAYER_H); - - // blocks - ctx.fillStyle = "#f44"; - for (const b of blocks) ctx.fillRect(b.x, 
b.y, BLOCK_W, BLOCK_H); - - // score - ctx.fillStyle = "#fff"; - ctx.font = "20px monospace"; - ctx.fillText(`Score: ${score}`, 10, 30); - - if (!running) { - ctx.fillStyle = "rgba(0,0,0,0.7)"; - ctx.fillRect(0, 0, W, H); - ctx.fillStyle = "#fff"; - ctx.font = "30px monospace"; - ctx.fillText("Game Over", W / 2 - 70, H / 2); - ctx.font = "16px monospace"; - ctx.fillText("Refresh to play again", W / 2 - 90, H / 2 + 30); - } -} - -function loop() { - update(); - draw(); - requestAnimationFrame(loop); -} - -requestAnimationFrame(loop); -``` - -Compile with -`tsc game.ts --target ES2020 --module none --outFile game.js` -and open the HTML file in your browser.""" - - result = _prepare_markdown_content(content) - - # Debug: print the result to see what's happening - print("\n=== RESULT ===") - print(result) - print("\n=== END RESULT ===") - - # The code blocks should be preserved exactly as they are (no HTML encoding inside) - assert '' in result, "HTML code block content should not be escaped" - assert 'ctx.fillText("Game Over", W / 2 - 70, H / 2)' in result, "TypeScript string literals should not be escaped" - - # Content should not be duplicated - # Note: "Game Over" appears once, but requestAnimationFrame(loop) appears twice - # (once inside loop(), once to call it initially) - assert result.count('ctx.fillText("Game Over"') == 1, "Content should not be duplicated" - assert result.count('requestAnimationFrame(loop)') == 2, "Should match the 2 instances in original code (not 4 like the bug produced)" - - # Outside code blocks, backticks for inline code should be preserved - assert "`tsc game.ts" in result or "tsc game.ts" in result, "Inline code or plain text should be present" diff --git a/tests/ui/test_console_display_plain_wrap.py b/tests/unit/fast_agent/ui/test_console_display_plain_wrap.py similarity index 100% rename from tests/ui/test_console_display_plain_wrap.py rename to tests/unit/fast_agent/ui/test_console_display_plain_wrap.py diff --git a/tests/unit/fast_agent/ui/test_console_display_skybridge.py b/tests/unit/fast_agent/ui/test_console_display_skybridge.py deleted file mode 100644 index 14fc48c70..000000000 --- a/tests/unit/fast_agent/ui/test_console_display_skybridge.py +++ /dev/null @@ -1,57 +0,0 @@ -from fast_agent.mcp.skybridge import ( - SkybridgeResourceConfig, - SkybridgeServerConfig, - SkybridgeToolConfig, -) -from fast_agent.ui.console_display import ConsoleDisplay - - -def test_summarize_skybridge_configs_flags_invalid_resource() -> None: - resource_warning = "served as 'text/html' instead of 'text/html+skybridge'" - resource = SkybridgeResourceConfig( - uri="ui://widget/pizza-map.html", - mime_type="text/html", - is_skybridge=False, - warning=resource_warning, - ) - tool_warning = ( - "Tool 'hf/pizzaz-pizza-map' references resource 'ui://widget/pizza-map.html' " - "served as 'text/html' instead of 'text/html+skybridge'" - ) - tool = SkybridgeToolConfig( - tool_name="pizzaz-pizza-map", - namespaced_tool_name="hf/pizzaz-pizza-map", - template_uri="ui://widget/pizza-map.html", - is_valid=False, - warning=tool_warning, - ) - config = SkybridgeServerConfig( - server_name="hf", - supports_resources=True, - ui_resources=[resource], - warnings=[f"{resource.uri}: {resource_warning}", tool_warning], - tools=[tool], - ) - - rows, warnings = ConsoleDisplay.summarize_skybridge_configs({"hf": config}) - - assert len(rows) == 1 - row = rows[0] - assert row["server_name"] == "hf" - assert row["enabled"] is False - assert row["valid_resource_count"] == 0 - assert 
row["total_resource_count"] == 1 - assert row["active_tools"] == [] - - assert len(warnings) == 2 - assert any("ui://widget/pizza-map.html" in warning for warning in warnings) - assert any("pizzaz-pizza-map" in warning for warning in warnings) - - -def test_summarize_skybridge_configs_ignores_servers_without_signals() -> None: - config = SkybridgeServerConfig(server_name="empty") - - rows, warnings = ConsoleDisplay.summarize_skybridge_configs({"empty": config}) - - assert rows == [] - assert warnings == [] diff --git a/tests/unit/fast_agent/ui/test_markdown_truncator.py b/tests/unit/fast_agent/ui/test_markdown_truncator.py deleted file mode 100644 index 6f6ff1b9b..000000000 --- a/tests/unit/fast_agent/ui/test_markdown_truncator.py +++ /dev/null @@ -1,694 +0,0 @@ -"""Tests for smart markdown truncation.""" - -import pytest -from rich.console import Console - -from fast_agent.ui.markdown_truncator import MarkdownTruncator, TruncationPoint - - -@pytest.fixture -def console(): - """Create a Console instance for testing.""" - return Console(width=80, height=24, legacy_windows=False) - - -@pytest.fixture -def truncator(): - """Create a MarkdownTruncator instance for testing.""" - return MarkdownTruncator(target_height_ratio=0.6) - - -class TestMarkdownTruncator: - """Test suite for MarkdownTruncator.""" - - def test_no_truncation_needed(self, truncator, console): - """Test that short text is not truncated.""" - text = "# Hello\n\nThis is a short paragraph." - result = truncator.truncate(text, terminal_height=24, console=console) - assert result == text - - def test_empty_text(self, truncator, console): - """Test handling of empty text.""" - result = truncator.truncate("", terminal_height=24, console=console) - assert result == "" - - def test_truncate_after_paragraph(self, truncator, console): - """Test truncation between paragraphs preserves markdown structure.""" - text = "\n".join( - [ - "# Title", - "", - "First paragraph that is quite long and takes up space.", - "", - "Second paragraph with more content.", - "", - "Third paragraph that we might not see.", - ] - ) - - result = truncator.truncate(text, terminal_height=5, console=console) - - # Should truncate and keep some content - assert len(result) < len(text) - assert len(result) > 0 - # Should keep complete paragraphs (one of them at least) - assert "paragraph" in result.lower() - - def test_truncate_after_code_block(self, truncator, console): - """Test truncation after complete code blocks.""" - text = "\n".join( - [ - "# Code Example", - "", - "```python", - "def hello():", - " print('world')", - "```", - "", - "More content after code block.", - ] - ) - - result = truncator.truncate(text, terminal_height=8, console=console) - - # If code block is included, it should be complete - if "```python" in result: - assert "def hello():" in result - # Should have closing fence if we kept the code - assert result.count("```") % 2 == 0 or result.endswith("```") - - def test_truncate_within_code_block_preserves_fence(self, truncator, console): - """Test that truncating within a code block preserves the opening fence.""" - text = "\n".join( - [ - "Start content.", - "", - "```python", - "def function1():", - " pass", - "", - "def function2():", - " pass", - "", - "def function3():", - " pass", - "```", - "", - "End content.", - ] - ) - - # Use a very small height to force truncation within the code block - result = truncator.truncate(text, terminal_height=5, console=console) - - # If we kept any of the code, should have the opening fence - if 
"def function" in result: - assert "```python" in result or "```" in result - - def test_truncate_after_list_items(self, truncator, console): - """Test truncation preserves list structure.""" - text = "\n".join( - [ - "# List Example", - "", - "- Item 1", - "- Item 2", - "- Item 3", - "- Item 4", - "- Item 5", - ] - ) - - result = truncator.truncate(text, terminal_height=6, console=console) - - # Should truncate but keep complete list items - assert "List Example" in result or "Item" in result - - def test_truncate_ordered_list(self, truncator, console): - """Test truncation with ordered lists.""" - text = "\n".join( - [ - "# Steps", - "", - "1. First step", - "2. Second step", - "3. Third step", - "4. Fourth step", - ] - ) - - result = truncator.truncate(text, terminal_height=6, console=console) - - # Should keep some content - assert len(result) > 0 - - def test_truncate_blockquote(self, truncator, console): - """Test truncation with blockquotes.""" - text = "\n".join( - [ - "# Quote", - "", - "> This is a quote", - "> that spans multiple lines", - "> and has more content.", - "", - "After quote.", - ] - ) - - result = truncator.truncate(text, terminal_height=6, console=console) - - # Should keep some content - assert len(result) > 0 - - def test_oversized_single_block_fallback(self, truncator, console): - """Test that oversized single blocks fall back to character truncation.""" - # Create a very long code block that exceeds terminal height - code_lines = [f"line_{i} = {i}" for i in range(50)] - text = "```python\n" + "\n".join(code_lines) + "\n```" - - result = truncator.truncate(text, terminal_height=10, console=console) - - # Should have truncated something - assert len(result) < len(text) - # Should still have some content - assert len(result) > 0 - - def test_mixed_content(self, truncator, console): - """Test truncation with mixed markdown content.""" - text = "\n".join( - [ - "# Title", - "", - "Paragraph text.", - "", - "```python", - "code here", - "```", - "", - "- List item 1", - "- List item 2", - "", - "> Quote", - "", - "Final paragraph.", - ] - ) - - result = truncator.truncate(text, terminal_height=10, console=console) - - # Should keep some content and not crash - assert len(result) > 0 - - def test_find_safe_truncation_points(self, truncator): - """Test that safe truncation points are identified correctly.""" - text = "\n".join( - [ - "# Title", - "", - "Paragraph 1.", - "", - "```python", - "code", - "```", - "", - "Paragraph 2.", - ] - ) - - safe_points = truncator._find_safe_truncation_points(text) - - # Should find multiple safe points (after paragraphs, after code block) - assert len(safe_points) > 0 - assert all(isinstance(p, TruncationPoint) for p in safe_points) - - def test_measure_rendered_height(self, truncator, console): - """Test that rendered height measurement works.""" - text = "# Title\n\nParagraph." - height = truncator._measure_rendered_height(text, console, "monokai") - - # Should return a positive height - assert height > 0 - assert isinstance(height, int) - - def test_measure_empty_text_height(self, truncator, console): - """Test that empty text has zero height.""" - height = truncator._measure_rendered_height("", console, "monokai") - assert height == 0 - - def test_plain_text_no_markdown(self, truncator, console): - """Test truncation of plain text without markdown.""" - text = "This is plain text without any markdown formatting at all." 
- result = truncator.truncate(text, terminal_height=2, console=console) - - # Should handle plain text gracefully - assert len(result) <= len(text) - - def test_nested_lists(self, truncator, console): - """Test truncation with nested list structures.""" - text = "\n".join( - [ - "- Item 1", - " - Nested 1.1", - " - Nested 1.2", - "- Item 2", - " - Nested 2.1", - ] - ) - - result = truncator.truncate(text, terminal_height=5, console=console) - - # Should not crash with nested structures - assert len(result) > 0 - - def test_code_block_with_language(self, truncator, console): - """Test that code block language specifier is preserved.""" - text = "\n".join( - [ - "Intro text.", - "", - "```javascript", - "const x = 1;", - "const y = 2;", - "const z = 3;", - "```", - "", - "More text after.", - ] - ) - - result = truncator.truncate(text, terminal_height=5, console=console) - - # If we kept code, should preserve language - if "const" in result: - # Should have fence (might be added by truncation handler) - assert "```" in result - - def test_multiple_code_blocks(self, truncator, console): - """Test handling of multiple code blocks.""" - text = "\n".join( - [ - "```python", - "def foo():", - " pass", - "```", - "", - "Some text.", - "", - "```python", - "def bar():", - " pass", - "```", - ] - ) - - result = truncator.truncate(text, terminal_height=8, console=console) - - # Should handle multiple blocks without crashing - assert len(result) > 0 - - def test_target_height_ratio(self, console): - """Test that target_height_ratio parameter works.""" - truncator_60 = MarkdownTruncator(target_height_ratio=0.6) - truncator_80 = MarkdownTruncator(target_height_ratio=0.8) - - # Create text that will need truncation - text = "\n".join([f"Line {i}" for i in range(50)]) - - result_60 = truncator_60.truncate(text, terminal_height=20, console=console) - result_80 = truncator_80.truncate(text, terminal_height=20, console=console) - - # Higher ratio should keep more text (or equal in edge cases) - assert len(result_80) >= len(result_60) - - def test_very_long_single_line(self, truncator, console): - """Test handling of very long single lines.""" - text = "This is a very long line that goes on and on. 
" * 50 - result = truncator.truncate(text, terminal_height=5, console=console) - - # Should handle without crashing - assert len(result) > 0 - - def test_special_markdown_characters(self, truncator, console): - """Test handling of special markdown characters.""" - text = "\n".join( - [ - "# Title with **bold** and *italic*", - "", - "Paragraph with `inline code` and [link](url).", - "", - "---", - "", - "More content.", - ] - ) - - result = truncator.truncate(text, terminal_height=6, console=console) - - # Should handle special characters without crashing - assert len(result) > 0 - - def test_table_truncation_preserves_header(self, truncator, console): - """Test that truncating within a table body preserves the header.""" - text = "\n".join( - [ - "Some intro text", - "", - "| Header 1 | Header 2 | Header 3 |", - "|----------|----------|----------|", - "| Row 1 A | Row 1 B | Row 1 C |", - "| Row 2 A | Row 2 B | Row 2 C |", - "| Row 3 A | Row 3 B | Row 3 C |", - "| Row 4 A | Row 4 B | Row 4 C |", - "", - "Text after table", - ] - ) - - # Force truncation in middle of table body - result = truncator.truncate(text, terminal_height=8, console=console) - - # If we have any table rows, we should have the header - if "Row" in result: - assert "Header 1" in result - assert "Header 2" in result - assert "Header 3" in result - # Should have the separator line - assert "----------" in result - - def test_table_truncation_before_table(self, truncator, console): - """Test that truncating before a table doesn't affect the table.""" - text = "\n".join( - [ - "Lots of intro text here.", - "More intro text.", - "Even more intro.", - "", - "| Header 1 | Header 2 |", - "|----------|----------|", - "| Row 1 A | Row 1 B |", - "| Row 2 A | Row 2 B |", - ] - ) - - result = truncator.truncate(text, terminal_height=10, console=console) - - # If table is in result, it should be complete - if "Header 1" in result: - assert "Row 1 A" in result or "Row 2 A" in result - - def test_table_no_truncation_needed(self, truncator, console): - """Test that short tables don't get truncated.""" - text = "\n".join( - [ - "| Header 1 | Header 2 |", - "|----------|----------|", - "| Row 1 A | Row 1 B |", - "| Row 2 A | Row 2 B |", - ] - ) - - result = truncator.truncate(text, terminal_height=24, console=console) - assert result == text - - def test_multiple_tables_truncation(self, truncator, console): - """Test truncation with multiple tables.""" - text = "\n".join( - [ - "# Table 1", - "", - "| Header A | Header B |", - "|----------|----------|", - "| Data 1 | Data 2 |", - "| Data 3 | Data 4 |", - "", - "# Table 2", - "", - "| Header X | Header Y |", - "|----------|----------|", - "| Data 5 | Data 6 |", - "| Data 7 | Data 8 |", - "| Data 9 | Data 10 |", - ] - ) - - result = truncator.truncate(text, terminal_height=10, console=console) - - # Should handle multiple tables without crashing - assert len(result) > 0 - - # If any table data is present, headers should be present - if "Data" in result: - # At least one table header should be present - assert "Header" in result - - def test_table_with_code_block_truncation(self, truncator, console): - """Test mixed table and code block content.""" - text = "\n".join( - [ - "| Header 1 | Header 2 |", - "|----------|----------|", - "| Row 1 A | Row 1 B |", - "| Row 2 A | Row 2 B |", - "", - "```python", - "def foo():", - " pass", - "```", - "", - "| Header 3 | Header 4 |", - "|----------|----------|", - "| Row 3 A | Row 3 B |", - "| Row 4 A | Row 4 B |", - ] - ) - - result = 
truncator.truncate(text, terminal_height=12, console=console) - - # Should handle both tables and code blocks - assert len(result) > 0 - - def test_table_truncation_character_fallback(self, truncator, console): - """Test table header preservation with character-based truncation fallback.""" - # Create a very long table that requires character truncation - rows = [f"| Row {i} A | Row {i} B | Row {i} C |" for i in range(50)] - text = "\n".join( - [ - "| Header 1 | Header 2 | Header 3 |", - "|----------|----------|----------|", - ] - + rows - ) - - result = truncator.truncate(text, terminal_height=10, console=console) - - # If we have any row data, should have headers - if "Row" in result and "Row" in result[result.find("Row") + 1 :]: - # Has at least one row - should have headers - assert "Header 1" in result - assert "----------" in result - - def test_table_header_only_no_duplication(self, truncator, console): - """Test that truncating right after thead doesn't duplicate header.""" - text = "\n".join( - [ - "Long intro paragraph that takes space.", - "More intro text here.", - "", - "| Header 1 | Header 2 |", - "|----------|----------|", - "| Row 1 A | Row 1 B |", - "| Row 2 A | Row 2 B |", - ] - ) - - result = truncator.truncate(text, terminal_height=10, console=console) - - # Count how many times header appears (should be at most once) - header_count = result.count("Header 1") - assert header_count <= 1 - - def test_table_dominant_shows_first_page(self, truncator, console): - """Test that table-dominant documents show first page (beginning), not last page.""" - # Create a "top 30 objects" scenario - document is primarily a table - rows = [f"| Object {i:02d} | Value {i:02d} | Status {i:02d} |" for i in range(1, 31)] - text = "\n".join( - [ - "| Object | Value | Status |", - "|--------|-------|--------|", - ] - + rows - ) - - # Force truncation to show only part of the table - result = truncator.truncate(text, terminal_height=10, console=console) - - # Should show FIRST rows (1, 2, 3...), not LAST rows (28, 29, 30) - assert "Object 01" in result or "Object 02" in result or "Object 03" in result - # Should NOT have the last rows - assert "Object 30" not in result or "Object 29" not in result - - def test_table_dominant_with_intro_shows_table_start(self, truncator, console): - """Test table-dominant doc with intro text still shows table from beginning.""" - rows = [f"| Row {i} | Data {i} |" for i in range(1, 26)] - text = "\n".join( - [ - "# Top 25 Results", - "", - "| Row | Data |", - "|-----|------|", - ] - + rows - ) - - result = truncator.truncate(text, terminal_height=15, console=console) - - # Check if we have data rows (not just the header "Row | Data") - # Look for "Row 1" through "Row 25" patterns - has_data_rows = any(f"Row {i}" in result for i in range(1, 26)) - - if has_data_rows: - # If we have data rows, check which ones - has_early = any(f"Row {i}" in result for i in range(1, 6)) - has_late = any(f"Row {i}" in result for i in range(20, 26)) - - # Should have early rows - assert has_early - # Should NOT have late rows (or at least not as many) - # If we see late rows, we should also see early rows - if has_late: - assert has_early - - def test_non_table_content_shows_end(self, truncator, console): - """Test that non-table content keeps original behavior (show end/most recent).""" - # Create streaming-style content with paragraphs - paragraphs = [f"Paragraph {i} with some content here." 
for i in range(1, 21)] - text = "\n\n".join(paragraphs) - - result = truncator.truncate(text, terminal_height=6, console=console) - - # Should show LATER paragraphs (most recent in streaming), not early ones - has_early = "Paragraph 1" in result or "Paragraph 2" in result - has_late = "Paragraph 19" in result or "Paragraph 20" in result or "Paragraph 18" in result - - # Should prefer showing later content - # At least one of the late paragraphs should be present - assert has_late or not has_early - - def test_mixed_table_text_below_threshold(self, truncator, console): - """Test mixed content where table is <50% uses streaming behavior.""" - text = "\n".join( - [ - "# Analysis Report", - "", - "Here is a long introduction with multiple paragraphs.", - "This section provides context for the data below.", - "We have several points to make before showing the table.", - "", - "| Metric | Value |", - "|--------|-------|", - "| Row 1 | Val 1 |", - "| Row 2 | Val 2 |", - "", - "And here is a conclusion with more text.", - "This provides analysis of the results.", - "We wrap up with final thoughts here.", - ] - ) - - result = truncator.truncate(text, terminal_height=8, console=console) - - # This is NOT table-dominant (<50% table), so should use streaming behavior - # (show end/most recent content) - # Should be more likely to see conclusion than intro - - # Not a strict test since it depends on height measurement, - # but we should see some content - assert len(result) > 0 - - def test_is_primary_content_table_detection(self, truncator): - """Test the table detection heuristic directly.""" - # Table-dominant content (>50% table lines) - table_text = "\n".join( - [ - "| Col A | Col B |", - "|-------|-------|", - "| R1 A | R1 B |", - "| R2 A | R2 B |", - "| R3 A | R3 B |", - ] - ) - assert truncator._is_primary_content_table(table_text) is True - - # Non-table content - non_table_text = "\n".join( - [ - "# Title", - "", - "Paragraph 1", - "", - "Paragraph 2", - "", - "Paragraph 3", - ] - ) - assert truncator._is_primary_content_table(non_table_text) is False - - # Mixed content - more text than table - mixed_text = "\n".join( - [ - "# Title", - "", - "Long paragraph here.", - "Another paragraph.", - "More content.", - "", - "| Col A | Col B |", - "|-------|-------|", - "| Data | Data |", - ] - ) - # This should be False since table is <50% of lines - result = truncator._is_primary_content_table(mixed_text) - # Mixed case - depends on exact line count - # Just verify it doesn't crash - assert isinstance(result, bool) - - def test_repeated_streaming_truncation(self, truncator, console): - """Test repeated truncation passes as would occur during streaming. - - This simulates streaming behavior where content is repeatedly truncated - as new content arrives. The fence should be correctly prepended each time, - and never duplicated - this was the bug being fixed. 
- """ - # Start with a long code block - code_lines = [f"line_{i} = {i}" for i in range(1, 51)] - text = "```python\n" + "\n".join(code_lines) + "\n```" - - # First truncation pass - moderate truncation - pass1 = truncator.truncate(text, terminal_height=20, console=console) - - # Should have fence since we truncated within the block - assert "```python" in pass1 - # Verify we have some code content - assert "line_" in pass1 - - # Second truncation pass - truncate the already-truncated text more - pass2 = truncator.truncate(pass1, terminal_height=15, console=console) - - # Should still have fence (not duplicated) - # This is the key test: after repeated truncation, should have exactly 1 fence - assert pass2.count("```python") == 1 - # Verify fence is on its own line (not mangled) - lines = pass2.split("\n") - assert lines[0] == "```python" - - # Third truncation pass - simulate aggressive truncation - pass3 = truncator.truncate(pass2, terminal_height=10, console=console) - - # Should STILL have exactly one fence - this validates the fix - # The old code could create duplicate/partial fences here - assert pass3.count("```python") == 1 - # And it should be clean (first line) - lines3 = pass3.split("\n") - assert lines3[0] == "```python" diff --git a/tests/ui/test_plain_text_truncator.py b/tests/unit/fast_agent/ui/test_plain_text_truncator.py similarity index 100% rename from tests/ui/test_plain_text_truncator.py rename to tests/unit/fast_agent/ui/test_plain_text_truncator.py diff --git a/uv.lock b/uv.lock index 0208a5dda..e3840a14d 100644 --- a/uv.lock +++ b/uv.lock @@ -10,7 +10,7 @@ members = [ [[package]] name = "a2a-sdk" -version = "0.3.6" +version = "0.3.10" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core" }, @@ -19,9 +19,9 @@ dependencies = [ { name = "protobuf" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/04/c1/ffe3d9aa51288703dbe68afb0b146601562f1232458c9f6da02c1e63b78c/a2a_sdk-0.3.6.tar.gz", hash = "sha256:d5d1b8de940435b1df9b58e95c3a375a8e53a650b0307a3cf90865b1dec69968", size = 222989, upload-time = "2025-09-17T17:17:36.418Z" } +sdist = { url = "https://files.pythonhosted.org/packages/de/5a/3634ce054a8985c0d2ca0cb2ed1c8c8fdcd67456ddb6496895483c17fee0/a2a_sdk-0.3.10.tar.gz", hash = "sha256:f2df01935fb589c6ebaf8581aede4fe059a30a72cd38e775035337c78f8b2cca", size = 225974, upload-time = "2025-10-21T20:40:38.423Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/84/13/b8901dd49017eac755d599d6751fb3f8346808846d4b79612401145aade7/a2a_sdk-0.3.6-py3-none-any.whl", hash = "sha256:f006c9aee2a2f9b235eb884273e572b7b09e1220d00f5789432a156fcd9dfe4c", size = 137620, upload-time = "2025-09-17T17:17:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/ba/9b/82df9530ed77d30831c49ffffc827222961422d444c0d684101e945ee214/a2a_sdk-0.3.10-py3-none-any.whl", hash = "sha256:b216ccc5ccfd00dcfa42f0f2dc709bc7ba057550717a34b0b1b34a99a76749cf", size = 140291, upload-time = "2025-10-21T20:40:36.929Z" }, ] [[package]] @@ -35,7 +35,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.12.15" +version = "3.13.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -46,25 +46,25 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = 
"sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/fa/3ae643cd525cf6844d3dc810481e5748107368eb49563c15a5fb9f680750/aiohttp-3.13.1.tar.gz", hash = "sha256:4b7ee9c355015813a6aa085170b96ec22315dabc3d866fd77d147927000e9464", size = 7835344, upload-time = "2025-10-17T14:03:29.337Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" }, - { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" }, - { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" }, - { url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" }, - { url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" }, - { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" }, - { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" }, - { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" }, - { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" }, - { url = 
"https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" }, - { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" }, - { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" }, - { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" }, - { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" }, - { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" }, - { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" }, - { url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" }, + { url = "https://files.pythonhosted.org/packages/16/6d/d267b132342e1080f4c1bb7e1b4e96b168b3cbce931ec45780bff693ff95/aiohttp-3.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:55785a7f8f13df0c9ca30b5243d9909bd59f48b274262a8fe78cee0828306e5d", size = 730727, upload-time = "2025-10-17T14:00:39.681Z" }, + { url = "https://files.pythonhosted.org/packages/92/c8/1cf495bac85cf71b80fad5f6d7693e84894f11b9fe876b64b0a1e7cbf32f/aiohttp-3.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4bef5b83296cebb8167707b4f8d06c1805db0af632f7a72d7c5288a84667e7c3", size = 488678, upload-time = "2025-10-17T14:00:41.541Z" }, + { url = "https://files.pythonhosted.org/packages/a8/19/23c6b81cca587ec96943d977a58d11d05a82837022e65cd5502d665a7d11/aiohttp-3.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27af0619c33f9ca52f06069ec05de1a357033449ab101836f431768ecfa63ff5", size = 487637, upload-time = "2025-10-17T14:00:43.527Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/58/8f9464afb88b3eed145ad7c665293739b3a6f91589694a2bb7e5778cbc72/aiohttp-3.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a47fe43229a8efd3764ef7728a5c1158f31cdf2a12151fe99fde81c9ac87019c", size = 1718975, upload-time = "2025-10-17T14:00:45.496Z" }, + { url = "https://files.pythonhosted.org/packages/e1/8b/c3da064ca392b2702f53949fd7c403afa38d9ee10bf52c6ad59a42537103/aiohttp-3.13.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e68e126de5b46e8b2bee73cab086b5d791e7dc192056916077aa1e2e2b04437", size = 1686905, upload-time = "2025-10-17T14:00:47.707Z" }, + { url = "https://files.pythonhosted.org/packages/0a/a4/9c8a3843ecf526daee6010af1a66eb62579be1531d2d5af48ea6f405ad3c/aiohttp-3.13.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e65ef49dd22514329c55970d39079618a8abf856bae7147913bb774a3ab3c02f", size = 1754907, upload-time = "2025-10-17T14:00:49.702Z" }, + { url = "https://files.pythonhosted.org/packages/a4/80/1f470ed93e06436e3fc2659a9fc329c192fa893fb7ed4e884d399dbfb2a8/aiohttp-3.13.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e425a7e0511648b3376839dcc9190098671a47f21a36e815b97762eb7d556b0", size = 1857129, upload-time = "2025-10-17T14:00:51.822Z" }, + { url = "https://files.pythonhosted.org/packages/cc/e6/33d305e6cce0a8daeb79c7d8d6547d6e5f27f4e35fa4883fc9c9eb638596/aiohttp-3.13.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:010dc9b7110f055006acd3648d5d5955bb6473b37c3663ec42a1b4cba7413e6b", size = 1738189, upload-time = "2025-10-17T14:00:53.976Z" }, + { url = "https://files.pythonhosted.org/packages/ac/42/8df03367e5a64327fe0c39291080697795430c438fc1139c7cc1831aa1df/aiohttp-3.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1b5c722d0ca5f57d61066b5dfa96cdb87111e2519156b35c1f8dd17c703bee7a", size = 1553608, upload-time = "2025-10-17T14:00:56.144Z" }, + { url = "https://files.pythonhosted.org/packages/96/17/6d5c73cd862f1cf29fddcbb54aac147037ff70a043a2829d03a379e95742/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:93029f0e9b77b714904a281b5aa578cdc8aa8ba018d78c04e51e1c3d8471b8ec", size = 1681809, upload-time = "2025-10-17T14:00:58.603Z" }, + { url = "https://files.pythonhosted.org/packages/be/31/8926c8ab18533f6076ce28d2c329a203b58c6861681906e2d73b9c397588/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d1824c7d08d8ddfc8cb10c847f696942e5aadbd16fd974dfde8bd2c3c08a9fa1", size = 1711161, upload-time = "2025-10-17T14:01:01.744Z" }, + { url = "https://files.pythonhosted.org/packages/f2/36/2f83e1ca730b1e0a8cf1c8ab9559834c5eec9f5da86e77ac71f0d16b521d/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8f47d0ff5b3eb9c1278a2f56ea48fda667da8ebf28bd2cb378b7c453936ce003", size = 1731999, upload-time = "2025-10-17T14:01:04.626Z" }, + { url = "https://files.pythonhosted.org/packages/b9/ec/1f818cc368dfd4d5ab4e9efc8f2f6f283bfc31e1c06d3e848bcc862d4591/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:8a396b1da9b51ded79806ac3b57a598f84e0769eaa1ba300655d8b5e17b70c7b", size = 1548684, upload-time = "2025-10-17T14:01:06.828Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ad/33d36efd16e4fefee91b09a22a3a0e1b830f65471c3567ac5a8041fac812/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_s390x.whl", 
hash = "sha256:d9c52a65f54796e066b5d674e33b53178014752d28bca555c479c2c25ffcec5b", size = 1756676, upload-time = "2025-10-17T14:01:09.517Z" }, + { url = "https://files.pythonhosted.org/packages/3c/c4/4a526d84e77d464437713ca909364988ed2e0cd0cdad2c06cb065ece9e08/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a89da72d18d6c95a653470b78d8ee5aa3c4b37212004c103403d0776cbea6ff0", size = 1715577, upload-time = "2025-10-17T14:01:11.958Z" }, + { url = "https://files.pythonhosted.org/packages/a2/21/e39638b7d9c7f1362c4113a91870f89287e60a7ea2d037e258b81e8b37d5/aiohttp-3.13.1-cp313-cp313-win32.whl", hash = "sha256:02e0258b7585ddf5d01c79c716ddd674386bfbf3041fbbfe7bdf9c7c32eb4a9b", size = 424468, upload-time = "2025-10-17T14:01:14.344Z" }, + { url = "https://files.pythonhosted.org/packages/cc/00/f3a92c592a845ebb2f47d102a67f35f0925cb854c5e7386f1a3a1fdff2ab/aiohttp-3.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:ef56ffe60e8d97baac123272bde1ab889ee07d3419606fae823c80c2b86c403e", size = 450806, upload-time = "2025-10-17T14:01:16.437Z" }, ] [[package]] @@ -90,7 +90,7 @@ wheels = [ [[package]] name = "anthropic" -version = "0.69.0" +version = "0.71.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -102,9 +102,9 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c8/9d/9ad1778b95f15c5b04e7d328c1b5f558f1e893857b7c33cd288c19c0057a/anthropic-0.69.0.tar.gz", hash = "sha256:c604d287f4d73640f40bd2c0f3265a2eb6ce034217ead0608f6b07a8bc5ae5f2", size = 480622, upload-time = "2025-09-29T16:53:45.282Z" } +sdist = { url = "https://files.pythonhosted.org/packages/82/4f/70682b068d897841f43223df82d96ec1d617435a8b759c4a2d901a50158b/anthropic-0.71.0.tar.gz", hash = "sha256:eb8e6fa86d049061b3ef26eb4cbae0174ebbff21affa6de7b3098da857d8de6a", size = 489102, upload-time = "2025-10-16T15:54:40.08Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/38/75129688de5637eb5b383e5f2b1570a5cc3aecafa4de422da8eea4b90a6c/anthropic-0.69.0-py3-none-any.whl", hash = "sha256:1f73193040f33f11e27c2cd6ec25f24fe7c3f193dc1c5cde6b7a08b18a16bcc5", size = 337265, upload-time = "2025-09-29T16:53:43.686Z" }, + { url = "https://files.pythonhosted.org/packages/5d/77/073e8ac488f335aec7001952825275582fb8f433737e90f24eeef9d878f6/anthropic-0.71.0-py3-none-any.whl", hash = "sha256:85c5015fcdbdc728390f11b17642a65a4365d03b12b799b18b6cc57e71fdb327", size = 355035, upload-time = "2025-10-16T15:54:38.238Z" }, ] [[package]] @@ -492,7 +492,7 @@ dependencies = [ { name = "google-genai" }, { name = "keyring" }, { name = "mcp" }, - { name = "openai" }, + { name = "openai", extra = ["aiohttp"] }, { name = "opentelemetry-distro" }, { name = "opentelemetry-exporter-otlp-proto-http" }, { name = "opentelemetry-instrumentation-anthropic" }, @@ -503,6 +503,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "pyperclip" }, + { name = "python-frontmatter" }, { name = "pyyaml" }, { name = "rich" }, { name = "tensorzero" }, @@ -526,18 +527,18 @@ dev = [ [package.metadata] requires-dist = [ - { name = "a2a-sdk", specifier = ">=0.3.6" }, - { name = "aiohttp", specifier = ">=3.11.13" }, - { name = "anthropic", specifier = ">=0.69.0" }, + { name = "a2a-sdk", specifier = ">=0.3.10" }, + { name = "aiohttp", specifier = ">=3.13.1" }, + { name = "anthropic", specifier = ">=0.71.0" }, { name = "azure-identity", specifier = ">=1.14.0" }, { name = "boto3", specifier = ">=1.35.0" }, { name = "deprecated", 
specifier = ">=1.2.18" }, { name = "email-validator", specifier = ">=2.2.0" }, { name = "fastapi", specifier = ">=0.115.6" }, - { name = "google-genai", specifier = ">=1.33.0" }, + { name = "google-genai", specifier = ">=1.46.0" }, { name = "keyring", specifier = ">=24.3.1" }, - { name = "mcp", specifier = "==1.18.0" }, - { name = "openai", specifier = ">=2.3.0" }, + { name = "mcp", specifier = "==1.19.0" }, + { name = "openai", extras = ["aiohttp"], specifier = ">=2.6.1" }, { name = "opentelemetry-distro", specifier = ">=0.55b0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.7.0" }, { name = "opentelemetry-instrumentation-anthropic", marker = "python_full_version >= '3.10' and python_full_version < '4'", specifier = ">=0.43.1" }, @@ -548,6 +549,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.10.4" }, { name = "pydantic-settings", specifier = ">=2.7.0" }, { name = "pyperclip", specifier = ">=1.9.0" }, + { name = "python-frontmatter", specifier = ">=1.1.0" }, { name = "pyyaml", specifier = ">=6.0.2" }, { name = "rich", specifier = ">=14.1.0" }, { name = "tensorzero", specifier = ">=2025.7.5" }, @@ -689,7 +691,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.33.0" +version = "1.46.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -701,9 +703,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2d/f5/506221067750087ba1346f0a31f6e1714fda4b612d45a54cd2164750e05a/google_genai-1.33.0.tar.gz", hash = "sha256:7d3a5ebad712d95a0d1775842505886eb43cc52f9f478aa4ab0e2d25412499a2", size = 241006, upload-time = "2025-09-03T22:54:10.662Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/2d/d5907af6a46fb0b660291a09bb62f9cbc1365899f7d64a74e7d8d2e056c2/google_genai-1.46.0.tar.gz", hash = "sha256:6824c31149fe3b1c7285b25f79b924c5f89fd52466f62e30f76954f8104fe3a7", size = 239561, upload-time = "2025-10-21T22:55:04.241Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/8e/55052fe488d6604309b425360beb72e6d65f11fa4cc1cdde17ccfe93e1bc/google_genai-1.33.0-py3-none-any.whl", hash = "sha256:1710e958af0a0f3d19521fabbefd86b22d1f212376103f18fed11c9d96fa48e8", size = 241753, upload-time = "2025-09-03T22:54:08.789Z" }, + { url = "https://files.pythonhosted.org/packages/db/79/8993ec6cbf56e5c8f88c165380e55de34ec74f7b928bc302ff5c370f9c4e/google_genai-1.46.0-py3-none-any.whl", hash = "sha256:879c4a260d630db0dcedb5cc84a9d7b47acd29e43e9dc63541b511b757ea7296", size = 239445, upload-time = "2025-10-21T22:55:03.072Z" }, ] [[package]] @@ -773,6 +775,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] +[[package]] +name = "httpx-aiohttp" +version = "0.1.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "httpx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/f2/9a86ce9bc48cf57dabb3a3160dfed26d8bbe5a2478a51f9d1dbf89f2f1fc/httpx_aiohttp-0.1.9.tar.gz", hash = "sha256:4ee8b22e6f2e7c80cd03be29eff98bfe7d89bd77f021ce0b578ee76b73b4bfe6", size = 206023, upload-time = "2025-10-15T08:52:57.475Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/a1/db/5cfa8254a86c34a1ab7fe0dbec9f81bb5ebd831cbdd65aa4be4f37027804/httpx_aiohttp-0.1.9-py3-none-any.whl", hash = "sha256:3dc2845568b07742588710fcf3d72db2cbcdf2acc93376edf85f789c4d8e5fda", size = 6180, upload-time = "2025-10-15T08:52:56.521Z" }, +] + [[package]] name = "httpx-sse" version = "0.4.1" @@ -1108,7 +1123,7 @@ wheels = [ [[package]] name = "mcp" -version = "1.18.0" +version = "1.19.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1123,9 +1138,9 @@ dependencies = [ { name = "starlette" }, { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1a/e0/fe34ce16ea2bacce489ab859abd1b47ae28b438c3ef60b9c5eee6c02592f/mcp-1.18.0.tar.gz", hash = "sha256:aa278c44b1efc0a297f53b68df865b988e52dd08182d702019edcf33a8e109f6", size = 482926, upload-time = "2025-10-16T19:19:55.125Z" } +sdist = { url = "https://files.pythonhosted.org/packages/69/2b/916852a5668f45d8787378461eaa1244876d77575ffef024483c94c0649c/mcp-1.19.0.tar.gz", hash = "sha256:213de0d3cd63f71bc08ffe9cc8d4409cc87acffd383f6195d2ce0457c021b5c1", size = 444163, upload-time = "2025-10-24T01:11:15.839Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/44/f5970e3e899803823826283a70b6003afd46f28e082544407e24575eccd3/mcp-1.18.0-py3-none-any.whl", hash = "sha256:42f10c270de18e7892fdf9da259029120b1ea23964ff688248c69db9d72b1d0a", size = 168762, upload-time = "2025-10-16T19:19:53.2Z" }, + { url = "https://files.pythonhosted.org/packages/ce/a3/3e71a875a08b6a830b88c40bc413bff01f1650f1efe8a054b5e90a9d4f56/mcp-1.19.0-py3-none-any.whl", hash = "sha256:f5907fe1c0167255f916718f376d05f09a830a215327a3ccdd5ec8a519f2e572", size = 170105, upload-time = "2025-10-24T01:11:14.151Z" }, ] [[package]] @@ -1257,7 +1272,7 @@ wheels = [ [[package]] name = "openai" -version = "2.3.0" +version = "2.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1269,9 +1284,15 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/de/90/8f26554d24d63ed4f94d33c24271559863223a67e624f4d2e65ba8e48dca/openai-2.3.0.tar.gz", hash = "sha256:8d213ee5aaf91737faea2d7fc1cd608657a5367a18966372a3756ceaabfbd812", size = 589616, upload-time = "2025-10-10T01:12:50.851Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/44/303deb97be7c1c9b53118b52825cbd1557aeeff510f3a52566b1fa66f6a2/openai-2.6.1.tar.gz", hash = "sha256:27ae704d190615fca0c0fc2b796a38f8b5879645a3a52c9c453b23f97141bb49", size = 593043, upload-time = "2025-10-24T13:29:52.79Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9c/5b/4be258ff072ed8ee15f6bfd8d5a1a4618aa4704b127c0c5959212ad177d6/openai-2.3.0-py3-none-any.whl", hash = "sha256:a7aa83be6f7b0ab2e4d4d7bcaf36e3d790874c0167380c5d0afd0ed99a86bd7b", size = 999768, upload-time = "2025-10-10T01:12:48.647Z" }, + { url = "https://files.pythonhosted.org/packages/15/0e/331df43df633e6105ff9cf45e0ce57762bd126a45ac16b25a43f6738d8a2/openai-2.6.1-py3-none-any.whl", hash = "sha256:904e4b5254a8416746a2f05649594fa41b19d799843cd134dac86167e094edef", size = 1005551, upload-time = "2025-10-24T13:29:50.973Z" }, +] + +[package.optional-dependencies] +aiohttp = [ + { name = "aiohttp" }, + { name = "httpx-aiohttp" }, ] [[package]] @@ -1909,6 +1930,18 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, ] +[[package]] +name = "python-frontmatter" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/de/910fa208120314a12f9a88ea63e03707261692af782c99283f1a2c8a5e6f/python-frontmatter-1.1.0.tar.gz", hash = "sha256:7118d2bd56af9149625745c58c9b51fb67e8d1294a0c76796dafdc72c36e5f6d", size = 16256, upload-time = "2024-01-16T18:50:04.052Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/87/3c8da047b3ec5f99511d1b4d7a5bc72d4b98751c7e78492d14dc736319c5/python_frontmatter-1.1.0-py3-none-any.whl", hash = "sha256:335465556358d9d0e6c98bbeb69b1c969f2a4a21360587b9873bfc3b213407c1", size = 9834, upload-time = "2024-01-16T18:50:00.911Z" }, +] + [[package]] name = "python-multipart" version = "0.0.20"