diff --git a/docs/workflow-syntax.md b/docs/workflow-syntax.md index 7893740..ada8aec 100644 --- a/docs/workflow-syntax.md +++ b/docs/workflow-syntax.md @@ -196,6 +196,42 @@ prompt: | **Restrictions** — workflow steps cannot have `prompt`, `model`, `provider`, `tools`, `system_prompt`, `command`, or `options`. Workflow steps also cannot be used inside `parallel` groups or `for_each` groups. +### Dialog Mode + +Dialog mode allows agents to conditionally pause after execution and enter a free-form conversation with the user. An LLM evaluator examines the agent's output against user-defined criteria and decides whether to initiate a dialog. + +```yaml +agents: + - name: researcher + prompt: "Research the given topic thoroughly" + dialog: + trigger_prompt: | + Enter dialog if the agent expresses uncertainty about + the user's intent, encounters ambiguous requirements, + or needs clarification before proceeding. + routes: + - to: writer +``` + +When triggered, the user is presented with a choice: +1. **Discuss** — engage in a multi-turn conversation with the agent +2. **Do your best and continue** — skip the dialog and let the agent proceed + +After the conversation, the agent re-executes with the dialog transcript as additional context, producing a refined output. 
+ +**Configuration:** + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `dialog.trigger_prompt` | string | Yes | Criteria for the LLM evaluator to decide when dialog is needed | + +**Behavior notes:** +- Dialog is supported on regular `agent` type only (not `human_gate`, `script`, or `workflow`) +- In web dashboard mode, the dialog temporarily replaces the graph area with a chat interface +- When `--skip-gates` is set (e.g., CI/automation), dialogs are automatically skipped +- The evaluator prompt should describe *when* to trigger dialog, not *what* to ask — the evaluator generates the opening question from the agent's output context +- After dialog, the agent sees the full conversation transcript and produces updated output + ## Parallel Groups Parallel groups execute multiple agents concurrently for improved performance. diff --git a/examples/dialog-mode.yaml b/examples/dialog-mode.yaml new file mode 100644 index 0000000..6658820 --- /dev/null +++ b/examples/dialog-mode.yaml @@ -0,0 +1,77 @@ +# Dialog Mode Example +# +# This example demonstrates the dialog mode feature, where an agent +# can conditionally pause and have a conversation with the user. +# +# The researcher agent has a dialog trigger that fires when the agent +# encounters ambiguity or needs clarification. When triggered, the +# user can choose to discuss or let the agent proceed on its own. 
+# +# Usage: +# conductor run examples/dialog-mode.yaml --input topic="quantum computing" +# conductor run examples/dialog-mode.yaml --web --input topic="quantum computing" + +workflow: + name: dialog-mode + description: Research workflow with agent-initiated dialog + version: "1.0.0" + entry_point: researcher + + runtime: + provider: copilot + + input: + topic: + type: string + required: true + description: The topic to research + +agents: + - name: researcher + description: Researches the topic and may ask for user clarification + prompt: | + Research the following topic and provide a comprehensive summary: + + Topic: {{ workflow.input.topic }} + + If the topic is broad, pick a specific angle and explain your choice. + If anything is ambiguous, note what assumptions you're making. + output: + summary: + type: string + description: Research summary + key_findings: + type: string + description: Bullet-pointed key findings + assumptions: + type: string + description: Any assumptions made during research + dialog: + trigger_prompt: | + Trigger dialog if the agent: + - Expresses significant uncertainty about the scope or angle + - Makes assumptions that could lead the research in the wrong direction + - Encounters multiple valid interpretations of the topic + Do NOT trigger for minor uncertainties that the agent can resolve on its own. + routes: + - to: writer + + - name: writer + description: Writes a polished article from the research + prompt: | + Write a clear, engaging article based on this research: + + Summary: {{ researcher.output.summary }} + Key Findings: {{ researcher.output.key_findings }} + + Keep the tone informative but accessible. 
+ output: + article: + type: string + description: The final article + routes: + - to: $end + +output: + article: "{{ writer.output.article }}" + research_assumptions: "{{ researcher.output.assumptions }}" diff --git a/examples/dialog-test.yaml b/examples/dialog-test.yaml new file mode 100644 index 0000000..e66d0bb --- /dev/null +++ b/examples/dialog-test.yaml @@ -0,0 +1,60 @@ +# Dialog Mode Test — guaranteed to trigger dialog +# +# This workflow asks an agent to analyze a codebase question, +# but the agent will express uncertainty about scope/approach, +# triggering a dialog with the user. +# +# Usage: +# conductor run examples/dialog-test.yaml --input question="How should I refactor the error handling?" + +workflow: + name: dialog-test + description: Quick test to exercise dialog mode + entry_point: analyst + + runtime: + provider: copilot + + input: + question: + type: string + required: true + description: A code analysis question — the agent will seek clarification + +agents: + - name: analyst + description: An agent that analyzes code but needs clarification on scope + model: claude-haiku-4.5 + prompt: | + The user asked: {{ workflow.input.question }} + + You MUST respond as though this question is too broad to answer well. + Identify at least 3 different ways you could approach or scope this + analysis, explain why each leads to very different recommendations, + and state clearly that you need the user to narrow the scope before + you can give a useful answer. Express strong uncertainty about which + approach the user wants. + output: + approaches: + type: string + description: The possible approaches or scopes you identified + chosen_approach: + type: string + description: Which approach you went with (after dialog) + analysis: + type: string + description: Your analysis based on the chosen approach + dialog: + trigger_prompt: | + ALWAYS trigger dialog. The agent's output will contain + uncertainty and multiple possible approaches — this is by design. 
+ Trigger if the output mentions any ambiguity, multiple + approaches, uncertainty, or inability to proceed. + In practice: always return trigger=true for this agent. + routes: + - to: $end + +output: + approaches: "{{ analyst.output.approaches }}" + chosen: "{{ analyst.output.chosen_approach }}" + analysis: "{{ analyst.output.analysis }}" diff --git a/src/conductor/config/__init__.py b/src/conductor/config/__init__.py index 46bf1a1..78efbff 100644 --- a/src/conductor/config/__init__.py +++ b/src/conductor/config/__init__.py @@ -13,6 +13,7 @@ from conductor.config.schema import ( AgentDef, ContextConfig, + DialogConfig, GateOption, HooksConfig, InputDef, @@ -34,6 +35,7 @@ # Schema models "AgentDef", "ContextConfig", + "DialogConfig", "GateOption", "HooksConfig", "InputDef", diff --git a/src/conductor/config/schema.py b/src/conductor/config/schema.py index a0e8791..841481e 100644 --- a/src/conductor/config/schema.py +++ b/src/conductor/config/schema.py @@ -387,6 +387,33 @@ class RetryPolicy(BaseModel): """ +class DialogConfig(BaseModel): + """Configuration for agent dialog mode. + + When present on an agent, enables the agent to conditionally pause + after execution and enter a free-form conversation with the user. + + An evaluator LLM call examines the agent's output against the + user-defined trigger_prompt criteria and decides whether to pause + and start a conversation. + + Example YAML:: + + dialog: + trigger_prompt: | + Enter dialog if the agent expresses uncertainty about + the user's intent or needs clarification on requirements. + """ + + trigger_prompt: str + """User-defined criteria for when to enter dialog mode. + + This prompt is wrapped in a system message and evaluated against + the agent's output. The evaluator decides whether to pause and + start a conversation with the user. 
+ """ + + class AgentDef(BaseModel): """Definition for a single agent in the workflow.""" @@ -543,6 +570,24 @@ class AgentDef(BaseModel): - timeout """ + dialog: DialogConfig | None = None + """Optional dialog mode configuration. + + When set, enables this agent to conditionally pause after execution + and enter a free-form conversation with the user. A lightweight + evaluator LLM call uses the trigger_prompt to decide whether dialog + should be triggered based on the agent's output. + + Only applies to provider-backed agents (type='agent' or None). + + Example YAML:: + + dialog: + trigger_prompt: | + Enter dialog if the agent is uncertain about the user's + intent or needs clarification on ambiguous requirements. + """ + @field_validator("timeout") @classmethod def validate_timeout(cls, v: int | None) -> int | None: @@ -561,6 +606,8 @@ def validate_agent_type(self) -> AgentDef: raise ValueError("human_gate agents require 'prompt'") if self.input_mapping is not None: raise ValueError("human_gate agents cannot have 'input_mapping'") + if self.dialog is not None: + raise ValueError("human_gate agents cannot have 'dialog'") if self.max_depth is not None: raise ValueError("human_gate agents cannot have 'max_depth'") elif self.type == "script": @@ -591,6 +638,8 @@ def validate_agent_type(self) -> AgentDef: raise ValueError("script agents cannot have 'retry'") if self.input_mapping is not None: raise ValueError("script agents cannot have 'input_mapping'") + if self.dialog is not None: + raise ValueError("script agents cannot have 'dialog'") if self.max_depth is not None: raise ValueError("script agents cannot have 'max_depth'") elif self.type == "workflow": @@ -616,6 +665,8 @@ def validate_agent_type(self) -> AgentDef: raise ValueError("workflow agents cannot have 'max_agent_iterations'") if self.retry is not None: raise ValueError("workflow agents cannot have 'retry'") + if self.dialog is not None: + raise ValueError("workflow agents cannot have 'dialog'") else: # 
Regular agent or human_gate — input_mapping is not valid if self.input_mapping is not None: diff --git a/src/conductor/engine/dialog_evaluator.py b/src/conductor/engine/dialog_evaluator.py new file mode 100644 index 0000000..c554416 --- /dev/null +++ b/src/conductor/engine/dialog_evaluator.py @@ -0,0 +1,188 @@ +"""Dialog evaluator for conditional agent-user dialog triggering. + +This module provides the DialogEvaluator class which uses an LLM call +to determine whether an agent should enter dialog mode based on +user-defined criteria in the trigger_prompt. +""" + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from conductor.config.schema import AgentDef + from conductor.providers.base import AgentProvider + +logger = logging.getLogger(__name__) + +EVALUATOR_SYSTEM_PROMPT = """\ +You are a dialog trigger evaluator. Your job is to examine an agent's output \ +and decide whether the agent should pause and start a conversation with the user. + +The workflow author has defined the following criteria for triggering dialog: + +--- CRITERIA --- +{trigger_prompt} +--- END CRITERIA --- + +Examine the agent's output below and decide: +1. Does the output meet the criteria for triggering a dialog with the user? +2. If yes, what question or topic should the agent open the dialog with? \ +Include full context — file paths, code snippets, data points, and reasoning — \ +so the user has everything they need to respond meaningfully. + +You MUST respond with ONLY a JSON object (no markdown, no extra text): +{{"trigger": true/false, "reason": "brief explanation", "question": "the opening \ +question to ask the user with full context (only if trigger is true)"}} +""" + +EVALUATOR_USER_PROMPT = """\ +Agent name: {agent_name} +Agent output: +{agent_output} +""" + +# Sentinel marker appended when the agent output is truncated to fit the +# evaluator prompt. 
Lets the evaluator LLM know it has partial data so it +# can either still trigger or ask for clarification rather than failing silently. +_TRUNCATION_MARKER = "\n…[truncated]" + + +def _truncate_for_evaluator(text: str, limit: int) -> str: + """Truncate ``text`` to ``limit`` chars, appending a marker if cut. + + The marker is part of the budget — we leave headroom so the evaluator + sees ``…[truncated]`` rather than a half-token at the boundary. + """ + if len(text) <= limit: + return text + headroom = len(_TRUNCATION_MARKER) + return text[: max(0, limit - headroom)] + _TRUNCATION_MARKER + + +@dataclass +class DialogEvaluation: + """Result of a dialog trigger evaluation. + + Attributes: + trigger: Whether dialog should be triggered. + reason: Explanation of why dialog was or was not triggered. + question: The opening question for the dialog (if triggered). + """ + + trigger: bool + reason: str + question: str = "" + + +class DialogEvaluator: + """Evaluates whether an agent should enter dialog mode. + + Uses a single LLM call to evaluate the agent's output against + user-defined trigger criteria. + """ + + async def evaluate( + self, + agent: AgentDef, + output: dict[str, Any], + provider: AgentProvider, + ) -> DialogEvaluation: + """Evaluate whether an agent's output should trigger dialog. + + Args: + agent: The agent definition with dialog config. + output: The agent's output content. + provider: The provider to use for the evaluation LLM call. + + Returns: + DialogEvaluation with trigger decision and opening question. + """ + if not agent.dialog: + return DialogEvaluation(trigger=False, reason="No dialog config") + + return await self._run_evaluator(agent, output, provider) + + async def _run_evaluator( + self, + agent: AgentDef, + output: dict[str, Any], + provider: AgentProvider, + ) -> DialogEvaluation: + """Run the LLM evaluator to decide whether dialog is needed. + + Args: + agent: The agent definition with dialog config. 
+ output: The agent's output content. + provider: The provider for the LLM call. + + Returns: + DialogEvaluation with trigger decision and opening question. + """ + try: + output_str = json.dumps(output, indent=2, default=str) + except (TypeError, ValueError): + output_str = str(output) + + system_prompt = EVALUATOR_SYSTEM_PROMPT.format( + trigger_prompt=agent.dialog.trigger_prompt, + ) + user_prompt = EVALUATOR_USER_PROMPT.format( + agent_name=agent.name, + agent_output=_truncate_for_evaluator(output_str, limit=4000), + ) + + try: + result = await provider.execute_dialog_turn( + system_prompt=system_prompt, + user_message=user_prompt, + history=[], + model=agent.model, + ) + return self._parse_evaluation(result) + except Exception: + logger.warning( + "Dialog evaluation failed for agent '%s', skipping dialog", + agent.name, + exc_info=True, + ) + return DialogEvaluation( + trigger=False, + reason="Evaluation failed", + ) + + def _parse_evaluation(self, response: str) -> DialogEvaluation: + """Parse the evaluator LLM response into a DialogEvaluation. + + Args: + response: Raw LLM response text. + + Returns: + Parsed DialogEvaluation. + """ + try: + text = response.strip() + # Handle markdown code blocks. The LLM may omit the closing fence, + # in which case we must NOT swallow the last line of valid JSON. 
+ if text.startswith("```"): + lines = text.splitlines() + if len(lines) > 1 and lines[-1].strip().startswith("```"): + text = "\n".join(lines[1:-1]) + elif len(lines) > 1: + text = "\n".join(lines[1:]) + + data = json.loads(text) + return DialogEvaluation( + trigger=bool(data.get("trigger", False)), + reason=str(data.get("reason", "")), + question=str(data.get("question", "")), + ) + except (json.JSONDecodeError, KeyError, TypeError): + logger.warning("Failed to parse dialog evaluation response: %s", response[:200]) + return DialogEvaluation( + trigger=False, + reason=f"Failed to parse evaluation: {response[:100]}", + ) diff --git a/src/conductor/engine/workflow.py b/src/conductor/engine/workflow.py index cc0c7fc..f751b0a 100644 --- a/src/conductor/engine/workflow.py +++ b/src/conductor/engine/workflow.py @@ -377,6 +377,17 @@ def __init__( # Web dashboard for bidirectional gate input self._web_dashboard = web_dashboard + # Dialog mode support + from conductor.engine.dialog_evaluator import DialogEvaluator + from conductor.gates.dialog import DialogHandler + + self._dialog_evaluator = DialogEvaluator() + self._dialog_handler = DialogHandler( + skip_dialogs=skip_gates, + emitter=event_emitter, + web_dashboard=web_dashboard, + ) + # Checkpoint tracking self._current_agent_name: str | None = None self._last_checkpoint_path: Path | None = None @@ -1388,6 +1399,95 @@ async def _handle_partial_output( new_guidance_section = self.context.get_guidance_prompt_section() return await executor.execute(agent, agent_context, guidance_section=new_guidance_section) + async def _handle_dialog( + self, + agent: AgentDef, + output: AgentOutput, + agent_context: dict[str, Any], + executor: AgentExecutor, + ) -> AgentOutput: + """Handle dialog mode evaluation and conversation for an agent. + + Runs the dialog evaluator against the agent's output. If dialog is + triggered, presents the user with a choice to engage or skip, then + manages the conversation. 
After dialog, re-executes the agent with + the dialog transcript as additional guidance so the agent can refine + its output. + + Args: + agent: The agent with dialog config. + output: The agent's current output. + agent_context: The context used for agent execution. + executor: The executor for the agent. + + Returns: + The original output if dialog was not triggered or declined, + or an updated output after re-execution with dialog context. + """ + provider = executor.provider + + # Suspend keyboard listener for interactive dialog + if self._keyboard_listener is not None: + await self._keyboard_listener.suspend() + + try: + evaluation = await self._dialog_evaluator.evaluate(agent, output.content, provider) + + if not evaluation.trigger: + logger.debug("Dialog not triggered for '%s': %s", agent.name, evaluation.reason) + return output + + logger.info("Dialog triggered for '%s': %s", agent.name, evaluation.reason) + + dialog_result = await self._dialog_handler.handle_dialog( + agent=agent, + agent_output=output.content, + opening_question=evaluation.question, + provider=provider, + base_dir=self.workflow_path.parent if self.workflow_path else None, + ) + + # If user declined or no meaningful dialog occurred, keep original output + if dialog_result.user_declined or not dialog_result.messages: + return output + + # Build dialog transcript for re-execution guidance + transcript_parts = [] + for msg in dialog_result.messages: + label = "User" if msg.role == "user" else "Agent" + transcript_parts.append(f"{label}: {msg.content}") + transcript = "\n".join(transcript_parts) + + dialog_guidance = ( + f"\n\n--- DIALOG WITH USER ---\n" + f"The following conversation occurred after your initial output. " + f"Use this context to refine your response:\n\n" + f"{transcript}\n" + f"--- END DIALOG ---\n\n" + f"Now produce your final output incorporating the dialog above." 
+ ) + + # Re-execute with dialog context + guidance_section = self.context.get_guidance_prompt_section() or "" + guidance_section += dialog_guidance + + new_output = await executor.execute( + agent, agent_context, guidance_section=guidance_section + ) + return new_output + + except Exception: + logger.warning( + "Dialog handling failed for '%s', using original output", + agent.name, + exc_info=True, + ) + return output + finally: + # Resume keyboard listener + if self._keyboard_listener is not None: + await self._keyboard_listener.resume() + async def _execute_loop(self, current_agent_name: str) -> dict[str, Any]: """Core execution loop shared by :meth:`run` and :meth:`resume`. @@ -1979,6 +2079,16 @@ async def _execute_loop(self, current_agent_name: str) -> dict[str, Any]: ) _agent_elapsed = _time.time() - _agent_start + # Dialog mode: evaluate whether agent should enter dialog + if agent.dialog and not output.partial: + output = await self._handle_dialog( + agent, + output, + agent_context, + executor, + ) + _agent_elapsed = _time.time() - _agent_start + # Record usage and calculate cost usage = self.usage_tracker.record(agent.name, output, _agent_elapsed) diff --git a/src/conductor/executor/linkify.py b/src/conductor/executor/linkify.py new file mode 100644 index 0000000..163d80e --- /dev/null +++ b/src/conductor/executor/linkify.py @@ -0,0 +1,265 @@ +"""Auto-linkify bare file paths and URLs in rendered markdown text. + +This module provides post-processing for human-facing rendered text (gate +prompts, etc.) to automatically convert bare file paths and URLs into +markdown links. It is *not* used inside the generic ``TemplateRenderer`` — +only at call-sites that produce text destined for markdown rendering (web +dashboard, Rich terminal). + +The processing is markdown-aware: fenced code blocks, inline code spans, +and existing markdown links are left untouched. 
+""" + +from __future__ import annotations + +import re +from pathlib import Path + +# --------------------------------------------------------------------------- +# Shared extension allowlist — kept in sync with web/server.py +# --------------------------------------------------------------------------- +LINKABLE_EXTENSIONS = frozenset( + { + ".md", + ".txt", + ".yaml", + ".yml", + ".json", + ".log", + ".py", + ".ts", + ".js", + ".tsx", + ".jsx", + ".css", + ".html", + ".toml", + ".cfg", + ".ini", + ".csv", + ".xml", + ".sh", + ".bat", + ".ps1", + ".plan.md", + } +) + +# --------------------------------------------------------------------------- +# Regex patterns +# --------------------------------------------------------------------------- + +# Fenced code block (``` or ~~~, with optional language tag) +_FENCED_CODE_RE = re.compile(r"^(`{3,}|~{3,}).*?^\1", re.MULTILINE | re.DOTALL) + +# Inline code span (`...`) +_INLINE_CODE_RE = re.compile(r"`[^`\n]+`") + +# Existing markdown links: [text](url) or [text][ref] +_EXISTING_LINK_RE = re.compile(r"\[[^\]]*\]\([^)]*\)|\[[^\]]*\]\[[^\]]*\]") + +# Bare URL: http(s)://... terminated at whitespace or common punctuation +_URL_RE = re.compile( + r"(?<!\S)https?://[^\s<>\]\[\"'`]+" +) + +# Bare file path: contains at least one /, ends with a known extension. +# Must start at a word boundary or line start. Avoids matching inside +# URLs (already handled) by requiring no scheme prefix. +_FILE_PATH_RE = re.compile( + r"(?<![\w:/])(?:[\w.\-]+/)+[\w.\-]+\.\w+" +) + + +def linkify_markdown(text: str, base_dir: Path | None = None) -> str: + """Post-process rendered text to add markdown links for paths and URLs. + + Processing steps: + 1. Normalize Jinja2 whitespace artifacts (3+ consecutive newlines → 2). + 2. Auto-linkify bare ``http(s)://`` URLs. + 3. Auto-linkify bare file paths (verified against *base_dir* when given). + + Fenced code blocks, inline code spans, and existing markdown links are + preserved unchanged. + + Args: + text: Rendered template text (may contain markdown). + base_dir: Optional directory for file existence checks.
When + provided, only paths that resolve to an existing file within + *base_dir* are linkified. + + Returns: + Text with bare paths/URLs wrapped in markdown link syntax. + """ + # Step 1: normalize whitespace + text = _normalize_whitespace(text) + + # Step 2 & 3: linkify, skipping protected regions + text = _linkify_with_protection(text, base_dir) + + return text + + +# --------------------------------------------------------------------------- +# Internals +# --------------------------------------------------------------------------- + + +def _normalize_whitespace(text: str) -> str: + """Collapse 3+ consecutive newlines into exactly 2 (one blank line).""" + return re.sub(r"\n{3,}", "\n\n", text) + + +def _linkify_with_protection(text: str, base_dir: Path | None) -> str: + """Linkify URLs and file paths while protecting code/links. + + Strategy: identify protected spans (fenced code, inline code, existing + links), then process only the unprotected gaps. + """ + protected: list[tuple[int, int]] = [] + + for pattern in (_FENCED_CODE_RE, _INLINE_CODE_RE, _EXISTING_LINK_RE): + for m in pattern.finditer(text): + protected.append((m.start(), m.end())) + + # Sort and merge overlapping spans + protected.sort() + merged: list[tuple[int, int]] = [] + for start, end in protected: + if merged and start <= merged[-1][1]: + merged[-1] = (merged[-1][0], max(merged[-1][1], end)) + else: + merged.append((start, end)) + + # Build result by processing unprotected segments + result: list[str] = [] + prev_end = 0 + for pstart, pend in merged: + if prev_end < pstart: + # Unprotected gap — linkify it + result.append(_linkify_segment(text[prev_end:pstart], base_dir)) + # Protected span — copy verbatim + result.append(text[pstart:pend]) + prev_end = pend + # Final unprotected tail + if prev_end < len(text): + result.append(_linkify_segment(text[prev_end:], base_dir)) + + return "".join(result) + + +def _linkify_segment(segment: str, base_dir: Path | None) -> str: + """Linkify bare URLs 
and file paths in an unprotected text segment.""" + # First pass: linkify URLs + segment = _URL_RE.sub(_wrap_url, segment) + # Second pass: linkify file paths + segment = _linkify_file_paths(segment, base_dir) + return segment + + +def _wrap_url(m: re.Match[str]) -> str: + """Wrap a bare URL in markdown autolink syntax.""" + url = m.group(0) + # Strip trailing punctuation that's unlikely part of the URL + trailing = "" + while url and url[-1] in ".,;:!?)": + # Keep ) only if there's a matching ( in the URL (e.g. Wikipedia links) + if url[-1] == ")" and "(" in url: + break + trailing = url[-1] + trailing + url = url[:-1] + return f"[{url}]({url}){trailing}" + + +def _linkify_file_paths(segment: str, base_dir: Path | None) -> str: + """Find and linkify bare file paths in a text segment. + + A token is considered a file path if: + - It contains at least one ``/`` + - It ends with a known extension + - If *base_dir* is given, the file must exist + """ + # Split on whitespace boundaries to find path-like tokens + # We process word-by-word to avoid partial matches + tokens = re.split(r"(\s+)", segment) + result: list[str] = [] + + for token in tokens: + linked = _try_linkify_path(token, base_dir) + result.append(linked if linked else token) + + return "".join(result) + + +def _try_linkify_path(token: str, base_dir: Path | None) -> str | None: + """Try to linkify a single token as a file path. + + Returns the markdown link string, or None if the token is not a file path. 
+ """ + # Strip leading/trailing punctuation that isn't part of the path + prefix = "" + suffix = "" + stripped = token + + # Strip common leading chars + while stripped and stripped[0] in "([\"'": + prefix += stripped[0] + stripped = stripped[1:] + + # Strip common trailing chars + while stripped and stripped[-1] in ")]\"'.,;:!?": + suffix = stripped[-1] + suffix + stripped = stripped[:-1] + + if not stripped: + return None + + # Must contain a path separator + if "/" not in stripped and "\\" not in stripped: + return None + + # Normalize to forward slashes for extension check + normalized = stripped.replace("\\", "/") + + # Must end with a known extension + if not _has_linkable_extension(normalized): + return None + + # Must not look like a URL (already handled) + if re.match(r"https?://", stripped): + return None + + # If base_dir is provided, verify file exists + if base_dir is not None: + try: + resolved_base = base_dir.resolve() + candidate = (base_dir / stripped).resolve() + # Security: must be within base_dir (path-aware containment, not + # string-prefix — `/foo/bar` must not match `/foo/barbaz/...`). + if not candidate.is_relative_to(resolved_base): + return None + if not candidate.is_file(): + return None + except (OSError, ValueError): + return None + + # Build markdown link with forward slashes (for dashboard API) + link_target = normalized + return f"{prefix}[{stripped}]({link_target}){suffix}" + + +def _has_linkable_extension(path: str) -> bool: + """Check if a path ends with a known linkable extension.""" + lower = path.lower() + return any(lower.endswith(ext) for ext in LINKABLE_EXTENSIONS) diff --git a/src/conductor/gates/dialog.py b/src/conductor/gates/dialog.py new file mode 100644 index 0000000..66f0d3a --- /dev/null +++ b/src/conductor/gates/dialog.py @@ -0,0 +1,734 @@ +"""Dialog handler for agent-initiated user conversations. 
+ +This module implements the interactive dialog mode where an agent pauses +after execution and enters a free-form conversation with the user. +The dialog presents full context (output, file paths, reasoning) and +supports multi-turn exchanges until the user or agent concludes. +""" + +from __future__ import annotations + +import asyncio +import json +import logging +import uuid +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +from rich.console import Console +from rich.markdown import Markdown as RichMarkdown +from rich.panel import Panel +from rich.prompt import Prompt +from rich.text import Text + +from conductor.executor.linkify import linkify_markdown + +if TYPE_CHECKING: + from pathlib import Path + + from conductor.config.schema import AgentDef + from conductor.events import WorkflowEventEmitter + from conductor.providers.base import AgentProvider + from conductor.web.server import WebDashboard + +logger = logging.getLogger(__name__) + +# System prompt for the agent during dialog mode. +# The agent should be conversational and propose completion when ready. +DIALOG_AGENT_SYSTEM_PROMPT = """\ +You are helping with a workflow dialog. A workflow agent named "{agent_name}" \ +has produced output and needs to discuss it with the user. + +YOUR TASK: Act as the agent "{agent_name}" and have a conversation with the \ +user about the output below. You must stay in character and discuss the output \ +topic naturally. This is NOT a coding task — the user wants to discuss the \ +content of the agent's output, whatever the topic may be. 
+ +RULES: +- Discuss the output topic as written — do NOT refuse, redirect, or claim \ + the topic is "out of scope" +- Share full context including file paths, code snippets, and reasoning \ + when relevant +- When you believe you have enough information to proceed, include the \ + exact marker [READY_TO_CONTINUE] at the end of your message +- If the user says "done", "continue", or "go ahead", treat that as \ + permission to stop discussing + +--- AGENT OUTPUT TO DISCUSS --- +{agent_output} +--- END AGENT OUTPUT --- +""" + +# Dismiss keywords the user can type to exit dialog +DISMISS_KEYWORDS = frozenset( + { + "done", + "continue", + "go ahead", + "proceed", + "that's all", + "thats all", + "resume", + "exit", + "/done", + "/continue", + } +) + +# Marker the agent appends to signal it's ready to continue. Treated as a +# terminal control token (must be at the end of the response) to prevent +# false positives if the agent quotes the marker mid-response. +_READY_MARKER = "[READY_TO_CONTINUE]" + + +def _extract_ready_marker(response: str) -> tuple[bool, str]: + """Return ``(proposed, cleaned)`` for an agent response. + + ``proposed`` is True only when the marker appears at the very end of the + (right-stripped) response. ``cleaned`` is the response with the trailing + marker removed. This avoids both false positives from mid-response + mentions and the user-injection vector where a user pastes the marker. + """ + stripped = response.rstrip() + if stripped.endswith(_READY_MARKER): + cleaned = stripped[: -len(_READY_MARKER)].rstrip() + return True, cleaned + return False, response + + +@dataclass +class DialogMessage: + """A single message in a dialog conversation. + + Attributes: + role: Either 'user' or 'agent'. + content: The message content. + """ + + role: str + content: str + + +@dataclass +class DialogResult: + """Result of a dialog session. + + Attributes: + dialog_id: Unique identifier for this dialog session. 
+ messages: Full transcript of the dialog conversation. + user_dismissed: Whether the user explicitly dismissed the dialog. + user_declined: Whether the user declined to engage at all. + agent_proposed_continue: Whether the agent proposed continuing. + """ + + dialog_id: str + messages: list[DialogMessage] = field(default_factory=list) + user_dismissed: bool = False + user_declined: bool = False + agent_proposed_continue: bool = False + + +class DialogHandler: + """Handles interactive dialog sessions between agents and users. + + Presents the agent's full context (output, file paths, reasoning) + and manages a multi-turn conversation until the user or agent + concludes the dialog. + + Example:: + + handler = DialogHandler() + result = await handler.handle_dialog( + agent=agent_def, + agent_output={"result": "analysis complete", "files": [...]}, + opening_question="I found some ambiguity in the requirements...", + provider=copilot_provider, + ) + """ + + def __init__( + self, + console: Console | None = None, + skip_dialogs: bool = False, + emitter: WorkflowEventEmitter | None = None, + web_dashboard: WebDashboard | None = None, + ) -> None: + """Initialize the DialogHandler. + + Args: + console: Rich console for output. Creates one if not provided. + skip_dialogs: If True, auto-skip all dialogs (for CI/automation). + emitter: Optional event emitter for dialog events. + web_dashboard: Optional web dashboard for web-based dialog input. + """ + self.console = console or Console() + self.skip_dialogs = skip_dialogs + self.emitter = emitter + self.web_dashboard = web_dashboard + + async def handle_dialog( + self, + agent: AgentDef, + agent_output: dict[str, Any], + opening_question: str, + provider: AgentProvider, + base_dir: Path | None = None, + ) -> DialogResult: + """Run an interactive dialog session with the user. + + Presents the agent's full output and opening question, then + manages a multi-turn conversation until conclusion. 
+ + Args: + agent: The agent definition that triggered dialog. + agent_output: The agent's complete output (shown to user as context). + opening_question: The evaluator-extracted opening question. + provider: The provider for generating agent responses. + base_dir: Optional directory for resolving file paths in output. + + Returns: + DialogResult with the full conversation transcript. + """ + dialog_id = str(uuid.uuid4())[:8] + result = DialogResult(dialog_id=dialog_id) + + if self.skip_dialogs: + logger.info("Dialog skipped for agent '%s' (skip_dialogs=True)", agent.name) + result.user_declined = True + return result + + # Dispatch to web mode if dashboard is available + if self.web_dashboard is not None: + return await self._web_handle_dialog( + agent=agent, + agent_output=agent_output, + opening_question=opening_question, + provider=provider, + dialog_id=dialog_id, + result=result, + ) + + self._emit_event( + "dialog_started", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "opening_question": opening_question, + }, + ) + + # Build the system prompt with full agent output context + try: + output_str = json.dumps(agent_output, indent=2, default=str) + except (TypeError, ValueError): + output_str = str(agent_output) + + system_prompt = DIALOG_AGENT_SYSTEM_PROMPT.format( + agent_name=agent.name, agent_output=output_str + ) + + # Display full context and the opening question to the user + self._display_dialog_start(agent, agent_output, opening_question, base_dir) + + # Record the opening question as the first agent message + result.messages.append(DialogMessage(role="agent", content=opening_question)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": opening_question, + }, + ) + + # Ask user if they want to engage or let the agent continue on its own + engagement = await self._ask_engagement() + if engagement == "decline": + result.user_declined = True + 
self._display_dialog_end(dismissed_by="declined") + self._emit_event( + "dialog_completed", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "turn_count": len(result.messages), + "user_declined": True, + }, + ) + return result + + # Track conversation history for the provider + history: list[dict[str, str]] = [] + + # Dialog loop + while True: + # Get user input + user_input = await self._get_user_input() + + if user_input is None: + # EOF or error + result.user_dismissed = True + break + + result.messages.append(DialogMessage(role="user", content=user_input)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "user", + "content": user_input, + }, + ) + + # Check if user is dismissing the dialog + if self._is_dismiss(user_input): + result.user_dismissed = True + self._display_dialog_end(dismissed_by="user") + break + + # Send to agent and get response + history.append({"role": "user", "content": user_input}) + try: + agent_response = await provider.execute_dialog_turn( + system_prompt=system_prompt, + user_message=user_input, + history=history[:-1], # History excludes current message + model=agent.model, + ) + except Exception: + # Roll back the user turn so the next attempt doesn't leave two + # consecutive user messages in the provider context. 
+ history.pop() + logger.warning( + "Dialog turn failed for agent '%s'", + agent.name, + exc_info=True, + ) + self.console.print( + "[dim red] (Agent response failed — you can continue or type 'done')[/dim red]" + ) + continue + + history.append({"role": "assistant", "content": agent_response}) + ready_proposed, clean_response = _extract_ready_marker(agent_response) + stored_response = clean_response if ready_proposed else agent_response + result.messages.append(DialogMessage(role="agent", content=stored_response)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": stored_response, + }, + ) + + # Check if agent proposed completion (terminal marker only) + if ready_proposed: + result.agent_proposed_continue = True + self._display_agent_message(clean_response) + self._display_continue_proposal() + + # Ask user if they approve + approval = await self._get_user_input( + prompt_text="[bold]Continue?[/bold] ([green]yes[/green]/no)" + ) + if approval is None or approval.lower() in ("yes", "y", ""): + self._display_dialog_end(dismissed_by="agent_approved") + break + # User wants to keep chatting + history.append({"role": "user", "content": approval}) + result.messages.append(DialogMessage(role="user", content=approval)) + continue + + self._display_agent_message(agent_response) + + self._emit_event( + "dialog_completed", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "turn_count": len(result.messages), + "user_dismissed": result.user_dismissed, + "agent_proposed_continue": result.agent_proposed_continue, + }, + ) + + return result + + async def _web_handle_dialog( + self, + agent: AgentDef, + agent_output: dict[str, Any], + opening_question: str, + provider: AgentProvider, + dialog_id: str, + result: DialogResult, + ) -> DialogResult: + """Run a dialog session with input from the web dashboard. + + Events are already emitted by the regular flow. 
This method replaces + CLI prompts with web dashboard WebSocket communication. + """ + assert self.web_dashboard is not None + + self._emit_event( + "dialog_started", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "opening_question": opening_question, + }, + ) + + # Build the system prompt with full agent output context + try: + output_str = json.dumps(agent_output, indent=2, default=str) + except (TypeError, ValueError): + output_str = str(agent_output) + + system_prompt = DIALOG_AGENT_SYSTEM_PROMPT.format( + agent_name=agent.name, agent_output=output_str + ) + + # Record the opening question as the first agent message + result.messages.append(DialogMessage(role="agent", content=opening_question)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": opening_question, + }, + ) + + # Wait for engagement decision from web client + msg = await self.web_dashboard.wait_for_dialog_message(agent.name, dialog_id) + if msg.get("type") == "dialog_decline": + result.user_declined = True + self._emit_event( + "dialog_completed", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "turn_count": len(result.messages), + "user_declined": True, + }, + ) + return result + + # First message content from the user (engagement + first input) + user_input = msg.get("content", "") + history: list[dict[str, str]] = [] + + # Process first user message + result.messages.append(DialogMessage(role="user", content=user_input)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "user", + "content": user_input, + }, + ) + + if self._is_dismiss(user_input): + result.user_dismissed = True + self._emit_event( + "dialog_completed", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "turn_count": len(result.messages), + "user_dismissed": True, + }, + ) + return result + + # Dialog loop + while True: + # Send to agent and get response + 
history.append({"role": "user", "content": user_input}) + try: + agent_response = await provider.execute_dialog_turn( + system_prompt=system_prompt, + user_message=user_input, + history=history[:-1], + model=agent.model, + ) + except Exception: + # Roll back the user turn so the next attempt doesn't leave two + # consecutive user messages in the provider context. + history.pop() + logger.warning( + "Dialog turn failed for agent '%s'", + agent.name, + exc_info=True, + ) + # Emit a failure message so user knows + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": "(Agent response failed — you can continue or type 'done')", + }, + ) + # Wait for next user message + msg = await self.web_dashboard.wait_for_dialog_message(agent.name, dialog_id) + if msg.get("type") == "dialog_decline": + result.user_dismissed = True + break + user_input = msg.get("content", "") + result.messages.append(DialogMessage(role="user", content=user_input)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "user", + "content": user_input, + }, + ) + if self._is_dismiss(user_input): + result.user_dismissed = True + break + continue + + history.append({"role": "assistant", "content": agent_response}) + ready_proposed, clean_response = _extract_ready_marker(agent_response) + stored_response = clean_response if ready_proposed else agent_response + result.messages.append(DialogMessage(role="agent", content=stored_response)) + + # Check if agent proposed completion (terminal marker only) + if ready_proposed: + result.agent_proposed_continue = True + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": clean_response + + "\n\n*The agent believes it has enough information to continue.*", + }, + ) + # Wait for approval or continuation + msg = await 
self.web_dashboard.wait_for_dialog_message(agent.name, dialog_id) + if msg.get("type") == "dialog_decline": + break + approval = msg.get("content", "") + if approval.lower() in ("yes", "y", ""): + break + # User wants to keep chatting — treat approval as the next user + # turn. The loop top will append it to provider history exactly + # once; we only update the transcript / UI here. + user_input = approval + result.messages.append(DialogMessage(role="user", content=approval)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "user", + "content": approval, + }, + ) + continue + + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "agent", + "content": stored_response, + }, + ) + + # Wait for next user message + msg = await self.web_dashboard.wait_for_dialog_message(agent.name, dialog_id) + if msg.get("type") == "dialog_decline": + result.user_dismissed = True + break + user_input = msg.get("content", "") + result.messages.append(DialogMessage(role="user", content=user_input)) + self._emit_event( + "dialog_message", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "role": "user", + "content": user_input, + }, + ) + + if self._is_dismiss(user_input): + result.user_dismissed = True + break + + self._emit_event( + "dialog_completed", + { + "dialog_id": dialog_id, + "agent_name": agent.name, + "turn_count": len(result.messages), + "user_dismissed": result.user_dismissed, + "agent_proposed_continue": result.agent_proposed_continue, + }, + ) + + return result + + def _display_dialog_start( + self, + agent: AgentDef, + agent_output: dict[str, Any], + opening_question: str, + base_dir: Path | None = None, + ) -> None: + """Display the dialog opening with full agent context.""" + self.console.print() + self.console.print( + Panel( + Text.from_markup( + f"[bold]Agent '{agent.name}'[/bold] would like to discuss " + f"its output with you.\n" + f"[dim]Type 
your responses below. Say [bold]done[/bold] or " + f"[bold]/done[/bold] when finished.[/dim]" + ), + title="[bold magenta]Dialog Mode[/bold magenta]", + border_style="magenta", + ) + ) + + # Show agent output with full context + try: + output_str = json.dumps(agent_output, indent=2, default=str) + except (TypeError, ValueError): + output_str = str(agent_output) + + # Linkify file paths in the output for clickable links + output_display = linkify_markdown(output_str, base_dir=base_dir) + + self.console.print() + self.console.print( + Panel( + RichMarkdown(f"```json\n{output_display}\n```"), + title="[bold cyan]Agent Output (Full Context)[/bold cyan]", + border_style="cyan", + expand=True, + ) + ) + + # Show the opening question + self.console.print() + question_display = linkify_markdown(opening_question, base_dir=base_dir) + self.console.print( + Panel( + RichMarkdown(question_display), + title=f"[bold yellow]{agent.name}[/bold yellow]", + border_style="yellow", + ) + ) + + def _display_agent_message(self, message: str) -> None: + """Display an agent message in the dialog.""" + self.console.print() + self.console.print( + Panel( + RichMarkdown(message), + border_style="yellow", + ) + ) + + def _display_continue_proposal(self) -> None: + """Display the agent's proposal to continue.""" + self.console.print() + msg = ( + "[bold magenta] ↳ The agent believes it has enough " + "information to continue.[/bold magenta]" + ) + self.console.print(msg) + + def _display_dialog_end(self, dismissed_by: str) -> None: + """Display dialog conclusion message.""" + self.console.print() + if dismissed_by == "user": + self.console.print( + "[dim magenta] ✓ Dialog ended by user — agent resuming.[/dim magenta]" + ) + elif dismissed_by == "agent_approved": + self.console.print("[dim magenta] ✓ Agent continuing — dialog complete.[/dim magenta]") + elif dismissed_by == "declined": + self.console.print( + "[dim magenta] ✓ Dialog declined — agent will do" + " its best and continue.[/dim 
magenta]" + ) + self.console.print() + + async def _ask_engagement(self) -> str: + """Ask the user whether they want to engage in the dialog. + + Returns: + "engage" if the user wants to chat, "decline" to skip. + """ + self.console.print() + self.console.print("[bold]How would you like to proceed?[/bold]") + self.console.print(" [cyan][1][/cyan] Discuss this with the agent") + self.console.print(" [cyan][2][/cyan] Do your best and continue [dim](skip dialog)[/dim]") + + def _ask() -> str: + return Prompt.ask( + "\n[bold]Select[/bold]", + choices=["1", "2"], + default="1", + show_choices=True, + ) + + choice = await asyncio.to_thread(_ask) + return "engage" if choice == "1" else "decline" + + async def _get_user_input( + self, + prompt_text: str = "[bold magenta]You[/bold magenta]", + ) -> str | None: + """Get user input from the terminal. + + Runs in a thread to avoid blocking the event loop. + + Returns: + User input text, or None on EOF/error. + """ + try: + + def _ask() -> str: + return Prompt.ask(prompt_text) + + return await asyncio.to_thread(_ask) + except (EOFError, KeyboardInterrupt): + return None + + def _is_dismiss(self, text: str) -> bool: + """Check if user input is a dismiss signal.""" + return text.strip().lower() in DISMISS_KEYWORDS + + def _emit_event(self, event_type: str, data: dict[str, Any]) -> None: + """Emit a dialog event if emitter is available.""" + if self.emitter is not None: + import time + + from conductor.events import WorkflowEvent + + self.emitter.emit( + WorkflowEvent( + type=event_type, + timestamp=time.time(), + data=data, + ) + ) diff --git a/src/conductor/providers/base.py b/src/conductor/providers/base.py index 2b56c6b..adfba7f 100644 --- a/src/conductor/providers/base.py +++ b/src/conductor/providers/base.py @@ -165,6 +165,35 @@ async def execute( """ ... 
+ async def execute_dialog_turn( + self, + system_prompt: str, + user_message: str, + history: list[dict[str, str]] | None = None, + model: str | None = None, + ) -> str: + """Execute a single dialog turn for agent-user conversation. + + Used by the dialog evaluator and dialog handler for lightweight + conversational exchanges. Creates a fresh, short-lived session + for each call — not tied to the agent's main execution session. + + Args: + system_prompt: System prompt providing dialog context. + user_message: The latest user message. + history: Optional prior conversation history as a list of + ``{"role": "user"|"assistant", "content": "..."}`` dicts. + model: Optional model override. If not provided, uses the + provider's default model. + + Returns: + The agent's response text. + + Raises: + ProviderError: If the dialog turn fails. + """ + raise NotImplementedError(f"{type(self).__name__} does not support dialog turns") + @abstractmethod async def validate_connection(self) -> bool: """Verify the provider can connect to its backend. diff --git a/src/conductor/providers/claude.py b/src/conductor/providers/claude.py index b3feec8..143d9b5 100644 --- a/src/conductor/providers/claude.py +++ b/src/conductor/providers/claude.py @@ -469,8 +469,71 @@ async def close(self) -> None: await client.close() logger.debug("Claude provider closed") - # Drop cached metadata so a re-initialized provider re-fetches. - self._max_input_cache = None + # Drop cached metadata so a re-initialized provider re-fetches. + self._max_input_cache = None + + async def execute_dialog_turn( + self, + system_prompt: str, + user_message: str, + history: list[dict[str, str]] | None = None, + model: str | None = None, + ) -> str: + """Execute a single dialog turn using the Claude messages API. + + Creates a lightweight message call with the conversation context + and returns the agent's response text. + + Args: + system_prompt: System prompt providing dialog context. 
+ user_message: The latest user message. + history: Optional prior conversation history. + model: Optional model override. Falls back to provider default. + + Returns: + The agent's response text. + + Raises: + ProviderError: If the dialog turn fails. + """ + if self._client is None: + raise ProviderError( + "Claude client not initialized", + suggestion="Call validate_connection() first", + ) + + # Build messages list from history + current message + messages: list[dict[str, str]] = [] + for msg in history or []: + messages.append( + { + "role": msg["role"], + "content": msg["content"], + } + ) + messages.append({"role": "user", "content": user_message}) + + try: + response = await self._client.messages.create( + model=model or self._default_model, + max_tokens=4096, + system=system_prompt, + messages=messages, + ) + + # Extract text from response + text_parts = [] + for block in response.content: + if hasattr(block, "text"): + text_parts.append(block.text) + + return "\n".join(text_parts) if text_parts else "" + + except Exception as exc: + raise ProviderError( + f"Dialog turn failed: {exc}", + is_retryable=False, + ) from exc async def execute( self, diff --git a/src/conductor/providers/copilot.py b/src/conductor/providers/copilot.py index e9538bd..5becd26 100644 --- a/src/conductor/providers/copilot.py +++ b/src/conductor/providers/copilot.py @@ -1775,6 +1775,97 @@ async def validate_connection(self) -> bool: is_retryable=False, ) from e + async def execute_dialog_turn( + self, + system_prompt: str, + user_message: str, + history: list[dict[str, str]] | None = None, + model: str | None = None, + ) -> str: + """Execute a single dialog turn using a lightweight Copilot session. + + Creates a fresh session for the dialog, sends the conversation + context, and returns the agent's response. The session is destroyed + after the turn completes. + + Args: + system_prompt: System prompt providing dialog context. + user_message: The latest user message. 
+ history: Optional prior conversation history. + model: Optional model override. Falls back to provider default. + + Returns: + The agent's response text. + + Raises: + ProviderError: If the dialog turn fails. + """ + await self._ensure_client_started() + + # Build the full prompt from history + current message + # System prompt is passed via create_session's system_message parameter + # to replace the SDK's default identity instructions. + parts = [] + for msg in history or []: + role_label = "User" if msg["role"] == "user" else "Assistant" + parts.append(f"{role_label}: {msg['content']}") + parts.append(f"User: {user_message}") + full_prompt = "\n\n".join(parts) + + session = None + try: + session = await self._client.create_session( + model=model or self._default_model, + on_permission_request=self._default_permission_handler, + system_message={"mode": "replace", "content": system_prompt}, + ) + + response_content = "" + done = asyncio.Event() + error_message: str | None = None + + def on_event(event: Any) -> None: + nonlocal response_content, error_message + event_type = event.type.value if hasattr(event.type, "value") else str(event.type) + if event_type == "assistant.message": + response_content = event.data.content + elif event_type == "session.idle": + done.set() + elif event_type in ("error", "session.error"): + error_message = getattr(event.data, "message", str(event.data)) + done.set() + + session.on(on_event) + await session.send(full_prompt) + + try: + await asyncio.wait_for(done.wait(), timeout=120.0) + except TimeoutError as exc: + raise ProviderError( + "Dialog turn timed out after 120s", + is_retryable=False, + ) from exc + + if error_message: + raise ProviderError( + f"Dialog turn error: {error_message}", + is_retryable=False, + ) + + return response_content + + except ProviderError: + raise + except Exception as exc: + raise ProviderError( + f"Dialog turn failed: {exc}", + is_retryable=False, + ) from exc + finally: + if session is not None: + 
with contextlib.suppress(Exception): + await session.destroy() + async def close(self) -> None: """Close Copilot SDK client. diff --git a/src/conductor/web/frontend/package-lock.json b/src/conductor/web/frontend/package-lock.json index aac1f7b..f2d2801 100644 --- a/src/conductor/web/frontend/package-lock.json +++ b/src/conductor/web/frontend/package-lock.json @@ -16,6 +16,7 @@ "react-dom": "^19.0.0", "react-markdown": "^10.1.0", "react-resizable-panels": "^2.1.7", + "remark-gfm": "^4.0.1", "tailwind-merge": "^2.6.0", "zustand": "^5.0.3" }, @@ -60,6 +61,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -1661,6 +1663,7 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "license": "MIT", + "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -1811,6 +1814,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -1991,6 +1995,7 @@ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "license": "ISC", + "peer": true, "engines": { "node": ">=12" } @@ -2174,6 +2179,18 @@ "node": ">=6" } }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, 
"node_modules/estree-util-is-identifier-name": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", @@ -2695,6 +2712,32 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/mdast-util-from-markdown": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz", @@ -2719,6 +2762,107 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "license": "MIT", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + 
"mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/mdast-util-mdx-expression": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", @@ -2917,6 +3061,127 @@ "micromark-util-types": "^2.0.0" } }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "license": "MIT", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + 
"micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "license": "MIT", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": 
"^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "license": "MIT", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "license": "MIT", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, 
"node_modules/micromark-factory-destination": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", @@ -3360,6 +3625,7 @@ "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -3420,6 +3686,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", "license": "MIT", + "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -3474,6 +3741,24 @@ "react-dom": "^16.14.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/remark-parse": { "version": "11.0.0", "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", @@ -3507,6 +3792,21 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "license": "MIT", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, "node_modules/rollup": { "version": "4.59.0", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz", @@ -3863,6 +4163,7 @@ "integrity": "sha512-2N/55r4JDJ4gdrCvGgINMy+HH3iRpNIz8K6SFwVsA+JbQScLiC+clmAxBgwiSPgcG9U15QmvqCGWzMbqda5zGQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", diff --git a/src/conductor/web/frontend/package.json b/src/conductor/web/frontend/package.json index c9ba64e..8c56cba 100644 --- a/src/conductor/web/frontend/package.json +++ b/src/conductor/web/frontend/package.json @@ -17,6 +17,7 @@ "react-dom": "^19.0.0", "react-markdown": "^10.1.0", "react-resizable-panels": "^2.1.7", + "remark-gfm": "^4.0.1", "tailwind-merge": "^2.6.0", "zustand": "^5.0.3" }, diff --git a/src/conductor/web/frontend/src/components/detail/DetailPanel.tsx b/src/conductor/web/frontend/src/components/detail/DetailPanel.tsx index 83b6b03..0e005f3 100644 --- a/src/conductor/web/frontend/src/components/detail/DetailPanel.tsx +++ b/src/conductor/web/frontend/src/components/detail/DetailPanel.tsx @@ -6,6 +6,8 @@ import { AgentDetail } from './AgentDetail'; import { ScriptDetail } from './ScriptDetail'; import { GateDetail } from './GateDetail'; import { GroupDetail } from './GroupDetail'; +import { DialogDetail } from './DialogDetail'; +import { DialogEngagementPrompt } from './DialogEngagementPrompt'; import { SubworkflowDetail } from './SubworkflowDetail'; import { cn } from '@/lib/utils'; @@ -13,6 +15,7 @@ export function DetailPanel() { const selectedNode = useWorkflowStore((s) => s.selectedNode); const viewedNodes = useViewedNodes(); const selectNode = useWorkflowStore((s) => s.selectNode); + const dialogEngaged = useWorkflowStore((s) => s.dialogEngaged); // Slide-in animation state const [mounted, setMounted] = useState(false); @@ -38,6 +41,10 @@ export function DetailPanel() { } const DetailComponent = (() => { + // Show engagement prompt when 
dialog is active but user hasn't engaged yet + if (node.dialog_active && !dialogEngaged) return DialogEngagementPrompt; + // When dialog is active and engaged, show normal agent detail + if (node.dialog_active && dialogEngaged) return AgentDetail; switch (node.type) { case 'script': return ScriptDetail; diff --git a/src/conductor/web/frontend/src/components/detail/DialogDetail.tsx b/src/conductor/web/frontend/src/components/detail/DialogDetail.tsx new file mode 100644 index 0000000..695a01a --- /dev/null +++ b/src/conductor/web/frontend/src/components/detail/DialogDetail.tsx @@ -0,0 +1,206 @@ +import { useState, useRef, useEffect } from 'react'; +import ReactMarkdown from 'react-markdown'; +import remarkGfm from 'remark-gfm'; +import { Send, MessageCircle } from 'lucide-react'; +import { useWorkflowStore } from '@/stores/workflow-store'; +import type { NodeData } from '@/stores/workflow-store'; + +interface DialogDetailProps { + node: NodeData; +} + +function DialogMarkdown({ text }: { text: string }) { + return ( +
{children}
, + ul: ({ children }) =>
+ {children}
+
+ );
+ }
+ return (
+
+ {children}
+
+ );
+ },
+ pre: ({ children }) => (
+
+ {children}
+
+ ),
+ strong: ({ children }) => {children},
+ em: ({ children }) => {children},
+ a: ({ href, children }) => (
+
+ {children}
+
+ ),
+ blockquote: ({ children }) => (
+ + {children} ++ ), + hr: () =>
+ Press Enter to send · Type "done" to end dialog +
+P "[React Flow]: Seems like you have not used zustand provider as an ancestor. Help: https://reactflow.dev/error#001",error002:()=>"It looks like you've created a new nodeTypes or edgeTypes object. If this wasn't on purpose please define the nodeTypes/edgeTypes outside of the component or memoize them.",error003:e=>`Node type "${e}" not found. Using fallback type "default".`,error004:()=>"The React Flow parent container needs a width and a height to render the graph.",error005:()=>"Only child nodes can use a parent extent.",error006:()=>"Can't create edge. An edge needs a source and a target.",error007:e=>`The old edge with id=${e} does not exist.`,error009:e=>`Marker type "${e}" doesn't exist.`,error008:(e,{id:n,sourceHandle:r,targetHandle:l})=>`Couldn't create edge for ${e} handle id: "${e==="source"?r:l}", edge id: ${n}.`,error010:()=>"Handle: No node id found. Make sure to only use a Handle inside a custom Node.",error011:e=>`Edge type "${e}" not found. Using fallback type "default".`,error012:e=>`Node with id "${e}" does not exist, it may have been removed. This can happen when a node is deleted before the "onNodeClick" handler is called.`,error013:(e="react")=>`It seems that you haven't loaded the styles. Please import '@xyflow/${e}/dist/style.css' or base.css to make sure everything is working properly.`,error014:()=>"useNodeConnections: No node ID found. Call useNodeConnections inside a custom Node or provide a node ID.",error015:()=>"It seems that you are trying to drag a node that is not initialized. Please use onNodesChange as explained in the docs."},Ro=[[Number.NEGATIVE_INFINITY,Number.NEGATIVE_INFINITY],[Number.POSITIVE_INFINITY,Number.POSITIVE_INFINITY]],Hw=["Enter"," ","Escape"],Bw={"node.a11yDescription.default":"Press enter or space to select a node. Press delete to remove it and escape to cancel.","node.a11yDescription.keyboardDisabled":"Press enter or space to select a node. You can then use the arrow keys to move the node around. 
Press delete to remove it and escape to cancel.","node.a11yDescription.ariaLiveMessage":({direction:e,x:n,y:r})=>`Moved selected node ${e}. New position, x: ${n}, y: ${r}`,"edge.a11yDescription.default":"Press enter or space to select an edge. You can then press delete to remove it or escape to cancel.","controls.ariaLabel":"Control Panel","controls.zoomIn.ariaLabel":"Zoom In","controls.zoomOut.ariaLabel":"Zoom Out","controls.fitView.ariaLabel":"Fit View","controls.interactive.ariaLabel":"Toggle Interactivity","minimap.ariaLabel":"Mini Map","handle.ariaLabel":"Handle"};var aa;(function(e){e.Strict="strict",e.Loose="loose"})(aa||(aa={}));var Yi;(function(e){e.Free="free",e.Vertical="vertical",e.Horizontal="horizontal"})(Yi||(Yi={}));var Lo;(function(e){e.Partial="partial",e.Full="full"})(Lo||(Lo={}));const qw={inProgress:!1,isValid:null,from:null,fromHandle:null,fromPosition:null,fromNode:null,to:null,toHandle:null,toPosition:null,toNode:null,pointer:null};var pi;(function(e){e.Bezier="default",e.Straight="straight",e.Step="step",e.SmoothStep="smoothstep",e.SimpleBezier="simplebezier"})(pi||(pi={}));var ic;(function(e){e.Arrow="arrow",e.ArrowClosed="arrowclosed"})(ic||(ic={}));var ve;(function(e){e.Left="left",e.Top="top",e.Right="right",e.Bottom="bottom"})(ve||(ve={}));const rv={[ve.Left]:ve.Right,[ve.Right]:ve.Left,[ve.Top]:ve.Bottom,[ve.Bottom]:ve.Top};function Uw(e){return e===null?null:e?"valid":"invalid"}const Iw=e=>"id"in e&&"source"in e&&"target"in e,XT=e=>"id"in e&&"position"in e&&!("source"in e)&&!("target"in e),sm=e=>"id"in e&&"internals"in e&&!("source"in e)&&!("target"in e),Yo=(e,n=[0,0])=>{const{width:r,height:l}=Or(e),a=e.origin??n,s=r*a[0],u=l*a[1];return{x:e.position.x-s,y:e.position.y-u}},FT=(e,n={nodeOrigin:[0,0]})=>{if(e.length===0)return{x:0,y:0,width:0,height:0};const r=e.reduce((l,a)=>{const s=typeof a=="string";let u=!n.nodeLookup&&!s?a:void 0;n.nodeLookup&&(u=s?n.nodeLookup.get(a):sm(a)?a:n.nodeLookup.get(a.id));const 
f=u?lc(u,n.nodeOrigin):{x:0,y:0,x2:0,y2:0};return bc(l,f)},{x:1/0,y:1/0,x2:-1/0,y2:-1/0});return wc(r)},Xo=(e,n={})=>{let r={x:1/0,y:1/0,x2:-1/0,y2:-1/0},l=!1;return e.forEach(a=>{(n.filter===void 0||n.filter(a))&&(r=bc(r,lc(a)),l=!0)}),l?wc(r):{x:0,y:0,width:0,height:0}},um=(e,n,[r,l,a]=[0,0,1],s=!1,u=!1)=>{const f={...Qo(n,[r,l,a]),width:n.width/a,height:n.height/a},h=[];for(const d of e.values()){const{measured:m,selectable:p=!0,hidden:y=!1}=d;if(u&&!p||y)continue;const v=m.width??d.width??d.initialWidth??null,_=m.height??d.height??d.initialHeight??null,k=Ho(f,sa(d)),w=(v??0)*(_??0),S=s&&k>0;(!d.internals.handleBounds||S||k>=w||d.dragging)&&h.push(d)}return h},QT=(e,n)=>{const r=new Set;return e.forEach(l=>{r.add(l.id)}),n.filter(l=>r.has(l.source)||r.has(l.target))};function ZT(e,n){const r=new Map,l=n!=null&&n.nodes?new Set(n.nodes.map(a=>a.id)):null;return e.forEach(a=>{a.measured.width&&a.measured.height&&((n==null?void 0:n.includeHiddenNodes)||!a.hidden)&&(!l||l.has(a.id))&&r.set(a.id,a)}),r}async function KT({nodes:e,width:n,height:r,panZoom:l,minZoom:a,maxZoom:s},u){if(e.size===0)return Promise.resolve(!0);const f=ZT(e,u),h=Xo(f),d=cm(h,n,r,(u==null?void 0:u.minZoom)??a,(u==null?void 0:u.maxZoom)??s,(u==null?void 0:u.padding)??.1);return await l.setViewport(d,{duration:u==null?void 0:u.duration,ease:u==null?void 0:u.ease,interpolate:u==null?void 0:u.interpolate}),Promise.resolve(!0)}function Vw({nodeId:e,nextPosition:n,nodeLookup:r,nodeOrigin:l=[0,0],nodeExtent:a,onError:s}){const u=r.get(e),f=u.parentId?r.get(u.parentId):void 0,{x:h,y:d}=f?f.internals.positionAbsolute:{x:0,y:0},m=u.origin??l;let p=u.extent||a;if(u.extent==="parent"&&!u.expandParent)if(!f)s==null||s("005",rr.error005());else{const v=f.measured.width,_=f.measured.height;v&&_&&(p=[[h,d],[h+v,d+_]])}else f&&ua(u.extent)&&(p=[[u.extent[0][0]+h,u.extent[0][1]+d],[u.extent[1][0]+h,u.extent[1][1]+d]]);const y=ua(p)?Zi(n,p,u.measured):n;return(u.measured.width===void 0||u.measured.height===void 
0)&&(s==null||s("015",rr.error015())),{position:{x:y.x-h+(u.measured.width??0)*m[0],y:y.y-d+(u.measured.height??0)*m[1]},positionAbsolute:y}}async function JT({nodesToRemove:e=[],edgesToRemove:n=[],nodes:r,edges:l,onBeforeDelete:a}){const s=new Set(e.map(y=>y.id)),u=[];for(const y of r){if(y.deletable===!1)continue;const v=s.has(y.id),_=!v&&y.parentId&&u.find(k=>k.id===y.parentId);(v||_)&&u.push(y)}const f=new Set(n.map(y=>y.id)),h=l.filter(y=>y.deletable!==!1),m=QT(u,h);for(const y of h)f.has(y.id)&&!m.find(_=>_.id===y.id)&&m.push(y);if(!a)return{edges:m,nodes:u};const p=await a({nodes:u,edges:m});return typeof p=="boolean"?p?{edges:m,nodes:u}:{edges:[],nodes:[]}:p}const oa=(e,n=0,r=1)=>Math.min(Math.max(e,n),r),Zi=(e={x:0,y:0},n,r)=>({x:oa(e.x,n[0][0],n[1][0]-((r==null?void 0:r.width)??0)),y:oa(e.y,n[0][1],n[1][1]-((r==null?void 0:r.height)??0))});function Gw(e,n,r){const{width:l,height:a}=Or(r),{x:s,y:u}=r.internals.positionAbsolute;return Zi(e,[[s,u],[s+l,u+a]],n)}const iv=(e,n,r)=>e1){const d=l.type==="source"?"target":"source";return a.find(m=>m.type===d)??a[0]}return a[0]}function iS(e,n,r,l,a,s=!1){var d,m,p;const u=l.get(e);if(!u)return null;const f=a==="strict"?(d=u.internals.handleBounds)==null?void 0:d[n]:[...((m=u.internals.handleBounds)==null?void 0:m.source)??[],...((p=u.internals.handleBounds)==null?void 0:p.target)??[]],h=(r?f==null?void 0:f.find(y=>y.id===r):f==null?void 0:f[0])??null;return h&&s?{...h,...Ki(u,h,h.position,!0)}:h}function lS(e,n){return e||(n!=null&&n.classList.contains("target")?"target":n!=null&&n.classList.contains("source")?"source":null)}function AA(e,n){let r=null;return n?r=!0:e&&!n&&(r=!1),r}const aS=()=>!0;function 
zA(e,{connectionMode:n,connectionRadius:r,handleId:l,nodeId:a,edgeUpdaterType:s,isTarget:u,domNode:f,nodeLookup:h,lib:d,autoPanOnConnect:m,flowId:p,panBy:y,cancelConnection:v,onConnectStart:_,onConnect:k,onConnectEnd:w,isValidConnection:S=aS,onReconnectEnd:T,updateConnection:E,getTransform:A,getFromHandle:I,autoPanSpeed:z,dragThreshold:R=1,handleDomNode:D}){const U=Fw(e.target);let X=0,B;const{x:q,y:ee}=Vn(e),L=lS(s,D),$=f==null?void 0:f.getBoundingClientRect();let M=!1;if(!$||!L)return;const P=iS(a,L,l,h,n);if(!P)return;let Q=Vn(e,$),K=!1,j=null,H=!1,Y=null;function N(){if(!m||!$)return;const[be,xe]=Pw(Q,$,z);y({x:be,y:xe}),X=requestAnimationFrame(N)}const G={...P,nodeId:a,type:L,position:P.position},F=h.get(a);let ne={inProgress:!0,isValid:null,from:Ki(F,G,ve.Left,!0),fromHandle:G,fromPosition:G.position,fromNode:F,to:Q,toHandle:null,toPosition:rv[G.position],toNode:null,pointer:Q};function re(){M=!0,E(ne),_==null||_(e,{nodeId:a,handleId:l,handleType:L})}R===0&&re();function se(be){if(!M){const{x:st,y:tt}=Vn(be),zt=st-q,Vt=tt-ee;if(!(zt*zt+Vt*Vt>R*R))return;re()}if(!I()||!G){ye(be);return}const xe=A();Q=Vn(be,$),B=TA(Qo(Q,xe,!1,[1,1]),r,h,G),K||(N(),K=!0);const pe=oS(be,{handle:B,connectionMode:n,fromNodeId:a,fromHandleId:l,fromType:u?"target":"source",isValidConnection:S,doc:U,lib:d,flowId:p,nodeLookup:h});Y=pe.handleDomNode,j=pe.connection,H=AA(!!B,pe.isValid);const _e=h.get(a),Me=_e?Ki(_e,G,ve.Left,!0):ne.from,Ce={...ne,from:Me,isValid:H,to:pe.toHandle&&H?ac({x:pe.toHandle.x,y:pe.toHandle.y},xe):Q,toHandle:pe.toHandle,toPosition:H&&pe.toHandle?pe.toHandle.position:rv[G.position],toNode:pe.toHandle?h.get(pe.toHandle.nodeId):null,pointer:Q};E(Ce),ne=Ce}function ye(be){if(!("touches"in 
be&&be.touches.length>0)){if(M){(B||Y)&&j&&H&&(k==null||k(j));const{inProgress:xe,...pe}=ne,_e={...pe,toPosition:ne.toHandle?ne.toPosition:null};w==null||w(be,_e),s&&(T==null||T(be,_e))}v(),cancelAnimationFrame(X),K=!1,H=!1,j=null,Y=null,U.removeEventListener("mousemove",se),U.removeEventListener("mouseup",ye),U.removeEventListener("touchmove",se),U.removeEventListener("touchend",ye)}}U.addEventListener("mousemove",se),U.addEventListener("mouseup",ye),U.addEventListener("touchmove",se),U.addEventListener("touchend",ye)}function oS(e,{handle:n,connectionMode:r,fromNodeId:l,fromHandleId:a,fromType:s,doc:u,lib:f,flowId:h,isValidConnection:d=aS,nodeLookup:m}){const p=s==="target",y=n?u.querySelector(`.${f}-flow__handle[data-id="${h}-${n==null?void 0:n.nodeId}-${n==null?void 0:n.id}-${n==null?void 0:n.type}"]`):null,{x:v,y:_}=Vn(e),k=u.elementFromPoint(v,_),w=k!=null&&k.classList.contains(`${f}-flow__handle`)?k:y,S={handleDomNode:w,isValid:!1,connection:null,toHandle:null};if(w){const T=lS(void 0,w),E=w.getAttribute("data-nodeid"),A=w.getAttribute("data-handleid"),I=w.classList.contains("connectable"),z=w.classList.contains("connectableend");if(!E||!T)return S;const R={source:p?E:l,sourceHandle:p?A:a,target:p?l:E,targetHandle:p?a:A};S.connection=R;const U=I&&z&&(r===aa.Strict?p&&T==="source"||!p&&T==="target":E!==l||A!==a);S.isValid=U&&d(R),S.toHandle=iS(E,T,A,m,r,!0)}return S}const Lp={onPointerDown:zA,isValid:oS};function MA({domNode:e,panZoom:n,getTransform:r,getViewScale:l}){const a=vn(e);function s({translateExtent:f,width:h,height:d,zoomStep:m=1,pannable:p=!0,zoomable:y=!0,inversePan:v=!1}){const _=E=>{if(E.sourceEvent.type!=="wheel"||!n)return;const A=r(),I=E.sourceEvent.ctrlKey&&Bo()?10:1,z=-E.sourceEvent.deltaY*(E.sourceEvent.deltaMode===1?.05:E.sourceEvent.deltaMode?1:.002)*m,R=A[2]*Math.pow(2,z*I);n.scaleTo(R)};let k=[0,0];const 
w=E=>{(E.sourceEvent.type==="mousedown"||E.sourceEvent.type==="touchstart")&&(k=[E.sourceEvent.clientX??E.sourceEvent.touches[0].clientX,E.sourceEvent.clientY??E.sourceEvent.touches[0].clientY])},S=E=>{const A=r();if(E.sourceEvent.type!=="mousemove"&&E.sourceEvent.type!=="touchmove"||!n)return;const I=[E.sourceEvent.clientX??E.sourceEvent.touches[0].clientX,E.sourceEvent.clientY??E.sourceEvent.touches[0].clientY],z=[I[0]-k[0],I[1]-k[1]];k=I;const R=l()*Math.max(A[2],Math.log(A[2]))*(v?-1:1),D={x:A[0]-z[0]*R,y:A[1]-z[1]*R},U=[[0,0],[h,d]];n.setViewportConstrained({x:D.x,y:D.y,zoom:A[2]},U,f)},T=Lw().on("start",w).on("zoom",p?S:null).on("zoom.wheel",y?_:null);a.call(T,{})}function u(){a.on("zoom",null)}return{update:s,destroy:u,pointer:qn}}const Sc=e=>({x:e.x,y:e.y,zoom:e.k}),lh=({x:e,y:n,zoom:r})=>vc.translate(e,n).scale(r),Zl=(e,n)=>e.target.closest(`.${n}`),sS=(e,n)=>n===2&&Array.isArray(e)&&e.includes(2),jA=e=>((e*=2)<=1?e*e*e:(e-=2)*e*e+2)/2,ah=(e,n=0,r=jA,l=()=>{})=>{const a=typeof n=="number"&&n>0;return a||l(),a?e.transition().duration(n).ease(r).on("end",l):e},uS=e=>{const n=e.ctrlKey&&Bo()?10:1;return-e.deltaY*(e.deltaMode===1?.05:e.deltaMode?1:.002)*n};function OA({zoomPanValues:e,noWheelClassName:n,d3Selection:r,d3Zoom:l,panOnScrollMode:a,panOnScrollSpeed:s,zoomOnPinch:u,onPanZoomStart:f,onPanZoom:h,onPanZoomEnd:d}){return m=>{if(Zl(m,n))return m.ctrlKey&&m.preventDefault(),!1;m.preventDefault(),m.stopImmediatePropagation();const p=r.property("__zoom").k||1;if(m.ctrlKey&&u){const w=qn(m),S=uS(m),T=p*Math.pow(2,S);l.scaleTo(r,T,w,m);return}const y=m.deltaMode===1?20:1;let v=a===Yi.Vertical?0:m.deltaX*y,_=a===Yi.Horizontal?0:m.deltaY*y;!Bo()&&m.shiftKey&&a!==Yi.Vertical&&(v=m.deltaY*y,_=0),l.translateBy(r,-(v/p)*s,-(_/p)*s,{internal:!0});const 
k=Sc(r.property("__zoom"));clearTimeout(e.panScrollTimeout),e.isPanScrolling?(h==null||h(m,k),e.panScrollTimeout=setTimeout(()=>{d==null||d(m,k),e.isPanScrolling=!1},150)):(e.isPanScrolling=!0,f==null||f(m,k))}}function DA({noWheelClassName:e,preventScrolling:n,d3ZoomHandler:r}){return function(l,a){const s=l.type==="wheel",u=!n&&s&&!l.ctrlKey,f=Zl(l,e);if(l.ctrlKey&&s&&f&&l.preventDefault(),u||f)return null;l.preventDefault(),r.call(this,l,a)}}function RA({zoomPanValues:e,onDraggingChange:n,onPanZoomStart:r}){return l=>{var s,u,f;if((s=l.sourceEvent)!=null&&s.internal)return;const a=Sc(l.transform);e.mouseButton=((u=l.sourceEvent)==null?void 0:u.button)||0,e.isZoomingOrPanning=!0,e.prevViewport=a,((f=l.sourceEvent)==null?void 0:f.type)==="mousedown"&&n(!0),r&&(r==null||r(l.sourceEvent,a))}}function LA({zoomPanValues:e,panOnDrag:n,onPaneContextMenu:r,onTransformChange:l,onPanZoom:a}){return s=>{var u,f;e.usedRightMouseButton=!!(r&&sS(n,e.mouseButton??0)),(u=s.sourceEvent)!=null&&u.sync||l([s.transform.x,s.transform.y,s.transform.k]),a&&!((f=s.sourceEvent)!=null&&f.internal)&&(a==null||a(s.sourceEvent,Sc(s.transform)))}}function HA({zoomPanValues:e,panOnDrag:n,panOnScroll:r,onDraggingChange:l,onPanZoomEnd:a,onPaneContextMenu:s}){return u=>{var f;if(!((f=u.sourceEvent)!=null&&f.internal)&&(e.isZoomingOrPanning=!1,s&&sS(n,e.mouseButton??0)&&!e.usedRightMouseButton&&u.sourceEvent&&s(u.sourceEvent),e.usedRightMouseButton=!1,l(!1),a)){const h=Sc(u.transform);e.prevViewport=h,clearTimeout(e.timerId),e.timerId=setTimeout(()=>{a==null||a(u.sourceEvent,h)},r?150:0)}}}function BA({zoomActivationKeyPressed:e,zoomOnScroll:n,zoomOnPinch:r,panOnDrag:l,panOnScroll:a,zoomOnDoubleClick:s,userSelectionActive:u,noWheelClassName:f,noPanClassName:h,lib:d,connectionInProgress:m}){return p=>{var w;const 
y=e||n,v=r&&p.ctrlKey,_=p.type==="wheel";if(p.button===1&&p.type==="mousedown"&&(Zl(p,`${d}-flow__node`)||Zl(p,`${d}-flow__edge`)))return!0;if(!l&&!y&&!a&&!s&&!r||u||m&&!_||Zl(p,f)&&_||Zl(p,h)&&(!_||a&&_&&!e)||!r&&p.ctrlKey&&_)return!1;if(!r&&p.type==="touchstart"&&((w=p.touches)==null?void 0:w.length)>1)return p.preventDefault(),!1;if(!y&&!a&&!v&&_||!l&&(p.type==="mousedown"||p.type==="touchstart")||Array.isArray(l)&&!l.includes(p.button)&&p.type==="mousedown")return!1;const k=Array.isArray(l)&&l.includes(p.button)||!p.button||p.button<=1;return(!p.ctrlKey||_)&&k}}function qA({domNode:e,minZoom:n,maxZoom:r,translateExtent:l,viewport:a,onPanZoom:s,onPanZoomStart:u,onPanZoomEnd:f,onDraggingChange:h}){const d={isZoomingOrPanning:!1,usedRightMouseButton:!1,prevViewport:{},mouseButton:0,timerId:void 0,panScrollTimeout:void 0,isPanScrolling:!1},m=e.getBoundingClientRect(),p=Lw().scaleExtent([n,r]).translateExtent(l),y=vn(e).call(p);T({x:a.x,y:a.y,zoom:oa(a.zoom,n,r)},[[0,0],[m.width,m.height]],l);const v=y.on("wheel.zoom"),_=y.on("dblclick.zoom");p.wheelDelta(uS);function k(B,q){return y?new Promise(ee=>{p==null||p.interpolate((q==null?void 0:q.interpolate)==="linear"?_o:Gu).transform(ah(y,q==null?void 0:q.duration,q==null?void 0:q.ease,()=>ee(!0)),B)}):Promise.resolve(!1)}function w({noWheelClassName:B,noPanClassName:q,onPaneContextMenu:ee,userSelectionActive:L,panOnScroll:$,panOnDrag:M,panOnScrollMode:P,panOnScrollSpeed:Q,preventScrolling:K,zoomOnPinch:j,zoomOnScroll:H,zoomOnDoubleClick:Y,zoomActivationKeyPressed:N,lib:G,onTransformChange:F,connectionInProgress:J,paneClickDistance:ne,selectionOnDrag:re}){L&&!d.isZoomingOrPanning&&S();const se=$&&!N&&!L;p.clickDistance(re?1/0:!In(ne)||ne<0?0:ne);const ye=se?OA({zoomPanValues:d,noWheelClassName:B,d3Selection:y,d3Zoom:p,panOnScrollMode:P,panOnScrollSpeed:Q,zoomOnPinch:j,onPanZoomStart:u,onPanZoom:s,onPanZoomEnd:f}):DA({noWheelClassName:B,preventScrolling:K,d3ZoomHandler:v});if(y.on("wheel.zoom",ye,{passive:!1}),!L){const 
xe=RA({zoomPanValues:d,onDraggingChange:h,onPanZoomStart:u});p.on("start",xe);const pe=LA({zoomPanValues:d,panOnDrag:M,onPaneContextMenu:!!ee,onPanZoom:s,onTransformChange:F});p.on("zoom",pe);const _e=HA({zoomPanValues:d,panOnDrag:M,panOnScroll:$,onPaneContextMenu:ee,onPanZoomEnd:f,onDraggingChange:h});p.on("end",_e)}const be=BA({zoomActivationKeyPressed:N,panOnDrag:M,zoomOnScroll:H,panOnScroll:$,zoomOnDoubleClick:Y,zoomOnPinch:j,userSelectionActive:L,noPanClassName:q,noWheelClassName:B,lib:G,connectionInProgress:J});p.filter(be),Y?y.on("dblclick.zoom",_):y.on("dblclick.zoom",null)}function S(){p.on("zoom",null)}async function T(B,q,ee){const L=lh(B),$=p==null?void 0:p.constrain()(L,q,ee);return $&&await k($),new Promise(M=>M($))}async function E(B,q){const ee=lh(B);return await k(ee,q),new Promise(L=>L(ee))}function A(B){if(y){const q=lh(B),ee=y.property("__zoom");(ee.k!==B.zoom||ee.x!==B.x||ee.y!==B.y)&&(p==null||p.transform(y,q,null,{sync:!0}))}}function I(){const B=y?Rw(y.node()):{x:0,y:0,k:1};return{x:B.x,y:B.y,zoom:B.k}}function z(B,q){return y?new Promise(ee=>{p==null||p.interpolate((q==null?void 0:q.interpolate)==="linear"?_o:Gu).scaleTo(ah(y,q==null?void 0:q.duration,q==null?void 0:q.ease,()=>ee(!0)),B)}):Promise.resolve(!1)}function R(B,q){return y?new Promise(ee=>{p==null||p.interpolate((q==null?void 0:q.interpolate)==="linear"?_o:Gu).scaleBy(ah(y,q==null?void 0:q.duration,q==null?void 0:q.ease,()=>ee(!0)),B)}):Promise.resolve(!1)}function D(B){p==null||p.scaleExtent(B)}function U(B){p==null||p.translateExtent(B)}function X(B){const q=!In(B)||B<0?0:B;p==null||p.clickDistance(q)}return{update:w,destroy:S,setViewport:E,setViewportConstrained:T,getViewport:I,scaleTo:z,scaleBy:R,setScaleExtent:D,setTranslateExtent:U,syncViewport:A,setClickDistance:X}}var ca;(function(e){e.Line="line",e.Handle="handle"})(ca||(ca={}));function UA({width:e,prevWidth:n,height:r,prevHeight:l,affectsX:a,affectsY:s}){const u=e-n,f=r-l,h=[u>0?1:u<0?-1:0,f>0?1:f<0?-1:0];return 
u&&a&&(h[0]=h[0]*-1),f&&s&&(h[1]=h[1]*-1),h}function gv(e){const n=e.includes("right")||e.includes("left"),r=e.includes("bottom")||e.includes("top"),l=e.includes("left"),a=e.includes("top");return{isHorizontal:n,isVertical:r,affectsX:l,affectsY:a}}function ci(e,n){return Math.max(0,n-e)}function fi(e,n){return Math.max(0,e-n)}function Du(e,n,r){return Math.max(0,n-e,e-r)}function yv(e,n){return e?!n:n}function IA(e,n,r,l,a,s,u,f){let{affectsX:h,affectsY:d}=n;const{isHorizontal:m,isVertical:p}=n,y=m&&p,{xSnapped:v,ySnapped:_}=r,{minWidth:k,maxWidth:w,minHeight:S,maxHeight:T}=l,{x:E,y:A,width:I,height:z,aspectRatio:R}=e;let D=Math.floor(m?v-e.pointerX:0),U=Math.floor(p?_-e.pointerY:0);const X=I+(h?-D:D),B=z+(d?-U:U),q=-s[0]*I,ee=-s[1]*z;let L=Du(X,k,w),$=Du(B,S,T);if(u){let Q=0,K=0;h&&D<0?Q=ci(E+D+q,u[0][0]):!h&&D>0&&(Q=fi(E+X+q,u[1][0])),d&&U<0?K=ci(A+U+ee,u[0][1]):!d&&U>0&&(K=fi(A+B+ee,u[1][1])),L=Math.max(L,Q),$=Math.max($,K)}if(f){let Q=0,K=0;h&&D>0?Q=fi(E+D,f[0][0]):!h&&D<0&&(Q=ci(E+X,f[1][0])),d&&U>0?K=fi(A+U,f[0][1]):!d&&U<0&&(K=ci(A+B,f[1][1])),L=Math.max(L,Q),$=Math.max($,K)}if(a){if(m){const Q=Du(X/R,S,T)*R;if(L=Math.max(L,Q),u){let K=0;!h&&!d||h&&!d&&y?K=fi(A+ee+X/R,u[1][1])*R:K=ci(A+ee+(h?D:-D)/R,u[0][1])*R,L=Math.max(L,K)}if(f){let K=0;!h&&!d||h&&!d&&y?K=ci(A+X/R,f[1][1])*R:K=fi(A+(h?D:-D)/R,f[0][1])*R,L=Math.max(L,K)}}if(p){const Q=Du(B*R,k,w)/R;if($=Math.max($,Q),u){let K=0;!h&&!d||d&&!h&&y?K=fi(E+B*R+q,u[1][0])/R:K=ci(E+(d?U:-U)*R+q,u[0][0])/R,$=Math.max($,K)}if(f){let K=0;!h&&!d||d&&!h&&y?K=ci(E+B*R,f[1][0])/R:K=fi(E+(d?U:-U)*R,f[0][0])/R,$=Math.max($,K)}}}U=U+(U<0?$:-$),D=D+(D<0?L:-L),a&&(y?X>B*R?U=(yv(h,d)?-D:D)/R:D=(yv(h,d)?-U:U)*R:m?(U=D/R,d=h):(D=U*R,h=d));const M=h?E+D:E,P=d?A+U:A;return{width:I+(h?-D:D),height:z+(d?-U:U),x:s[0]*D*(h?-1:1)+M,y:s[1]*U*(d?-1:1)+P}}const cS={width:0,height:0,x:0,y:0},VA={...cS,pointerX:0,pointerY:0,aspectRatio:1};function GA(e){return[[0,0],[e.measured.width,e.measured.height]]}function PA(e,n,r){const 
l=n.position.x+e.position.x,a=n.position.y+e.position.y,s=e.measured.width??0,u=e.measured.height??0,f=r[0]*s,h=r[1]*u;return[[l-f,a-h],[l+s-f,a+u-h]]}function $A({domNode:e,nodeId:n,getStoreItems:r,onChange:l,onEnd:a}){const s=vn(e);let u={controlDirection:gv("bottom-right"),boundaries:{minWidth:0,minHeight:0,maxWidth:Number.MAX_VALUE,maxHeight:Number.MAX_VALUE},resizeDirection:void 0,keepAspectRatio:!1};function f({controlPosition:d,boundaries:m,keepAspectRatio:p,resizeDirection:y,onResizeStart:v,onResize:_,onResizeEnd:k,shouldResize:w}){let S={...cS},T={...VA};u={boundaries:m,resizeDirection:y,keepAspectRatio:p,controlDirection:gv(d)};let E,A=null,I=[],z,R,D,U=!1;const X=ww().on("start",B=>{const{nodeLookup:q,transform:ee,snapGrid:L,snapToGrid:$,nodeOrigin:M,paneDomNode:P}=r();if(E=q.get(n),!E)return;A=(P==null?void 0:P.getBoundingClientRect())??null;const{xSnapped:Q,ySnapped:K}=Eo(B.sourceEvent,{transform:ee,snapGrid:L,snapToGrid:$,containerBounds:A});S={width:E.measured.width??0,height:E.measured.height??0,x:E.position.x??0,y:E.position.y??0},T={...S,pointerX:Q,pointerY:K,aspectRatio:S.width/S.height},z=void 0,E.parentId&&(E.extent==="parent"||E.expandParent)&&(z=q.get(E.parentId),R=z&&E.extent==="parent"?GA(z):void 0),I=[],D=void 0;for(const[j,H]of q)if(H.parentId===n&&(I.push({id:j,position:{...H.position},extent:H.extent}),H.extent==="parent"||H.expandParent)){const 
Y=PA(H,E,H.origin??M);D?D=[[Math.min(Y[0][0],D[0][0]),Math.min(Y[0][1],D[0][1])],[Math.max(Y[1][0],D[1][0]),Math.max(Y[1][1],D[1][1])]]:D=Y}v==null||v(B,{...S})}).on("drag",B=>{const{transform:q,snapGrid:ee,snapToGrid:L,nodeOrigin:$}=r(),M=Eo(B.sourceEvent,{transform:q,snapGrid:ee,snapToGrid:L,containerBounds:A}),P=[];if(!E)return;const{x:Q,y:K,width:j,height:H}=S,Y={},N=E.origin??$,{width:G,height:F,x:J,y:ne}=IA(T,u.controlDirection,M,u.boundaries,u.keepAspectRatio,N,R,D),re=G!==j,se=F!==H,ye=J!==Q&&re,be=ne!==K&&se;if(!ye&&!be&&!re&&!se)return;if((ye||be||N[0]===1||N[1]===1)&&(Y.x=ye?J:S.x,Y.y=be?ne:S.y,S.x=Y.x,S.y=Y.y,I.length>0)){const Me=J-Q,Ce=ne-K;for(const st of I)st.position={x:st.position.x-Me+N[0]*(G-j),y:st.position.y-Ce+N[1]*(F-H)},P.push(st)}if((re||se)&&(Y.width=re&&(!u.resizeDirection||u.resizeDirection==="horizontal")?G:S.width,Y.height=se&&(!u.resizeDirection||u.resizeDirection==="vertical")?F:S.height,S.width=Y.width,S.height=Y.height),z&&E.expandParent){const Me=N[0]*(Y.width??0);Y.x&&Y.x{_.has(w)||_.set(w,[]),_.get(w).push(S)};for(const w of p.nodes()){const S=p.node(w);if(typeof S.rank=="number"&&k(S.rank,w),typeof S.minRank=="number"&&typeof S.maxRank=="number")for(let T=S.minRank;T<=S.maxRank;T++)T!==S.rank&&k(T,w)}return y.map(function(w){return l(p,w,v,_.get(w)||[])})}function d(p,y){let v=new s;p.forEach(function(_){let k=_.graph().root,w=r(_,k,v,y);w.vs.forEach((S,T)=>_.node(S).order=T),a(_,v,w.vs)})}function m(p,y){Object.values(y).forEach(v=>v.forEach((_,k)=>p.node(_).order=k))}return Wh}var ep,D1;function s5(){if(D1)return ep;D1=1;let e=$n().Graph,n=Tt();ep={positionX:v,findType1Conflicts:r,findType2Conflicts:l,addConflict:s,hasConflict:u,verticalAlignment:f,horizontalCompaction:h,alignCoordinates:p,findSmallestWidthAlignment:m,balance:y};function r(w,S){let T={};function E(A,I){let z=0,R=0,D=A.length,U=I[I.length-1];return I.forEach((X,B)=>{let 
q=a(w,X),ee=q?w.node(q).order:D;(q||X===U)&&(I.slice(R,B+1).forEach(L=>{w.predecessors(L).forEach($=>{let M=w.node($),P=M.order;(P