43 changes: 22 additions & 21 deletions aider/tools/command_interactive.py
@@ -51,32 +51,33 @@ async def execute(cls, coder, command_string):
coder.io.tool_output(f"Skipped execution of shell command: {command_string}")
return "Shell command execution skipped by user."

should_print = True
# tui = None
if coder.tui and coder.tui():
# tui = coder.tui()
should_print = False

coder.io.tool_output(f"⚙️ Starting interactive shell command: {command_string}")
coder.io.tool_output(">>> You may need to interact with the command below <<<")
coder.io.tool_output(" \n")

await coder.io.stop_input_task()
await asyncio.sleep(1)
tui = coder.tui() if coder.tui else None

# Use run_cmd which handles PTY logic
exit_status, combined_output = run_cmd(
command_string,
verbose=coder.verbose, # Pass verbose flag
error_print=coder.io.tool_error, # Use io for error printing
cwd=coder.root, # Execute in the project root
should_print=should_print,
)
def _run_interactive():
return run_cmd(
command_string,
verbose=coder.verbose,
error_print=coder.io.tool_error,
cwd=coder.root,
should_print=True,
)

await asyncio.sleep(1)
if tui:
# Notify user and suspend TUI for interactive command
coder.io.tool_output(">>> Suspending TUI for interactive command <<<")
exit_status, combined_output = tui.run_obstructive(_run_interactive)
else:
coder.io.tool_output(">>> You may need to interact with the command below <<<")
coder.io.tool_output(" \n")
await coder.io.stop_input_task()
await asyncio.sleep(1)
exit_status, combined_output = _run_interactive()
await asyncio.sleep(1)
coder.io.tool_output(" \n")
coder.io.tool_output(" \n")

coder.io.tool_output(" \n")
coder.io.tool_output(" \n")
coder.io.tool_output(">>> Interactive command finished <<<")

# Format the output for the result message, include more content
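
Note: `tui.run_obstructive(...)` is called here but its implementation is not part of this diff. A minimal sketch of what such a helper could look like, assuming it simply wraps Textual's `App.suspend()` (the same context manager `_open_editor_suspended` uses below) — the class name and body are hypothetical:

```python
# Hypothetical sketch, not from this PR: run_obstructive() as a thin
# wrapper over Textual's App.suspend() context manager.
from textual.app import App

class AiderTui(App):
    def run_obstructive(self, fn):
        """Suspend the TUI, give fn() the real terminal, then resume."""
        with self.suspend():  # terminal is restored for the duration of fn()
            return fn()       # e.g. the _run_interactive closure above
```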
85 changes: 84 additions & 1 deletion aider/tui/app.py
@@ -6,6 +6,8 @@

from textual.app import App, ComposeResult

from aider.editor import pipe_editor

# from textual.binding import Binding
from textual.containers import Vertical
from textual.theme import Theme
@@ -112,7 +114,13 @@ def __init__(self, coder_worker, output_queue, input_queue, args):
show=True,
)
self.bind(
self._encode_keys(self.get_keys_for("focus")), "quit", description="Quit", show=True
self._encode_keys(self.get_keys_for("quit")), "quit", description="Quit", show=True
)
self.bind(
self._encode_keys(self.get_keys_for("editor")),
"open_editor",
description="Editor",
show=True,
)

self.register_theme(BASE_THEME)
@@ -184,8 +192,19 @@ def _get_config(self):
"cancel": "ctrl+c",
"clear": "ctrl+l",
"quit": "ctrl+q",
"editor": "ctrl+o",
}

# Default settings for the "other" section
default_other = {
"render_markdown": False,
}

# Merge default other settings with user-provided settings
for key, default_value in default_other.items():
if key not in config["other"]:
config["other"][key] = default_value

# Merge default colors with user-provided colors
for key, default_value in default_colors.items():
if key not in config["colors"]:
@@ -312,6 +331,14 @@ def handle_output_message(self, msg):

if msg_type == "output":
self.add_output(msg["text"], msg.get("task_id"))
elif msg_type == "tool_call":
# Render tool call with styled panel
output_container = self.query_one("#output", OutputContainer)
output_container.add_tool_call(msg["lines"])
elif msg_type == "tool_result":
# Render tool result with connector prefix
output_container = self.query_one("#output", OutputContainer)
output_container.add_tool_result(msg["text"])
elif msg_type == "start_response":
# Start a new LLM response with streaming
self.run_worker(self._start_response())
@@ -439,6 +466,22 @@ def on_input_area_submit(self, message: InputArea.Submit):
if not user_input.strip():
return

# Intercept /editor and /edit commands to handle with TUI suspension
stripped = user_input.strip()
if stripped in ("/editor", "/edit") or stripped.startswith("/editor ") or stripped.startswith("/edit "):
# Extract initial content if provided (e.g., "/editor some text")
initial_content = ""
if stripped.startswith("/editor "):
initial_content = stripped[8:]
elif stripped.startswith("/edit "):
initial_content = stripped[6:]

# Clear input and open editor with suspend
input_area = self.query_one("#input", InputArea)
input_area.value = ""
self._open_editor_suspended(initial_content)
return

# Save to history before clearing
input_area = self.query_one("#input", InputArea)
input_area.save_to_history(user_input)
@@ -501,6 +544,41 @@ def action_quit(self):
def action_noop(self):
pass

def action_open_editor(self):
"""Open an external editor to compose a prompt (keyboard shortcut)."""
# Get current input text to use as initial content
input_area = self.query_one("#input", InputArea)
current_text = input_area.value
self._open_editor_suspended(current_text)

def _open_editor_suspended(self, initial_content=""):
"""Open an external editor with proper TUI suspension.

Args:
initial_content: Initial text to populate the editor with
"""
# Get editor from coder's commands or default
editor = getattr(self.worker.coder.commands, "editor", None)

# Suspend TUI and open editor
with self.suspend():
edited_text = pipe_editor(initial_content, suffix="md", editor=editor)

# Set the edited text back to input
input_area = self.query_one("#input", InputArea)
if edited_text and edited_text.strip():
input_area.value = edited_text.rstrip()
input_area.focus()

# Show notification
try:
status_bar = self.query_one("#status-bar", StatusBar)
status_bar.show_notification("Editor content loaded", severity="information", timeout=2)
except Exception:
pass
else:
input_area.focus()

def _encode_keys(self, key):
key = key.replace("shift+enter", "ctrl+j")

@@ -522,6 +600,11 @@ def get_keys_for(self, type):
allowed_keys = self.tui_config["key_bindings"][type]
return self._decode_keys(allowed_keys)

@property
def render_markdown(self):
"""Return whether markdown rendering is enabled."""
return self.tui_config.get("other", {}).get("render_markdown", True)

def _do_quit(self):
"""Perform the actual quit after UI updates."""
self.worker.stop()
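
For reference, the `/editor` flow above round-trips text through `aider.editor.pipe_editor`, whose call shape is visible in `_open_editor_suspended`. A small usage sketch — the draft string is illustrative, and the `editor=None` fallback to the environment's configured editor is an assumption:

```python
# Illustrative usage of pipe_editor as called in _open_editor_suspended.
from aider.editor import pipe_editor

# Writes the draft to a temporary .md file, opens it in the resolved
# editor, blocks until that editor exits, and returns the edited text.
edited = pipe_editor("draft prompt text", suffix="md", editor=None)
```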
73 changes: 70 additions & 3 deletions aider/tui/io.py
@@ -49,6 +49,11 @@ def __init__(self, output_queue, input_queue, **kwargs):
("Removing", "file_op"),
]

# Tool call buffering for styled panel rendering
self._tool_call_buffer = []
self._in_tool_call = False
self._expect_tool_result = False

def rule(self):
pass

@@ -154,6 +159,25 @@ def reset_streaming_response(self):
self._streaming_response = False
self.output_queue.put({"type": "end_response"})

def assistant_output(self, message, pretty=None):
"""Override assistant_output to send LLM response through streaming path.

This ensures non-streaming mode output gets the same markdown rendering
treatment as streaming mode.

Args:
message: The assistant's response message
pretty: Whether to use pretty formatting (unused in TUI, kept for compatibility)
"""
if not message:
self.tool_warning("Empty response received from LLM. Check your provider account?")
return

# Use the streaming path so markdown rendering is applied
self.output_queue.put({"type": "start_response"})
self.output_queue.put({"type": "stream_chunk", "text": message})
self.output_queue.put({"type": "end_response"})

def tool_output(self, *messages, **kwargs):
"""Override tool_output to detect task boundaries and queue output.

@@ -163,14 +187,57 @@ def tool_output(self, *messages, **kwargs):
"""
if messages:
text = " ".join(str(m) for m in messages)
type = kwargs.get("type", None)
msg_type = kwargs.get("type", None)

# Handle tool call buffering for styled panel rendering
if msg_type == "Tool Call":
# Start buffering a new tool call
self._in_tool_call = True
self._tool_call_buffer = [text]
# Log to history
self.append_chat_history(text, linebreak=True, blockquote=True)
return
elif msg_type == "tool-footer":
# End of tool call - flush buffer as styled panel
if self._in_tool_call and self._tool_call_buffer:
self.output_queue.put(
{
"type": "tool_call",
"lines": self._tool_call_buffer,
}
)
# Expect a tool result next
self._expect_tool_result = True
self._in_tool_call = False
self._tool_call_buffer = []
return
elif self._in_tool_call:
# Add to tool call buffer
if text.strip():
self._tool_call_buffer.append(text)
# Log to history
self.append_chat_history(text, linebreak=True, blockquote=True)
return

# Check if this is a tool result (comes right after tool call)
if self._expect_tool_result and text.strip():
self._expect_tool_result = False
self.output_queue.put(
{
"type": "tool_result",
"text": text,
}
)
# Log to history
self.append_chat_history(text, linebreak=True, blockquote=True)
return

# Check if this should start a new task
should_start, title, task_type = self._detect_task_start(text)

if type:
if msg_type:
should_start = True
title = type
title = msg_type

if should_start:
self.start_task(title, task_type)
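
To make the buffering above concrete, here is the sequence of `tool_output()` calls one tool invocation would produce and the queue messages they turn into. The `type=` values match the diff; the literal strings and the `io` variable are invented for illustration:

```python
io.tool_output("Tool Call: server • my_function", type="Tool Call")
#   -> starts buffering; the line goes to chat history only
io.tool_output('{"path": "foo.py"}')
#   -> appended to _tool_call_buffer while _in_tool_call is True
io.tool_output("", type="tool-footer")
#   -> flushes {"type": "tool_call", "lines": [...]} to the output queue
io.tool_output("Wrote foo.py")
#   -> the first non-empty text after the footer is emitted as
#      {"type": "tool_result", "text": "Wrote foo.py"}
```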
57 changes: 54 additions & 3 deletions aider/tui/widgets/output.py
@@ -2,6 +2,7 @@

import re

from rich.markdown import Markdown
from rich.padding import Padding
from rich.style import Style as RichStyle
from rich.text import Text
@@ -68,7 +69,7 @@ async def stream_chunk(self, text: str):
# self.write(Padding(line.strip(), (0, 0, 0, 1)))
if line.rstrip():
self.set_last_write_type("assistant")
self.output(line.rstrip())
self.output(line.rstrip(), render_markdown=True)

async def end_response(self):
"""End the current LLM response."""
@@ -78,7 +79,7 @@ async def _stop_stream(self):
"""Stop the current markdown stream."""
# Flush any remaining buffer content
if self._line_buffer.rstrip():
self.output(self._line_buffer.rstrip())
self.output(self._line_buffer.rstrip(), render_markdown=True)
self._line_buffer = ""

def add_user_message(self, text: str):
@@ -134,6 +135,49 @@ def add_output_styled(self, text: str, styles=None):

self.output(Padding(capture_text, (0, 0, 0, 2)))

def add_tool_call(self, lines: list):
"""Add a tool call with themed styling.

Args:
lines: List of lines from the tool call (header, arguments, etc.)
"""
if not lines:
return

for i, line in enumerate(lines):
# Strip Rich markup
clean_line = line.replace("[bright_cyan]", "").replace("[/bright_cyan]", "")

content = Text()
if i == 0:
# First line: reformat "Tool Call: server • function" to "Tool Call · server · function"
clean_line = clean_line.replace("Tool Call:", "Tool Call ·").replace(" • ", " · ")
content.append(clean_line, style="#00ff87") # $accent
else:
# Subsequent lines (arguments) - prefix with corner to show they belong to the call
content.append("⎿ ", style="#00ff87")
content.append(clean_line, style="dim")

self.set_last_write_type("tool_call")
self.output(Padding(content, (0, 0, 0, 1)))

def add_tool_result(self, text: str):
"""Add a tool result.

Args:
text: The tool result text
"""
if not text:
return

clean_text = text.strip()

result = Text()
result.append(clean_text, style="dim")

self.set_last_write_type("tool_result")
self.output(Padding(result, (0, 0, 0, 1)))

def _check_cost(self, text: str):
"""Extract and emit cost updates."""
match = re.search(r"\$(\d+\.?\d*)\s*session", text)
@@ -158,13 +202,20 @@ def set_last_write_type(self, type):

self._last_write_type = type

def output(self, text, check_duplicates=True):
def output(self, text, check_duplicates=True, render_markdown=False):
"""Write output with duplicate newline checking.

Args:
text: The text to write
check_duplicates: If True, check for duplicate newlines before writing
render_markdown: If True and app config allows, render as markdown
"""
# Check if we should render as markdown
if render_markdown and hasattr(self.app, 'render_markdown') and self.app.render_markdown:
# Only render string content as markdown
if isinstance(text, str):
text = Markdown(text)

with self.app.console.capture() as capture:
self.app.console.print(text)
check = Text(capture.get()).plain
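
The markdown gate at the top of `output()` reduces to the following standalone Rich snippet; `console` here stands in for `self.app.console`:

```python
from rich.console import Console
from rich.markdown import Markdown

console = Console()
text, render_markdown = "**bold**, `code`, a [link](https://example.com)", True
if render_markdown and isinstance(text, str):
    text = Markdown(text)  # styled rendering only for string content
console.print(text)
```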