diff --git a/aider/__init__.py b/aider/__init__.py index c41ed70f912..8d341f89783 100644 --- a/aider/__init__.py +++ b/aider/__init__.py @@ -1,6 +1,6 @@ from packaging import version -__version__ = "0.88.1.dev" +__version__ = "0.88.2.dev" safe_version = __version__ try: diff --git a/aider/coders/architect_coder.py b/aider/coders/architect_coder.py index a7cba79eb2e..d681316c1c0 100644 --- a/aider/coders/architect_coder.py +++ b/aider/coders/architect_coder.py @@ -14,7 +14,7 @@ async def reply_completed(self): if not content or not content.strip(): return - if not self.auto_accept_architect and not self.io.confirm_ask("Edit the files?"): + if not self.auto_accept_architect and not await self.io.confirm_ask("Edit the files?"): return kwargs = dict() diff --git a/aider/coders/base_coder.py b/aider/coders/base_coder.py index 848cb3b2bf5..16a1cb8410b 100755 --- a/aider/coders/base_coder.py +++ b/aider/coders/base_coder.py @@ -616,9 +616,6 @@ def __init__( except Exception as e: self.io.tool_warning(f"Could not remove todo list file {todo_file_path}: {e}") - # Instantiate MCP tools - if self.mcp_servers: - pass # validate the functions jsonschema if self.functions: from jsonschema import Draft7Validator @@ -1755,7 +1752,7 @@ def warm_cache_worker(): return chunks - def check_tokens(self, messages): + async def check_tokens(self, messages): """Check if the messages will fit within the model's token limits.""" input_tokens = self.main_model.token_count(messages) max_input_tokens = self.main_model.info.get("max_input_tokens") or 0 @@ -1774,7 +1771,7 @@ def check_tokens(self, messages): " the context limit is exceeded." 
) - if not self.io.confirm_ask("Try to proceed anyway?"): + if not await self.io.confirm_ask("Try to proceed anyway?"): return False return True @@ -1792,7 +1789,7 @@ async def send_message(self, inp): chunks = self.format_messages() messages = chunks.all_messages() - if not self.check_tokens(messages): + if not await self.check_tokens(messages): return self.warm_cache(chunks) @@ -2352,7 +2349,8 @@ async def get_server_tools(server): ) return (server.name, server_tools) except Exception as e: - self.io.tool_warning(f"Error initializing MCP server {server.name}:\n{e}") + if server.name != "unnamed-server": + self.io.tool_warning(f"Error initializing MCP server {server.name}:\n{e}") return None async def get_all_server_tools(): @@ -2604,7 +2602,7 @@ async def send(self, messages, model=None, functions=None, tools=None): ) self.chat_completion_call_hashes.append(hash_object.hexdigest()) - if self.stream: + if not isinstance(completion, ModelResponse): async for chunk in self.show_send_output_stream(completion): yield chunk else: @@ -2640,6 +2638,10 @@ def show_send_output(self, completion): if self.verbose: print(completion) + if not isinstance(completion, ModelResponse): + self.io.tool_error(str(completion)) + return + if not completion.choices: self.io.tool_error(str(completion)) return @@ -3092,7 +3094,7 @@ async def allowed_to_edit(self, path): return if not Path(full_path).exists(): - if not self.io.confirm_ask("Create new file?", subject=path): + if not await self.io.confirm_ask("Create new file?", subject=path): self.io.tool_output(f"Skipping edits to {path}") return diff --git a/aider/io.py b/aider/io.py index b721d848abb..a570c2125f6 100644 --- a/aider/io.py +++ b/aider/io.py @@ -1301,7 +1301,7 @@ def assistant_output(self, message, pretty=None): else: show_resp = Text(message or "(empty response)") - self.stream_print(show_resp) + self.console.print(show_resp) def render_markdown(self, text): output = StringIO() diff --git a/aider/mcp/__init__.py 
b/aider/mcp/__init__.py index 17903017745..eea87122003 100644 --- a/aider/mcp/__init__.py +++ b/aider/mcp/__init__.py @@ -154,4 +154,12 @@ def load_mcp_servers(mcp_servers, mcp_servers_file, io, verbose=False, mcp_trans if mcp_servers_file: servers = _parse_mcp_servers_from_file(mcp_servers_file, io, verbose, mcp_transport) + if not servers: + # HACK: install a dummy default MCP server when none are configured. + # On coder switch the prompt area is initialized twice (once for the outgoing coder and + # once immediately after for the incoming one), which triggers a race where an awaited + # coroutine can no longer yield control. Running through the MCP server initialization + # path restores the yield, so this placeholder server prevents the hang. Root cause is + # not yet understood (possibly a prompt_toolkit issue); remove this workaround once the + # underlying race is diagnosed and fixed. + servers = [McpServer(json.loads('{"aider_default": {}}'))] + return servers diff --git a/pyproject.toml b/pyproject.toml index c39e82e813d..10413f42fbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,7 +22,6 @@ dynamic = ["dependencies", "optional-dependencies", "version"] Homepage = "https://github.com/dwash96/aider-ce" [project.scripts] -aider = "aider.main:main" aider-ce = "aider.main:main" [tool.setuptools.dynamic]