From 391a0d9f92143692596f297af2a4e36cbccca2df Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 18:40:45 -0400 Subject: [PATCH 01/30] Add a decorate that wraps around the `mcp.tool` decorator. This will allow us to more easily collect tools --- .../UnityMcpServer~/src/registry/__init__.py | 14 +++++ .../src/registry/tool_registry.py | 56 +++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 UnityMcpBridge/UnityMcpServer~/src/registry/__init__.py create mode 100644 UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py diff --git a/UnityMcpBridge/UnityMcpServer~/src/registry/__init__.py b/UnityMcpBridge/UnityMcpServer~/src/registry/__init__.py new file mode 100644 index 00000000..5beb708b --- /dev/null +++ b/UnityMcpBridge/UnityMcpServer~/src/registry/__init__.py @@ -0,0 +1,14 @@ +""" +Registry package for MCP tool auto-discovery. +""" +from .tool_registry import ( + mcp_for_unity_tool, + get_registered_tools, + clear_registry +) + +__all__ = [ + 'mcp_for_unity_tool', + 'get_registered_tools', + 'clear_registry' +] diff --git a/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py new file mode 100644 index 00000000..62d7ad55 --- /dev/null +++ b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py @@ -0,0 +1,56 @@ +""" +Tool registry for auto-discovery of MCP tools. +""" +from typing import Callable, Any +from telemetry_decorator import telemetry_tool + +# Global registry to collect decorated tools +_tool_registry: list[dict[str, Any]] = [] + + +def mcp_for_unity_tool( + name: str | None = None, + description: str | None = None, + **kwargs +) -> Callable: + """ + Decorator for registering MCP tools with auto-discovery. + + Automatically applies: + - Telemetry tracking (if available and enabled) + - Registration in the global tool registry + + Args: + name: Tool name (defaults to function name) + description: Tool description + enable_telemetry: Whether to enable telemetry for this tool (default: True) + **kwargs: Additional arguments passed to @mcp.tool() + + Example: + @mcp_for_unity_tool(description="Does something cool") + async def my_custom_tool(ctx: Context, ...): + pass + """ + def decorator(func: Callable) -> Callable: + tool_name = name if name is not None else func.__name__ + wrapped_func = telemetry_tool(tool_name)(func) + _tool_registry.append({ + 'func': wrapped_func, + 'name': tool_name, + 'description': description, + 'kwargs': kwargs + }) + + return wrapped_func + + return decorator + + +def get_registered_tools() -> list[dict[str, Any]]: + """Get all registered tools""" + return _tool_registry.copy() + + +def clear_registry(): + """Clear the tool registry (useful for testing)""" + _tool_registry.clear() From 5c74980078a329ce856000d32ab5ff1ee43cb11d Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 18:51:50 -0400 Subject: [PATCH 02/30] Register tools that's defined in the tools folder --- .../src/registry/tool_registry.py | 6 +- .../UnityMcpServer~/src/tools/__init__.py | 75 ++++++++++++------- 2 files changed, 51 insertions(+), 30 deletions(-) diff --git a/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py index 62d7ad55..a7dee182 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py +++ b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py @@ -2,7 +2,6 @@ Tool registry for auto-discovery of MCP tools. 
""" from typing import Callable, Any -from telemetry_decorator import telemetry_tool # Global registry to collect decorated tools _tool_registry: list[dict[str, Any]] = [] @@ -33,15 +32,14 @@ async def my_custom_tool(ctx: Context, ...): """ def decorator(func: Callable) -> Callable: tool_name = name if name is not None else func.__name__ - wrapped_func = telemetry_tool(tool_name)(func) _tool_registry.append({ - 'func': wrapped_func, + 'func': func, 'name': tool_name, 'description': description, 'kwargs': kwargs }) - return wrapped_func + return func return decorator diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py b/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py index 5bf45f2e..c7507fc5 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py @@ -1,35 +1,58 @@ +""" +MCP Tools package - Auto-discovers and registers all tools in this directory. +""" +import importlib import logging +from pathlib import Path +import pkgutil from mcp.server.fastmcp import FastMCP +from telemetry_decorator import telemetry_tool -from .manage_script_edits import register_manage_script_edits_tools -from .manage_script import register_manage_script_tools -from .manage_scene import register_manage_scene_tools -from .manage_editor import register_manage_editor_tools -from .manage_gameobject import register_manage_gameobject_tools -from .manage_asset import register_manage_asset_tools -from .manage_prefabs import register_manage_prefabs_tools -from .manage_shader import register_manage_shader_tools -from .read_console import register_read_console_tools -from .manage_menu_item import register_manage_menu_item_tools -from .resource_tools import register_resource_tools +from registry import get_registered_tools, mcp_for_unity_tool logger = logging.getLogger("mcp-for-unity-server") +# Export decorator for easy imports within tools +__all__ = ['register_all_tools', 'mcp_for_unity_tool'] + def register_all_tools(mcp: FastMCP): - """Register all refactored tools with the MCP server.""" - # Prefer the surgical edits tool so LLMs discover it first - logger.info("Registering MCP for Unity Server refactored tools...") - register_manage_script_edits_tools(mcp) - register_manage_script_tools(mcp) - register_manage_scene_tools(mcp) - register_manage_editor_tools(mcp) - register_manage_gameobject_tools(mcp) - register_manage_asset_tools(mcp) - register_manage_prefabs_tools(mcp) - register_manage_shader_tools(mcp) - register_read_console_tools(mcp) - register_manage_menu_item_tools(mcp) - register_resource_tools(mcp) - logger.info("MCP for Unity Server tool registration complete.") + """ + Auto-discover and register all tools in the tools/ directory. + + Any .py file in this directory with @mcp_for_unity_tool decorated + functions will be automatically registered. 
+ """ + logger.info("Auto-discovering MCP for Unity Server tools...") + # Dynamic import of all modules in this directory + tools_dir = Path(__file__).parent + + for _, module_name, _ in pkgutil.iter_modules([str(tools_dir)]): + # Skip private modules and __init__ + if module_name.startswith('_'): + continue + + try: + importlib.import_module(f'.{module_name}', __package__) + except Exception as e: + logger.warning(f"Failed to import tool module {module_name}: {e}") + + tools = get_registered_tools() + + if not tools: + logger.warning("No MCP tools registered!") + return + + for tool_info in tools: + func = tool_info['func'] + tool_name = tool_info['name'] + description = tool_info['description'] + kwargs = tool_info['kwargs'] + + # Apply the @mcp.tool decorator and telemetry + mcp.tool(name=tool_name, description=description, **kwargs)(func) + telemetry_tool(tool_name)(func) + logger.info(f"Registered tool: {tool_name} - {description}") + + logger.info(f"Registered {len(tools)} MCP tools") From 00ccf140286590523fe288c32c98fc7c2c6432f8 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 18:54:30 -0400 Subject: [PATCH 03/30] Update Python tools to use new decorator --- .../UnityMcpServer~/src/tools/manage_asset.py | 135 +++++---- .../src/tools/manage_editor.py | 105 ++++--- .../src/tools/manage_gameobject.py | 271 +++++++++--------- .../src/tools/manage_menu_item.py | 63 ++-- .../src/tools/manage_prefabs.py | 105 ++++--- .../UnityMcpServer~/src/tools/manage_scene.py | 103 ++++--- .../src/tools/manage_shader.py | 111 ++++--- .../UnityMcpServer~/src/tools/read_console.py | 143 +++++---- 8 files changed, 505 insertions(+), 531 deletions(-) diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_asset.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_asset.py index a442b422..5e21d2ce 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_asset.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_asset.py @@ -4,83 +4,80 @@ import asyncio from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import async_send_command_with_retry -from telemetry_decorator import telemetry_tool - -def register_manage_asset_tools(mcp: FastMCP): - """Registers the manage_asset tool with the MCP server.""" - @mcp.tool(name="manage_asset", description="Performs asset operations (import, create, modify, delete, etc.) 
in Unity.") - @telemetry_tool("manage_asset") - async def manage_asset( - ctx: Context, - action: Annotated[Literal["import", "create", "modify", "delete", "duplicate", "move", "rename", "search", "get_info", "create_folder", "get_components"], "Perform CRUD operations on assets."], - path: Annotated[str, "Asset path (e.g., 'Materials/MyMaterial.mat') or search scope."], - asset_type: Annotated[str, - "Asset type (e.g., 'Material', 'Folder') - required for 'create'."] | None = None, - properties: Annotated[dict[str, Any], - "Dictionary of properties for 'create'/'modify'."] | None = None, - destination: Annotated[str, - "Target path for 'duplicate'/'move'."] | None = None, - generate_preview: Annotated[bool, - "Generate a preview/thumbnail for the asset when supported."] = False, - search_pattern: Annotated[str, - "Search pattern (e.g., '*.prefab')."] | None = None, - filter_type: Annotated[str, "Filter type for search"] | None = None, - filter_date_after: Annotated[str, - "Date after which to filter"] | None = None, - page_size: Annotated[int, "Page size for pagination"] | None = None, - page_number: Annotated[int, "Page number for pagination"] | None = None - ) -> dict[str, Any]: - ctx.info(f"Processing manage_asset: {action}") - # Ensure properties is a dict if None - if properties is None: - properties = {} +@mcp_for_unity_tool( + description="Performs asset operations (import, create, modify, delete, etc.) in Unity." +) +async def manage_asset( + ctx: Context, + action: Annotated[Literal["import", "create", "modify", "delete", "duplicate", "move", "rename", "search", "get_info", "create_folder", "get_components"], "Perform CRUD operations on assets."], + path: Annotated[str, "Asset path (e.g., 'Materials/MyMaterial.mat') or search scope."], + asset_type: Annotated[str, + "Asset type (e.g., 'Material', 'Folder') - required for 'create'."] | None = None, + properties: Annotated[dict[str, Any], + "Dictionary of properties for 'create'/'modify'."] | None = None, + destination: Annotated[str, + "Target path for 'duplicate'/'move'."] | None = None, + generate_preview: Annotated[bool, + "Generate a preview/thumbnail for the asset when supported."] = False, + search_pattern: Annotated[str, + "Search pattern (e.g., '*.prefab')."] | None = None, + filter_type: Annotated[str, "Filter type for search"] | None = None, + filter_date_after: Annotated[str, + "Date after which to filter"] | None = None, + page_size: Annotated[int, "Page size for pagination"] | None = None, + page_number: Annotated[int, "Page number for pagination"] | None = None +) -> dict[str, Any]: + ctx.info(f"Processing manage_asset: {action}") + # Ensure properties is a dict if None + if properties is None: + properties = {} - # Coerce numeric inputs defensively - def _coerce_int(value, default=None): - if value is None: + # Coerce numeric inputs defensively + def _coerce_int(value, default=None): + if value is None: + return default + try: + if isinstance(value, bool): return default - try: - if isinstance(value, bool): - return default - if isinstance(value, int): - return int(value) - s = str(value).strip() - if s.lower() in ("", "none", "null"): - return default - return int(float(s)) - except Exception: + if isinstance(value, int): + return int(value) + s = str(value).strip() + if s.lower() in ("", "none", "null"): return default + return int(float(s)) + except Exception: + return default - page_size = _coerce_int(page_size) - page_number = _coerce_int(page_number) + page_size = _coerce_int(page_size) + page_number = 
_coerce_int(page_number) - # Prepare parameters for the C# handler - params_dict = { - "action": action.lower(), - "path": path, - "assetType": asset_type, - "properties": properties, - "destination": destination, - "generatePreview": generate_preview, - "searchPattern": search_pattern, - "filterType": filter_type, - "filterDateAfter": filter_date_after, - "pageSize": page_size, - "pageNumber": page_number - } + # Prepare parameters for the C# handler + params_dict = { + "action": action.lower(), + "path": path, + "assetType": asset_type, + "properties": properties, + "destination": destination, + "generatePreview": generate_preview, + "searchPattern": search_pattern, + "filterType": filter_type, + "filterDateAfter": filter_date_after, + "pageSize": page_size, + "pageNumber": page_number + } - # Remove None values to avoid sending unnecessary nulls - params_dict = {k: v for k, v in params_dict.items() if v is not None} + # Remove None values to avoid sending unnecessary nulls + params_dict = {k: v for k, v in params_dict.items() if v is not None} - # Get the current asyncio event loop - loop = asyncio.get_running_loop() + # Get the current asyncio event loop + loop = asyncio.get_running_loop() - # Use centralized async retry helper to avoid blocking the event loop - result = await async_send_command_with_retry("manage_asset", params_dict, loop=loop) - # Return the result obtained from Unity - return result if isinstance(result, dict) else {"success": False, "message": str(result)} + # Use centralized async retry helper to avoid blocking the event loop + result = await async_send_command_with_retry("manage_asset", params_dict, loop=loop) + # Return the result obtained from Unity + return result if isinstance(result, dict) else {"success": False, "message": str(result)} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_editor.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_editor.py index 644209f7..c0de76c2 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_editor.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_editor.py @@ -1,60 +1,57 @@ from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from telemetry import is_telemetry_enabled, record_tool_usage - from unity_connection import send_command_with_retry -def register_manage_editor_tools(mcp: FastMCP): - """Register all editor management tools with the MCP server.""" - - @mcp.tool(name="manage_editor", description="Controls and queries the Unity editor's state and settings") - @telemetry_tool("manage_editor") - def manage_editor( - ctx: Context, - action: Annotated[Literal["telemetry_status", "telemetry_ping", "play", "pause", "stop", "get_state", "get_project_root", "get_windows", - "get_active_tool", "get_selection", "get_prefab_stage", "set_active_tool", "add_tag", "remove_tag", "get_tags", "add_layer", "remove_layer", "get_layers"], "Get and update the Unity Editor state."], - wait_for_completion: Annotated[bool, - "Optional. 
If True, waits for certain actions"] | None = None, - tool_name: Annotated[str, - "Tool name when setting active tool"] | None = None, - tag_name: Annotated[str, - "Tag name when adding and removing tags"] | None = None, - layer_name: Annotated[str, - "Layer name when adding and removing layers"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_editor: {action}") - try: - # Diagnostics: quick telemetry checks - if action == "telemetry_status": - return {"success": True, "telemetry_enabled": is_telemetry_enabled()} - - if action == "telemetry_ping": - record_tool_usage("diagnostic_ping", True, 1.0, None) - return {"success": True, "message": "telemetry ping queued"} - # Prepare parameters, removing None values - params = { - "action": action, - "waitForCompletion": wait_for_completion, - "toolName": tool_name, # Corrected parameter name to match C# - "tagName": tag_name, # Pass tag name - "layerName": layer_name, # Pass layer name - # Add other parameters based on the action being performed - # "width": width, - # "height": height, - # etc. - } - params = {k: v for k, v in params.items() if v is not None} - - # Send command using centralized retry helper - response = send_command_with_retry("manage_editor", params) - - # Preserve structured failure data; unwrap success into a friendlier shape - if isinstance(response, dict) and response.get("success"): - return {"success": True, "message": response.get("message", "Editor operation successful."), "data": response.get("data")} - return response if isinstance(response, dict) else {"success": False, "message": str(response)} - - except Exception as e: - return {"success": False, "message": f"Python error managing editor: {str(e)}"} +@mcp_for_unity_tool( + description="Controls and queries the Unity editor's state and settings" +) +def manage_editor( + ctx: Context, + action: Annotated[Literal["telemetry_status", "telemetry_ping", "play", "pause", "stop", "get_state", "get_project_root", "get_windows", + "get_active_tool", "get_selection", "get_prefab_stage", "set_active_tool", "add_tag", "remove_tag", "get_tags", "add_layer", "remove_layer", "get_layers"], "Get and update the Unity Editor state."], + wait_for_completion: Annotated[bool, + "Optional. If True, waits for certain actions"] | None = None, + tool_name: Annotated[str, + "Tool name when setting active tool"] | None = None, + tag_name: Annotated[str, + "Tag name when adding and removing tags"] | None = None, + layer_name: Annotated[str, + "Layer name when adding and removing layers"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_editor: {action}") + try: + # Diagnostics: quick telemetry checks + if action == "telemetry_status": + return {"success": True, "telemetry_enabled": is_telemetry_enabled()} + + if action == "telemetry_ping": + record_tool_usage("diagnostic_ping", True, 1.0, None) + return {"success": True, "message": "telemetry ping queued"} + # Prepare parameters, removing None values + params = { + "action": action, + "waitForCompletion": wait_for_completion, + "toolName": tool_name, # Corrected parameter name to match C# + "tagName": tag_name, # Pass tag name + "layerName": layer_name, # Pass layer name + # Add other parameters based on the action being performed + # "width": width, + # "height": height, + # etc. 
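+            # (illustrative) e.g. a "set_active_tool" request would be sent as
+            #     {"action": "set_active_tool", "toolName": "Move"}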
+ } + params = {k: v for k, v in params.items() if v is not None} + + # Send command using centralized retry helper + response = send_command_with_retry("manage_editor", params) + + # Preserve structured failure data; unwrap success into a friendlier shape + if isinstance(response, dict) and response.get("success"): + return {"success": True, "message": response.get("message", "Editor operation successful."), "data": response.get("data")} + return response if isinstance(response, dict) else {"success": False, "message": str(response)} + + except Exception as e: + return {"success": False, "message": f"Python error managing editor: {str(e)}"} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_gameobject.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_gameobject.py index 41d4a1c0..a8ca1609 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_gameobject.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_gameobject.py @@ -1,148 +1,145 @@ from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry -def register_manage_gameobject_tools(mcp: FastMCP): - """Register all GameObject management tools with the MCP server.""" - - @mcp.tool(name="manage_gameobject", description="Manage GameObjects. Note: for 'get_components', the `data` field contains a dictionary of component names and their serialized properties. For 'get_component', specify 'component_name' to retrieve only that component's serialized data.") - @telemetry_tool("manage_gameobject") - def manage_gameobject( - ctx: Context, - action: Annotated[Literal["create", "modify", "delete", "find", "add_component", "remove_component", "set_component_property", "get_components", "get_component"], "Perform CRUD operations on GameObjects and components."], - target: Annotated[str, - "GameObject identifier by name or path for modify/delete/component actions"] | None = None, - search_method: Annotated[Literal["by_id", "by_name", "by_path", "by_tag", "by_layer", "by_component"], - "How to find objects. Used with 'find' and some 'target' lookups."] | None = None, - name: Annotated[str, - "GameObject name for 'create' (initial name) and 'modify' (rename) actions ONLY. 
For 'find' action, use 'search_term' instead."] | None = None, - tag: Annotated[str, - "Tag name - used for both 'create' (initial tag) and 'modify' (change tag)"] | None = None, - parent: Annotated[str, - "Parent GameObject reference - used for both 'create' (initial parent) and 'modify' (change parent)"] | None = None, - position: Annotated[list[float], - "Position - used for both 'create' (initial position) and 'modify' (change position)"] | None = None, - rotation: Annotated[list[float], - "Rotation - used for both 'create' (initial rotation) and 'modify' (change rotation)"] | None = None, - scale: Annotated[list[float], - "Scale - used for both 'create' (initial scale) and 'modify' (change scale)"] | None = None, - components_to_add: Annotated[list[str], - "List of component names to add"] | None = None, - primitive_type: Annotated[str, - "Primitive type for 'create' action"] | None = None, - save_as_prefab: Annotated[bool, - "If True, saves the created GameObject as a prefab"] | None = None, - prefab_path: Annotated[str, "Path for prefab creation"] | None = None, - prefab_folder: Annotated[str, - "Folder for prefab creation"] | None = None, - # --- Parameters for 'modify' --- - set_active: Annotated[bool, - "If True, sets the GameObject active"] | None = None, - layer: Annotated[str, "Layer name"] | None = None, - components_to_remove: Annotated[list[str], - "List of component names to remove"] | None = None, - component_properties: Annotated[dict[str, dict[str, Any]], - """Dictionary of component names to their properties to set. For example: - `{"MyScript": {"otherObject": {"find": "Player", "method": "by_name"}}}` assigns GameObject - `{"MyScript": {"playerHealth": {"find": "Player", "component": "HealthComponent"}}}` assigns Component - Example set nested property: - - Access shared material: `{"MeshRenderer": {"sharedMaterial.color": [1, 0, 0, 1]}}`"""] | None = None, - # --- Parameters for 'find' --- - search_term: Annotated[str, - "Search term for 'find' action ONLY. Use this (not 'name') when searching for GameObjects."] | None = None, - find_all: Annotated[bool, - "If True, finds all GameObjects matching the search term"] | None = None, - search_in_children: Annotated[bool, - "If True, searches in children of the GameObject"] | None = None, - search_inactive: Annotated[bool, - "If True, searches inactive GameObjects"] | None = None, - # -- Component Management Arguments -- - component_name: Annotated[str, - "Component name for 'add_component' and 'remove_component' actions"] | None = None, - # Controls whether serialization of private [SerializeField] fields is included - includeNonPublicSerialized: Annotated[bool, - "Controls whether serialization of private [SerializeField] fields is included"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_gameobject: {action}") - try: - # Validate parameter usage to prevent silent failures - if action == "find": - if name is not None: - return { - "success": False, - "message": "For 'find' action, use 'search_term' parameter, not 'name'. Remove 'name' parameter. Example: search_term='Player', search_method='by_name'" - } - if search_term is None: - return { - "success": False, - "message": "For 'find' action, 'search_term' parameter is required. Use search_term (not 'name') to specify what to find." - } +@mcp_for_unity_tool( + description="Manage GameObjects. Note: for 'get_components', the `data` field contains a dictionary of component names and their serialized properties. 
For 'get_component', specify 'component_name' to retrieve only that component's serialized data." +) +def manage_gameobject( + ctx: Context, + action: Annotated[Literal["create", "modify", "delete", "find", "add_component", "remove_component", "set_component_property", "get_components", "get_component"], "Perform CRUD operations on GameObjects and components."], + target: Annotated[str, + "GameObject identifier by name or path for modify/delete/component actions"] | None = None, + search_method: Annotated[Literal["by_id", "by_name", "by_path", "by_tag", "by_layer", "by_component"], + "How to find objects. Used with 'find' and some 'target' lookups."] | None = None, + name: Annotated[str, + "GameObject name for 'create' (initial name) and 'modify' (rename) actions ONLY. For 'find' action, use 'search_term' instead."] | None = None, + tag: Annotated[str, + "Tag name - used for both 'create' (initial tag) and 'modify' (change tag)"] | None = None, + parent: Annotated[str, + "Parent GameObject reference - used for both 'create' (initial parent) and 'modify' (change parent)"] | None = None, + position: Annotated[list[float], + "Position - used for both 'create' (initial position) and 'modify' (change position)"] | None = None, + rotation: Annotated[list[float], + "Rotation - used for both 'create' (initial rotation) and 'modify' (change rotation)"] | None = None, + scale: Annotated[list[float], + "Scale - used for both 'create' (initial scale) and 'modify' (change scale)"] | None = None, + components_to_add: Annotated[list[str], + "List of component names to add"] | None = None, + primitive_type: Annotated[str, + "Primitive type for 'create' action"] | None = None, + save_as_prefab: Annotated[bool, + "If True, saves the created GameObject as a prefab"] | None = None, + prefab_path: Annotated[str, "Path for prefab creation"] | None = None, + prefab_folder: Annotated[str, + "Folder for prefab creation"] | None = None, + # --- Parameters for 'modify' --- + set_active: Annotated[bool, + "If True, sets the GameObject active"] | None = None, + layer: Annotated[str, "Layer name"] | None = None, + components_to_remove: Annotated[list[str], + "List of component names to remove"] | None = None, + component_properties: Annotated[dict[str, dict[str, Any]], + """Dictionary of component names to their properties to set. For example: + `{"MyScript": {"otherObject": {"find": "Player", "method": "by_name"}}}` assigns GameObject + `{"MyScript": {"playerHealth": {"find": "Player", "component": "HealthComponent"}}}` assigns Component + Example set nested property: + - Access shared material: `{"MeshRenderer": {"sharedMaterial.color": [1, 0, 0, 1]}}`"""] | None = None, + # --- Parameters for 'find' --- + search_term: Annotated[str, + "Search term for 'find' action ONLY. 
Use this (not 'name') when searching for GameObjects."] | None = None, + find_all: Annotated[bool, + "If True, finds all GameObjects matching the search term"] | None = None, + search_in_children: Annotated[bool, + "If True, searches in children of the GameObject"] | None = None, + search_inactive: Annotated[bool, + "If True, searches inactive GameObjects"] | None = None, + # -- Component Management Arguments -- + component_name: Annotated[str, + "Component name for 'add_component' and 'remove_component' actions"] | None = None, + # Controls whether serialization of private [SerializeField] fields is included + includeNonPublicSerialized: Annotated[bool, + "Controls whether serialization of private [SerializeField] fields is included"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_gameobject: {action}") + try: + # Validate parameter usage to prevent silent failures + if action == "find": + if name is not None: + return { + "success": False, + "message": "For 'find' action, use 'search_term' parameter, not 'name'. Remove 'name' parameter. Example: search_term='Player', search_method='by_name'" + } + if search_term is None: + return { + "success": False, + "message": "For 'find' action, 'search_term' parameter is required. Use search_term (not 'name') to specify what to find." + } - if action in ["create", "modify"]: - if search_term is not None: - return { - "success": False, - "message": f"For '{action}' action, use 'name' parameter, not 'search_term'." - } + if action in ["create", "modify"]: + if search_term is not None: + return { + "success": False, + "message": f"For '{action}' action, use 'name' parameter, not 'search_term'." + } - # Prepare parameters, removing None values - params = { - "action": action, - "target": target, - "searchMethod": search_method, - "name": name, - "tag": tag, - "parent": parent, - "position": position, - "rotation": rotation, - "scale": scale, - "componentsToAdd": components_to_add, - "primitiveType": primitive_type, - "saveAsPrefab": save_as_prefab, - "prefabPath": prefab_path, - "prefabFolder": prefab_folder, - "setActive": set_active, - "layer": layer, - "componentsToRemove": components_to_remove, - "componentProperties": component_properties, - "searchTerm": search_term, - "findAll": find_all, - "searchInChildren": search_in_children, - "searchInactive": search_inactive, - "componentName": component_name, - "includeNonPublicSerialized": includeNonPublicSerialized - } - params = {k: v for k, v in params.items() if v is not None} + # Prepare parameters, removing None values + params = { + "action": action, + "target": target, + "searchMethod": search_method, + "name": name, + "tag": tag, + "parent": parent, + "position": position, + "rotation": rotation, + "scale": scale, + "componentsToAdd": components_to_add, + "primitiveType": primitive_type, + "saveAsPrefab": save_as_prefab, + "prefabPath": prefab_path, + "prefabFolder": prefab_folder, + "setActive": set_active, + "layer": layer, + "componentsToRemove": components_to_remove, + "componentProperties": component_properties, + "searchTerm": search_term, + "findAll": find_all, + "searchInChildren": search_in_children, + "searchInactive": search_inactive, + "componentName": component_name, + "includeNonPublicSerialized": includeNonPublicSerialized + } + params = {k: v for k, v in params.items() if v is not None} - # --- Handle Prefab Path Logic --- - # Check if 'saveAsPrefab' is explicitly True in params - if action == "create" and params.get("saveAsPrefab"): - if "prefabPath" not in 
params: - if "name" not in params or not params["name"]: - return {"success": False, "message": "Cannot create default prefab path: 'name' parameter is missing."} - # Use the provided prefab_folder (which has a default) and the name to construct the path - constructed_path = f"{prefab_folder}/{params['name']}.prefab" - # Ensure clean path separators (Unity prefers '/') - params["prefabPath"] = constructed_path.replace("\\", "/") - elif not params["prefabPath"].lower().endswith(".prefab"): - return {"success": False, "message": f"Invalid prefab_path: '{params['prefabPath']}' must end with .prefab"} - # Ensure prefabFolder itself isn't sent if prefabPath was constructed or provided - # The C# side only needs the final prefabPath - params.pop("prefabFolder", None) - # -------------------------------- + # --- Handle Prefab Path Logic --- + # Check if 'saveAsPrefab' is explicitly True in params + if action == "create" and params.get("saveAsPrefab"): + if "prefabPath" not in params: + if "name" not in params or not params["name"]: + return {"success": False, "message": "Cannot create default prefab path: 'name' parameter is missing."} + # Use the provided prefab_folder (which has a default) and the name to construct the path + constructed_path = f"{prefab_folder}/{params['name']}.prefab" + # Ensure clean path separators (Unity prefers '/') + params["prefabPath"] = constructed_path.replace("\\", "/") + elif not params["prefabPath"].lower().endswith(".prefab"): + return {"success": False, "message": f"Invalid prefab_path: '{params['prefabPath']}' must end with .prefab"} + # Ensure prefabFolder itself isn't sent if prefabPath was constructed or provided + # The C# side only needs the final prefabPath + params.pop("prefabFolder", None) + # -------------------------------- - # Use centralized retry helper - response = send_command_with_retry("manage_gameobject", params) + # Use centralized retry helper + response = send_command_with_retry("manage_gameobject", params) - # Check if the response indicates success - # If the response is not successful, raise an exception with the error message - if isinstance(response, dict) and response.get("success"): - return {"success": True, "message": response.get("message", "GameObject operation successful."), "data": response.get("data")} - return response if isinstance(response, dict) else {"success": False, "message": str(response)} + # Check if the response indicates success + # If the response is not successful, raise an exception with the error message + if isinstance(response, dict) and response.get("success"): + return {"success": True, "message": response.get("message", "GameObject operation successful."), "data": response.get("data")} + return response if isinstance(response, dict) else {"success": False, "message": str(response)} - except Exception as e: - return {"success": False, "message": f"Python error managing GameObject: {str(e)}"} \ No newline at end of file + except Exception as e: + return {"success": False, "message": f"Python error managing GameObject: {str(e)}"} \ No newline at end of file diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_menu_item.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_menu_item.py index 3e7620a6..5463614d 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_menu_item.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_menu_item.py @@ -4,41 +4,38 @@ import asyncio from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator 
import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import async_send_command_with_retry -def register_manage_menu_item_tools(mcp: FastMCP): - """Registers the manage_menu_item tool with the MCP server.""" - - @mcp.tool(name="manage_menu_item", description="Manage Unity menu items (execute/list/exists). If you're not sure what menu item to use, use the 'list' action to find it before using 'execute'.") - @telemetry_tool("manage_menu_item") - async def manage_menu_item( - ctx: Context, - action: Annotated[Literal["execute", "list", "exists"], "Read and execute Unity menu items."], - menu_path: Annotated[str, - "Menu path for 'execute' or 'exists' (e.g., 'File/Save Project')"] | None = None, - search: Annotated[str, - "Optional filter string for 'list' (e.g., 'Save')"] | None = None, - refresh: Annotated[bool, - "Optional flag to force refresh of the menu cache when listing"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_menu_item: {action}") - # Prepare parameters for the C# handler - params_dict: dict[str, Any] = { - "action": action, - "menuPath": menu_path, - "search": search, - "refresh": refresh, - } - # Remove None values - params_dict = {k: v for k, v in params_dict.items() if v is not None} +@mcp_for_unity_tool( + description="Manage Unity menu items (execute/list/exists). If you're not sure what menu item to use, use the 'list' action to find it before using 'execute'." +) +async def manage_menu_item( + ctx: Context, + action: Annotated[Literal["execute", "list", "exists"], "Read and execute Unity menu items."], + menu_path: Annotated[str, + "Menu path for 'execute' or 'exists' (e.g., 'File/Save Project')"] | None = None, + search: Annotated[str, + "Optional filter string for 'list' (e.g., 'Save')"] | None = None, + refresh: Annotated[bool, + "Optional flag to force refresh of the menu cache when listing"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_menu_item: {action}") + # Prepare parameters for the C# handler + params_dict: dict[str, Any] = { + "action": action, + "menuPath": menu_path, + "search": search, + "refresh": refresh, + } + # Remove None values + params_dict = {k: v for k, v in params_dict.items() if v is not None} - # Get the current asyncio event loop - loop = asyncio.get_running_loop() + # Get the current asyncio event loop + loop = asyncio.get_running_loop() - # Use centralized async retry helper - result = await async_send_command_with_retry("manage_menu_item", params_dict, loop=loop) - return result if isinstance(result, dict) else {"success": False, "message": str(result)} + # Use centralized async retry helper + result = await async_send_command_with_retry("manage_menu_item", params_dict, loop=loop) + return result if isinstance(result, dict) else {"success": False, "message": str(result)} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_prefabs.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_prefabs.py index 7c65f28f..ea89201c 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_prefabs.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_prefabs.py @@ -1,61 +1,58 @@ from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry -def register_manage_prefabs_tools(mcp: FastMCP) -> None: - """Register 
prefab management tools with the MCP server.""" - - @mcp.tool(name="manage_prefabs", description="Bridge for prefab management commands (stage control and creation).") - @telemetry_tool("manage_prefabs") - def manage_prefabs( - ctx: Context, - action: Annotated[Literal[ - "open_stage", - "close_stage", - "save_open_stage", - "create_from_gameobject", - ], "Manage prefabs (stage control and creation)."], - prefab_path: Annotated[str, - "Prefab asset path relative to Assets e.g. Assets/Prefabs/favorite.prefab"] | None = None, - mode: Annotated[str, - "Optional prefab stage mode (only 'InIsolation' is currently supported)"] | None = None, - save_before_close: Annotated[bool, - "When true, `close_stage` will save the prefab before exiting the stage."] | None = None, - target: Annotated[str, - "Scene GameObject name required for create_from_gameobject"] | None = None, - allow_overwrite: Annotated[bool, - "Allow replacing an existing prefab at the same path"] | None = None, - search_inactive: Annotated[bool, - "Include inactive objects when resolving the target name"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_prefabs: {action}") - try: - params: dict[str, Any] = {"action": action} +@mcp_for_unity_tool( + description="Bridge for prefab management commands (stage control and creation)." +) +def manage_prefabs( + ctx: Context, + action: Annotated[Literal[ + "open_stage", + "close_stage", + "save_open_stage", + "create_from_gameobject", + ], "Manage prefabs (stage control and creation)."], + prefab_path: Annotated[str, + "Prefab asset path relative to Assets e.g. Assets/Prefabs/favorite.prefab"] | None = None, + mode: Annotated[str, + "Optional prefab stage mode (only 'InIsolation' is currently supported)"] | None = None, + save_before_close: Annotated[bool, + "When true, `close_stage` will save the prefab before exiting the stage."] | None = None, + target: Annotated[str, + "Scene GameObject name required for create_from_gameobject"] | None = None, + allow_overwrite: Annotated[bool, + "Allow replacing an existing prefab at the same path"] | None = None, + search_inactive: Annotated[bool, + "Include inactive objects when resolving the target name"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_prefabs: {action}") + try: + params: dict[str, Any] = {"action": action} - if prefab_path: - params["prefabPath"] = prefab_path - if mode: - params["mode"] = mode - if save_before_close is not None: - params["saveBeforeClose"] = bool(save_before_close) - if target: - params["target"] = target - if allow_overwrite is not None: - params["allowOverwrite"] = bool(allow_overwrite) - if search_inactive is not None: - params["searchInactive"] = bool(search_inactive) - response = send_command_with_retry("manage_prefabs", params) + if prefab_path: + params["prefabPath"] = prefab_path + if mode: + params["mode"] = mode + if save_before_close is not None: + params["saveBeforeClose"] = bool(save_before_close) + if target: + params["target"] = target + if allow_overwrite is not None: + params["allowOverwrite"] = bool(allow_overwrite) + if search_inactive is not None: + params["searchInactive"] = bool(search_inactive) + response = send_command_with_retry("manage_prefabs", params) - if isinstance(response, dict) and response.get("success"): - return { - "success": True, - "message": response.get("message", "Prefab operation successful."), - "data": response.get("data"), - } - return response if isinstance(response, dict) else {"success": False, "message": str(response)} - 
except Exception as exc: - return {"success": False, "message": f"Python error managing prefabs: {exc}"} + if isinstance(response, dict) and response.get("success"): + return { + "success": True, + "message": response.get("message", "Prefab operation successful."), + "data": response.get("data"), + } + return response if isinstance(response, dict) else {"success": False, "message": str(response)} + except Exception as exc: + return {"success": False, "message": f"Python error managing prefabs: {exc}"} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_scene.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_scene.py index fb5a1bca..09494e4a 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_scene.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_scene.py @@ -1,61 +1,56 @@ from typing import Annotated, Literal, Any -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry -def register_manage_scene_tools(mcp: FastMCP): - """Register all scene management tools with the MCP server.""" - - @mcp.tool(name="manage_scene", description="Manage Unity scenes") - @telemetry_tool("manage_scene") - def manage_scene( - ctx: Context, - action: Annotated[Literal["create", "load", "save", "get_hierarchy", "get_active", "get_build_settings"], "Perform CRUD operations on Unity scenes."], - name: Annotated[str, - "Scene name. Not required get_active/get_build_settings"] | None = None, - path: Annotated[str, - "Asset path for scene operations (default: 'Assets/')"] | None = None, - build_index: Annotated[int, - "Build index for load/build settings actions"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_scene: {action}") - try: - # Coerce numeric inputs defensively - def _coerce_int(value, default=None): - if value is None: +@mcp_for_unity_tool(description="Manage Unity scenes") +def manage_scene( + ctx: Context, + action: Annotated[Literal["create", "load", "save", "get_hierarchy", "get_active", "get_build_settings"], "Perform CRUD operations on Unity scenes."], + name: Annotated[str, + "Scene name. 
Not required get_active/get_build_settings"] | None = None, + path: Annotated[str, + "Asset path for scene operations (default: 'Assets/')"] | None = None, + build_index: Annotated[int, + "Build index for load/build settings actions"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_scene: {action}") + try: + # Coerce numeric inputs defensively + def _coerce_int(value, default=None): + if value is None: + return default + try: + if isinstance(value, bool): return default - try: - if isinstance(value, bool): - return default - if isinstance(value, int): - return int(value) - s = str(value).strip() - if s.lower() in ("", "none", "null"): - return default - return int(float(s)) - except Exception: + if isinstance(value, int): + return int(value) + s = str(value).strip() + if s.lower() in ("", "none", "null"): return default - - coerced_build_index = _coerce_int(build_index, default=None) - - params = {"action": action} - if name: - params["name"] = name - if path: - params["path"] = path - if coerced_build_index is not None: - params["buildIndex"] = coerced_build_index - - # Use centralized retry helper - response = send_command_with_retry("manage_scene", params) - - # Preserve structured failure data; unwrap success into a friendlier shape - if isinstance(response, dict) and response.get("success"): - return {"success": True, "message": response.get("message", "Scene operation successful."), "data": response.get("data")} - return response if isinstance(response, dict) else {"success": False, "message": str(response)} - - except Exception as e: - return {"success": False, "message": f"Python error managing scene: {str(e)}"} + return int(float(s)) + except Exception: + return default + + coerced_build_index = _coerce_int(build_index, default=None) + + params = {"action": action} + if name: + params["name"] = name + if path: + params["path"] = path + if coerced_build_index is not None: + params["buildIndex"] = coerced_build_index + + # Use centralized retry helper + response = send_command_with_retry("manage_scene", params) + + # Preserve structured failure data; unwrap success into a friendlier shape + if isinstance(response, dict) and response.get("success"): + return {"success": True, "message": response.get("message", "Scene operation successful."), "data": response.get("data")} + return response if isinstance(response, dict) else {"success": False, "message": str(response)} + + except Exception as e: + return {"success": False, "message": f"Python error managing scene: {str(e)}"} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_shader.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_shader.py index e9ccc14a..9c199661 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_shader.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_shader.py @@ -1,63 +1,60 @@ import base64 from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry -def register_manage_shader_tools(mcp: FastMCP): - """Register all shader script management tools with the MCP server.""" - - @mcp.tool(name="manage_shader", description="Manages shader scripts in Unity (create, read, update, delete).") - @telemetry_tool("manage_shader") - def manage_shader( - ctx: Context, - action: Annotated[Literal['create', 'read', 'update', 'delete'], "Perform CRUD operations on shader 
scripts."], - name: Annotated[str, "Shader name (no .cs extension)"], - path: Annotated[str, "Asset path (default: \"Assets/\")"], - contents: Annotated[str, - "Shader code for 'create'/'update'"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_shader: {action}") - try: - # Prepare parameters for Unity - params = { - "action": action, - "name": name, - "path": path, - } - - # Base64 encode the contents if they exist to avoid JSON escaping issues - if contents is not None: - if action in ['create', 'update']: - # Encode content for safer transmission - params["encodedContents"] = base64.b64encode( - contents.encode('utf-8')).decode('utf-8') - params["contentsEncoded"] = True - else: - params["contents"] = contents - - # Remove None values so they don't get sent as null - params = {k: v for k, v in params.items() if v is not None} - - # Send command via centralized retry helper - response = send_command_with_retry("manage_shader", params) - - # Process response from Unity - if isinstance(response, dict) and response.get("success"): - # If the response contains base64 encoded content, decode it - if response.get("data", {}).get("contentsEncoded"): - decoded_contents = base64.b64decode( - response["data"]["encodedContents"]).decode('utf-8') - response["data"]["contents"] = decoded_contents - del response["data"]["encodedContents"] - del response["data"]["contentsEncoded"] - - return {"success": True, "message": response.get("message", "Operation successful."), "data": response.get("data")} - return response if isinstance(response, dict) else {"success": False, "message": str(response)} - - except Exception as e: - # Handle Python-side errors (e.g., connection issues) - return {"success": False, "message": f"Python error managing shader: {str(e)}"} +@mcp_for_unity_tool( + description="Manages shader scripts in Unity (create, read, update, delete)." 
+) +def manage_shader( + ctx: Context, + action: Annotated[Literal['create', 'read', 'update', 'delete'], "Perform CRUD operations on shader scripts."], + name: Annotated[str, "Shader name (no .cs extension)"], + path: Annotated[str, "Asset path (default: \"Assets/\")"], + contents: Annotated[str, + "Shader code for 'create'/'update'"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_shader: {action}") + try: + # Prepare parameters for Unity + params = { + "action": action, + "name": name, + "path": path, + } + + # Base64 encode the contents if they exist to avoid JSON escaping issues + if contents is not None: + if action in ['create', 'update']: + # Encode content for safer transmission + params["encodedContents"] = base64.b64encode( + contents.encode('utf-8')).decode('utf-8') + params["contentsEncoded"] = True + else: + params["contents"] = contents + + # Remove None values so they don't get sent as null + params = {k: v for k, v in params.items() if v is not None} + + # Send command via centralized retry helper + response = send_command_with_retry("manage_shader", params) + + # Process response from Unity + if isinstance(response, dict) and response.get("success"): + # If the response contains base64 encoded content, decode it + if response.get("data", {}).get("contentsEncoded"): + decoded_contents = base64.b64decode( + response["data"]["encodedContents"]).decode('utf-8') + response["data"]["contents"] = decoded_contents + del response["data"]["encodedContents"] + del response["data"]["contentsEncoded"] + + return {"success": True, "message": response.get("message", "Operation successful."), "data": response.get("data")} + return response if isinstance(response, dict) else {"success": False, "message": str(response)} + + except Exception as e: + # Handle Python-side errors (e.g., connection issues) + return {"success": False, "message": f"Python error managing shader: {str(e)}"} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/read_console.py b/UnityMcpBridge/UnityMcpServer~/src/tools/read_console.py index c647cf8f..5fc9a096 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/read_console.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/read_console.py @@ -3,88 +3,85 @@ """ from typing import Annotated, Any, Literal -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry -def register_read_console_tools(mcp: FastMCP): - """Registers the read_console tool with the MCP server.""" - - @mcp.tool(name="read_console", description="Gets messages from or clears the Unity Editor console.") - @telemetry_tool("read_console") - def read_console( - ctx: Context, - action: Annotated[Literal['get', 'clear'], "Get or clear the Unity Editor console."], - types: Annotated[list[Literal['error', 'warning', - 'log', 'all']], "Message types to get"] | None = None, - count: Annotated[int, "Max messages to return"] | None = None, - filter_text: Annotated[str, "Text filter for messages"] | None = None, - since_timestamp: Annotated[str, - "Get messages after this timestamp (ISO 8601)"] | None = None, - format: Annotated[Literal['plain', 'detailed', - 'json'], "Output format"] | None = None, - include_stacktrace: Annotated[bool, - "Include stack traces in output"] | None = None - ) -> dict[str, Any]: - ctx.info(f"Processing read_console: {action}") - # Set defaults if values are None - action = action if action 
is not None else 'get' - types = types if types is not None else ['error', 'warning', 'log'] - format = format if format is not None else 'detailed' - include_stacktrace = include_stacktrace if include_stacktrace is not None else True +@mcp_for_unity_tool( + description="Gets messages from or clears the Unity Editor console." +) +def read_console( + ctx: Context, + action: Annotated[Literal['get', 'clear'], "Get or clear the Unity Editor console."], + types: Annotated[list[Literal['error', 'warning', + 'log', 'all']], "Message types to get"] | None = None, + count: Annotated[int, "Max messages to return"] | None = None, + filter_text: Annotated[str, "Text filter for messages"] | None = None, + since_timestamp: Annotated[str, + "Get messages after this timestamp (ISO 8601)"] | None = None, + format: Annotated[Literal['plain', 'detailed', + 'json'], "Output format"] | None = None, + include_stacktrace: Annotated[bool, + "Include stack traces in output"] | None = None +) -> dict[str, Any]: + ctx.info(f"Processing read_console: {action}") + # Set defaults if values are None + action = action if action is not None else 'get' + types = types if types is not None else ['error', 'warning', 'log'] + format = format if format is not None else 'detailed' + include_stacktrace = include_stacktrace if include_stacktrace is not None else True - # Normalize action if it's a string - if isinstance(action, str): - action = action.lower() + # Normalize action if it's a string + if isinstance(action, str): + action = action.lower() - # Coerce count defensively (string/float -> int) - def _coerce_int(value, default=None): - if value is None: + # Coerce count defensively (string/float -> int) + def _coerce_int(value, default=None): + if value is None: + return default + try: + if isinstance(value, bool): return default - try: - if isinstance(value, bool): - return default - if isinstance(value, int): - return int(value) - s = str(value).strip() - if s.lower() in ("", "none", "null"): - return default - return int(float(s)) - except Exception: + if isinstance(value, int): + return int(value) + s = str(value).strip() + if s.lower() in ("", "none", "null"): return default + return int(float(s)) + except Exception: + return default - count = _coerce_int(count) + count = _coerce_int(count) - # Prepare parameters for the C# handler - params_dict = { - "action": action, - "types": types, - "count": count, - "filterText": filter_text, - "sinceTimestamp": since_timestamp, - "format": format.lower() if isinstance(format, str) else format, - "includeStacktrace": include_stacktrace - } + # Prepare parameters for the C# handler + params_dict = { + "action": action, + "types": types, + "count": count, + "filterText": filter_text, + "sinceTimestamp": since_timestamp, + "format": format.lower() if isinstance(format, str) else format, + "includeStacktrace": include_stacktrace + } - # Remove None values unless it's 'count' (as None might mean 'all') - params_dict = {k: v for k, v in params_dict.items() - if v is not None or k == 'count'} + # Remove None values unless it's 'count' (as None might mean 'all') + params_dict = {k: v for k, v in params_dict.items() + if v is not None or k == 'count'} - # Add count back if it was None, explicitly sending null might be important for C# logic - if 'count' not in params_dict: - params_dict['count'] = None + # Add count back if it was None, explicitly sending null might be important for C# logic + if 'count' not in params_dict: + params_dict['count'] = None - # Use centralized retry helper 
- resp = send_command_with_retry("read_console", params_dict) - if isinstance(resp, dict) and resp.get("success") and not include_stacktrace: - # Strip stacktrace fields from returned lines if present - try: - lines = resp.get("data", {}).get("lines", []) - for line in lines: - if isinstance(line, dict) and "stacktrace" in line: - line.pop("stacktrace", None) - except Exception: - pass - return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + # Use centralized retry helper + resp = send_command_with_retry("read_console", params_dict) + if isinstance(resp, dict) and resp.get("success") and not include_stacktrace: + # Strip stacktrace fields from returned lines if present + try: + lines = resp.get("data", {}).get("lines", []) + for line in lines: + if isinstance(line, dict) and "stacktrace" in line: + line.pop("stacktrace", None) + except Exception: + pass + return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} From 31ce85e70be976d3e4aec1a28591c7406930e809 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 18:57:50 -0400 Subject: [PATCH 04/30] Convert script_apply_edits tool --- .../src/tools/manage_script_edits.py | 968 ------------------ .../src/tools/script_apply_edits.py | 966 +++++++++++++++++ 2 files changed, 966 insertions(+), 968 deletions(-) delete mode 100644 UnityMcpBridge/UnityMcpServer~/src/tools/manage_script_edits.py create mode 100644 UnityMcpBridge/UnityMcpServer~/src/tools/script_apply_edits.py diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script_edits.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script_edits.py deleted file mode 100644 index 261eb502..00000000 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script_edits.py +++ /dev/null @@ -1,968 +0,0 @@ -import base64 -import hashlib -import re -from typing import Annotated, Any - -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool - -from unity_connection import send_command_with_retry - - -def _apply_edits_locally(original_text: str, edits: list[dict[str, Any]]) -> str: - text = original_text - for edit in edits or []: - op = ( - (edit.get("op") - or edit.get("operation") - or edit.get("type") - or edit.get("mode") - or "") - .strip() - .lower() - ) - - if not op: - allowed = "anchor_insert, prepend, append, replace_range, regex_replace" - raise RuntimeError( - f"op is required; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation)." 
- ) - - if op == "prepend": - prepend_text = edit.get("text", "") - text = (prepend_text if prepend_text.endswith( - "\n") else prepend_text + "\n") + text - elif op == "append": - append_text = edit.get("text", "") - if not text.endswith("\n"): - text += "\n" - text += append_text - if not text.endswith("\n"): - text += "\n" - elif op == "anchor_insert": - anchor = edit.get("anchor", "") - position = (edit.get("position") or "before").lower() - insert_text = edit.get("text", "") - flags = re.MULTILINE | ( - re.IGNORECASE if edit.get("ignore_case") else 0) - - # Find the best match using improved heuristics - match = _find_best_anchor_match( - anchor, text, flags, bool(edit.get("prefer_last", True))) - if not match: - if edit.get("allow_noop", True): - continue - raise RuntimeError(f"anchor not found: {anchor}") - idx = match.start() if position == "before" else match.end() - text = text[:idx] + insert_text + text[idx:] - elif op == "replace_range": - start_line = int(edit.get("startLine", 1)) - start_col = int(edit.get("startCol", 1)) - end_line = int(edit.get("endLine", start_line)) - end_col = int(edit.get("endCol", 1)) - replacement = edit.get("text", "") - lines = text.splitlines(keepends=True) - max_line = len(lines) + 1 # 1-based, exclusive end - if (start_line < 1 or end_line < start_line or end_line > max_line - or start_col < 1 or end_col < 1): - raise RuntimeError("replace_range out of bounds") - - def index_of(line: int, col: int) -> int: - if line <= len(lines): - return sum(len(l) for l in lines[: line - 1]) + (col - 1) - return sum(len(l) for l in lines) - a = index_of(start_line, start_col) - b = index_of(end_line, end_col) - text = text[:a] + replacement + text[b:] - elif op == "regex_replace": - pattern = edit.get("pattern", "") - repl = edit.get("replacement", "") - # Translate $n backrefs (our input) to Python \g - repl_py = re.sub(r"\$(\d+)", r"\\g<\1>", repl) - count = int(edit.get("count", 0)) # 0 = replace all - flags = re.MULTILINE - if edit.get("ignore_case"): - flags |= re.IGNORECASE - text = re.sub(pattern, repl_py, text, count=count, flags=flags) - else: - allowed = "anchor_insert, prepend, append, replace_range, regex_replace" - raise RuntimeError( - f"unknown edit op: {op}; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation).") - return text - - -def _find_best_anchor_match(pattern: str, text: str, flags: int, prefer_last: bool = True): - """ - Find the best anchor match using improved heuristics. - - For patterns like \\s*}\\s*$ that are meant to find class-ending braces, - this function uses heuristics to choose the most semantically appropriate match: - - 1. If prefer_last=True, prefer the last match (common for class-end insertions) - 2. Use indentation levels to distinguish class vs method braces - 3. 
Consider context to avoid matches inside strings/comments - - Args: - pattern: Regex pattern to search for - text: Text to search in - flags: Regex flags - prefer_last: If True, prefer the last match over the first - - Returns: - Match object of the best match, or None if no match found - """ - - # Find all matches - matches = list(re.finditer(pattern, text, flags)) - if not matches: - return None - - # If only one match, return it - if len(matches) == 1: - return matches[0] - - # For patterns that look like they're trying to match closing braces at end of lines - is_closing_brace_pattern = '}' in pattern and ( - '$' in pattern or pattern.endswith(r'\s*')) - - if is_closing_brace_pattern and prefer_last: - # Use heuristics to find the best closing brace match - return _find_best_closing_brace_match(matches, text) - - # Default behavior: use last match if prefer_last, otherwise first match - return matches[-1] if prefer_last else matches[0] - - -def _find_best_closing_brace_match(matches, text: str): - """ - Find the best closing brace match using C# structure heuristics. - - Enhanced heuristics for scope-aware matching: - 1. Prefer matches with lower indentation (likely class-level) - 2. Prefer matches closer to end of file - 3. Avoid matches that seem to be inside method bodies - 4. For #endregion patterns, ensure class-level context - 5. Validate insertion point is at appropriate scope - - Args: - matches: List of regex match objects - text: The full text being searched - - Returns: - The best match object - """ - if not matches: - return None - - scored_matches = [] - lines = text.splitlines() - - for match in matches: - score = 0 - start_pos = match.start() - - # Find which line this match is on - lines_before = text[:start_pos].count('\n') - line_num = lines_before - - if line_num < len(lines): - line_content = lines[line_num] - - # Calculate indentation level (lower is better for class braces) - indentation = len(line_content) - len(line_content.lstrip()) - - # Prefer lower indentation (class braces are typically less indented than method braces) - # Max 20 points for indentation=0 - score += max(0, 20 - indentation) - - # Prefer matches closer to end of file (class closing braces are typically at the end) - distance_from_end = len(lines) - line_num - # More points for being closer to end - score += max(0, 10 - distance_from_end) - - # Look at surrounding context to avoid method braces - context_start = max(0, line_num - 3) - context_end = min(len(lines), line_num + 2) - context_lines = lines[context_start:context_end] - - # Penalize if this looks like it's inside a method (has method-like patterns above) - for context_line in context_lines: - if re.search(r'\b(void|public|private|protected)\s+\w+\s*\(', context_line): - score -= 5 # Penalty for being near method signatures - - # Bonus if this looks like a class-ending brace (very minimal indentation and near EOF) - if indentation <= 4 and distance_from_end <= 3: - score += 15 # Bonus for likely class-ending brace - - scored_matches.append((score, match)) - - # Return the match with the highest score - scored_matches.sort(key=lambda x: x[0], reverse=True) - best_match = scored_matches[0][1] - - return best_match - - -def _infer_class_name(script_name: str) -> str: - # Default to script name as class name (common Unity pattern) - return (script_name or "").strip() - - -def _extract_code_after(keyword: str, request: str) -> str: - # Deprecated with NL removal; retained as no-op for compatibility - idx = request.lower().find(keyword) - 
if idx >= 0: - return request[idx + len(keyword):].strip() - return "" -# Removed _is_structurally_balanced - validation now handled by C# side using Unity's compiler services - - -def _normalize_script_locator(name: str, path: str) -> tuple[str, str]: - """Best-effort normalization of script "name" and "path". - - Accepts any of: - - name = "SmartReach", path = "Assets/Scripts/Interaction" - - name = "SmartReach.cs", path = "Assets/Scripts/Interaction" - - name = "Assets/Scripts/Interaction/SmartReach.cs", path = "" - - path = "Assets/Scripts/Interaction/SmartReach.cs" (name empty) - - name or path using uri prefixes: unity://path/..., file://... - - accidental duplicates like "Assets/.../SmartReach.cs/SmartReach.cs" - - Returns (name_without_extension, directory_path_under_Assets). - """ - n = (name or "").strip() - p = (path or "").strip() - - def strip_prefix(s: str) -> str: - if s.startswith("unity://path/"): - return s[len("unity://path/"):] - if s.startswith("file://"): - return s[len("file://"):] - return s - - def collapse_duplicate_tail(s: str) -> str: - # Collapse trailing "/X.cs/X.cs" to "/X.cs" - parts = s.split("/") - if len(parts) >= 2 and parts[-1] == parts[-2]: - parts = parts[:-1] - return "/".join(parts) - - # Prefer a full path if provided in either field - candidate = "" - for v in (n, p): - v2 = strip_prefix(v) - if v2.endswith(".cs") or v2.startswith("Assets/"): - candidate = v2 - break - - if candidate: - candidate = collapse_duplicate_tail(candidate) - # If a directory was passed in path and file in name, join them - if not candidate.endswith(".cs") and n.endswith(".cs"): - v2 = strip_prefix(n) - candidate = (candidate.rstrip("/") + "/" + v2.split("/")[-1]) - if candidate.endswith(".cs"): - parts = candidate.split("/") - file_name = parts[-1] - dir_path = "/".join(parts[:-1]) if len(parts) > 1 else "Assets" - base = file_name[:- - 3] if file_name.lower().endswith(".cs") else file_name - return base, dir_path - - # Fall back: remove extension from name if present and return given path - base_name = n[:-3] if n.lower().endswith(".cs") else n - return base_name, (p or "Assets") - - -def _with_norm(resp: dict[str, Any] | Any, edits: list[dict[str, Any]], routing: str | None = None) -> dict[str, Any] | Any: - if not isinstance(resp, dict): - return resp - data = resp.setdefault("data", {}) - data.setdefault("normalizedEdits", edits) - if routing: - data["routing"] = routing - return resp - - -def _err(code: str, message: str, *, expected: dict[str, Any] | None = None, rewrite: dict[str, Any] | None = None, - normalized: list[dict[str, Any]] | None = None, routing: str | None = None, extra: dict[str, Any] | None = None) -> dict[str, Any]: - payload: dict[str, Any] = {"success": False, - "code": code, "message": message} - data: dict[str, Any] = {} - if expected: - data["expected"] = expected - if rewrite: - data["rewrite_suggestion"] = rewrite - if normalized is not None: - data["normalizedEdits"] = normalized - if routing: - data["routing"] = routing - if extra: - data.update(extra) - if data: - payload["data"] = data - return payload - -# Natural-language parsing removed; clients should send structured edits. - - -def register_manage_script_edits_tools(mcp: FastMCP): - @mcp.tool(name="script_apply_edits", description=( - """Structured C# edits (methods/classes) with safer boundaries - prefer this over raw text. 
- Best practices: - - Prefer anchor_* ops for pattern-based insert/replace near stable markers - - Use replace_method/delete_method for whole-method changes (keeps signatures balanced) - - Avoid whole-file regex deletes; validators will guard unbalanced braces - - For tail insertions, prefer anchor/regex_replace on final brace (class closing) - - Pass options.validate='standard' for structural checks; 'relaxed' for interior-only edits - Canonical fields (use these exact keys): - - op: replace_method | insert_method | delete_method | anchor_insert | anchor_delete | anchor_replace - - className: string (defaults to 'name' if omitted on method/class ops) - - methodName: string (required for replace_method, delete_method) - - replacement: string (required for replace_method, insert_method) - - position: start | end | after | before (insert_method only) - - afterMethodName / beforeMethodName: string (required when position='after'/'before') - - anchor: regex string (for anchor_* ops) - - text: string (for anchor_insert/anchor_replace) - Examples: - 1) Replace a method: - { - "name": "SmartReach", - "path": "Assets/Scripts/Interaction", - "edits": [ - { - "op": "replace_method", - "className": "SmartReach", - "methodName": "HasTarget", - "replacement": "public bool HasTarget(){ return currentTarget!=null; }" - } - ], - "options": {"validate": "standard", "refresh": "immediate"} - } - "2) Insert a method after another: - { - "name": "SmartReach", - "path": "Assets/Scripts/Interaction", - "edits": [ - { - "op": "insert_method", - "className": "SmartReach", - "replacement": "public void PrintSeries(){ Debug.Log(seriesName); }", - "position": "after", - "afterMethodName": "GetCurrentTarget" - } - ], - } - ]""" - )) - @telemetry_tool("script_apply_edits") - def script_apply_edits( - ctx: Context, - name: Annotated[str, "Name of the script to edit"], - path: Annotated[str, "Path to the script to edit under Assets/ directory"], - edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script"], - options: Annotated[dict[str, Any], - "Options for the script edit"] | None = None, - script_type: Annotated[str, - "Type of the script to edit"] = "MonoBehaviour", - namespace: Annotated[str, - "Namespace of the script to edit"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing script_apply_edits: {name}") - # Normalize locator first so downstream calls target the correct script file. 
- name, path = _normalize_script_locator(name, path) - # Normalize unsupported or aliased ops to known structured/text paths - - def _unwrap_and_alias(edit: dict[str, Any]) -> dict[str, Any]: - # Unwrap single-key wrappers like {"replace_method": {...}} - for wrapper_key in ( - "replace_method", "insert_method", "delete_method", - "replace_class", "delete_class", - "anchor_insert", "anchor_replace", "anchor_delete", - ): - if wrapper_key in edit and isinstance(edit[wrapper_key], dict): - inner = dict(edit[wrapper_key]) - inner["op"] = wrapper_key - edit = inner - break - - e = dict(edit) - op = (e.get("op") or e.get("operation") or e.get( - "type") or e.get("mode") or "").strip().lower() - if op: - e["op"] = op - - # Common field aliases - if "class_name" in e and "className" not in e: - e["className"] = e.pop("class_name") - if "class" in e and "className" not in e: - e["className"] = e.pop("class") - if "method_name" in e and "methodName" not in e: - e["methodName"] = e.pop("method_name") - # Some clients use a generic 'target' for method name - if "target" in e and "methodName" not in e: - e["methodName"] = e.pop("target") - if "method" in e and "methodName" not in e: - e["methodName"] = e.pop("method") - if "new_content" in e and "replacement" not in e: - e["replacement"] = e.pop("new_content") - if "newMethod" in e and "replacement" not in e: - e["replacement"] = e.pop("newMethod") - if "new_method" in e and "replacement" not in e: - e["replacement"] = e.pop("new_method") - if "content" in e and "replacement" not in e: - e["replacement"] = e.pop("content") - if "after" in e and "afterMethodName" not in e: - e["afterMethodName"] = e.pop("after") - if "after_method" in e and "afterMethodName" not in e: - e["afterMethodName"] = e.pop("after_method") - if "before" in e and "beforeMethodName" not in e: - e["beforeMethodName"] = e.pop("before") - if "before_method" in e and "beforeMethodName" not in e: - e["beforeMethodName"] = e.pop("before_method") - # anchor_method → before/after based on position (default after) - if "anchor_method" in e: - anchor = e.pop("anchor_method") - pos = (e.get("position") or "after").strip().lower() - if pos == "before" and "beforeMethodName" not in e: - e["beforeMethodName"] = anchor - elif "afterMethodName" not in e: - e["afterMethodName"] = anchor - if "anchorText" in e and "anchor" not in e: - e["anchor"] = e.pop("anchorText") - if "pattern" in e and "anchor" not in e and e.get("op") and e["op"].startswith("anchor_"): - e["anchor"] = e.pop("pattern") - if "newText" in e and "text" not in e: - e["text"] = e.pop("newText") - - # CI compatibility (T‑A/T‑E): - # Accept method-anchored anchor_insert and upgrade to insert_method - # Example incoming shape: - # {"op":"anchor_insert","afterMethodName":"GetCurrentTarget","text":"..."} - if ( - e.get("op") == "anchor_insert" - and not e.get("anchor") - and (e.get("afterMethodName") or e.get("beforeMethodName")) - ): - e["op"] = "insert_method" - if "replacement" not in e: - e["replacement"] = e.get("text", "") - - # LSP-like range edit -> replace_range - if "range" in e and isinstance(e["range"], dict): - rng = e.pop("range") - start = rng.get("start", {}) - end = rng.get("end", {}) - # Convert 0-based to 1-based line/col - e["op"] = "replace_range" - e["startLine"] = int(start.get("line", 0)) + 1 - e["startCol"] = int(start.get("character", 0)) + 1 - e["endLine"] = int(end.get("line", 0)) + 1 - e["endCol"] = int(end.get("character", 0)) + 1 - if "newText" in edit and "text" not in e: - e["text"] = 
edit.get("newText", "") - return e - - normalized_edits: list[dict[str, Any]] = [] - for raw in edits or []: - e = _unwrap_and_alias(raw) - op = (e.get("op") or e.get("operation") or e.get( - "type") or e.get("mode") or "").strip().lower() - - # Default className to script name if missing on structured method/class ops - if op in ("replace_class", "delete_class", "replace_method", "delete_method", "insert_method") and not e.get("className"): - e["className"] = name - - # Map common aliases for text ops - if op in ("text_replace",): - e["op"] = "replace_range" - normalized_edits.append(e) - continue - if op in ("regex_delete",): - e["op"] = "regex_replace" - e.setdefault("text", "") - normalized_edits.append(e) - continue - if op == "regex_replace" and ("replacement" not in e): - if "text" in e: - e["replacement"] = e.get("text", "") - elif "insert" in e or "content" in e: - e["replacement"] = e.get( - "insert") or e.get("content") or "" - if op == "anchor_insert" and not (e.get("text") or e.get("insert") or e.get("content") or e.get("replacement")): - e["op"] = "anchor_delete" - normalized_edits.append(e) - continue - normalized_edits.append(e) - - edits = normalized_edits - normalized_for_echo = edits - - # Validate required fields and produce machine-parsable hints - def error_with_hint(message: str, expected: dict[str, Any], suggestion: dict[str, Any]) -> dict[str, Any]: - return _err("missing_field", message, expected=expected, rewrite=suggestion, normalized=normalized_for_echo) - - for e in edits or []: - op = e.get("op", "") - if op == "replace_method": - if not e.get("methodName"): - return error_with_hint( - "replace_method requires 'methodName'.", - {"op": "replace_method", "required": [ - "className", "methodName", "replacement"]}, - {"edits[0].methodName": "HasTarget"} - ) - if not (e.get("replacement") or e.get("text")): - return error_with_hint( - "replace_method requires 'replacement' (inline or base64).", - {"op": "replace_method", "required": [ - "className", "methodName", "replacement"]}, - {"edits[0].replacement": "public bool X(){ return true; }"} - ) - elif op == "insert_method": - if not (e.get("replacement") or e.get("text")): - return error_with_hint( - "insert_method requires a non-empty 'replacement'.", - {"op": "insert_method", "required": ["className", "replacement"], "position": { - "after_requires": "afterMethodName", "before_requires": "beforeMethodName"}}, - {"edits[0].replacement": "public void PrintSeries(){ Debug.Log(\"1,2,3\"); }"} - ) - pos = (e.get("position") or "").lower() - if pos == "after" and not e.get("afterMethodName"): - return error_with_hint( - "insert_method with position='after' requires 'afterMethodName'.", - {"op": "insert_method", "position": { - "after_requires": "afterMethodName"}}, - {"edits[0].afterMethodName": "GetCurrentTarget"} - ) - if pos == "before" and not e.get("beforeMethodName"): - return error_with_hint( - "insert_method with position='before' requires 'beforeMethodName'.", - {"op": "insert_method", "position": { - "before_requires": "beforeMethodName"}}, - {"edits[0].beforeMethodName": "GetCurrentTarget"} - ) - elif op == "delete_method": - if not e.get("methodName"): - return error_with_hint( - "delete_method requires 'methodName'.", - {"op": "delete_method", "required": [ - "className", "methodName"]}, - {"edits[0].methodName": "PrintSeries"} - ) - elif op in ("anchor_insert", "anchor_replace", "anchor_delete"): - if not e.get("anchor"): - return error_with_hint( - f"{op} requires 'anchor' (regex).", - {"op": op, 
"required": ["anchor"]}, - {"edits[0].anchor": "(?m)^\\s*public\\s+bool\\s+HasTarget\\s*\\("} - ) - if op in ("anchor_insert", "anchor_replace") and not (e.get("text") or e.get("replacement")): - return error_with_hint( - f"{op} requires 'text'.", - {"op": op, "required": ["anchor", "text"]}, - {"edits[0].text": "/* comment */\n"} - ) - - # Decide routing: structured vs text vs mixed - STRUCT = {"replace_class", "delete_class", "replace_method", "delete_method", - "insert_method", "anchor_delete", "anchor_replace", "anchor_insert"} - TEXT = {"prepend", "append", "replace_range", "regex_replace"} - ops_set = {(e.get("op") or "").lower() for e in edits or []} - all_struct = ops_set.issubset(STRUCT) - all_text = ops_set.issubset(TEXT) - mixed = not (all_struct or all_text) - - # If everything is structured (method/class/anchor ops), forward directly to Unity's structured editor. - if all_struct: - opts2 = dict(options or {}) - # For structured edits, prefer immediate refresh to avoid missed reloads when Editor is unfocused - opts2.setdefault("refresh", "immediate") - params_struct: dict[str, Any] = { - "action": "edit", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - "edits": edits, - "options": opts2, - } - resp_struct = send_command_with_retry( - "manage_script", params_struct) - if isinstance(resp_struct, dict) and resp_struct.get("success"): - pass # Optional sentinel reload removed (deprecated) - return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="structured") - - # 1) read from Unity - read_resp = send_command_with_retry("manage_script", { - "action": "read", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - }) - if not isinstance(read_resp, dict) or not read_resp.get("success"): - return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)} - - data = read_resp.get("data") or read_resp.get( - "result", {}).get("data") or {} - contents = data.get("contents") - if contents is None and data.get("contentsEncoded") and data.get("encodedContents"): - contents = base64.b64decode( - data["encodedContents"]).decode("utf-8") - if contents is None: - return {"success": False, "message": "No contents returned from Unity read."} - - # Optional preview/dry-run: apply locally and return diff without writing - preview = bool((options or {}).get("preview")) - - # If we have a mixed batch (TEXT + STRUCT), apply text first with precondition, then structured - if mixed: - text_edits = [e for e in edits or [] if ( - e.get("op") or "").lower() in TEXT] - struct_edits = [e for e in edits or [] if ( - e.get("op") or "").lower() in STRUCT] - try: - base_text = contents - - def line_col_from_index(idx: int) -> tuple[int, int]: - line = base_text.count("\n", 0, idx) + 1 - last_nl = base_text.rfind("\n", 0, idx) - col = (idx - (last_nl + 1)) + \ - 1 if last_nl >= 0 else idx + 1 - return line, col - - at_edits: list[dict[str, Any]] = [] - for e in text_edits: - opx = (e.get("op") or e.get("operation") or e.get( - "type") or e.get("mode") or "").strip().lower() - text_field = e.get("text") or e.get("insert") or e.get( - "content") or e.get("replacement") or "" - if opx == "anchor_insert": - anchor = e.get("anchor") or "" - position = (e.get("position") or "after").lower() - flags = re.MULTILINE | ( - re.IGNORECASE if e.get("ignore_case") else 0) - try: - # Use improved anchor matching logic - m = 
_find_best_anchor_match( - anchor, base_text, flags, prefer_last=True) - except Exception as ex: - return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="mixed/text-first") - if not m: - return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="mixed/text-first") - idx = m.start() if position == "before" else m.end() - # Normalize insertion to avoid jammed methods - text_field_norm = text_field - if not text_field_norm.startswith("\n"): - text_field_norm = "\n" + text_field_norm - if not text_field_norm.endswith("\n"): - text_field_norm = text_field_norm + "\n" - sl, sc = line_col_from_index(idx) - at_edits.append( - {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field_norm}) - # do not mutate base_text when building atomic spans - elif opx == "replace_range": - if all(k in e for k in ("startLine", "startCol", "endLine", "endCol")): - at_edits.append({ - "startLine": int(e.get("startLine", 1)), - "startCol": int(e.get("startCol", 1)), - "endLine": int(e.get("endLine", 1)), - "endCol": int(e.get("endCol", 1)), - "newText": text_field - }) - else: - return _with_norm(_err("missing_field", "replace_range requires startLine/startCol/endLine/endCol", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first") - elif opx == "regex_replace": - pattern = e.get("pattern") or "" - try: - regex_obj = re.compile(pattern, re.MULTILINE | ( - re.IGNORECASE if e.get("ignore_case") else 0)) - except Exception as ex: - return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="mixed/text-first") - m = regex_obj.search(base_text) - if not m: - continue - # Expand $1, $2... 
in replacement using this match - - def _expand_dollars(rep: str, _m=m) -> str: - return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep) - repl = _expand_dollars(text_field) - sl, sc = line_col_from_index(m.start()) - el, ec = line_col_from_index(m.end()) - at_edits.append( - {"startLine": sl, "startCol": sc, "endLine": el, "endCol": ec, "newText": repl}) - # do not mutate base_text when building atomic spans - elif opx in ("prepend", "append"): - if opx == "prepend": - sl, sc = 1, 1 - at_edits.append( - {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field}) - # prepend can be applied atomically without local mutation - else: - # Insert at true EOF position (handles both \n and \r\n correctly) - eof_idx = len(base_text) - sl, sc = line_col_from_index(eof_idx) - new_text = ("\n" if not base_text.endswith( - "\n") else "") + text_field - at_edits.append( - {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": new_text}) - # do not mutate base_text when building atomic spans - else: - return _with_norm(_err("unknown_op", f"Unsupported text edit op: {opx}", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first") - - sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest() - if at_edits: - params_text: dict[str, Any] = { - "action": "apply_text_edits", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - "edits": at_edits, - "precondition_sha256": sha, - "options": {"refresh": (options or {}).get("refresh", "debounced"), "validate": (options or {}).get("validate", "standard"), "applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential"))} - } - resp_text = send_command_with_retry( - "manage_script", params_text) - if not (isinstance(resp_text, dict) and resp_text.get("success")): - return _with_norm(resp_text if isinstance(resp_text, dict) else {"success": False, "message": str(resp_text)}, normalized_for_echo, routing="mixed/text-first") - # Optional sentinel reload removed (deprecated) - except Exception as e: - return _with_norm({"success": False, "message": f"Text edit conversion failed: {e}"}, normalized_for_echo, routing="mixed/text-first") - - if struct_edits: - opts2 = dict(options or {}) - # Prefer debounced background refresh unless explicitly overridden - opts2.setdefault("refresh", "debounced") - params_struct: dict[str, Any] = { - "action": "edit", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - "edits": struct_edits, - "options": opts2 - } - resp_struct = send_command_with_retry( - "manage_script", params_struct) - if isinstance(resp_struct, dict) and resp_struct.get("success"): - pass # Optional sentinel reload removed (deprecated) - return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="mixed/text-first") - - return _with_norm({"success": True, "message": "Applied text edits (no structured ops)"}, normalized_for_echo, routing="mixed/text-first") - - # If the edits are text-ops, prefer sending them to Unity's apply_text_edits with precondition - # so header guards and validation run on the C# side. - # Supported conversions: anchor_insert, replace_range, regex_replace (first match only). 
- text_ops = {(e.get("op") or e.get("operation") or e.get("type") or e.get( - "mode") or "").strip().lower() for e in (edits or [])} - structured_kinds = {"replace_class", "delete_class", - "replace_method", "delete_method", "insert_method", "anchor_insert"} - if not text_ops.issubset(structured_kinds): - # Convert to apply_text_edits payload - try: - base_text = contents - - def line_col_from_index(idx: int) -> tuple[int, int]: - # 1-based line/col against base buffer - line = base_text.count("\n", 0, idx) + 1 - last_nl = base_text.rfind("\n", 0, idx) - col = (idx - (last_nl + 1)) + \ - 1 if last_nl >= 0 else idx + 1 - return line, col - - at_edits: list[dict[str, Any]] = [] - import re as _re - for e in edits or []: - op = (e.get("op") or e.get("operation") or e.get( - "type") or e.get("mode") or "").strip().lower() - # aliasing for text field - text_field = e.get("text") or e.get( - "insert") or e.get("content") or "" - if op == "anchor_insert": - anchor = e.get("anchor") or "" - position = (e.get("position") or "after").lower() - # Use improved anchor matching logic with helpful errors, honoring ignore_case - try: - flags = re.MULTILINE | ( - re.IGNORECASE if e.get("ignore_case") else 0) - m = _find_best_anchor_match( - anchor, base_text, flags, prefer_last=True) - except Exception as ex: - return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="text") - if not m: - return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="text") - idx = m.start() if position == "before" else m.end() - # Normalize insertion newlines - if text_field and not text_field.startswith("\n"): - text_field = "\n" + text_field - if text_field and not text_field.endswith("\n"): - text_field = text_field + "\n" - sl, sc = line_col_from_index(idx) - at_edits.append({ - "startLine": sl, - "startCol": sc, - "endLine": sl, - "endCol": sc, - "newText": text_field or "" - }) - # Do not mutate base buffer when building an atomic batch - elif op == "replace_range": - # Directly forward if already in line/col form - if "startLine" in e: - at_edits.append({ - "startLine": int(e.get("startLine", 1)), - "startCol": int(e.get("startCol", 1)), - "endLine": int(e.get("endLine", 1)), - "endCol": int(e.get("endCol", 1)), - "newText": text_field - }) - else: - # If only indices provided, skip (we don't support index-based here) - return _with_norm({"success": False, "code": "missing_field", "message": "replace_range requires startLine/startCol/endLine/endCol"}, normalized_for_echo, routing="text") - elif op == "regex_replace": - pattern = e.get("pattern") or "" - repl = text_field - flags = re.MULTILINE | ( - re.IGNORECASE if e.get("ignore_case") else 0) - # Early compile for clearer error messages - try: - regex_obj = re.compile(pattern, flags) - except Exception as ex: - return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="text") - # Use smart anchor matching for consistent behavior with anchor_insert - m = _find_best_anchor_match( - pattern, base_text, flags, prefer_last=True) - if not m: - continue - # Expand $1, $2... 
backrefs in replacement using the first match (consistent with mixed-path behavior) - - def _expand_dollars(rep: str, _m=m) -> str: - return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep) - repl_expanded = _expand_dollars(repl) - # Let C# side handle validation using Unity's built-in compiler services - sl, sc = line_col_from_index(m.start()) - el, ec = line_col_from_index(m.end()) - at_edits.append({ - "startLine": sl, - "startCol": sc, - "endLine": el, - "endCol": ec, - "newText": repl_expanded - }) - # Do not mutate base buffer when building an atomic batch - else: - return _with_norm({"success": False, "code": "unsupported_op", "message": f"Unsupported text edit op for server-side apply_text_edits: {op}"}, normalized_for_echo, routing="text") - - if not at_edits: - return _with_norm({"success": False, "code": "no_spans", "message": "No applicable text edit spans computed (anchor not found or zero-length)."}, normalized_for_echo, routing="text") - - sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest() - params: dict[str, Any] = { - "action": "apply_text_edits", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - "edits": at_edits, - "precondition_sha256": sha, - "options": { - "refresh": (options or {}).get("refresh", "debounced"), - "validate": (options or {}).get("validate", "standard"), - "applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential")) - } - } - resp = send_command_with_retry("manage_script", params) - if isinstance(resp, dict) and resp.get("success"): - pass # Optional sentinel reload removed (deprecated) - return _with_norm( - resp if isinstance(resp, dict) else { - "success": False, "message": str(resp)}, - normalized_for_echo, - routing="text" - ) - except Exception as e: - return _with_norm({"success": False, "code": "conversion_failed", "message": f"Edit conversion failed: {e}"}, normalized_for_echo, routing="text") - - # For regex_replace, honor preview consistently: if preview=true, always return diff without writing. - # If confirm=false (default) and preview not requested, return diff and instruct confirm=true to apply. - if "regex_replace" in text_ops and (preview or not (options or {}).get("confirm")): - try: - preview_text = _apply_edits_locally(contents, edits) - import difflib - diff = list(difflib.unified_diff(contents.splitlines( - ), preview_text.splitlines(), fromfile="before", tofile="after", n=2)) - if len(diff) > 800: - diff = diff[:800] + ["... 
(diff truncated) ..."] - if preview: - return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}} - return _with_norm({"success": False, "message": "Preview diff; set options.confirm=true to apply.", "data": {"diff": "\n".join(diff)}}, normalized_for_echo, routing="text") - except Exception as e: - return _with_norm({"success": False, "code": "preview_failed", "message": f"Preview failed: {e}"}, normalized_for_echo, routing="text") - # 2) apply edits locally (only if not text-ops) - try: - new_contents = _apply_edits_locally(contents, edits) - except Exception as e: - return {"success": False, "message": f"Edit application failed: {e}"} - - # Short-circuit no-op edits to avoid false "applied" reports downstream - if new_contents == contents: - return _with_norm({ - "success": True, - "message": "No-op: contents unchanged", - "data": {"no_op": True, "evidence": {"reason": "identical_content"}} - }, normalized_for_echo, routing="text") - - if preview: - # Produce a compact unified diff limited to small context - import difflib - a = contents.splitlines() - b = new_contents.splitlines() - diff = list(difflib.unified_diff( - a, b, fromfile="before", tofile="after", n=3)) - # Limit diff size to keep responses small - if len(diff) > 2000: - diff = diff[:2000] + ["... (diff truncated) ..."] - return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}} - - # 3) update to Unity - # Default refresh/validate for natural usage on text path as well - options = dict(options or {}) - options.setdefault("validate", "standard") - options.setdefault("refresh", "debounced") - - # Compute the SHA of the current file contents for the precondition - old_lines = contents.splitlines(keepends=True) - end_line = len(old_lines) + 1 # 1-based exclusive end - sha = hashlib.sha256(contents.encode("utf-8")).hexdigest() - - # Apply a whole-file text edit rather than the deprecated 'update' action - params = { - "action": "apply_text_edits", - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - "edits": [ - { - "startLine": 1, - "startCol": 1, - "endLine": end_line, - "endCol": 1, - "newText": new_contents, - } - ], - "precondition_sha256": sha, - "options": options or {"validate": "standard", "refresh": "debounced"}, - } - - write_resp = send_command_with_retry("manage_script", params) - if isinstance(write_resp, dict) and write_resp.get("success"): - pass # Optional sentinel reload removed (deprecated) - return _with_norm( - write_resp if isinstance(write_resp, dict) - else {"success": False, "message": str(write_resp)}, - normalized_for_echo, - routing="text", - ) diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/script_apply_edits.py b/UnityMcpBridge/UnityMcpServer~/src/tools/script_apply_edits.py new file mode 100644 index 00000000..59fbbc61 --- /dev/null +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/script_apply_edits.py @@ -0,0 +1,966 @@ +import base64 +import hashlib +import re +from typing import Annotated, Any + +from mcp.server.fastmcp import Context + +from registry import mcp_for_unity_tool +from unity_connection import send_command_with_retry + + +def _apply_edits_locally(original_text: str, edits: list[dict[str, Any]]) -> str: + text = original_text + for edit in edits or []: + op = ( + (edit.get("op") + or edit.get("operation") + or edit.get("type") + or edit.get("mode") + or "") + .strip() + .lower() + ) + + if 
not op: + allowed = "anchor_insert, prepend, append, replace_range, regex_replace" + raise RuntimeError( + f"op is required; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation)." + ) + + if op == "prepend": + prepend_text = edit.get("text", "") + text = (prepend_text if prepend_text.endswith( + "\n") else prepend_text + "\n") + text + elif op == "append": + append_text = edit.get("text", "") + if not text.endswith("\n"): + text += "\n" + text += append_text + if not text.endswith("\n"): + text += "\n" + elif op == "anchor_insert": + anchor = edit.get("anchor", "") + position = (edit.get("position") or "before").lower() + insert_text = edit.get("text", "") + flags = re.MULTILINE | ( + re.IGNORECASE if edit.get("ignore_case") else 0) + + # Find the best match using improved heuristics + match = _find_best_anchor_match( + anchor, text, flags, bool(edit.get("prefer_last", True))) + if not match: + if edit.get("allow_noop", True): + continue + raise RuntimeError(f"anchor not found: {anchor}") + idx = match.start() if position == "before" else match.end() + text = text[:idx] + insert_text + text[idx:] + elif op == "replace_range": + start_line = int(edit.get("startLine", 1)) + start_col = int(edit.get("startCol", 1)) + end_line = int(edit.get("endLine", start_line)) + end_col = int(edit.get("endCol", 1)) + replacement = edit.get("text", "") + lines = text.splitlines(keepends=True) + max_line = len(lines) + 1 # 1-based, exclusive end + if (start_line < 1 or end_line < start_line or end_line > max_line + or start_col < 1 or end_col < 1): + raise RuntimeError("replace_range out of bounds") + + def index_of(line: int, col: int) -> int: + if line <= len(lines): + return sum(len(l) for l in lines[: line - 1]) + (col - 1) + return sum(len(l) for l in lines) + a = index_of(start_line, start_col) + b = index_of(end_line, end_col) + text = text[:a] + replacement + text[b:] + elif op == "regex_replace": + pattern = edit.get("pattern", "") + repl = edit.get("replacement", "") + # Translate $n backrefs (our input) to Python \g + repl_py = re.sub(r"\$(\d+)", r"\\g<\1>", repl) + count = int(edit.get("count", 0)) # 0 = replace all + flags = re.MULTILINE + if edit.get("ignore_case"): + flags |= re.IGNORECASE + text = re.sub(pattern, repl_py, text, count=count, flags=flags) + else: + allowed = "anchor_insert, prepend, append, replace_range, regex_replace" + raise RuntimeError( + f"unknown edit op: {op}; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation).") + return text + + +def _find_best_anchor_match(pattern: str, text: str, flags: int, prefer_last: bool = True): + """ + Find the best anchor match using improved heuristics. + + For patterns like \\s*}\\s*$ that are meant to find class-ending braces, + this function uses heuristics to choose the most semantically appropriate match: + + 1. If prefer_last=True, prefer the last match (common for class-end insertions) + 2. Use indentation levels to distinguish class vs method braces + 3. 
Consider context to avoid matches inside strings/comments + + Args: + pattern: Regex pattern to search for + text: Text to search in + flags: Regex flags + prefer_last: If True, prefer the last match over the first + + Returns: + Match object of the best match, or None if no match found + """ + + # Find all matches + matches = list(re.finditer(pattern, text, flags)) + if not matches: + return None + + # If only one match, return it + if len(matches) == 1: + return matches[0] + + # For patterns that look like they're trying to match closing braces at end of lines + is_closing_brace_pattern = '}' in pattern and ( + '$' in pattern or pattern.endswith(r'\s*')) + + if is_closing_brace_pattern and prefer_last: + # Use heuristics to find the best closing brace match + return _find_best_closing_brace_match(matches, text) + + # Default behavior: use last match if prefer_last, otherwise first match + return matches[-1] if prefer_last else matches[0] + + +def _find_best_closing_brace_match(matches, text: str): + """ + Find the best closing brace match using C# structure heuristics. + + Enhanced heuristics for scope-aware matching: + 1. Prefer matches with lower indentation (likely class-level) + 2. Prefer matches closer to end of file + 3. Avoid matches that seem to be inside method bodies + 4. For #endregion patterns, ensure class-level context + 5. Validate insertion point is at appropriate scope + + Args: + matches: List of regex match objects + text: The full text being searched + + Returns: + The best match object + """ + if not matches: + return None + + scored_matches = [] + lines = text.splitlines() + + for match in matches: + score = 0 + start_pos = match.start() + + # Find which line this match is on + lines_before = text[:start_pos].count('\n') + line_num = lines_before + + if line_num < len(lines): + line_content = lines[line_num] + + # Calculate indentation level (lower is better for class braces) + indentation = len(line_content) - len(line_content.lstrip()) + + # Prefer lower indentation (class braces are typically less indented than method braces) + # Max 20 points for indentation=0 + score += max(0, 20 - indentation) + + # Prefer matches closer to end of file (class closing braces are typically at the end) + distance_from_end = len(lines) - line_num + # More points for being closer to end + score += max(0, 10 - distance_from_end) + + # Look at surrounding context to avoid method braces + context_start = max(0, line_num - 3) + context_end = min(len(lines), line_num + 2) + context_lines = lines[context_start:context_end] + + # Penalize if this looks like it's inside a method (has method-like patterns above) + for context_line in context_lines: + if re.search(r'\b(void|public|private|protected)\s+\w+\s*\(', context_line): + score -= 5 # Penalty for being near method signatures + + # Bonus if this looks like a class-ending brace (very minimal indentation and near EOF) + if indentation <= 4 and distance_from_end <= 3: + score += 15 # Bonus for likely class-ending brace + + scored_matches.append((score, match)) + + # Return the match with the highest score + scored_matches.sort(key=lambda x: x[0], reverse=True) + best_match = scored_matches[0][1] + + return best_match + + +def _infer_class_name(script_name: str) -> str: + # Default to script name as class name (common Unity pattern) + return (script_name or "").strip() + + +def _extract_code_after(keyword: str, request: str) -> str: + # Deprecated with NL removal; retained as no-op for compatibility + idx = request.lower().find(keyword) + 
if idx >= 0: + return request[idx + len(keyword):].strip() + return "" +# Removed _is_structurally_balanced - validation now handled by C# side using Unity's compiler services + + +def _normalize_script_locator(name: str, path: str) -> tuple[str, str]: + """Best-effort normalization of script "name" and "path". + + Accepts any of: + - name = "SmartReach", path = "Assets/Scripts/Interaction" + - name = "SmartReach.cs", path = "Assets/Scripts/Interaction" + - name = "Assets/Scripts/Interaction/SmartReach.cs", path = "" + - path = "Assets/Scripts/Interaction/SmartReach.cs" (name empty) + - name or path using uri prefixes: unity://path/..., file://... + - accidental duplicates like "Assets/.../SmartReach.cs/SmartReach.cs" + + Returns (name_without_extension, directory_path_under_Assets). + """ + n = (name or "").strip() + p = (path or "").strip() + + def strip_prefix(s: str) -> str: + if s.startswith("unity://path/"): + return s[len("unity://path/"):] + if s.startswith("file://"): + return s[len("file://"):] + return s + + def collapse_duplicate_tail(s: str) -> str: + # Collapse trailing "/X.cs/X.cs" to "/X.cs" + parts = s.split("/") + if len(parts) >= 2 and parts[-1] == parts[-2]: + parts = parts[:-1] + return "/".join(parts) + + # Prefer a full path if provided in either field + candidate = "" + for v in (n, p): + v2 = strip_prefix(v) + if v2.endswith(".cs") or v2.startswith("Assets/"): + candidate = v2 + break + + if candidate: + candidate = collapse_duplicate_tail(candidate) + # If a directory was passed in path and file in name, join them + if not candidate.endswith(".cs") and n.endswith(".cs"): + v2 = strip_prefix(n) + candidate = (candidate.rstrip("/") + "/" + v2.split("/")[-1]) + if candidate.endswith(".cs"): + parts = candidate.split("/") + file_name = parts[-1] + dir_path = "/".join(parts[:-1]) if len(parts) > 1 else "Assets" + base = file_name[:- + 3] if file_name.lower().endswith(".cs") else file_name + return base, dir_path + + # Fall back: remove extension from name if present and return given path + base_name = n[:-3] if n.lower().endswith(".cs") else n + return base_name, (p or "Assets") + + +def _with_norm(resp: dict[str, Any] | Any, edits: list[dict[str, Any]], routing: str | None = None) -> dict[str, Any] | Any: + if not isinstance(resp, dict): + return resp + data = resp.setdefault("data", {}) + data.setdefault("normalizedEdits", edits) + if routing: + data["routing"] = routing + return resp + + +def _err(code: str, message: str, *, expected: dict[str, Any] | None = None, rewrite: dict[str, Any] | None = None, + normalized: list[dict[str, Any]] | None = None, routing: str | None = None, extra: dict[str, Any] | None = None) -> dict[str, Any]: + payload: dict[str, Any] = {"success": False, + "code": code, "message": message} + data: dict[str, Any] = {} + if expected: + data["expected"] = expected + if rewrite: + data["rewrite_suggestion"] = rewrite + if normalized is not None: + data["normalizedEdits"] = normalized + if routing: + data["routing"] = routing + if extra: + data.update(extra) + if data: + payload["data"] = data + return payload + +# Natural-language parsing removed; clients should send structured edits. + + +@mcp_for_unity_tool(name="script_apply_edits", description=( + """Structured C# edits (methods/classes) with safer boundaries - prefer this over raw text. 
+    Best practices:
+    - Prefer anchor_* ops for pattern-based insert/replace near stable markers
+    - Use replace_method/delete_method for whole-method changes (keeps signatures balanced)
+    - Avoid whole-file regex deletes; validators will guard unbalanced braces
+    - For tail insertions, prefer anchor/regex_replace on final brace (class closing)
+    - Pass options.validate='standard' for structural checks; 'relaxed' for interior-only edits
+    Canonical fields (use these exact keys):
+    - op: replace_method | insert_method | delete_method | anchor_insert | anchor_delete | anchor_replace
+    - className: string (defaults to 'name' if omitted on method/class ops)
+    - methodName: string (required for replace_method, delete_method)
+    - replacement: string (required for replace_method, insert_method)
+    - position: start | end | after | before (insert_method only)
+    - afterMethodName / beforeMethodName: string (required when position='after'/'before')
+    - anchor: regex string (for anchor_* ops)
+    - text: string (for anchor_insert/anchor_replace)
+    Examples:
+    1) Replace a method:
+    {
+      "name": "SmartReach",
+      "path": "Assets/Scripts/Interaction",
+      "edits": [
+        {
+          "op": "replace_method",
+          "className": "SmartReach",
+          "methodName": "HasTarget",
+          "replacement": "public bool HasTarget(){ return currentTarget!=null; }"
+        }
+      ],
+      "options": {"validate": "standard", "refresh": "immediate"}
+    }
+    2) Insert a method after another:
+    {
+      "name": "SmartReach",
+      "path": "Assets/Scripts/Interaction",
+      "edits": [
+        {
+          "op": "insert_method",
+          "className": "SmartReach",
+          "replacement": "public void PrintSeries(){ Debug.Log(seriesName); }",
+          "position": "after",
+          "afterMethodName": "GetCurrentTarget"
+        }
+      ]
+    }
+    """
+))
+def script_apply_edits(
+    ctx: Context,
+    name: Annotated[str, "Name of the script to edit"],
+    path: Annotated[str, "Path to the script to edit under Assets/ directory"],
+    edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script"],
+    options: Annotated[dict[str, Any],
+                       "Options for the script edit"] | None = None,
+    script_type: Annotated[str,
+                           "Type of the script to edit"] = "MonoBehaviour",
+    namespace: Annotated[str,
+                         "Namespace of the script to edit"] | None = None,
+) -> dict[str, Any]:
+    ctx.info(f"Processing script_apply_edits: {name}")
+    # Normalize locator first so downstream calls target the correct script file.
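+    # For example, name="Assets/Scripts/Interaction/SmartReach.cs" with path=""
+    # normalizes to ("SmartReach", "Assets/Scripts/Interaction"); see the
+    # _normalize_script_locator docstring for the full list of accepted locator forms.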
+ name, path = _normalize_script_locator(name, path) + # Normalize unsupported or aliased ops to known structured/text paths + + def _unwrap_and_alias(edit: dict[str, Any]) -> dict[str, Any]: + # Unwrap single-key wrappers like {"replace_method": {...}} + for wrapper_key in ( + "replace_method", "insert_method", "delete_method", + "replace_class", "delete_class", + "anchor_insert", "anchor_replace", "anchor_delete", + ): + if wrapper_key in edit and isinstance(edit[wrapper_key], dict): + inner = dict(edit[wrapper_key]) + inner["op"] = wrapper_key + edit = inner + break + + e = dict(edit) + op = (e.get("op") or e.get("operation") or e.get( + "type") or e.get("mode") or "").strip().lower() + if op: + e["op"] = op + + # Common field aliases + if "class_name" in e and "className" not in e: + e["className"] = e.pop("class_name") + if "class" in e and "className" not in e: + e["className"] = e.pop("class") + if "method_name" in e and "methodName" not in e: + e["methodName"] = e.pop("method_name") + # Some clients use a generic 'target' for method name + if "target" in e and "methodName" not in e: + e["methodName"] = e.pop("target") + if "method" in e and "methodName" not in e: + e["methodName"] = e.pop("method") + if "new_content" in e and "replacement" not in e: + e["replacement"] = e.pop("new_content") + if "newMethod" in e and "replacement" not in e: + e["replacement"] = e.pop("newMethod") + if "new_method" in e and "replacement" not in e: + e["replacement"] = e.pop("new_method") + if "content" in e and "replacement" not in e: + e["replacement"] = e.pop("content") + if "after" in e and "afterMethodName" not in e: + e["afterMethodName"] = e.pop("after") + if "after_method" in e and "afterMethodName" not in e: + e["afterMethodName"] = e.pop("after_method") + if "before" in e and "beforeMethodName" not in e: + e["beforeMethodName"] = e.pop("before") + if "before_method" in e and "beforeMethodName" not in e: + e["beforeMethodName"] = e.pop("before_method") + # anchor_method → before/after based on position (default after) + if "anchor_method" in e: + anchor = e.pop("anchor_method") + pos = (e.get("position") or "after").strip().lower() + if pos == "before" and "beforeMethodName" not in e: + e["beforeMethodName"] = anchor + elif "afterMethodName" not in e: + e["afterMethodName"] = anchor + if "anchorText" in e and "anchor" not in e: + e["anchor"] = e.pop("anchorText") + if "pattern" in e and "anchor" not in e and e.get("op") and e["op"].startswith("anchor_"): + e["anchor"] = e.pop("pattern") + if "newText" in e and "text" not in e: + e["text"] = e.pop("newText") + + # CI compatibility (T‑A/T‑E): + # Accept method-anchored anchor_insert and upgrade to insert_method + # Example incoming shape: + # {"op":"anchor_insert","afterMethodName":"GetCurrentTarget","text":"..."} + if ( + e.get("op") == "anchor_insert" + and not e.get("anchor") + and (e.get("afterMethodName") or e.get("beforeMethodName")) + ): + e["op"] = "insert_method" + if "replacement" not in e: + e["replacement"] = e.get("text", "") + + # LSP-like range edit -> replace_range + if "range" in e and isinstance(e["range"], dict): + rng = e.pop("range") + start = rng.get("start", {}) + end = rng.get("end", {}) + # Convert 0-based to 1-based line/col + e["op"] = "replace_range" + e["startLine"] = int(start.get("line", 0)) + 1 + e["startCol"] = int(start.get("character", 0)) + 1 + e["endLine"] = int(end.get("line", 0)) + 1 + e["endCol"] = int(end.get("character", 0)) + 1 + if "newText" in edit and "text" not in e: + e["text"] = 
edit.get("newText", "") + return e + + normalized_edits: list[dict[str, Any]] = [] + for raw in edits or []: + e = _unwrap_and_alias(raw) + op = (e.get("op") or e.get("operation") or e.get( + "type") or e.get("mode") or "").strip().lower() + + # Default className to script name if missing on structured method/class ops + if op in ("replace_class", "delete_class", "replace_method", "delete_method", "insert_method") and not e.get("className"): + e["className"] = name + + # Map common aliases for text ops + if op in ("text_replace",): + e["op"] = "replace_range" + normalized_edits.append(e) + continue + if op in ("regex_delete",): + e["op"] = "regex_replace" + e.setdefault("text", "") + normalized_edits.append(e) + continue + if op == "regex_replace" and ("replacement" not in e): + if "text" in e: + e["replacement"] = e.get("text", "") + elif "insert" in e or "content" in e: + e["replacement"] = e.get( + "insert") or e.get("content") or "" + if op == "anchor_insert" and not (e.get("text") or e.get("insert") or e.get("content") or e.get("replacement")): + e["op"] = "anchor_delete" + normalized_edits.append(e) + continue + normalized_edits.append(e) + + edits = normalized_edits + normalized_for_echo = edits + + # Validate required fields and produce machine-parsable hints + def error_with_hint(message: str, expected: dict[str, Any], suggestion: dict[str, Any]) -> dict[str, Any]: + return _err("missing_field", message, expected=expected, rewrite=suggestion, normalized=normalized_for_echo) + + for e in edits or []: + op = e.get("op", "") + if op == "replace_method": + if not e.get("methodName"): + return error_with_hint( + "replace_method requires 'methodName'.", + {"op": "replace_method", "required": [ + "className", "methodName", "replacement"]}, + {"edits[0].methodName": "HasTarget"} + ) + if not (e.get("replacement") or e.get("text")): + return error_with_hint( + "replace_method requires 'replacement' (inline or base64).", + {"op": "replace_method", "required": [ + "className", "methodName", "replacement"]}, + {"edits[0].replacement": "public bool X(){ return true; }"} + ) + elif op == "insert_method": + if not (e.get("replacement") or e.get("text")): + return error_with_hint( + "insert_method requires a non-empty 'replacement'.", + {"op": "insert_method", "required": ["className", "replacement"], "position": { + "after_requires": "afterMethodName", "before_requires": "beforeMethodName"}}, + {"edits[0].replacement": "public void PrintSeries(){ Debug.Log(\"1,2,3\"); }"} + ) + pos = (e.get("position") or "").lower() + if pos == "after" and not e.get("afterMethodName"): + return error_with_hint( + "insert_method with position='after' requires 'afterMethodName'.", + {"op": "insert_method", "position": { + "after_requires": "afterMethodName"}}, + {"edits[0].afterMethodName": "GetCurrentTarget"} + ) + if pos == "before" and not e.get("beforeMethodName"): + return error_with_hint( + "insert_method with position='before' requires 'beforeMethodName'.", + {"op": "insert_method", "position": { + "before_requires": "beforeMethodName"}}, + {"edits[0].beforeMethodName": "GetCurrentTarget"} + ) + elif op == "delete_method": + if not e.get("methodName"): + return error_with_hint( + "delete_method requires 'methodName'.", + {"op": "delete_method", "required": [ + "className", "methodName"]}, + {"edits[0].methodName": "PrintSeries"} + ) + elif op in ("anchor_insert", "anchor_replace", "anchor_delete"): + if not e.get("anchor"): + return error_with_hint( + f"{op} requires 'anchor' (regex).", + {"op": op, 
"required": ["anchor"]}, + {"edits[0].anchor": "(?m)^\\s*public\\s+bool\\s+HasTarget\\s*\\("} + ) + if op in ("anchor_insert", "anchor_replace") and not (e.get("text") or e.get("replacement")): + return error_with_hint( + f"{op} requires 'text'.", + {"op": op, "required": ["anchor", "text"]}, + {"edits[0].text": "/* comment */\n"} + ) + + # Decide routing: structured vs text vs mixed + STRUCT = {"replace_class", "delete_class", "replace_method", "delete_method", + "insert_method", "anchor_delete", "anchor_replace", "anchor_insert"} + TEXT = {"prepend", "append", "replace_range", "regex_replace"} + ops_set = {(e.get("op") or "").lower() for e in edits or []} + all_struct = ops_set.issubset(STRUCT) + all_text = ops_set.issubset(TEXT) + mixed = not (all_struct or all_text) + + # If everything is structured (method/class/anchor ops), forward directly to Unity's structured editor. + if all_struct: + opts2 = dict(options or {}) + # For structured edits, prefer immediate refresh to avoid missed reloads when Editor is unfocused + opts2.setdefault("refresh", "immediate") + params_struct: dict[str, Any] = { + "action": "edit", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + "edits": edits, + "options": opts2, + } + resp_struct = send_command_with_retry( + "manage_script", params_struct) + if isinstance(resp_struct, dict) and resp_struct.get("success"): + pass # Optional sentinel reload removed (deprecated) + return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="structured") + + # 1) read from Unity + read_resp = send_command_with_retry("manage_script", { + "action": "read", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + }) + if not isinstance(read_resp, dict) or not read_resp.get("success"): + return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)} + + data = read_resp.get("data") or read_resp.get( + "result", {}).get("data") or {} + contents = data.get("contents") + if contents is None and data.get("contentsEncoded") and data.get("encodedContents"): + contents = base64.b64decode( + data["encodedContents"]).decode("utf-8") + if contents is None: + return {"success": False, "message": "No contents returned from Unity read."} + + # Optional preview/dry-run: apply locally and return diff without writing + preview = bool((options or {}).get("preview")) + + # If we have a mixed batch (TEXT + STRUCT), apply text first with precondition, then structured + if mixed: + text_edits = [e for e in edits or [] if ( + e.get("op") or "").lower() in TEXT] + struct_edits = [e for e in edits or [] if ( + e.get("op") or "").lower() in STRUCT] + try: + base_text = contents + + def line_col_from_index(idx: int) -> tuple[int, int]: + line = base_text.count("\n", 0, idx) + 1 + last_nl = base_text.rfind("\n", 0, idx) + col = (idx - (last_nl + 1)) + \ + 1 if last_nl >= 0 else idx + 1 + return line, col + + at_edits: list[dict[str, Any]] = [] + for e in text_edits: + opx = (e.get("op") or e.get("operation") or e.get( + "type") or e.get("mode") or "").strip().lower() + text_field = e.get("text") or e.get("insert") or e.get( + "content") or e.get("replacement") or "" + if opx == "anchor_insert": + anchor = e.get("anchor") or "" + position = (e.get("position") or "after").lower() + flags = re.MULTILINE | ( + re.IGNORECASE if e.get("ignore_case") else 0) + try: + # Use improved anchor matching logic + m = 
_find_best_anchor_match( + anchor, base_text, flags, prefer_last=True) + except Exception as ex: + return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="mixed/text-first") + if not m: + return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="mixed/text-first") + idx = m.start() if position == "before" else m.end() + # Normalize insertion to avoid jammed methods + text_field_norm = text_field + if not text_field_norm.startswith("\n"): + text_field_norm = "\n" + text_field_norm + if not text_field_norm.endswith("\n"): + text_field_norm = text_field_norm + "\n" + sl, sc = line_col_from_index(idx) + at_edits.append( + {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field_norm}) + # do not mutate base_text when building atomic spans + elif opx == "replace_range": + if all(k in e for k in ("startLine", "startCol", "endLine", "endCol")): + at_edits.append({ + "startLine": int(e.get("startLine", 1)), + "startCol": int(e.get("startCol", 1)), + "endLine": int(e.get("endLine", 1)), + "endCol": int(e.get("endCol", 1)), + "newText": text_field + }) + else: + return _with_norm(_err("missing_field", "replace_range requires startLine/startCol/endLine/endCol", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first") + elif opx == "regex_replace": + pattern = e.get("pattern") or "" + try: + regex_obj = re.compile(pattern, re.MULTILINE | ( + re.IGNORECASE if e.get("ignore_case") else 0)) + except Exception as ex: + return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="mixed/text-first") + m = regex_obj.search(base_text) + if not m: + continue + # Expand $1, $2... 
in replacement using this match + + def _expand_dollars(rep: str, _m=m) -> str: + return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep) + repl = _expand_dollars(text_field) + sl, sc = line_col_from_index(m.start()) + el, ec = line_col_from_index(m.end()) + at_edits.append( + {"startLine": sl, "startCol": sc, "endLine": el, "endCol": ec, "newText": repl}) + # do not mutate base_text when building atomic spans + elif opx in ("prepend", "append"): + if opx == "prepend": + sl, sc = 1, 1 + at_edits.append( + {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field}) + # prepend can be applied atomically without local mutation + else: + # Insert at true EOF position (handles both \n and \r\n correctly) + eof_idx = len(base_text) + sl, sc = line_col_from_index(eof_idx) + new_text = ("\n" if not base_text.endswith( + "\n") else "") + text_field + at_edits.append( + {"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": new_text}) + # do not mutate base_text when building atomic spans + else: + return _with_norm(_err("unknown_op", f"Unsupported text edit op: {opx}", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first") + + sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest() + if at_edits: + params_text: dict[str, Any] = { + "action": "apply_text_edits", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + "edits": at_edits, + "precondition_sha256": sha, + "options": {"refresh": (options or {}).get("refresh", "debounced"), "validate": (options or {}).get("validate", "standard"), "applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential"))} + } + resp_text = send_command_with_retry( + "manage_script", params_text) + if not (isinstance(resp_text, dict) and resp_text.get("success")): + return _with_norm(resp_text if isinstance(resp_text, dict) else {"success": False, "message": str(resp_text)}, normalized_for_echo, routing="mixed/text-first") + # Optional sentinel reload removed (deprecated) + except Exception as e: + return _with_norm({"success": False, "message": f"Text edit conversion failed: {e}"}, normalized_for_echo, routing="mixed/text-first") + + if struct_edits: + opts2 = dict(options or {}) + # Prefer debounced background refresh unless explicitly overridden + opts2.setdefault("refresh", "debounced") + params_struct: dict[str, Any] = { + "action": "edit", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + "edits": struct_edits, + "options": opts2 + } + resp_struct = send_command_with_retry( + "manage_script", params_struct) + if isinstance(resp_struct, dict) and resp_struct.get("success"): + pass # Optional sentinel reload removed (deprecated) + return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="mixed/text-first") + + return _with_norm({"success": True, "message": "Applied text edits (no structured ops)"}, normalized_for_echo, routing="mixed/text-first") + + # If the edits are text-ops, prefer sending them to Unity's apply_text_edits with precondition + # so header guards and validation run on the C# side. + # Supported conversions: anchor_insert, replace_range, regex_replace (first match only). 
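+ # Illustrative edit shapes this conversion path handles (all values below are placeholders, not taken from a real project):
+ #   {"op": "anchor_insert", "anchor": "(?m)^\\s*void\\s+Update\\s*\\(", "position": "before", "text": "// note\n"}
+ #   {"op": "replace_range", "startLine": 10, "startCol": 1, "endLine": 11, "endCol": 1, "text": "int x = 1;\n"}
+ #   {"op": "regex_replace", "pattern": "Debug\\.Log\\(", "text": "Debug.LogWarning("}  # first match only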
+ text_ops = {(e.get("op") or e.get("operation") or e.get("type") or e.get( + "mode") or "").strip().lower() for e in (edits or [])} + structured_kinds = {"replace_class", "delete_class", + "replace_method", "delete_method", "insert_method", "anchor_insert"} + if not text_ops.issubset(structured_kinds): + # Convert to apply_text_edits payload + try: + base_text = contents + + def line_col_from_index(idx: int) -> tuple[int, int]: + # 1-based line/col against base buffer + line = base_text.count("\n", 0, idx) + 1 + last_nl = base_text.rfind("\n", 0, idx) + col = (idx - (last_nl + 1)) + \ + 1 if last_nl >= 0 else idx + 1 + return line, col + + at_edits: list[dict[str, Any]] = [] + import re as _re + for e in edits or []: + op = (e.get("op") or e.get("operation") or e.get( + "type") or e.get("mode") or "").strip().lower() + # aliasing for text field + text_field = e.get("text") or e.get( + "insert") or e.get("content") or "" + if op == "anchor_insert": + anchor = e.get("anchor") or "" + position = (e.get("position") or "after").lower() + # Use improved anchor matching logic with helpful errors, honoring ignore_case + try: + flags = re.MULTILINE | ( + re.IGNORECASE if e.get("ignore_case") else 0) + m = _find_best_anchor_match( + anchor, base_text, flags, prefer_last=True) + except Exception as ex: + return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="text") + if not m: + return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="text") + idx = m.start() if position == "before" else m.end() + # Normalize insertion newlines + if text_field and not text_field.startswith("\n"): + text_field = "\n" + text_field + if text_field and not text_field.endswith("\n"): + text_field = text_field + "\n" + sl, sc = line_col_from_index(idx) + at_edits.append({ + "startLine": sl, + "startCol": sc, + "endLine": sl, + "endCol": sc, + "newText": text_field or "" + }) + # Do not mutate base buffer when building an atomic batch + elif op == "replace_range": + # Directly forward if already in line/col form + if "startLine" in e: + at_edits.append({ + "startLine": int(e.get("startLine", 1)), + "startCol": int(e.get("startCol", 1)), + "endLine": int(e.get("endLine", 1)), + "endCol": int(e.get("endCol", 1)), + "newText": text_field + }) + else: + # If only indices provided, skip (we don't support index-based here) + return _with_norm({"success": False, "code": "missing_field", "message": "replace_range requires startLine/startCol/endLine/endCol"}, normalized_for_echo, routing="text") + elif op == "regex_replace": + pattern = e.get("pattern") or "" + repl = text_field + flags = re.MULTILINE | ( + re.IGNORECASE if e.get("ignore_case") else 0) + # Early compile for clearer error messages + try: + regex_obj = re.compile(pattern, flags) + except Exception as ex: + return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="text") + # Use smart anchor matching for consistent behavior with anchor_insert + m = _find_best_anchor_match( + pattern, base_text, flags, prefer_last=True) + if not m: + continue + # Expand $1, $2... 
backrefs in replacement using the first match (consistent with mixed-path behavior) + + def _expand_dollars(rep: str, _m=m) -> str: + return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep) + repl_expanded = _expand_dollars(repl) + # Let C# side handle validation using Unity's built-in compiler services + sl, sc = line_col_from_index(m.start()) + el, ec = line_col_from_index(m.end()) + at_edits.append({ + "startLine": sl, + "startCol": sc, + "endLine": el, + "endCol": ec, + "newText": repl_expanded + }) + # Do not mutate base buffer when building an atomic batch + else: + return _with_norm({"success": False, "code": "unsupported_op", "message": f"Unsupported text edit op for server-side apply_text_edits: {op}"}, normalized_for_echo, routing="text") + + if not at_edits: + return _with_norm({"success": False, "code": "no_spans", "message": "No applicable text edit spans computed (anchor not found or zero-length)."}, normalized_for_echo, routing="text") + + sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest() + params: dict[str, Any] = { + "action": "apply_text_edits", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + "edits": at_edits, + "precondition_sha256": sha, + "options": { + "refresh": (options or {}).get("refresh", "debounced"), + "validate": (options or {}).get("validate", "standard"), + "applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential")) + } + } + resp = send_command_with_retry("manage_script", params) + if isinstance(resp, dict) and resp.get("success"): + pass # Optional sentinel reload removed (deprecated) + return _with_norm( + resp if isinstance(resp, dict) else { + "success": False, "message": str(resp)}, + normalized_for_echo, + routing="text" + ) + except Exception as e: + return _with_norm({"success": False, "code": "conversion_failed", "message": f"Edit conversion failed: {e}"}, normalized_for_echo, routing="text") + + # For regex_replace, honor preview consistently: if preview=true, always return diff without writing. + # If confirm=false (default) and preview not requested, return diff and instruct confirm=true to apply. + if "regex_replace" in text_ops and (preview or not (options or {}).get("confirm")): + try: + preview_text = _apply_edits_locally(contents, edits) + import difflib + diff = list(difflib.unified_diff(contents.splitlines( + ), preview_text.splitlines(), fromfile="before", tofile="after", n=2)) + if len(diff) > 800: + diff = diff[:800] + ["... 
(diff truncated) ..."] + if preview: + return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}} + return _with_norm({"success": False, "message": "Preview diff; set options.confirm=true to apply.", "data": {"diff": "\n".join(diff)}}, normalized_for_echo, routing="text") + except Exception as e: + return _with_norm({"success": False, "code": "preview_failed", "message": f"Preview failed: {e}"}, normalized_for_echo, routing="text") + # 2) apply edits locally (only if not text-ops) + try: + new_contents = _apply_edits_locally(contents, edits) + except Exception as e: + return {"success": False, "message": f"Edit application failed: {e}"} + + # Short-circuit no-op edits to avoid false "applied" reports downstream + if new_contents == contents: + return _with_norm({ + "success": True, + "message": "No-op: contents unchanged", + "data": {"no_op": True, "evidence": {"reason": "identical_content"}} + }, normalized_for_echo, routing="text") + + if preview: + # Produce a compact unified diff limited to small context + import difflib + a = contents.splitlines() + b = new_contents.splitlines() + diff = list(difflib.unified_diff( + a, b, fromfile="before", tofile="after", n=3)) + # Limit diff size to keep responses small + if len(diff) > 2000: + diff = diff[:2000] + ["... (diff truncated) ..."] + return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}} + + # 3) update to Unity + # Default refresh/validate for natural usage on text path as well + options = dict(options or {}) + options.setdefault("validate", "standard") + options.setdefault("refresh", "debounced") + + # Compute the SHA of the current file contents for the precondition + old_lines = contents.splitlines(keepends=True) + end_line = len(old_lines) + 1 # 1-based exclusive end + sha = hashlib.sha256(contents.encode("utf-8")).hexdigest() + + # Apply a whole-file text edit rather than the deprecated 'update' action + params = { + "action": "apply_text_edits", + "name": name, + "path": path, + "namespace": namespace, + "scriptType": script_type, + "edits": [ + { + "startLine": 1, + "startCol": 1, + "endLine": end_line, + "endCol": 1, + "newText": new_contents, + } + ], + "precondition_sha256": sha, + "options": options or {"validate": "standard", "refresh": "debounced"}, + } + + write_resp = send_command_with_retry("manage_script", params) + if isinstance(write_resp, dict) and write_resp.get("success"): + pass # Optional sentinel reload removed (deprecated) + return _with_norm( + write_resp if isinstance(write_resp, dict) + else {"success": False, "message": str(write_resp)}, + normalized_for_echo, + routing="text", + ) From 5f1ab98991ffcb61cfba929a528807e040dea2d4 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 19:03:32 -0400 Subject: [PATCH 05/30] Convert last remaining tools with new decorator --- .../src/tools/manage_script.py | 1025 ++++++++--------- .../src/tools/resource_tools.py | 492 ++++---- 2 files changed, 750 insertions(+), 767 deletions(-) diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script.py b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script.py index fef1e92d..cad6a88c 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/manage_script.py @@ -5,561 +5,548 @@ from mcp.server.fastmcp import FastMCP, Context +from registry import mcp_for_unity_tool from unity_connection 
import send_command_with_retry -try: - from telemetry_decorator import telemetry_tool - HAS_TELEMETRY = True -except ImportError: - HAS_TELEMETRY = False - - def telemetry_tool(tool_name: str): - def decorator(func): - return func - return decorator - - -def register_manage_script_tools(mcp: FastMCP): - """Register all script management tools with the MCP server.""" - - def _split_uri(uri: str) -> tuple[str, str]: - """Split an incoming URI or path into (name, directory) suitable for Unity. - - Rules: - - unity://path/Assets/... → keep as Assets-relative (after decode/normalize) - - file://... → percent-decode, normalize, strip host and leading slashes, - then, if any 'Assets' segment exists, return path relative to that 'Assets' root. - Otherwise, fall back to original name/dir behavior. - - plain paths → decode/normalize separators; if they contain an 'Assets' segment, - return relative to 'Assets'. - """ - raw_path: str - if uri.startswith("unity://path/"): - raw_path = uri[len("unity://path/"):] - elif uri.startswith("file://"): - parsed = urlparse(uri) - host = (parsed.netloc or "").strip() - p = parsed.path or "" - # UNC: file://server/share/... -> //server/share/... - if host and host.lower() != "localhost": - p = f"//{host}{p}" - # Use percent-decoded path, preserving leading slashes - raw_path = unquote(p) - else: - raw_path = uri - - # Percent-decode any residual encodings and normalize separators - raw_path = unquote(raw_path).replace("\\", "/") - # Strip leading slash only for Windows drive-letter forms like "/C:/..." - if os.name == "nt" and len(raw_path) >= 3 and raw_path[0] == "/" and raw_path[2] == ":": - raw_path = raw_path[1:] - - # Normalize path (collapse ../, ./) - norm = os.path.normpath(raw_path).replace("\\", "/") - - # If an 'Assets' segment exists, compute path relative to it (case-insensitive) - parts = [p for p in norm.split("/") if p not in ("", ".")] - idx = next((i for i, seg in enumerate(parts) - if seg.lower() == "assets"), None) - assets_rel = "/".join(parts[idx:]) if idx is not None else None - - effective_path = assets_rel if assets_rel else norm - # For POSIX absolute paths outside Assets, drop the leading '/' - # to return a clean relative-like directory (e.g., '/tmp' -> 'tmp'). - if effective_path.startswith("/"): - effective_path = effective_path[1:] - - name = os.path.splitext(os.path.basename(effective_path))[0] - directory = os.path.dirname(effective_path) - return name, directory - - @mcp.tool(name="apply_text_edits", description=( - """Apply small text edits to a C# script identified by URI. - IMPORTANT: This tool replaces EXACT character positions. Always verify content at target lines/columns BEFORE editing! - RECOMMENDED WORKFLOW: - 1. First call resources/read with start_line/line_count to verify exact content - 2. Count columns carefully (or use find_in_file to locate patterns) - 3. Apply your edit with precise coordinates - 4. Consider script_apply_edits with anchors for safer pattern-based replacements - Notes: - - For method/class operations, use script_apply_edits (safer, structured edits) - - For pattern-based replacements, consider anchor operations in script_apply_edits - - Lines, columns are 1-indexed - - Tabs count as 1 column""" - )) - @telemetry_tool("apply_text_edits") - def apply_text_edits( - ctx: Context, - uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."], - edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script, i.e. 
a list of {startLine,startCol,endLine,endCol,newText} (1-indexed!)"], - precondition_sha256: Annotated[str, - "Optional SHA256 of the script to edit, used to prevent concurrent edits"] | None = None, - strict: Annotated[bool, - "Optional strict flag, used to enforce strict mode"] | None = None, - options: Annotated[dict[str, Any], - "Optional options, used to pass additional options to the script editor"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing apply_text_edits: {uri}") - name, directory = _split_uri(uri) - # Normalize common aliases/misuses for resilience: - # - Accept LSP-style range objects: {range:{start:{line,character}, end:{...}}, newText|text} - # - Accept index ranges as a 2-int array: {range:[startIndex,endIndex], text} - # If normalization is required, read current contents to map indices -> 1-based line/col. - def _needs_normalization(arr: list[dict[str, Any]]) -> bool: - for e in arr or []: - if ("startLine" not in e) or ("startCol" not in e) or ("endLine" not in e) or ("endCol" not in e) or ("newText" not in e and "text" in e): - return True - return False - - normalized_edits: list[dict[str, Any]] = [] - warnings: list[str] = [] - if _needs_normalization(edits): - # Read file to support index->line/col conversion when needed - read_resp = send_command_with_retry("manage_script", { - "action": "read", - "name": name, - "path": directory, - }) - if not (isinstance(read_resp, dict) and read_resp.get("success")): - return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)} - data = read_resp.get("data", {}) - contents = data.get("contents") - if not contents and data.get("contentsEncoded"): - try: - contents = base64.b64decode(data.get("encodedContents", "").encode( - "utf-8")).decode("utf-8", "replace") - except Exception: - contents = contents or "" - - # Helper to map 0-based character index to 1-based line/col - def line_col_from_index(idx: int) -> tuple[int, int]: - if idx <= 0: - return 1, 1 - # Count lines up to idx and position within line - nl_count = contents.count("\n", 0, idx) - line = nl_count + 1 - last_nl = contents.rfind("\n", 0, idx) - col = (idx - (last_nl + 1)) + 1 if last_nl >= 0 else idx + 1 - return line, col - - for e in edits or []: - e2 = dict(e) - # Map text->newText if needed - if "newText" not in e2 and "text" in e2: - e2["newText"] = e2.pop("text") - - if "startLine" in e2 and "startCol" in e2 and "endLine" in e2 and "endCol" in e2: - # Guard: explicit fields must be 1-based. - zero_based = False +def _split_uri(uri: str) -> tuple[str, str]: + """Split an incoming URI or path into (name, directory) suitable for Unity. + + Rules: + - unity://path/Assets/... → keep as Assets-relative (after decode/normalize) + - file://... → percent-decode, normalize, strip host and leading slashes, + then, if any 'Assets' segment exists, return path relative to that 'Assets' root. + Otherwise, fall back to original name/dir behavior. + - plain paths → decode/normalize separators; if they contain an 'Assets' segment, + return relative to 'Assets'. + """ + raw_path: str + if uri.startswith("unity://path/"): + raw_path = uri[len("unity://path/"):] + elif uri.startswith("file://"): + parsed = urlparse(uri) + host = (parsed.netloc or "").strip() + p = parsed.path or "" + # UNC: file://server/share/... -> //server/share/... 
+ if host and host.lower() != "localhost": + p = f"//{host}{p}" + # Use percent-decoded path, preserving leading slashes + raw_path = unquote(p) + else: + raw_path = uri + + # Percent-decode any residual encodings and normalize separators + raw_path = unquote(raw_path).replace("\\", "/") + # Strip leading slash only for Windows drive-letter forms like "/C:/..." + if os.name == "nt" and len(raw_path) >= 3 and raw_path[0] == "/" and raw_path[2] == ":": + raw_path = raw_path[1:] + + # Normalize path (collapse ../, ./) + norm = os.path.normpath(raw_path).replace("\\", "/") + + # If an 'Assets' segment exists, compute path relative to it (case-insensitive) + parts = [p for p in norm.split("/") if p not in ("", ".")] + idx = next((i for i, seg in enumerate(parts) + if seg.lower() == "assets"), None) + assets_rel = "/".join(parts[idx:]) if idx is not None else None + + effective_path = assets_rel if assets_rel else norm + # For POSIX absolute paths outside Assets, drop the leading '/' + # to return a clean relative-like directory (e.g., '/tmp' -> 'tmp'). + if effective_path.startswith("/"): + effective_path = effective_path[1:] + + name = os.path.splitext(os.path.basename(effective_path))[0] + directory = os.path.dirname(effective_path) + return name, directory + + +@mcp_for_unity_tool(description=( + """Apply small text edits to a C# script identified by URI. + IMPORTANT: This tool replaces EXACT character positions. Always verify content at target lines/columns BEFORE editing! + RECOMMENDED WORKFLOW: + 1. First call resources/read with start_line/line_count to verify exact content + 2. Count columns carefully (or use find_in_file to locate patterns) + 3. Apply your edit with precise coordinates + 4. Consider script_apply_edits with anchors for safer pattern-based replacements + Notes: + - For method/class operations, use script_apply_edits (safer, structured edits) + - For pattern-based replacements, consider anchor operations in script_apply_edits + - Lines, columns are 1-indexed + - Tabs count as 1 column""" +)) +def apply_text_edits( + ctx: Context, + uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."], + edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script, i.e. a list of {startLine,startCol,endLine,endCol,newText} (1-indexed!)"], + precondition_sha256: Annotated[str, + "Optional SHA256 of the script to edit, used to prevent concurrent edits"] | None = None, + strict: Annotated[bool, + "Optional strict flag, used to enforce strict mode"] | None = None, + options: Annotated[dict[str, Any], + "Optional options, used to pass additional options to the script editor"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing apply_text_edits: {uri}") + name, directory = _split_uri(uri) + + # Normalize common aliases/misuses for resilience: + # - Accept LSP-style range objects: {range:{start:{line,character}, end:{...}}, newText|text} + # - Accept index ranges as a 2-int array: {range:[startIndex,endIndex], text} + # If normalization is required, read current contents to map indices -> 1-based line/col. 
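+ # Illustrative edit shapes (placeholder values) and how they normalize:
+ #   {"startLine": 3, "startCol": 1, "endLine": 3, "endCol": 9, "newText": "int x;"}  -> already explicit, used as-is (1-based)
+ #   {"range": {"start": {"line": 2, "character": 0}, "end": {"line": 2, "character": 8}}, "text": "int x;"}  -> LSP-style 0-based range, shifted to 1-based
+ #   {"range": [120, 128], "text": "int x;"}  -> character indices, mapped to line/col after reading the current file contents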
+ def _needs_normalization(arr: list[dict[str, Any]]) -> bool: + for e in arr or []: + if ("startLine" not in e) or ("startCol" not in e) or ("endLine" not in e) or ("endCol" not in e) or ("newText" not in e and "text" in e): + return True + return False + + normalized_edits: list[dict[str, Any]] = [] + warnings: list[str] = [] + if _needs_normalization(edits): + # Read file to support index->line/col conversion when needed + read_resp = send_command_with_retry("manage_script", { + "action": "read", + "name": name, + "path": directory, + }) + if not (isinstance(read_resp, dict) and read_resp.get("success")): + return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)} + data = read_resp.get("data", {}) + contents = data.get("contents") + if not contents and data.get("contentsEncoded"): + try: + contents = base64.b64decode(data.get("encodedContents", "").encode( + "utf-8")).decode("utf-8", "replace") + except Exception: + contents = contents or "" + + # Helper to map 0-based character index to 1-based line/col + def line_col_from_index(idx: int) -> tuple[int, int]: + if idx <= 0: + return 1, 1 + # Count lines up to idx and position within line + nl_count = contents.count("\n", 0, idx) + line = nl_count + 1 + last_nl = contents.rfind("\n", 0, idx) + col = (idx - (last_nl + 1)) + 1 if last_nl >= 0 else idx + 1 + return line, col + + for e in edits or []: + e2 = dict(e) + # Map text->newText if needed + if "newText" not in e2 and "text" in e2: + e2["newText"] = e2.pop("text") + + if "startLine" in e2 and "startCol" in e2 and "endLine" in e2 and "endCol" in e2: + # Guard: explicit fields must be 1-based. + zero_based = False + for k in ("startLine", "startCol", "endLine", "endCol"): + try: + if int(e2.get(k, 1)) < 1: + zero_based = True + except Exception: + pass + if zero_based: + if strict: + return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": normalized_edits}} + # Normalize by clamping to 1 and warn for k in ("startLine", "startCol", "endLine", "endCol"): try: if int(e2.get(k, 1)) < 1: - zero_based = True + e2[k] = 1 except Exception: pass - if zero_based: - if strict: - return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": normalized_edits}} - # Normalize by clamping to 1 and warn - for k in ("startLine", "startCol", "endLine", "endCol"): - try: - if int(e2.get(k, 1)) < 1: - e2[k] = 1 - except Exception: - pass - warnings.append( - "zero_based_explicit_fields_normalized") - normalized_edits.append(e2) - continue - - rng = e2.get("range") - if isinstance(rng, dict): - # LSP style: 0-based - s = rng.get("start", {}) - t = rng.get("end", {}) - e2["startLine"] = int(s.get("line", 0)) + 1 - e2["startCol"] = int(s.get("character", 0)) + 1 - e2["endLine"] = int(t.get("line", 0)) + 1 - e2["endCol"] = int(t.get("character", 0)) + 1 + warnings.append( + "zero_based_explicit_fields_normalized") + normalized_edits.append(e2) + continue + + rng = e2.get("range") + if isinstance(rng, dict): + # LSP style: 0-based + s = rng.get("start", {}) + t = rng.get("end", {}) + e2["startLine"] = int(s.get("line", 0)) + 1 + e2["startCol"] = int(s.get("character", 0)) + 1 + e2["endLine"] = int(t.get("line", 0)) + 1 + e2["endCol"] = int(t.get("character", 0)) + 1 + e2.pop("range", None) + normalized_edits.append(e2) + continue + if isinstance(rng, (list, 
tuple)) and len(rng) == 2: + try: + a = int(rng[0]) + b = int(rng[1]) + if b < a: + a, b = b, a + sl, sc = line_col_from_index(a) + el, ec = line_col_from_index(b) + e2["startLine"] = sl + e2["startCol"] = sc + e2["endLine"] = el + e2["endCol"] = ec e2.pop("range", None) normalized_edits.append(e2) continue - if isinstance(rng, (list, tuple)) and len(rng) == 2: + except Exception: + pass + # Could not normalize this edit + return { + "success": False, + "code": "missing_field", + "message": "apply_text_edits requires startLine/startCol/endLine/endCol/newText or a normalizable 'range'", + "data": {"expected": ["startLine", "startCol", "endLine", "endCol", "newText"], "got": e} + } + else: + # Even when edits appear already in explicit form, validate 1-based coordinates. + normalized_edits = [] + for e in edits or []: + e2 = dict(e) + has_all = all(k in e2 for k in ( + "startLine", "startCol", "endLine", "endCol")) + if has_all: + zero_based = False + for k in ("startLine", "startCol", "endLine", "endCol"): try: - a = int(rng[0]) - b = int(rng[1]) - if b < a: - a, b = b, a - sl, sc = line_col_from_index(a) - el, ec = line_col_from_index(b) - e2["startLine"] = sl - e2["startCol"] = sc - e2["endLine"] = el - e2["endCol"] = ec - e2.pop("range", None) - normalized_edits.append(e2) - continue + if int(e2.get(k, 1)) < 1: + zero_based = True except Exception: pass - # Could not normalize this edit - return { - "success": False, - "code": "missing_field", - "message": "apply_text_edits requires startLine/startCol/endLine/endCol/newText or a normalizable 'range'", - "data": {"expected": ["startLine", "startCol", "endLine", "endCol", "newText"], "got": e} - } - else: - # Even when edits appear already in explicit form, validate 1-based coordinates. - normalized_edits = [] - for e in edits or []: - e2 = dict(e) - has_all = all(k in e2 for k in ( - "startLine", "startCol", "endLine", "endCol")) - if has_all: - zero_based = False + if zero_based: + if strict: + return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": [e2]}} for k in ("startLine", "startCol", "endLine", "endCol"): try: if int(e2.get(k, 1)) < 1: - zero_based = True + e2[k] = 1 except Exception: pass - if zero_based: - if strict: - return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": [e2]}} - for k in ("startLine", "startCol", "endLine", "endCol"): - try: - if int(e2.get(k, 1)) < 1: - e2[k] = 1 - except Exception: - pass - if "zero_based_explicit_fields_normalized" not in warnings: - warnings.append( - "zero_based_explicit_fields_normalized") - normalized_edits.append(e2) - - # Preflight: detect overlapping ranges among normalized line/col spans - def _pos_tuple(e: dict[str, Any], key_start: bool) -> tuple[int, int]: - return ( - int(e.get("startLine", 1)) if key_start else int( - e.get("endLine", 1)), - int(e.get("startCol", 1)) if key_start else int( - e.get("endCol", 1)), - ) - - def _le(a: tuple[int, int], b: tuple[int, int]) -> bool: - return a[0] < b[0] or (a[0] == b[0] and a[1] <= b[1]) - - # Consider only true replace ranges (non-zero length). Pure insertions (zero-width) don't overlap. 
- spans = [] - for e in normalized_edits or []: - try: - s = _pos_tuple(e, True) - t = _pos_tuple(e, False) - if s != t: - spans.append((s, t)) - except Exception: - # If coordinates missing or invalid, let the server validate later - pass - - if spans: - spans_sorted = sorted(spans, key=lambda p: (p[0][0], p[0][1])) - for i in range(1, len(spans_sorted)): - prev_end = spans_sorted[i-1][1] - curr_start = spans_sorted[i][0] - # Overlap if prev_end > curr_start (strict), i.e., not prev_end <= curr_start - if not _le(prev_end, curr_start): - conflicts = [{ - "startA": {"line": spans_sorted[i-1][0][0], "col": spans_sorted[i-1][0][1]}, - "endA": {"line": spans_sorted[i-1][1][0], "col": spans_sorted[i-1][1][1]}, - "startB": {"line": spans_sorted[i][0][0], "col": spans_sorted[i][0][1]}, - "endB": {"line": spans_sorted[i][1][0], "col": spans_sorted[i][1][1]}, - }] - return {"success": False, "code": "overlap", "data": {"status": "overlap", "conflicts": conflicts}} - - # Note: Do not auto-compute precondition if missing; callers should supply it - # via mcp__unity__get_sha or a prior read. This avoids hidden extra calls and - # preserves existing call-count expectations in clients/tests. - - # Default options: for multi-span batches, prefer atomic to avoid mid-apply imbalance - opts: dict[str, Any] = dict(options or {}) + if "zero_based_explicit_fields_normalized" not in warnings: + warnings.append( + "zero_based_explicit_fields_normalized") + normalized_edits.append(e2) + + # Preflight: detect overlapping ranges among normalized line/col spans + def _pos_tuple(e: dict[str, Any], key_start: bool) -> tuple[int, int]: + return ( + int(e.get("startLine", 1)) if key_start else int( + e.get("endLine", 1)), + int(e.get("startCol", 1)) if key_start else int( + e.get("endCol", 1)), + ) + + def _le(a: tuple[int, int], b: tuple[int, int]) -> bool: + return a[0] < b[0] or (a[0] == b[0] and a[1] <= b[1]) + + # Consider only true replace ranges (non-zero length). Pure insertions (zero-width) don't overlap. 
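+ # Worked example (placeholder coordinates): a replace span ending at line 5, col 10 and another starting at
+ # line 5, col 4 overlap, so the batch is rejected with code "overlap"; zero-width insertions are never counted.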
+ spans = [] + for e in normalized_edits or []: try: - if len(normalized_edits) > 1 and "applyMode" not in opts: - opts["applyMode"] = "atomic" + s = _pos_tuple(e, True) + t = _pos_tuple(e, False) + if s != t: + spans.append((s, t)) except Exception: + # If coordinates missing or invalid, let the server validate later pass - # Support optional debug preview for span-by-span simulation without write - if opts.get("debug_preview"): - try: - import difflib - # Apply locally to preview final result - lines = [] - # Build an indexable original from a read if we normalized from read; otherwise skip - prev = "" - # We cannot guarantee file contents here without a read; return normalized spans only - return { - "success": True, - "message": "Preview only (no write)", - "data": { - "normalizedEdits": normalized_edits, - "preview": True - } - } - except Exception as e: - return {"success": False, "code": "preview_failed", "message": f"debug_preview failed: {e}", "data": {"normalizedEdits": normalized_edits}} - params = { - "action": "apply_text_edits", - "name": name, - "path": directory, - "edits": normalized_edits, - "precondition_sha256": precondition_sha256, - "options": opts, - } - params = {k: v for k, v in params.items() if v is not None} - resp = send_command_with_retry("manage_script", params) - if isinstance(resp, dict): - data = resp.setdefault("data", {}) - data.setdefault("normalizedEdits", normalized_edits) - if warnings: - data.setdefault("warnings", warnings) - if resp.get("success") and (options or {}).get("force_sentinel_reload"): - # Optional: flip sentinel via menu if explicitly requested - try: - import threading - import time - import json - import glob - import os + if spans: + spans_sorted = sorted(spans, key=lambda p: (p[0][0], p[0][1])) + for i in range(1, len(spans_sorted)): + prev_end = spans_sorted[i-1][1] + curr_start = spans_sorted[i][0] + # Overlap if prev_end > curr_start (strict), i.e., not prev_end <= curr_start + if not _le(prev_end, curr_start): + conflicts = [{ + "startA": {"line": spans_sorted[i-1][0][0], "col": spans_sorted[i-1][0][1]}, + "endA": {"line": spans_sorted[i-1][1][0], "col": spans_sorted[i-1][1][1]}, + "startB": {"line": spans_sorted[i][0][0], "col": spans_sorted[i][0][1]}, + "endB": {"line": spans_sorted[i][1][0], "col": spans_sorted[i][1][1]}, + }] + return {"success": False, "code": "overlap", "data": {"status": "overlap", "conflicts": conflicts}} + + # Note: Do not auto-compute precondition if missing; callers should supply it + # via mcp__unity__get_sha or a prior read. This avoids hidden extra calls and + # preserves existing call-count expectations in clients/tests. 
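+ # Illustrative client flow (placeholder path), assuming the get_sha tool defined later in this file:
+ #   sha = get_sha(uri="unity://path/Assets/Scripts/Example.cs")["data"]["sha256"]
+ #   apply_text_edits(uri="unity://path/Assets/Scripts/Example.cs", edits=edits, precondition_sha256=sha)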
+ + # Default options: for multi-span batches, prefer atomic to avoid mid-apply imbalance + opts: dict[str, Any] = dict(options or {}) + try: + if len(normalized_edits) > 1 and "applyMode" not in opts: + opts["applyMode"] = "atomic" + except Exception: + pass + # Support optional debug preview for span-by-span simulation without write + if opts.get("debug_preview"): + try: + import difflib + # Apply locally to preview final result + lines = [] + # Build an indexable original from a read if we normalized from read; otherwise skip + prev = "" + # We cannot guarantee file contents here without a read; return normalized spans only + return { + "success": True, + "message": "Preview only (no write)", + "data": { + "normalizedEdits": normalized_edits, + "preview": True + } + } + except Exception as e: + return {"success": False, "code": "preview_failed", "message": f"debug_preview failed: {e}", "data": {"normalizedEdits": normalized_edits}} + + params = { + "action": "apply_text_edits", + "name": name, + "path": directory, + "edits": normalized_edits, + "precondition_sha256": precondition_sha256, + "options": opts, + } + params = {k: v for k, v in params.items() if v is not None} + resp = send_command_with_retry("manage_script", params) + if isinstance(resp, dict): + data = resp.setdefault("data", {}) + data.setdefault("normalizedEdits", normalized_edits) + if warnings: + data.setdefault("warnings", warnings) + if resp.get("success") and (options or {}).get("force_sentinel_reload"): + # Optional: flip sentinel via menu if explicitly requested + try: + import threading + import time + import json + import glob + import os - def _latest_status() -> dict | None: - try: - files = sorted(glob.glob(os.path.expanduser( - "~/.unity-mcp/unity-mcp-status-*.json")), key=os.path.getmtime, reverse=True) - if not files: - return None - with open(files[0], "r") as f: - return json.loads(f.read()) - except Exception: + def _latest_status() -> dict | None: + try: + files = sorted(glob.glob(os.path.expanduser( + "~/.unity-mcp/unity-mcp-status-*.json")), key=os.path.getmtime, reverse=True) + if not files: return None + with open(files[0], "r") as f: + return json.loads(f.read()) + except Exception: + return None - def _flip_async(): - try: - time.sleep(0.1) - st = _latest_status() - if st and st.get("reloading"): - return - send_command_with_retry( - "execute_menu_item", - {"menuPath": "MCP/Flip Reload Sentinel"}, - max_retries=0, - retry_ms=0, - ) - except Exception: - pass - threading.Thread(target=_flip_async, daemon=True).start() - except Exception: - pass - return resp + def _flip_async(): + try: + time.sleep(0.1) + st = _latest_status() + if st and st.get("reloading"): + return + send_command_with_retry( + "execute_menu_item", + {"menuPath": "MCP/Flip Reload Sentinel"}, + max_retries=0, + retry_ms=0, + ) + except Exception: + pass + threading.Thread(target=_flip_async, daemon=True).start() + except Exception: + pass return resp - return {"success": False, "message": str(resp)} - - @mcp.tool(name="create_script", description=("Create a new C# script at the given project path.")) - @telemetry_tool("create_script") - def create_script( - ctx: Context, - path: Annotated[str, "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"], - contents: Annotated[str, "Contents of the script to create. 
Note, this is Base64 encoded over transport."], - script_type: Annotated[str, "Script type (e.g., 'C#')"] | None = None, - namespace: Annotated[str, "Namespace for the script"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing create_script: {path}") - name = os.path.splitext(os.path.basename(path))[0] - directory = os.path.dirname(path) - # Local validation to avoid round-trips on obviously bad input - norm_path = os.path.normpath( - (path or "").replace("\\", "/")).replace("\\", "/") - if not directory or directory.split("/")[0].lower() != "assets": - return {"success": False, "code": "path_outside_assets", "message": f"path must be under 'Assets/'; got '{path}'."} - if ".." in norm_path.split("/") or norm_path.startswith("/"): - return {"success": False, "code": "bad_path", "message": "path must not contain traversal or be absolute."} - if not name: - return {"success": False, "code": "bad_path", "message": "path must include a script file name."} - if not norm_path.lower().endswith(".cs"): - return {"success": False, "code": "bad_extension", "message": "script file must end with .cs."} - params: dict[str, Any] = { - "action": "create", + return resp + return {"success": False, "message": str(resp)} + + +@mcp_for_unity_tool(description=("Create a new C# script at the given project path.")) +def create_script( + ctx: Context, + path: Annotated[str, "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"], + contents: Annotated[str, "Contents of the script to create. Note, this is Base64 encoded over transport."], + script_type: Annotated[str, "Script type (e.g., 'C#')"] | None = None, + namespace: Annotated[str, "Namespace for the script"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing create_script: {path}") + name = os.path.splitext(os.path.basename(path))[0] + directory = os.path.dirname(path) + # Local validation to avoid round-trips on obviously bad input + norm_path = os.path.normpath( + (path or "").replace("\\", "/")).replace("\\", "/") + if not directory or directory.split("/")[0].lower() != "assets": + return {"success": False, "code": "path_outside_assets", "message": f"path must be under 'Assets/'; got '{path}'."} + if ".." in norm_path.split("/") or norm_path.startswith("/"): + return {"success": False, "code": "bad_path", "message": "path must not contain traversal or be absolute."} + if not name: + return {"success": False, "code": "bad_path", "message": "path must include a script file name."} + if not norm_path.lower().endswith(".cs"): + return {"success": False, "code": "bad_extension", "message": "script file must end with .cs."} + params: dict[str, Any] = { + "action": "create", + "name": name, + "path": directory, + "namespace": namespace, + "scriptType": script_type, + } + if contents: + params["encodedContents"] = base64.b64encode( + contents.encode("utf-8")).decode("utf-8") + params["contentsEncoded"] = True + params = {k: v for k, v in params.items() if v is not None} + resp = send_command_with_retry("manage_script", params) + return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + + +@mcp_for_unity_tool(description=("Delete a C# script by URI or Assets-relative path.")) +def delete_script( + ctx: Context, + uri: Annotated[str, "URI of the script to delete under Assets/ directory, unity://path/Assets/... or file://... 
or Assets/..."] +) -> dict[str, Any]: + """Delete a C# script by URI.""" + ctx.info(f"Processing delete_script: {uri}") + name, directory = _split_uri(uri) + if not directory or directory.split("/")[0].lower() != "assets": + return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."} + params = {"action": "delete", "name": name, "path": directory} + resp = send_command_with_retry("manage_script", params) + return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + + +@mcp_for_unity_tool(description=("Validate a C# script and return diagnostics.")) +def validate_script( + ctx: Context, + uri: Annotated[str, "URI of the script to validate under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."], + level: Annotated[Literal['basic', 'standard'], + "Validation level"] = "basic", + include_diagnostics: Annotated[bool, + "Include full diagnostics and summary"] = False +) -> dict[str, Any]: + ctx.info(f"Processing validate_script: {uri}") + name, directory = _split_uri(uri) + if not directory or directory.split("/")[0].lower() != "assets": + return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."} + if level not in ("basic", "standard"): + return {"success": False, "code": "bad_level", "message": "level must be 'basic' or 'standard'."} + params = { + "action": "validate", + "name": name, + "path": directory, + "level": level, + } + resp = send_command_with_retry("manage_script", params) + if isinstance(resp, dict) and resp.get("success"): + diags = resp.get("data", {}).get("diagnostics", []) or [] + warnings = sum(1 for d in diags if str( + d.get("severity", "")).lower() == "warning") + errors = sum(1 for d in diags if str( + d.get("severity", "")).lower() in ("error", "fatal")) + if include_diagnostics: + return {"success": True, "data": {"diagnostics": diags, "summary": {"warnings": warnings, "errors": errors}}} + return {"success": True, "data": {"warnings": warnings, "errors": errors}} + return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + + +@mcp_for_unity_tool(description=("Compatibility router for legacy script operations. 
Prefer apply_text_edits (ranges) or script_apply_edits (structured) for edits.")) +def manage_script( + ctx: Context, + action: Annotated[Literal['create', 'read', 'delete'], "Perform CRUD operations on C# scripts."], + name: Annotated[str, "Script name (no .cs extension)", "Name of the script to create"], + path: Annotated[str, "Asset path (default: 'Assets/')", "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"], + contents: Annotated[str, "Contents of the script to create", + "C# code for 'create'/'update'"] | None = None, + script_type: Annotated[str, "Script type (e.g., 'C#')", + "Type hint (e.g., 'MonoBehaviour')"] | None = None, + namespace: Annotated[str, "Namespace for the script"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing manage_script: {action}") + try: + # Prepare parameters for Unity + params = { + "action": action, "name": name, - "path": directory, + "path": path, "namespace": namespace, "scriptType": script_type, } + + # Base64 encode the contents if they exist to avoid JSON escaping issues if contents: - params["encodedContents"] = base64.b64encode( - contents.encode("utf-8")).decode("utf-8") - params["contentsEncoded"] = True + if action == 'create': + params["encodedContents"] = base64.b64encode( + contents.encode('utf-8')).decode('utf-8') + params["contentsEncoded"] = True + else: + params["contents"] = contents + params = {k: v for k, v in params.items() if v is not None} - resp = send_command_with_retry("manage_script", params) - return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} - @mcp.tool(name="delete_script", description=("Delete a C# script by URI or Assets-relative path.")) - @telemetry_tool("delete_script") - def delete_script( - ctx: Context, - uri: Annotated[str, "URI of the script to delete under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."] - ) -> dict[str, Any]: - """Delete a C# script by URI.""" - ctx.info(f"Processing delete_script: {uri}") - name, directory = _split_uri(uri) - if not directory or directory.split("/")[0].lower() != "assets": - return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."} - params = {"action": "delete", "name": name, "path": directory} - resp = send_command_with_retry("manage_script", params) - return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + response = send_command_with_retry("manage_script", params) - @mcp.tool(name="validate_script", description=("Validate a C# script and return diagnostics.")) - @telemetry_tool("validate_script") - def validate_script( - ctx: Context, - uri: Annotated[str, "URI of the script to validate under Assets/ directory, unity://path/Assets/... or file://... 
or Assets/..."], - level: Annotated[Literal['basic', 'standard'], - "Validation level"] = "basic", - include_diagnostics: Annotated[bool, - "Include full diagnostics and summary"] = False - ) -> dict[str, Any]: - ctx.info(f"Processing validate_script: {uri}") - name, directory = _split_uri(uri) - if not directory or directory.split("/")[0].lower() != "assets": - return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."} - if level not in ("basic", "standard"): - return {"success": False, "code": "bad_level", "message": "level must be 'basic' or 'standard'."} - params = { - "action": "validate", - "name": name, - "path": directory, - "level": level, - } - resp = send_command_with_retry("manage_script", params) - if isinstance(resp, dict) and resp.get("success"): - diags = resp.get("data", {}).get("diagnostics", []) or [] - warnings = sum(1 for d in diags if str( - d.get("severity", "")).lower() == "warning") - errors = sum(1 for d in diags if str( - d.get("severity", "")).lower() in ("error", "fatal")) - if include_diagnostics: - return {"success": True, "data": {"diagnostics": diags, "summary": {"warnings": warnings, "errors": errors}}} - return {"success": True, "data": {"warnings": warnings, "errors": errors}} - return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + if isinstance(response, dict): + if response.get("success"): + if response.get("data", {}).get("contentsEncoded"): + decoded_contents = base64.b64decode( + response["data"]["encodedContents"]).decode('utf-8') + response["data"]["contents"] = decoded_contents + del response["data"]["encodedContents"] + del response["data"]["contentsEncoded"] - @mcp.tool(name="manage_script", description=("Compatibility router for legacy script operations. 
Prefer apply_text_edits (ranges) or script_apply_edits (structured) for edits.")) - @telemetry_tool("manage_script") - def manage_script( - ctx: Context, - action: Annotated[Literal['create', 'read', 'delete'], "Perform CRUD operations on C# scripts."], - name: Annotated[str, "Script name (no .cs extension)", "Name of the script to create"], - path: Annotated[str, "Asset path (default: 'Assets/')", "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"], - contents: Annotated[str, "Contents of the script to create", - "C# code for 'create'/'update'"] | None = None, - script_type: Annotated[str, "Script type (e.g., 'C#')", - "Type hint (e.g., 'MonoBehaviour')"] | None = None, - namespace: Annotated[str, "Namespace for the script"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing manage_script: {action}") - try: - # Prepare parameters for Unity - params = { - "action": action, - "name": name, - "path": path, - "namespace": namespace, - "scriptType": script_type, - } + return { + "success": True, + "message": response.get("message", "Operation successful."), + "data": response.get("data"), + } + return response - # Base64 encode the contents if they exist to avoid JSON escaping issues - if contents: - if action == 'create': - params["encodedContents"] = base64.b64encode( - contents.encode('utf-8')).decode('utf-8') - params["contentsEncoded"] = True - else: - params["contents"] = contents - - params = {k: v for k, v in params.items() if v is not None} - - response = send_command_with_retry("manage_script", params) - - if isinstance(response, dict): - if response.get("success"): - if response.get("data", {}).get("contentsEncoded"): - decoded_contents = base64.b64decode( - response["data"]["encodedContents"]).decode('utf-8') - response["data"]["contents"] = decoded_contents - del response["data"]["encodedContents"] - del response["data"]["contentsEncoded"] - - return { - "success": True, - "message": response.get("message", "Operation successful."), - "data": response.get("data"), - } - return response - - return {"success": False, "message": str(response)} + return {"success": False, "message": str(response)} - except Exception as e: - return { - "success": False, - "message": f"Python error managing script: {str(e)}", - } + except Exception as e: + return { + "success": False, + "message": f"Python error managing script: {str(e)}", + } - @mcp.tool(name="manage_script_capabilities", description=( - """Get manage_script capabilities (supported ops, limits, and guards). 
- Returns: - - ops: list of supported structured ops - - text_ops: list of supported text ops - - max_edit_payload_bytes: server edit payload cap - - guards: header/using guard enabled flag""" - )) - @telemetry_tool("manage_script_capabilities") - def manage_script_capabilities(ctx: Context) -> dict[str, Any]: - ctx.info("Processing manage_script_capabilities") - try: - # Keep in sync with server/Editor ManageScript implementation - ops = [ - "replace_class", "delete_class", "replace_method", "delete_method", - "insert_method", "anchor_insert", "anchor_delete", "anchor_replace" - ] - text_ops = ["replace_range", "regex_replace", "prepend", "append"] - # Match ManageScript.MaxEditPayloadBytes if exposed; hardcode a sensible default fallback - max_edit_payload_bytes = 256 * 1024 - guards = {"using_guard": True} - extras = {"get_sha": True} - return {"success": True, "data": { - "ops": ops, - "text_ops": text_ops, - "max_edit_payload_bytes": max_edit_payload_bytes, - "guards": guards, - "extras": extras, - }} - except Exception as e: - return {"success": False, "error": f"capabilities error: {e}"} - - @mcp.tool(name="get_sha", description="Get SHA256 and basic metadata for a Unity C# script without returning file contents") - @telemetry_tool("get_sha") - def get_sha( - ctx: Context, - uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."] - ) -> dict[str, Any]: - ctx.info(f"Processing get_sha: {uri}") - try: - name, directory = _split_uri(uri) - params = {"action": "get_sha", "name": name, "path": directory} - resp = send_command_with_retry("manage_script", params) - if isinstance(resp, dict) and resp.get("success"): - data = resp.get("data", {}) - minimal = {"sha256": data.get( - "sha256"), "lengthBytes": data.get("lengthBytes")} - return {"success": True, "data": minimal} - return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} - except Exception as e: - return {"success": False, "message": f"get_sha error: {e}"} + +@mcp_for_unity_tool(description=( + """Get manage_script capabilities (supported ops, limits, and guards). + Returns: + - ops: list of supported structured ops + - text_ops: list of supported text ops + - max_edit_payload_bytes: server edit payload cap + - guards: header/using guard enabled flag""" +)) +def manage_script_capabilities(ctx: Context) -> dict[str, Any]: + ctx.info("Processing manage_script_capabilities") + try: + # Keep in sync with server/Editor ManageScript implementation + ops = [ + "replace_class", "delete_class", "replace_method", "delete_method", + "insert_method", "anchor_insert", "anchor_delete", "anchor_replace" + ] + text_ops = ["replace_range", "regex_replace", "prepend", "append"] + # Match ManageScript.MaxEditPayloadBytes if exposed; hardcode a sensible default fallback + max_edit_payload_bytes = 256 * 1024 + guards = {"using_guard": True} + extras = {"get_sha": True} + return {"success": True, "data": { + "ops": ops, + "text_ops": text_ops, + "max_edit_payload_bytes": max_edit_payload_bytes, + "guards": guards, + "extras": extras, + }} + except Exception as e: + return {"success": False, "error": f"capabilities error: {e}"} + + +@mcp_for_unity_tool(description="Get SHA256 and basic metadata for a Unity C# script without returning file contents") +def get_sha( + ctx: Context, + uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... 
or Assets/..."] +) -> dict[str, Any]: + ctx.info(f"Processing get_sha: {uri}") + try: + name, directory = _split_uri(uri) + params = {"action": "get_sha", "name": name, "path": directory} + resp = send_command_with_retry("manage_script", params) + if isinstance(resp, dict) and resp.get("success"): + data = resp.get("data", {}) + minimal = {"sha256": data.get( + "sha256"), "lengthBytes": data.get("lengthBytes")} + return {"success": True, "data": minimal} + return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)} + except Exception as e: + return {"success": False, "message": f"get_sha error: {e}"} diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/resource_tools.py b/UnityMcpBridge/UnityMcpServer~/src/tools/resource_tools.py index 2ae06e85..a8398f75 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/resource_tools.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/resource_tools.py @@ -11,9 +11,9 @@ from typing import Annotated, Any from urllib.parse import urlparse, unquote -from mcp.server.fastmcp import FastMCP, Context -from telemetry_decorator import telemetry_tool +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry @@ -133,264 +133,260 @@ def _resolve_safe_path_from_uri(uri: str, project: Path) -> Path | None: return p -def register_resource_tools(mcp: FastMCP) -> None: - """Registers list_resources and read_resource wrapper tools.""" - - @mcp.tool(name="list_resources", description=("List project URIs (unity://path/...) under a folder (default: Assets). Only .cs files are returned by default; always appends unity://spec/script-edits.\n")) - @telemetry_tool("list_resources") - async def list_resources( - ctx: Context, - pattern: Annotated[str, "Glob, default is *.cs"] | None = "*.cs", - under: Annotated[str, - "Folder under project root, default is Assets"] = "Assets", - limit: Annotated[int, "Page limit"] = 200, - project_root: Annotated[str, "Project path"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing list_resources: {pattern}") +@mcp_for_unity_tool(description=("List project URIs (unity://path/...) under a folder (default: Assets). 
Only .cs files are returned by default; always appends unity://spec/script-edits.\n")) +async def list_resources( + ctx: Context, + pattern: Annotated[str, "Glob, default is *.cs"] | None = "*.cs", + under: Annotated[str, + "Folder under project root, default is Assets"] = "Assets", + limit: Annotated[int, "Page limit"] = 200, + project_root: Annotated[str, "Project path"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing list_resources: {pattern}") + try: + project = _resolve_project_root(project_root) + base = (project / under).resolve() try: - project = _resolve_project_root(project_root) - base = (project / under).resolve() - try: - base.relative_to(project) - except ValueError: - return {"success": False, "error": "Base path must be under project root"} - # Enforce listing only under Assets + base.relative_to(project) + except ValueError: + return {"success": False, "error": "Base path must be under project root"} + # Enforce listing only under Assets + try: + base.relative_to(project / "Assets") + except ValueError: + return {"success": False, "error": "Listing is restricted to Assets/"} + + matches: list[str] = [] + limit_int = _coerce_int(limit, default=200, minimum=1) + for p in base.rglob("*"): + if not p.is_file(): + continue + # Resolve symlinks and ensure the real path stays under project/Assets try: - base.relative_to(project / "Assets") - except ValueError: - return {"success": False, "error": "Listing is restricted to Assets/"} + rp = p.resolve() + rp.relative_to(project / "Assets") + except Exception: + continue + # Enforce .cs extension regardless of provided pattern + if p.suffix.lower() != ".cs": + continue + if pattern and not fnmatch.fnmatch(p.name, pattern): + continue + rel = p.relative_to(project).as_posix() + matches.append(f"unity://path/{rel}") + if len(matches) >= max(1, limit_int): + break - matches: list[str] = [] - limit_int = _coerce_int(limit, default=200, minimum=1) - for p in base.rglob("*"): - if not p.is_file(): - continue - # Resolve symlinks and ensure the real path stays under project/Assets - try: - rp = p.resolve() - rp.relative_to(project / "Assets") - except Exception: - continue - # Enforce .cs extension regardless of provided pattern - if p.suffix.lower() != ".cs": - continue - if pattern and not fnmatch.fnmatch(p.name, pattern): - continue - rel = p.relative_to(project).as_posix() - matches.append(f"unity://path/{rel}") - if len(matches) >= max(1, limit_int): - break + # Always include the canonical spec resource so NL clients can discover it + if "unity://spec/script-edits" not in matches: + matches.append("unity://spec/script-edits") - # Always include the canonical spec resource so NL clients can discover it - if "unity://spec/script-edits" not in matches: - matches.append("unity://spec/script-edits") + return {"success": True, "data": {"uris": matches, "count": len(matches)}} + except Exception as e: + return {"success": False, "error": str(e)} - return {"success": True, "data": {"uris": matches, "count": len(matches)}} - except Exception as e: - return {"success": False, "error": str(e)} - @mcp.tool(name="read_resource", description=("Reads a resource by unity://path/... 
URI with optional slicing.")) - @telemetry_tool("read_resource") - async def read_resource( - ctx: Context, - uri: Annotated[str, "The resource URI to read under Assets/"], - start_line: Annotated[int, - "The starting line number (0-based)"] | None = None, - line_count: Annotated[int, - "The number of lines to read"] | None = None, - head_bytes: Annotated[int, - "The number of bytes to read from the start of the file"] | None = None, - tail_lines: Annotated[int, - "The number of lines to read from the end of the file"] | None = None, - project_root: Annotated[str, - "The project root directory"] | None = None, - request: Annotated[str, "The request ID"] | None = None, - ) -> dict[str, Any]: - ctx.info(f"Processing read_resource: {uri}") - try: - # Serve the canonical spec directly when requested (allow bare or with scheme) - if uri in ("unity://spec/script-edits", "spec/script-edits", "script-edits"): - spec_json = ( - '{\n' - ' "name": "Unity MCP - Script Edits v1",\n' - ' "target_tool": "script_apply_edits",\n' - ' "canonical_rules": {\n' - ' "always_use": ["op","className","methodName","replacement","afterMethodName","beforeMethodName"],\n' - ' "never_use": ["new_method","anchor_method","content","newText"],\n' - ' "defaults": {\n' - ' "className": "\u2190 server will default to \'name\' when omitted",\n' - ' "position": "end"\n' - ' }\n' - ' },\n' - ' "ops": [\n' - ' {"op":"replace_method","required":["className","methodName","replacement"],"optional":["returnType","parametersSignature","attributesContains"],"examples":[{"note":"match overload by signature","parametersSignature":"(int a, string b)"},{"note":"ensure attributes retained","attributesContains":"ContextMenu"}]},\n' - ' {"op":"insert_method","required":["className","replacement"],"position":{"enum":["start","end","after","before"],"after_requires":"afterMethodName","before_requires":"beforeMethodName"}},\n' - ' {"op":"delete_method","required":["className","methodName"]},\n' - ' {"op":"anchor_insert","required":["anchor","text"],"notes":"regex; position=before|after"}\n' - ' ],\n' - ' "apply_text_edits_recipe": {\n' - ' "step1_read": { "tool": "resources/read", "args": {"uri": "unity://path/Assets/Scripts/Interaction/SmartReach.cs"} },\n' - ' "step2_apply": {\n' - ' "tool": "manage_script",\n' - ' "args": {\n' - ' "action": "apply_text_edits",\n' - ' "name": "SmartReach", "path": "Assets/Scripts/Interaction",\n' - ' "edits": [{"startLine": 42, "startCol": 1, "endLine": 42, "endCol": 1, "newText": "[MyAttr]\\n"}],\n' - ' "precondition_sha256": "",\n' - ' "options": {"refresh": "immediate", "validate": "standard"}\n' - ' }\n' - ' },\n' - ' "note": "newText is for apply_text_edits ranges only; use replacement in script_apply_edits ops."\n' - ' },\n' - ' "examples": [\n' - ' {\n' - ' "title": "Replace a method",\n' - ' "args": {\n' - ' "name": "SmartReach",\n' - ' "path": "Assets/Scripts/Interaction",\n' - ' "edits": [\n' - ' {"op":"replace_method","className":"SmartReach","methodName":"HasTarget","replacement":"public bool HasTarget() { return currentTarget != null; }"}\n' - ' ],\n' - ' "options": { "validate": "standard", "refresh": "immediate" }\n' - ' }\n' - ' },\n' - ' {\n' - ' "title": "Insert a method after another",\n' - ' "args": {\n' - ' "name": "SmartReach",\n' - ' "path": "Assets/Scripts/Interaction",\n' - ' "edits": [\n' - ' {"op":"insert_method","className":"SmartReach","replacement":"public void PrintSeries() { Debug.Log(seriesName); }","position":"after","afterMethodName":"GetCurrentTarget"}\n' - ' ]\n' - ' }\n' 
- ' }\n' - ' ]\n' - '}\n' - ) - sha = hashlib.sha256(spec_json.encode("utf-8")).hexdigest() - return {"success": True, "data": {"text": spec_json, "metadata": {"sha256": sha}}} +@mcp_for_unity_tool(description=("Reads a resource by unity://path/... URI with optional slicing.")) +async def read_resource( + ctx: Context, + uri: Annotated[str, "The resource URI to read under Assets/"], + start_line: Annotated[int, + "The starting line number (0-based)"] | None = None, + line_count: Annotated[int, + "The number of lines to read"] | None = None, + head_bytes: Annotated[int, + "The number of bytes to read from the start of the file"] | None = None, + tail_lines: Annotated[int, + "The number of lines to read from the end of the file"] | None = None, + project_root: Annotated[str, + "The project root directory"] | None = None, + request: Annotated[str, "The request ID"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Processing read_resource: {uri}") + try: + # Serve the canonical spec directly when requested (allow bare or with scheme) + if uri in ("unity://spec/script-edits", "spec/script-edits", "script-edits"): + spec_json = ( + '{\n' + ' "name": "Unity MCP - Script Edits v1",\n' + ' "target_tool": "script_apply_edits",\n' + ' "canonical_rules": {\n' + ' "always_use": ["op","className","methodName","replacement","afterMethodName","beforeMethodName"],\n' + ' "never_use": ["new_method","anchor_method","content","newText"],\n' + ' "defaults": {\n' + ' "className": "\u2190 server will default to \'name\' when omitted",\n' + ' "position": "end"\n' + ' }\n' + ' },\n' + ' "ops": [\n' + ' {"op":"replace_method","required":["className","methodName","replacement"],"optional":["returnType","parametersSignature","attributesContains"],"examples":[{"note":"match overload by signature","parametersSignature":"(int a, string b)"},{"note":"ensure attributes retained","attributesContains":"ContextMenu"}]},\n' + ' {"op":"insert_method","required":["className","replacement"],"position":{"enum":["start","end","after","before"],"after_requires":"afterMethodName","before_requires":"beforeMethodName"}},\n' + ' {"op":"delete_method","required":["className","methodName"]},\n' + ' {"op":"anchor_insert","required":["anchor","text"],"notes":"regex; position=before|after"}\n' + ' ],\n' + ' "apply_text_edits_recipe": {\n' + ' "step1_read": { "tool": "resources/read", "args": {"uri": "unity://path/Assets/Scripts/Interaction/SmartReach.cs"} },\n' + ' "step2_apply": {\n' + ' "tool": "manage_script",\n' + ' "args": {\n' + ' "action": "apply_text_edits",\n' + ' "name": "SmartReach", "path": "Assets/Scripts/Interaction",\n' + ' "edits": [{"startLine": 42, "startCol": 1, "endLine": 42, "endCol": 1, "newText": "[MyAttr]\\n"}],\n' + ' "precondition_sha256": "",\n' + ' "options": {"refresh": "immediate", "validate": "standard"}\n' + ' }\n' + ' },\n' + ' "note": "newText is for apply_text_edits ranges only; use replacement in script_apply_edits ops."\n' + ' },\n' + ' "examples": [\n' + ' {\n' + ' "title": "Replace a method",\n' + ' "args": {\n' + ' "name": "SmartReach",\n' + ' "path": "Assets/Scripts/Interaction",\n' + ' "edits": [\n' + ' {"op":"replace_method","className":"SmartReach","methodName":"HasTarget","replacement":"public bool HasTarget() { return currentTarget != null; }"}\n' + ' ],\n' + ' "options": { "validate": "standard", "refresh": "immediate" }\n' + ' }\n' + ' },\n' + ' {\n' + ' "title": "Insert a method after another",\n' + ' "args": {\n' + ' "name": "SmartReach",\n' + ' "path": "Assets/Scripts/Interaction",\n' + ' 
"edits": [\n' + ' {"op":"insert_method","className":"SmartReach","replacement":"public void PrintSeries() { Debug.Log(seriesName); }","position":"after","afterMethodName":"GetCurrentTarget"}\n' + ' ]\n' + ' }\n' + ' }\n' + ' ]\n' + '}\n' + ) + sha = hashlib.sha256(spec_json.encode("utf-8")).hexdigest() + return {"success": True, "data": {"text": spec_json, "metadata": {"sha256": sha}}} - project = _resolve_project_root(project_root) - p = _resolve_safe_path_from_uri(uri, project) - if not p or not p.exists() or not p.is_file(): - return {"success": False, "error": f"Resource not found: {uri}"} - try: - p.relative_to(project / "Assets") - except ValueError: - return {"success": False, "error": "Read restricted to Assets/"} - # Natural-language convenience: request like "last 120 lines", "first 200 lines", - # "show 40 lines around MethodName", etc. - if request: - req = request.strip().lower() - m = re.search(r"last\s+(\d+)\s+lines", req) - if m: - tail_lines = int(m.group(1)) - m = re.search(r"first\s+(\d+)\s+lines", req) - if m: - start_line = 1 - line_count = int(m.group(1)) - m = re.search(r"first\s+(\d+)\s*bytes", req) - if m: - head_bytes = int(m.group(1)) - m = re.search( - r"show\s+(\d+)\s+lines\s+around\s+([A-Za-z_][A-Za-z0-9_]*)", req) - if m: - window = int(m.group(1)) - method = m.group(2) - # naive search for method header to get a line number - text_all = p.read_text(encoding="utf-8") - lines_all = text_all.splitlines() - pat = re.compile( - rf"^\s*(?:\[[^\]]+\]\s*)*(?:public|private|protected|internal|static|virtual|override|sealed|async|extern|unsafe|new|partial).*?\b{re.escape(method)}\s*\(", re.MULTILINE) - hit_line = None - for i, line in enumerate(lines_all, start=1): - if pat.search(line): - hit_line = i - break - if hit_line: - half = max(1, window // 2) - start_line = max(1, hit_line - half) - line_count = window + project = _resolve_project_root(project_root) + p = _resolve_safe_path_from_uri(uri, project) + if not p or not p.exists() or not p.is_file(): + return {"success": False, "error": f"Resource not found: {uri}"} + try: + p.relative_to(project / "Assets") + except ValueError: + return {"success": False, "error": "Read restricted to Assets/"} + # Natural-language convenience: request like "last 120 lines", "first 200 lines", + # "show 40 lines around MethodName", etc. 
+ if request: + req = request.strip().lower() + m = re.search(r"last\s+(\d+)\s+lines", req) + if m: + tail_lines = int(m.group(1)) + m = re.search(r"first\s+(\d+)\s+lines", req) + if m: + start_line = 1 + line_count = int(m.group(1)) + m = re.search(r"first\s+(\d+)\s*bytes", req) + if m: + head_bytes = int(m.group(1)) + m = re.search( + r"show\s+(\d+)\s+lines\s+around\s+([A-Za-z_][A-Za-z0-9_]*)", req) + if m: + window = int(m.group(1)) + method = m.group(2) + # naive search for method header to get a line number + text_all = p.read_text(encoding="utf-8") + lines_all = text_all.splitlines() + pat = re.compile( + rf"^\s*(?:\[[^\]]+\]\s*)*(?:public|private|protected|internal|static|virtual|override|sealed|async|extern|unsafe|new|partial).*?\b{re.escape(method)}\s*\(", re.MULTILINE) + hit_line = None + for i, line in enumerate(lines_all, start=1): + if pat.search(line): + hit_line = i + break + if hit_line: + half = max(1, window // 2) + start_line = max(1, hit_line - half) + line_count = window - # Coerce numeric inputs defensively (string/float -> int) - start_line = _coerce_int(start_line) - line_count = _coerce_int(line_count) - head_bytes = _coerce_int(head_bytes, minimum=1) - tail_lines = _coerce_int(tail_lines, minimum=1) + # Coerce numeric inputs defensively (string/float -> int) + start_line = _coerce_int(start_line) + line_count = _coerce_int(line_count) + head_bytes = _coerce_int(head_bytes, minimum=1) + tail_lines = _coerce_int(tail_lines, minimum=1) - # Compute SHA over full file contents (metadata-only default) - full_bytes = p.read_bytes() - full_sha = hashlib.sha256(full_bytes).hexdigest() + # Compute SHA over full file contents (metadata-only default) + full_bytes = p.read_bytes() + full_sha = hashlib.sha256(full_bytes).hexdigest() - # Selection only when explicitly requested via windowing args or request text hints - selection_requested = bool(head_bytes or tail_lines or ( - start_line is not None and line_count is not None) or request) - if selection_requested: - # Mutually exclusive windowing options precedence: - # 1) head_bytes, 2) tail_lines, 3) start_line+line_count, else full text - if head_bytes and head_bytes > 0: - raw = full_bytes[: head_bytes] - text = raw.decode("utf-8", errors="replace") - else: - text = full_bytes.decode("utf-8", errors="replace") - if tail_lines is not None and tail_lines > 0: - lines = text.splitlines() - n = max(0, tail_lines) - text = "\n".join(lines[-n:]) - elif start_line is not None and line_count is not None and line_count >= 0: - lines = text.splitlines() - s = max(0, start_line - 1) - e = min(len(lines), s + line_count) - text = "\n".join(lines[s:e]) - return {"success": True, "data": {"text": text, "metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}} + # Selection only when explicitly requested via windowing args or request text hints + selection_requested = bool(head_bytes or tail_lines or ( + start_line is not None and line_count is not None) or request) + if selection_requested: + # Mutually exclusive windowing options precedence: + # 1) head_bytes, 2) tail_lines, 3) start_line+line_count, else full text + if head_bytes and head_bytes > 0: + raw = full_bytes[: head_bytes] + text = raw.decode("utf-8", errors="replace") else: - # Default: metadata only - return {"success": True, "data": {"metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}} - except Exception as e: - return {"success": False, "error": str(e)} + text = full_bytes.decode("utf-8", errors="replace") + if tail_lines is not None and tail_lines > 
0: + lines = text.splitlines() + n = max(0, tail_lines) + text = "\n".join(lines[-n:]) + elif start_line is not None and line_count is not None and line_count >= 0: + lines = text.splitlines() + s = max(0, start_line - 1) + e = min(len(lines), s + line_count) + text = "\n".join(lines[s:e]) + return {"success": True, "data": {"text": text, "metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}} + else: + # Default: metadata only + return {"success": True, "data": {"metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}} + except Exception as e: + return {"success": False, "error": str(e)} - @mcp.tool(name="find_in_file", description="Searches a file with a regex pattern and returns line numbers and excerpts.") - @telemetry_tool("find_in_file") - async def find_in_file( - ctx: Context, - uri: Annotated[str, "The resource URI to search under Assets/ or file path form supported by read_resource"], - pattern: Annotated[str, "The regex pattern to search for"], - ignore_case: Annotated[bool, "Case-insensitive search"] | None = True, - project_root: Annotated[str, - "The project root directory"] | None = None, - max_results: Annotated[int, - "Cap results to avoid huge payloads"] = 200, - ) -> dict[str, Any]: - ctx.info(f"Processing find_in_file: {uri}") - try: - project = _resolve_project_root(project_root) - p = _resolve_safe_path_from_uri(uri, project) - if not p or not p.exists() or not p.is_file(): - return {"success": False, "error": f"Resource not found: {uri}"} - text = p.read_text(encoding="utf-8") - flags = re.MULTILINE - if ignore_case: - flags |= re.IGNORECASE - rx = re.compile(pattern, flags) +@mcp_for_unity_tool(description="Searches a file with a regex pattern and returns line numbers and excerpts.") +async def find_in_file( + ctx: Context, + uri: Annotated[str, "The resource URI to search under Assets/ or file path form supported by read_resource"], + pattern: Annotated[str, "The regex pattern to search for"], + ignore_case: Annotated[bool, "Case-insensitive search"] | None = True, + project_root: Annotated[str, + "The project root directory"] | None = None, + max_results: Annotated[int, + "Cap results to avoid huge payloads"] = 200, +) -> dict[str, Any]: + ctx.info(f"Processing find_in_file: {uri}") + try: + project = _resolve_project_root(project_root) + p = _resolve_safe_path_from_uri(uri, project) + if not p or not p.exists() or not p.is_file(): + return {"success": False, "error": f"Resource not found: {uri}"} - results = [] - max_results_int = _coerce_int(max_results, default=200, minimum=1) - lines = text.splitlines() - for i, line in enumerate(lines, start=1): - m = rx.search(line) - if m: - start_col = m.start() + 1 # 1-based - end_col = m.end() + 1 # 1-based, end exclusive - results.append({ - "startLine": i, - "startCol": start_col, - "endLine": i, - "endCol": end_col, - }) - if max_results_int and len(results) >= max_results_int: - break + text = p.read_text(encoding="utf-8") + flags = re.MULTILINE + if ignore_case: + flags |= re.IGNORECASE + rx = re.compile(pattern, flags) + + results = [] + max_results_int = _coerce_int(max_results, default=200, minimum=1) + lines = text.splitlines() + for i, line in enumerate(lines, start=1): + m = rx.search(line) + if m: + start_col = m.start() + 1 # 1-based + end_col = m.end() + 1 # 1-based, end exclusive + results.append({ + "startLine": i, + "startCol": start_col, + "endLine": i, + "endCol": end_col, + }) + if max_results_int and len(results) >= max_results_int: + break - return {"success": True, "data": 
{"matches": results, "count": len(results)}} - except Exception as e: - return {"success": False, "error": str(e)} + return {"success": True, "data": {"matches": results, "count": len(results)}} + except Exception as e: + return {"success": False, "error": str(e)} From 08213093251fae057536dde3f8505fe171016dd5 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 19:27:28 -0400 Subject: [PATCH 06/30] Create an attribute so we can identify tools via Reflection --- .../Editor/Tools/McpForUnityToolAttribute.cs | 37 +++++++++++++++++++ .../Tools/McpForUnityToolAttribute.cs.meta | 11 ++++++ 2 files changed, 48 insertions(+) create mode 100644 UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs create mode 100644 UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs.meta diff --git a/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs b/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs new file mode 100644 index 00000000..bb4e0431 --- /dev/null +++ b/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs @@ -0,0 +1,37 @@ +using System; + +namespace MCPForUnity.Editor.Tools +{ + /// + /// Marks a class as an MCP tool handler for auto-discovery. + /// The class must have a public static HandleCommand(JObject) method. + /// + [AttributeUsage(AttributeTargets.Class, AllowMultiple = false)] + public class McpForUnityToolAttribute : Attribute + { + /// + /// The command name used to route requests to this tool. + /// If not specified, defaults to the PascalCase class name converted to snake_case. + /// + public string CommandName { get; } + + /// + /// Create an MCP tool attribute with auto-generated command name. + /// The command name will be derived from the class name (PascalCase → snake_case). + /// Example: ManageAsset → manage_asset + /// + public McpForUnityToolAttribute() + { + CommandName = null; // Will be auto-generated + } + + /// + /// Create an MCP tool attribute with explicit command name. 
+ /// + /// The command name (e.g., "manage_asset") + public McpForUnityToolAttribute(string commandName) + { + CommandName = commandName; + } + } +} diff --git a/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs.meta b/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs.meta new file mode 100644 index 00000000..57242c17 --- /dev/null +++ b/UnityMcpBridge/Editor/Tools/McpForUnityToolAttribute.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 804d07b886f4e4eb39316bbef34687c7 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: From d036e363de6b041af6af5c5fd9d093eaab11cee4 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 19:27:50 -0400 Subject: [PATCH 07/30] Add attribute to all C# tools --- UnityMcpBridge/Editor/Tools/ManageAsset.cs | 1 + UnityMcpBridge/Editor/Tools/ManageEditor.cs | 1 + UnityMcpBridge/Editor/Tools/ManageGameObject.cs | 1 + UnityMcpBridge/Editor/Tools/ManageScene.cs | 1 + UnityMcpBridge/Editor/Tools/ManageScript.cs | 1 + UnityMcpBridge/Editor/Tools/ManageShader.cs | 1 + UnityMcpBridge/Editor/Tools/MenuItems/ManageMenuItem.cs | 1 + UnityMcpBridge/Editor/Tools/Prefabs/ManagePrefabs.cs | 1 + UnityMcpBridge/Editor/Tools/ReadConsole.cs | 1 + 9 files changed, 9 insertions(+) diff --git a/UnityMcpBridge/Editor/Tools/ManageAsset.cs b/UnityMcpBridge/Editor/Tools/ManageAsset.cs index 52a5bcac..1a952f37 100644 --- a/UnityMcpBridge/Editor/Tools/ManageAsset.cs +++ b/UnityMcpBridge/Editor/Tools/ManageAsset.cs @@ -22,6 +22,7 @@ namespace MCPForUnity.Editor.Tools /// /// Handles asset management operations within the Unity project. /// + [McpForUnityTool("manage_asset")] public static class ManageAsset { // --- Main Handler --- diff --git a/UnityMcpBridge/Editor/Tools/ManageEditor.cs b/UnityMcpBridge/Editor/Tools/ManageEditor.cs index f26502dd..f8255224 100644 --- a/UnityMcpBridge/Editor/Tools/ManageEditor.cs +++ b/UnityMcpBridge/Editor/Tools/ManageEditor.cs @@ -15,6 +15,7 @@ namespace MCPForUnity.Editor.Tools /// Handles operations related to controlling and querying the Unity Editor state, /// including managing Tags and Layers. /// + [McpForUnityTool("manage_editor")] public static class ManageEditor { // Constant for starting user layer index diff --git a/UnityMcpBridge/Editor/Tools/ManageGameObject.cs b/UnityMcpBridge/Editor/Tools/ManageGameObject.cs index 71a379b0..40504a87 100644 --- a/UnityMcpBridge/Editor/Tools/ManageGameObject.cs +++ b/UnityMcpBridge/Editor/Tools/ManageGameObject.cs @@ -19,6 +19,7 @@ namespace MCPForUnity.Editor.Tools /// /// Handles GameObject manipulation within the current scene (CRUD, find, components). /// + [McpForUnityTool("manage_gameobject")] public static class ManageGameObject { // Shared JsonSerializer to avoid per-call allocation overhead diff --git a/UnityMcpBridge/Editor/Tools/ManageScene.cs b/UnityMcpBridge/Editor/Tools/ManageScene.cs index beab65b2..6a310d02 100644 --- a/UnityMcpBridge/Editor/Tools/ManageScene.cs +++ b/UnityMcpBridge/Editor/Tools/ManageScene.cs @@ -14,6 +14,7 @@ namespace MCPForUnity.Editor.Tools /// /// Handles scene management operations like loading, saving, creating, and querying hierarchy. 
/// + [McpForUnityTool("manage_scene")] public static class ManageScene { private sealed class SceneCommand diff --git a/UnityMcpBridge/Editor/Tools/ManageScript.cs b/UnityMcpBridge/Editor/Tools/ManageScript.cs index 073b4b98..2d970486 100644 --- a/UnityMcpBridge/Editor/Tools/ManageScript.cs +++ b/UnityMcpBridge/Editor/Tools/ManageScript.cs @@ -49,6 +49,7 @@ namespace MCPForUnity.Editor.Tools /// Note: Without Roslyn, the system falls back to basic structural validation. /// Roslyn provides full C# compiler diagnostics with line numbers and detailed error messages. /// + [McpForUnityTool("manage_script")] public static class ManageScript { /// diff --git a/UnityMcpBridge/Editor/Tools/ManageShader.cs b/UnityMcpBridge/Editor/Tools/ManageShader.cs index bcd1a6aa..2d7f4d0a 100644 --- a/UnityMcpBridge/Editor/Tools/ManageShader.cs +++ b/UnityMcpBridge/Editor/Tools/ManageShader.cs @@ -12,6 +12,7 @@ namespace MCPForUnity.Editor.Tools /// /// Handles CRUD operations for shader files within the Unity project. /// + [McpForUnityTool("manage_shader")] public static class ManageShader { /// diff --git a/UnityMcpBridge/Editor/Tools/MenuItems/ManageMenuItem.cs b/UnityMcpBridge/Editor/Tools/MenuItems/ManageMenuItem.cs index 0f213c68..e4b7eaf7 100644 --- a/UnityMcpBridge/Editor/Tools/MenuItems/ManageMenuItem.cs +++ b/UnityMcpBridge/Editor/Tools/MenuItems/ManageMenuItem.cs @@ -4,6 +4,7 @@ namespace MCPForUnity.Editor.Tools.MenuItems { + [McpForUnityTool("manage_menu_item")] public static class ManageMenuItem { /// diff --git a/UnityMcpBridge/Editor/Tools/Prefabs/ManagePrefabs.cs b/UnityMcpBridge/Editor/Tools/Prefabs/ManagePrefabs.cs index aaf67b14..9e68d20e 100644 --- a/UnityMcpBridge/Editor/Tools/Prefabs/ManagePrefabs.cs +++ b/UnityMcpBridge/Editor/Tools/Prefabs/ManagePrefabs.cs @@ -9,6 +9,7 @@ namespace MCPForUnity.Editor.Tools.Prefabs { + [McpForUnityTool("manage_prefabs")] public static class ManagePrefabs { private const string SupportedActions = "open_stage, close_stage, save_open_stage, create_from_gameobject"; diff --git a/UnityMcpBridge/Editor/Tools/ReadConsole.cs b/UnityMcpBridge/Editor/Tools/ReadConsole.cs index e58f4e79..8a0147e3 100644 --- a/UnityMcpBridge/Editor/Tools/ReadConsole.cs +++ b/UnityMcpBridge/Editor/Tools/ReadConsole.cs @@ -14,6 +14,7 @@ namespace MCPForUnity.Editor.Tools /// Handles reading and clearing Unity Editor console log entries. /// Uses reflection to access internal LogEntry methods/properties. 
     ///
+    [McpForUnityTool("read_console")]
     public static class ReadConsole
     {
         // (Calibration removed)

From c84069b6635ad8abe84a81821f6b05e6d2820656 Mon Sep 17 00:00:00 2001
From: Marcus Sanatan
Date: Wed, 1 Oct 2025 20:49:55 -0400
Subject: [PATCH 08/30] Use reflection to load tools

---
 .../Editor/Tools/CommandRegistry.cs | 131 ++++++++++++++----
 1 file changed, 105 insertions(+), 26 deletions(-)

diff --git a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs
index 2503391d..21f13e2b 100644
--- a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs
+++ b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs
@@ -1,50 +1,129 @@
 using System;
 using System.Collections.Generic;
+using System.Linq;
+using System.Reflection;
+using System.Text.RegularExpressions;
+using MCPForUnity.Editor.Helpers;
 using Newtonsoft.Json.Linq;
-using MCPForUnity.Editor.Tools.MenuItems;
-using MCPForUnity.Editor.Tools.Prefabs;

 namespace MCPForUnity.Editor.Tools
 {
     ///
-    /// Registry for all MCP command handlers (Refactored Version)
+    /// Registry for all MCP command handlers via reflection.
     ///
     public static class CommandRegistry
     {
-        // Maps command names (matching those called from Python via ctx.bridge.unity_editor.HandlerName)
-        // to the corresponding static HandleCommand method in the appropriate tool class.
-        private static readonly Dictionary<string, Func<JObject, object>> _handlers = new()
+        private static readonly Dictionary<string, Func<JObject, object>> _handlers = new();
+        private static bool _initialized = false;
+
+        ///
+        /// Initialize and auto-discover all tools marked with [McpForUnityTool]
+        ///
+        public static void Initialize()
         {
-            { "manage_script", ManageScript.HandleCommand },
-            { "manage_scene", ManageScene.HandleCommand },
-            { "manage_editor", ManageEditor.HandleCommand },
-            { "manage_gameobject", ManageGameObject.HandleCommand },
-            { "manage_asset", ManageAsset.HandleCommand },
-            { "read_console", ReadConsole.HandleCommand },
-            { "manage_menu_item", ManageMenuItem.HandleCommand },
-            { "manage_shader", ManageShader.HandleCommand},
-            { "manage_prefabs", ManagePrefabs.HandleCommand},
-        };
+            if (_initialized) return;
+            _initialized = true;
+
+            AutoDiscoverTools();
+        }

         ///
-        /// Gets a command handler by name.
+        /// Convert PascalCase or camelCase to snake_case
         ///
-        /// Name of the command handler (e.g., "HandleManageAsset").
-        /// The command handler function if found, null otherwise.
-        public static Func<JObject, object> GetHandler(string commandName)
+        private static string ToSnakeCase(string name)
         {
-            if (!_handlers.TryGetValue(commandName, out var handler))
+            if (string.IsNullOrEmpty(name)) return name;
+
+            // Insert underscore before uppercase letters (except first)
+            var s1 = Regex.Replace(name, "(.)([A-Z][a-z]+)", "$1_$2");
+            var s2 = Regex.Replace(s1, "([a-z0-9])([A-Z])", "$1_$2");
+            return s2.ToLower();
+        }
+
+        ///
+        /// Auto-discover all types with [McpForUnityTool] attribute
+        ///
+        private static void AutoDiscoverTools()
+        {
+            try
             {
-                throw new InvalidOperationException(
-                    $"Unknown or unsupported command type: {commandName}");
+                var toolTypes = AppDomain.CurrentDomain.GetAssemblies()
+                    .Where(a => !a.IsDynamic)
+                    .SelectMany(a =>
+                    {
+                        try { return a.GetTypes(); }
+                        catch { return new Type[0]; }
+                    })
+                    .Where(t => t.GetCustomAttribute<McpForUnityToolAttribute>() != null);
+
+                foreach (var type in toolTypes)
+                {
+                    RegisterToolType(type);
+                }
+
+                McpLog.Info($"Auto-discovered {_handlers.Count} tools");
             }
+            catch (Exception ex)
+            {
+                McpLog.Error($"Failed to auto-discover MCP tools: {ex.Message}");
+            }
+        }

-            return handler;
+        private static void RegisterToolType(Type type)
+        {
+            var attr = type.GetCustomAttribute<McpForUnityToolAttribute>();
+
+            // Get command name (explicit or auto-generated)
+            string commandName = attr.CommandName;
+            if (string.IsNullOrEmpty(commandName))
+            {
+                commandName = ToSnakeCase(type.Name);
+            }
+
+            // Find HandleCommand method
+            var method = type.GetMethod(
+                "HandleCommand",
+                BindingFlags.Public | BindingFlags.Static,
+                null,
+                new[] { typeof(JObject) },
+                null
+            );
+
+            if (method == null)
+            {
+                McpLog.Warn(
+                    $"MCP tool {type.Name} is marked with [McpForUnityTool] " +
+                    $"but has no public static HandleCommand(JObject) method"
+                );
+                return;
+            }
+
+            try
+            {
+                var handler = (Func<JObject, object>)Delegate.CreateDelegate(
+                    typeof(Func<JObject, object>),
+                    method
+                );
+                _handlers[commandName] = handler;
+            }
+            catch (Exception ex)
+            {
+                McpLog.Error($"Failed to register tool {type.Name}: {ex.Message}");
+            }
         }

-        public static void Add(string commandName, Func<JObject, object> handler)
+        ///
+        /// Get a command handler by name
+        ///
+        public static Func<JObject, object> GetHandler(string commandName)
         {
-            _handlers.Add(commandName, handler);
+            if (!_handlers.TryGetValue(commandName, out var handler))
+            {
+                throw new InvalidOperationException(
+                    $"Unknown or unsupported command type: {commandName}"
+                );
+            }
+            return handler;
         }
     }
 }

From b7f20704d90d9d6e8b04aee0f1c2ef5609d09cb8 Mon Sep 17 00:00:00 2001
From: Marcus Sanatan
Date: Wed, 1 Oct 2025 20:50:26 -0400
Subject: [PATCH 09/30] Initialize command registry to load tools at startup

---
 UnityMcpBridge/Editor/MCPForUnityBridge.cs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/UnityMcpBridge/Editor/MCPForUnityBridge.cs b/UnityMcpBridge/Editor/MCPForUnityBridge.cs
index 39312ce4..dcc469b0 100644
--- a/UnityMcpBridge/Editor/MCPForUnityBridge.cs
+++ b/UnityMcpBridge/Editor/MCPForUnityBridge.cs
@@ -387,6 +387,7 @@ public static void Start()
             // Start background listener with cooperative cancellation
             cts = new CancellationTokenSource();
             listenerTask = Task.Run(() => ListenerLoopAsync(cts.Token));
+            CommandRegistry.Initialize();
             EditorApplication.update += ProcessCommands;
             // Ensure lifecycle events are (re)subscribed in case Stop() removed them earlier in-domain
             try { AssemblyReloadEvents.beforeAssemblyReload -= OnBeforeAssemblyReload; } catch { }

From 2e9aa06ad9f92507cb392d4d8eeb539f255edd50 Mon Sep 17 00:00:00 2001
From: Marcus Sanatan
Date: Wed, 1 Oct 2025 21:13:39 -0400
Subject: [PATCH 10/30] Update tests

---
 .../EditMode/Tools/CommandRegistryTests.cs | 50 +++++++++++--------
 1 file changed, 30 insertions(+), 20 deletions(-)

diff --git a/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Tools/CommandRegistryTests.cs b/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Tools/CommandRegistryTests.cs
index 2bbe4616..aed1c964 100644
--- a/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Tools/CommandRegistryTests.cs
+++ b/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Tools/CommandRegistryTests.cs
@@ -1,5 +1,8 @@
 using System;
+using System.Collections.Generic;
+using System.Linq;
 using Newtonsoft.Json;
+using Newtonsoft.Json.Linq;
 using NUnit.Framework;
 using MCPForUnity.Editor.Tools;

@@ -8,34 +11,41 @@ namespace MCPForUnityTests.Editor.Tools
     public class CommandRegistryTests
     {
         [Test]
-        public void GetHandler_ThrowException_ForUnknownCommand()
+        public void GetHandler_ThrowsException_ForUnknownCommand()
         {
-            var unknown = "HandleDoesNotExist";
-            try
-            {
-                var handler = CommandRegistry.GetHandler(unknown);
-                Assert.Fail("Should throw InvalidOperation for unknown handler.");
-            }
-            catch (InvalidOperationException)
-            {
+            var unknown = "nonexistent_command_that_should_not_exist";

-            }
-            catch
+            Assert.Throws<InvalidOperationException>(() =>
             {
-                Assert.Fail("Should throw InvalidOperation for unknown handler.");
-            }
+                CommandRegistry.GetHandler(unknown);
+            }, "Should throw InvalidOperationException for unknown handler");
         }

         [Test]
-        public void GetHandler_ReturnsManageGameObjectHandler()
+        public void AutoDiscovery_RegistersAllBuiltInTools()
         {
-            var handler = CommandRegistry.GetHandler("manage_gameobject");
-            Assert.IsNotNull(handler, "Expected a handler for manage_gameobject.");
+            // Verify that all expected built-in tools are registered by trying to get their handlers
+            var expectedTools = new[]
+            {
+                "manage_asset",
+                "manage_editor",
+                "manage_gameobject",
+                "manage_scene",
+                "manage_script",
+                "manage_shader",
+                "read_console",
+                "manage_menu_item",
+                "manage_prefabs"
+            };

-            var methodInfo = handler.Method;
-            Assert.AreEqual("HandleCommand", methodInfo.Name, "Handler method name should be HandleCommand.");
-            Assert.AreEqual(typeof(ManageGameObject), methodInfo.DeclaringType, "Handler should be declared on ManageGameObject.");
-            Assert.IsNull(handler.Target, "Handler should be a static method (no target instance).");
+            foreach (var toolName in expectedTools)
+            {
+                Assert.DoesNotThrow(() =>
+                {
+                    var handler = CommandRegistry.GetHandler(toolName);
+                    Assert.IsNotNull(handler, $"Handler for '{toolName}' should not be null");
+                }, $"Expected tool '{toolName}' to be auto-registered");
+            }
         }
     }
 }

From 084c27eff49f70c3609cf2c807e00587e8b38a95 Mon Sep 17 00:00:00 2001
From: Marcus Sanatan
Date: Wed, 1 Oct 2025 21:17:41 -0400
Subject: [PATCH 11/30] Move Dev docs to docs folder

---
 README-zh.md | 2 +-
 README.md | 2 +-
 README-DEV-zh.md => docs/README-DEV-zh.md | 2 +-
 README-DEV.md => docs/README-DEV.md | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
 rename README-DEV-zh.md => docs/README-DEV-zh.md (99%)
 rename README-DEV.md => docs/README-DEV.md (99%)

diff --git a/README-zh.md b/README-zh.md
index c5f0860b..b469a537 100644
--- a/README-zh.md
+++ b/README-zh.md
@@ -278,7 +278,7 @@ claude mcp add UnityMCP -- "C:/Users/USERNAME/AppData/Local/Microsoft/WinGet/Lin
 - **自动备份系统**:具有简单回滚功能的安全测试
 - **热重载工作流程**:核心开发的快速迭代周期

-📖 **查看 [README-DEV.md](README-DEV.md)** 获取完整的开发设置和工作流程文档。
+📖 **查看 [README-DEV.md](docs/README-DEV.md)** 获取完整的开发设置和工作流程文档。

 ### 贡献 🤝

diff --git a/README.md b/README.md
index f06217f4..a59a4c47 100644
--- a/README.md
+++
b/README.md @@ -281,7 +281,7 @@ If you're contributing to MCP for Unity or want to test core changes, we have de - **Automatic Backup System**: Safe testing with easy rollback capabilities - **Hot Reload Workflow**: Fast iteration cycle for core development -📖 **See [README-DEV.md](README-DEV.md)** for complete development setup and workflow documentation. +📖 **See [README-DEV.md](docs/README-DEV.md)** for complete development setup and workflow documentation. ### Contributing 🤝 diff --git a/README-DEV-zh.md b/docs/README-DEV-zh.md similarity index 99% rename from README-DEV-zh.md rename to docs/README-DEV-zh.md index 1513cf95..6377fa47 100644 --- a/README-DEV-zh.md +++ b/docs/README-DEV-zh.md @@ -1,6 +1,6 @@ # MCP for Unity 开发工具 -| [English](README-DEV.md) | [简体中文](README-DEV-zh.md) | +| [English](docs/README-DEV.md) | [简体中文](docs/README-DEV-zh.md) | |---------------------------|------------------------------| 欢迎来到 MCP for Unity 开发环境!此目录包含简化 MCP for Unity 核心开发的工具和实用程序。 diff --git a/README-DEV.md b/docs/README-DEV.md similarity index 99% rename from README-DEV.md rename to docs/README-DEV.md index ddba6011..1ffd167e 100644 --- a/README-DEV.md +++ b/docs/README-DEV.md @@ -1,6 +1,6 @@ # MCP for Unity Development Tools -| [English](README-DEV.md) | [简体中文](README-DEV-zh.md) | +| [English](docs/README-DEV.md) | [简体中文](docs/README-DEV-zh.md) | |---------------------------|------------------------------| Welcome to the MCP for Unity development environment! This directory contains tools and utilities to streamline MCP for Unity core development. From bc5695e6089b8cac0c36fe3d5d4f9ea0cf9ee495 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 21:17:49 -0400 Subject: [PATCH 12/30] Add docs for adding custom tools --- docs/CUSTOM_TOOLS.md | 251 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 251 insertions(+) create mode 100644 docs/CUSTOM_TOOLS.md diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md new file mode 100644 index 00000000..6cd3674e --- /dev/null +++ b/docs/CUSTOM_TOOLS.md @@ -0,0 +1,251 @@ +# Adding Custom Tools to Unity MCP + +Unity MCP now supports auto-discovery of custom tools using decorators (Python) and attributes (C#). This allows you to easily extend the MCP server with your own tools without modifying core files. + +## Python Side (MCP Server) + +### Creating a Custom Tool + +1. **Create a new Python file** in `UnityMcpBridge/UnityMcpServer~/src/tools/` (or any location that gets imported) + +2. **Use the `@mcp_for_unity_tool` decorator**: + +```python +from typing import Annotated, Any +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool +from unity_connection import send_command_with_retry + +@mcp_for_unity_tool( + description="My custom tool that does something amazing" +) +def my_custom_tool( + ctx: Context, + param1: Annotated[str, "Description of param1"], + param2: Annotated[int, "Description of param2"] | None = None +) -> dict[str, Any]: + ctx.info(f"Processing my_custom_tool: {param1}") + + # Prepare parameters for Unity + params = { + "action": "do_something", + "param1": param1, + "param2": param2, + } + params = {k: v for k, v in params.items() if v is not None} + + # Send to Unity handler + response = send_command_with_retry("my_custom_tool", params) + return response if isinstance(response, dict) else {"success": False, "message": str(response)} +``` + +3. 
**The tool is automatically registered!** The decorator: + - Auto-generates the tool name from the function name (e.g., `my_custom_tool`) + - Applies telemetry tracking automatically + - Registers the tool with FastMCP during module import + +### Decorator Options + +```python +@mcp_for_unity_tool( + name="custom_name", # Optional: override auto-generated name + description="Tool description", # Required: describe what the tool does + enable_telemetry=True # Optional: enable/disable telemetry (default: True) +) +``` + +### Auto-Discovery + +Tools are automatically discovered when: +- The Python file is in the `tools/` directory +- The file is imported during server startup +- The decorator `@mcp_for_unity_tool` is used + +## C# Side (Unity Editor) + +### Creating a Custom Tool Handler + +1. **Create a new C# file** anywhere in your Unity project (typically in `Editor/`) + +2. **Add the `[McpForUnityTool]` attribute** and implement `HandleCommand`: + +```csharp +using Newtonsoft.Json.Linq; +using MCPForUnity.Editor.Helpers; + +namespace MyProject.Editor.CustomTools +{ + [McpForUnityTool("my_custom_tool")] + public static class MyCustomTool + { + public static object HandleCommand(JObject @params) + { + string action = @params["action"]?.ToString(); + string param1 = @params["param1"]?.ToString(); + int? param2 = @params["param2"]?.ToObject(); + + // Your custom logic here + if (string.IsNullOrEmpty(param1)) + { + return Response.Error("param1 is required"); + } + + // Do something amazing + DoSomethingAmazing(param1, param2); + + return Response.Success("Custom tool executed successfully!"); + } + + private static void DoSomethingAmazing(string param1, int? param2) + { + // Your implementation + } + } +} +``` + +3. **The tool is automatically registered!** Unity will discover it via reflection on startup. 
+ +### Attribute Options + +```csharp +// Explicit command name +[McpForUnityTool("my_custom_tool")] +public static class MyCustomTool { } + +// Auto-generated from class name (MyCustomTool → my_custom_tool) +[McpForUnityTool] +public static class MyCustomTool { } +``` + +### Auto-Discovery + +Tools are automatically discovered when: +- The class has the `[McpForUnityTool]` attribute +- The class has a `public static HandleCommand(JObject)` method +- Unity loads the assembly containing the class + +## Complete Example: Custom Screenshot Tool + +### Python (`tools/screenshot_tool.py`) + +```python +from typing import Annotated, Any +from mcp.server.fastmcp import Context +from registry import mcp_for_unity_tool +from unity_connection import send_command_with_retry + +@mcp_for_unity_tool( + description="Capture screenshots in Unity" +) +def capture_screenshot( + ctx: Context, + filename: Annotated[str, "Screenshot filename"], + width: Annotated[int, "Screenshot width"] | None = None, + height: Annotated[int, "Screenshot height"] | None = None, +) -> dict[str, Any]: + ctx.info(f"Capturing screenshot: {filename}") + + params = { + "action": "capture", + "filename": filename, + "width": width, + "height": height, + } + params = {k: v for k, v in params.items() if v is not None} + + response = send_command_with_retry("capture_screenshot", params) + return response if isinstance(response, dict) else {"success": False, "message": str(response)} +``` + +### C# (`Editor/CaptureScreenshotTool.cs`) + +```csharp +using System.IO; +using Newtonsoft.Json.Linq; +using UnityEditor; +using UnityEngine; +using MCPForUnity.Editor.Helpers; + +namespace MyProject.Editor.Tools +{ + [McpForUnityTool("capture_screenshot")] + public static class CaptureScreenshotTool + { + public static object HandleCommand(JObject @params) + { + string action = @params["action"]?.ToString(); + string filename = @params["filename"]?.ToString(); + int width = @params["width"]?.ToObject() ?? 1920; + int height = @params["height"]?.ToObject() ?? 
1080; + + if (string.IsNullOrEmpty(filename)) + { + return Response.Error("filename is required"); + } + + try + { + string path = Path.Combine(Application.dataPath, "..", "Screenshots", filename); + Directory.CreateDirectory(Path.GetDirectoryName(path)); + + ScreenCapture.CaptureScreenshot(path, width, height); + + return Response.Success($"Screenshot saved to {path}", new + { + path = path, + width = width, + height = height + }); + } + catch (System.Exception ex) + { + return Response.Error($"Failed to capture screenshot: {ex.Message}"); + } + } + } +} +``` + +## Best Practices + +### Python +- ✅ Use type hints with `Annotated` for parameter documentation +- ✅ Return `dict[str, Any]` with `{"success": bool, "message": str, "data": Any}` +- ✅ Use `ctx.info()` for logging +- ✅ Handle errors gracefully and return structured error responses +- ✅ Use `send_command_with_retry()` for Unity communication + +### C# +- ✅ Use the `Response.Success()` and `Response.Error()` helper methods +- ✅ Validate input parameters before processing +- ✅ Use `@params["key"]?.ToObject()` for safe type conversion +- ✅ Return structured responses with meaningful data +- ✅ Handle exceptions and return error responses + +## Debugging + +### Python +- Check server logs: `~/Library/Application Support/UnityMCP/Logs/unity_mcp_server.log` +- Look for: `"Registered X MCP tools"` message on startup +- Use `ctx.info()` for debugging messages + +### C# +- Check Unity Console for: `"MCP-FOR-UNITY: Auto-discovered X tools"` message +- Look for warnings about missing `HandleCommand` methods +- Use `Debug.Log()` in your handler for debugging + +## Troubleshooting + +**Tool not appearing:** +- Python: Ensure the file is in `tools/` directory and imports the decorator +- C#: Ensure the class has `[McpForUnityTool]` attribute and `HandleCommand` method + +**Name conflicts:** +- Use explicit names in decorators/attributes to avoid conflicts +- Check registered tools: `CommandRegistry.GetAllCommandNames()` in C# + +**Tool not being called:** +- Verify the command name matches between Python and C# +- Check that parameters are being passed correctly +- Look for errors in logs From f154e43f641d627273cb8ed34df08ac3c38c9bb4 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 21:38:47 -0400 Subject: [PATCH 13/30] Update function docs for Python decorator --- .../UnityMcpServer~/src/registry/tool_registry.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py index a7dee182..bbe36439 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py +++ b/UnityMcpBridge/UnityMcpServer~/src/registry/tool_registry.py @@ -13,16 +13,13 @@ def mcp_for_unity_tool( **kwargs ) -> Callable: """ - Decorator for registering MCP tools with auto-discovery. + Decorator for registering MCP tools in the server's tools directory. - Automatically applies: - - Telemetry tracking (if available and enabled) - - Registration in the global tool registry + Tools are registered in the global tool registry. 
Args: name: Tool name (defaults to function name) description: Tool description - enable_telemetry: Whether to enable telemetry for this tool (default: True) **kwargs: Additional arguments passed to @mcp.tool() Example: From 1e13517213fef6ba7fe8da96761dc528316046d5 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Wed, 1 Oct 2025 22:04:02 -0400 Subject: [PATCH 14/30] Add working example of adding a screenshot tool --- docs/CUSTOM_TOOLS.md | 68 ++++++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md index 6cd3674e..674e9fc1 100644 --- a/docs/CUSTOM_TOOLS.md +++ b/docs/CUSTOM_TOOLS.md @@ -41,7 +41,6 @@ def my_custom_tool( 3. **The tool is automatically registered!** The decorator: - Auto-generates the tool name from the function name (e.g., `my_custom_tool`) - - Applies telemetry tracking automatically - Registers the tool with FastMCP during module import ### Decorator Options @@ -50,7 +49,6 @@ def my_custom_tool( @mcp_for_unity_tool( name="custom_name", # Optional: override auto-generated name description="Tool description", # Required: describe what the tool does - enable_telemetry=True # Optional: enable/disable telemetry (default: True) ) ``` @@ -131,26 +129,25 @@ Tools are automatically discovered when: ```python from typing import Annotated, Any + from mcp.server.fastmcp import Context + from registry import mcp_for_unity_tool from unity_connection import send_command_with_retry + @mcp_for_unity_tool( - description="Capture screenshots in Unity" + description="Capture screenshots in Unity, saving them as PNGs" ) def capture_screenshot( ctx: Context, - filename: Annotated[str, "Screenshot filename"], - width: Annotated[int, "Screenshot width"] | None = None, - height: Annotated[int, "Screenshot height"] | None = None, + filename: Annotated[str, "Screenshot filename without extension, e.g., screenshot_01"], ) -> dict[str, Any]: ctx.info(f"Capturing screenshot: {filename}") params = { "action": "capture", "filename": filename, - "width": width, - "height": height, } params = {k: v for k, v in params.items() if v is not None} @@ -163,9 +160,8 @@ def capture_screenshot( ```csharp using System.IO; using Newtonsoft.Json.Linq; -using UnityEditor; using UnityEngine; -using MCPForUnity.Editor.Helpers; +using MCPForUnity.Editor.Tools; namespace MyProject.Editor.Tools { @@ -174,33 +170,61 @@ namespace MyProject.Editor.Tools { public static object HandleCommand(JObject @params) { - string action = @params["action"]?.ToString(); string filename = @params["filename"]?.ToString(); - int width = @params["width"]?.ToObject() ?? 1920; - int height = @params["height"]?.ToObject() ?? 
1080; if (string.IsNullOrEmpty(filename)) { - return Response.Error("filename is required"); + return MCPForUnity.Editor.Helpers.Response.Error("filename is required"); } try { - string path = Path.Combine(Application.dataPath, "..", "Screenshots", filename); - Directory.CreateDirectory(Path.GetDirectoryName(path)); + string absolutePath = Path.Combine(Application.dataPath, "Screenshots", filename); + Directory.CreateDirectory(Path.GetDirectoryName(absolutePath)); + + // Find the main camera + Camera camera = Camera.main; + if (camera == null) + { + camera = Object.FindFirstObjectByType(); + } + + if (camera == null) + { + return MCPForUnity.Editor.Helpers.Response.Error("No camera found in the scene"); + } + + // Create a RenderTexture + RenderTexture rt = new RenderTexture(Screen.width, Screen.height, 24); + camera.targetTexture = rt; + + // Render the camera's view + camera.Render(); + + // Read pixels from the RenderTexture + RenderTexture.active = rt; + Texture2D screenshot = new Texture2D(Screen.width, Screen.height, TextureFormat.RGB24, false); + screenshot.ReadPixels(new Rect(0, 0, Screen.width, Screen.height), 0, 0); + screenshot.Apply(); + + // Clean up + camera.targetTexture = null; + RenderTexture.active = null; + Object.DestroyImmediate(rt); - ScreenCapture.CaptureScreenshot(path, width, height); + // Save to file + byte[] bytes = screenshot.EncodeToPNG(); + File.WriteAllBytes(absolutePath, bytes); + Object.DestroyImmediate(screenshot); - return Response.Success($"Screenshot saved to {path}", new + return MCPForUnity.Editor.Helpers.Response.Success($"Screenshot saved to {absolutePath}", new { - path = path, - width = width, - height = height + path = absolutePath, }); } catch (System.Exception ex) { - return Response.Error($"Failed to capture screenshot: {ex.Message}"); + return MCPForUnity.Editor.Helpers.Response.Error($"Failed to capture screenshot: {ex.Message}"); } } } From bf6480c14c6ebea4f06ce5015cbce8d77b85fcfb Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Thu, 2 Oct 2025 16:47:51 -0400 Subject: [PATCH 15/30] docs: update relative links in README files Updated the relative links in both README-DEV.md and README-DEV-zh.md to use direct filenames instead of paths relative to the docs directory, improving link correctness when files are accessed from the root directory. --- docs/README-DEV-zh.md | 2 +- docs/README-DEV.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README-DEV-zh.md b/docs/README-DEV-zh.md index 6377fa47..1513cf95 100644 --- a/docs/README-DEV-zh.md +++ b/docs/README-DEV-zh.md @@ -1,6 +1,6 @@ # MCP for Unity 开发工具 -| [English](docs/README-DEV.md) | [简体中文](docs/README-DEV-zh.md) | +| [English](README-DEV.md) | [简体中文](README-DEV-zh.md) | |---------------------------|------------------------------| 欢迎来到 MCP for Unity 开发环境!此目录包含简化 MCP for Unity 核心开发的工具和实用程序。 diff --git a/docs/README-DEV.md b/docs/README-DEV.md index 1ffd167e..ddba6011 100644 --- a/docs/README-DEV.md +++ b/docs/README-DEV.md @@ -1,6 +1,6 @@ # MCP for Unity Development Tools -| [English](docs/README-DEV.md) | [简体中文](docs/README-DEV-zh.md) | +| [English](README-DEV.md) | [简体中文](README-DEV-zh.md) | |---------------------------|------------------------------| Welcome to the MCP for Unity development environment! This directory contains tools and utilities to streamline MCP for Unity core development. 
From 2f873576205c74063d595c866f6ccbb9eac2fb58 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Thu, 2 Oct 2025 16:54:12 -0400 Subject: [PATCH 16/30] docs: update telemetry documentation path reference Updated the link to TELEMETRY.md in README.md to point to the new docs/ directory location to ensure users can access the telemetry documentation correctly. Also moved the TELEMETRY.md file to the docs/ directory as part of the documentation restructuring. --- README-zh.md | 2 +- README.md | 2 +- TELEMETRY.md => docs/TELEMETRY.md | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename TELEMETRY.md => docs/TELEMETRY.md (100%) diff --git a/README-zh.md b/README-zh.md index b469a537..a7e700fd 100644 --- a/README-zh.md +++ b/README-zh.md @@ -299,7 +299,7 @@ Unity MCP 包含**注重隐私的匿名遥测**来帮助我们改进产品。我 - **🔒 匿名**:仅随机 UUID,无个人数据 - **🚫 轻松退出**:设置 `DISABLE_TELEMETRY=true` 环境变量 -- **📖 透明**:查看 [TELEMETRY.md](TELEMETRY.md) 获取完整详情 +- **📖 透明**:查看 [TELEMETRY.md](docs/TELEMETRY.md) 获取完整详情 您的隐私对我们很重要。所有遥测都是可选的,旨在尊重您的工作流程。 diff --git a/README.md b/README.md index a59a4c47..93da9a9f 100644 --- a/README.md +++ b/README.md @@ -302,7 +302,7 @@ Unity MCP includes **privacy-focused, anonymous telemetry** to help us improve t - **🔒 Anonymous**: Random UUIDs only, no personal data - **🚫 Easy opt-out**: Set `DISABLE_TELEMETRY=true` environment variable -- **📖 Transparent**: See [TELEMETRY.md](TELEMETRY.md) for full details +- **📖 Transparent**: See [TELEMETRY.md](docs/TELEMETRY.md) for full details Your privacy matters to us. All telemetry is optional and designed to respect your workflow. diff --git a/TELEMETRY.md b/docs/TELEMETRY.md similarity index 100% rename from TELEMETRY.md rename to docs/TELEMETRY.md From 8173e01a80888901f1e5a43acfd0e53904162604 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Thu, 2 Oct 2025 16:54:17 -0400 Subject: [PATCH 17/30] rename CursorHelp.md to docs/CURSOR_HELP.md Moved the CursorHelp.md file to the docs directory to better organize documentation files and improve project structure. 
--- CursorHelp.md => docs/CURSOR_HELP.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename CursorHelp.md => docs/CURSOR_HELP.md (100%) diff --git a/CursorHelp.md b/docs/CURSOR_HELP.md similarity index 100% rename from CursorHelp.md rename to docs/CURSOR_HELP.md From d46b0e674726edf24ffd59a8e40e3c75308b53bc Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Thu, 2 Oct 2025 17:21:44 -0400 Subject: [PATCH 18/30] docs: update CUSTOM_TOOLS.md with improved tool naming documentation and path corrections - Clarified that the `name` argument in `@mcp_for_unity_tool` decorator is optional and defaults to the function name - Added documentation about using all FastMCP `mcp.tool` function decorator options - Updated class naming documentation to mention snake_case conversion by default - Corrected Python file path from `tools/screenshot_tool.py` to `UnityMcpServer~/src/tools/screenshot_tool.py` - Enhanced documentation for tool discovery and usage examples --- docs/CUSTOM_TOOLS.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md index 674e9fc1..40ac7e5a 100644 --- a/docs/CUSTOM_TOOLS.md +++ b/docs/CUSTOM_TOOLS.md @@ -47,11 +47,13 @@ def my_custom_tool( ```python @mcp_for_unity_tool( - name="custom_name", # Optional: override auto-generated name + name="custom_name", # Optional: the function name is used by default description="Tool description", # Required: describe what the tool does ) ``` +You can use all options available in FastMCP's `mcp.tool` function decorator: . The `description` is the only required field. + ### Auto-Discovery Tools are automatically discovered when: @@ -73,6 +75,7 @@ using MCPForUnity.Editor.Helpers; namespace MyProject.Editor.CustomTools { + // The name argument is optional, it uses a snake_case version of the class name by default [McpForUnityTool("my_custom_tool")] public static class MyCustomTool { @@ -125,7 +128,7 @@ Tools are automatically discovered when: ## Complete Example: Custom Screenshot Tool -### Python (`tools/screenshot_tool.py`) +### Python (`UnityMcpServer~/src/tools/screenshot_tool.py`) ```python from typing import Annotated, Any From 7b5c1561494be30eaa890432778e4f39b513a376 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Thu, 2 Oct 2025 18:05:38 -0400 Subject: [PATCH 19/30] docs: restructure development documentation and add custom tools guide Rearranged the development section in README.md to better organize the documentation flow. Added a dedicated section for "Adding Custom Tools" with a link to the new CUSTOM_TOOLS.md file, and renamed the previous "For Developers" section to "Contributing to the Project" to better reflect its content. This improves discoverability and organization of the development setup documentation. --- README-zh.md | 6 +++++- README.md | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/README-zh.md b/README-zh.md index a7e700fd..62c6ae01 100644 --- a/README-zh.md +++ b/README-zh.md @@ -270,7 +270,11 @@ claude mcp add UnityMCP -- "C:/Users/USERNAME/AppData/Local/Microsoft/WinGet/Lin ## 开发和贡献 🛠️ -### 开发者 +### 添加自定义工具 + +MCP for Unity 使用与 Unity 的 C# 脚本绑定的 Python MCP 服务器来实现工具功能。如果您想使用自己的工具扩展功能,请参阅 **[CUSTOM_TOOLS.md](docs/CUSTOM_TOOLS.md)** 了解如何操作。 + +### 贡献项目 如果您正在为 MCP for Unity 做贡献或想要测试核心更改,我们有开发工具来简化您的工作流程: diff --git a/README.md b/README.md index 93da9a9f..074b0dc6 100644 --- a/README.md +++ b/README.md @@ -273,7 +273,11 @@ On Windows, set `command` to the absolute shim, e.g. 
`C:\\Users\\YOU\\AppData\\L ## Development & Contributing 🛠️ -### For Developers +### Adding Custom Tools + +MCP for Unity uses a Python MCP Server tied with Unity's C# scripts for tools. If you'd like to extend the functionality with your own tools, learn how to do so in **[CUSTOM_TOOLS.md](docs/CUSTOM_TOOLS.md)**. + +### Contributing to the Project If you're contributing to MCP for Unity or want to test core changes, we have development tools to streamline your workflow: From 0d07efdf674075aaa3dc013aee6522e1e7e0d300 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 16:57:43 -0400 Subject: [PATCH 20/30] docs: update developer documentation and add README links MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Added links to developer READMEs in CUSTOM_TOOLS.md to guide users to the appropriate documentation - Fixed typo in README-DEV.md ("roote" → "root") for improved clarity - These changes improve the developer experience by providing better documentation navigation and correcting technical inaccuracies --- docs/CUSTOM_TOOLS.md | 5 +++++ docs/README-DEV.md | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md index 40ac7e5a..c6074f0d 100644 --- a/docs/CUSTOM_TOOLS.md +++ b/docs/CUSTOM_TOOLS.md @@ -2,6 +2,11 @@ Unity MCP now supports auto-discovery of custom tools using decorators (Python) and attributes (C#). This allows you to easily extend the MCP server with your own tools without modifying core files. +Be sure to review the developer README first: + +| [English](README-DEV.md) | [简体中文](README-DEV-zh.md) | +|---------------------------|------------------------------| + ## Python Side (MCP Server) ### Creating a Custom Tool diff --git a/docs/README-DEV.md b/docs/README-DEV.md index ddba6011..3bc63566 100644 --- a/docs/README-DEV.md +++ b/docs/README-DEV.md @@ -21,7 +21,7 @@ Quick deployment and testing tools for MCP for Unity core changes. ## Switching MCP package sources quickly -Run this from the unity-mcp repo, not your game's roote directory. Use `mcp_source.py` to quickly switch between different MCP for Unity package sources: +Run this from the unity-mcp repo, not your game's root directory. Use `mcp_source.py` to quickly switch between different MCP for Unity package sources: **Usage:** ```bash From f79fda6a72019105c13d78e62f6f2c5ceca34261 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 17:27:00 -0400 Subject: [PATCH 21/30] feat(tools): enhance tool registration with wrapped function assignment Updated the tool registration process to properly chain the mcp.tool decorator and telemetry wrapper, ensuring the wrapped function is correctly assigned to tool_info['func'] for proper tool execution and telemetry tracking. This change improves the reliability of tool registration and monitoring. 
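Editor's note: the fix this commit describes comes down to ordinary decorator composition. Applying two wrappers to the same function independently produces two unrelated callables, and whichever one is discarded takes its behaviour with it. A toy sketch of the pattern, not project code:

```python
def with_telemetry(func):
    """Stand-in for the telemetry wrapper."""
    def wrapper(*args, **kwargs):
        print(f"telemetry: {func.__name__} called")
        return func(*args, **kwargs)
    return wrapper


def register(func):
    """Stand-in for a registration decorator; returns the callable the server will invoke."""
    return func


def ping():
    return "pong"


# Broken: each wrapper sees the original function, so the telemetry-wrapped
# copy is created and then thrown away.
registered = register(ping)
with_telemetry(ping)

# Fixed: chain the wrappers so the object kept for execution carries both layers.
registered = with_telemetry(register(ping))
registered()  # prints the telemetry line, then returns "pong"
```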
--- UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py b/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py index c7507fc5..6ede53d3 100644 --- a/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py +++ b/UnityMcpBridge/UnityMcpServer~/src/tools/__init__.py @@ -51,8 +51,10 @@ def register_all_tools(mcp: FastMCP): kwargs = tool_info['kwargs'] # Apply the @mcp.tool decorator and telemetry - mcp.tool(name=tool_name, description=description, **kwargs)(func) - telemetry_tool(tool_name)(func) + wrapped = mcp.tool( + name=tool_name, description=description, **kwargs)(func) + wrapped = telemetry_tool(tool_name)(wrapped) + tool_info['func'] = wrapped logger.info(f"Registered tool: {tool_name} - {description}") logger.info(f"Registered {len(tools)} MCP tools") From d8fd19d5936aa6df158d13938a39083f62d28a2a Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 17:40:55 -0400 Subject: [PATCH 22/30] Remove AI generated code that was never used... --- .../Editor/Dependencies/DependencyManager.cs | 161 ------------------ 1 file changed, 161 deletions(-) diff --git a/UnityMcpBridge/Editor/Dependencies/DependencyManager.cs b/UnityMcpBridge/Editor/Dependencies/DependencyManager.cs index 2f7b5ca1..ce6efef2 100644 --- a/UnityMcpBridge/Editor/Dependencies/DependencyManager.cs +++ b/UnityMcpBridge/Editor/Dependencies/DependencyManager.cs @@ -80,69 +80,6 @@ public static DependencyCheckResult CheckAllDependencies() return result; } - /// - /// Quick check if system is ready for MCP operations - /// - public static bool IsSystemReady() - { - try - { - var result = CheckAllDependencies(); - return result.IsSystemReady; - } - catch - { - return false; - } - } - - /// - /// Get a summary of missing dependencies - /// - public static string GetMissingDependenciesSummary() - { - try - { - var result = CheckAllDependencies(); - var missing = result.GetMissingRequired(); - - if (missing.Count == 0) - { - return "All required dependencies are available."; - } - - var names = missing.Select(d => d.Name).ToArray(); - return $"Missing required dependencies: {string.Join(", ", names)}"; - } - catch (Exception ex) - { - return $"Error checking dependencies: {ex.Message}"; - } - } - - /// - /// Check if a specific dependency is available - /// - public static bool IsDependencyAvailable(string dependencyName) - { - try - { - var detector = GetCurrentPlatformDetector(); - - return dependencyName.ToLowerInvariant() switch - { - "python" => detector.DetectPython().IsAvailable, - "uv" => detector.DetectUV().IsAvailable, - "mcpserver" or "mcp-server" => detector.DetectMCPServer().IsAvailable, - _ => false - }; - } - catch - { - return false; - } - } - /// /// Get installation recommendations for the current platform /// @@ -175,104 +112,6 @@ public static (string pythonUrl, string uvUrl) GetInstallationUrls() } } - /// - /// Validate that the MCP server can be started - /// - public static bool ValidateMCPServerStartup() - { - try - { - // Check if Python and UV are available - if (!IsDependencyAvailable("python") || !IsDependencyAvailable("uv")) - { - return false; - } - - // Try to ensure server is installed - ServerInstaller.EnsureServerInstalled(); - - // Check if server files exist - var serverStatus = GetCurrentPlatformDetector().DetectMCPServer(); - return serverStatus.IsAvailable; - } - catch (Exception ex) - { - McpLog.Error($"Error validating MCP server startup: {ex.Message}"); - 
return false; - } - } - - /// - /// Attempt to repair the Python environment - /// - public static bool RepairPythonEnvironment() - { - try - { - McpLog.Info("Attempting to repair Python environment..."); - return ServerInstaller.RepairPythonEnvironment(); - } - catch (Exception ex) - { - McpLog.Error($"Error repairing Python environment: {ex.Message}"); - return false; - } - } - - /// - /// Get detailed dependency information for diagnostics - /// - public static string GetDependencyDiagnostics() - { - try - { - var result = CheckAllDependencies(); - var detector = GetCurrentPlatformDetector(); - - var diagnostics = new System.Text.StringBuilder(); - diagnostics.AppendLine($"Platform: {detector.PlatformName}"); - diagnostics.AppendLine($"Check Time: {result.CheckedAt:yyyy-MM-dd HH:mm:ss} UTC"); - diagnostics.AppendLine($"System Ready: {result.IsSystemReady}"); - diagnostics.AppendLine(); - - foreach (var dep in result.Dependencies) - { - diagnostics.AppendLine($"=== {dep.Name} ==="); - diagnostics.AppendLine($"Available: {dep.IsAvailable}"); - diagnostics.AppendLine($"Required: {dep.IsRequired}"); - - if (!string.IsNullOrEmpty(dep.Version)) - diagnostics.AppendLine($"Version: {dep.Version}"); - - if (!string.IsNullOrEmpty(dep.Path)) - diagnostics.AppendLine($"Path: {dep.Path}"); - - if (!string.IsNullOrEmpty(dep.Details)) - diagnostics.AppendLine($"Details: {dep.Details}"); - - if (!string.IsNullOrEmpty(dep.ErrorMessage)) - diagnostics.AppendLine($"Error: {dep.ErrorMessage}"); - - diagnostics.AppendLine(); - } - - if (result.RecommendedActions.Count > 0) - { - diagnostics.AppendLine("=== Recommended Actions ==="); - foreach (var action in result.RecommendedActions) - { - diagnostics.AppendLine($"- {action}"); - } - } - - return diagnostics.ToString(); - } - catch (Exception ex) - { - return $"Error generating diagnostics: {ex.Message}"; - } - } - private static void GenerateRecommendations(DependencyCheckResult result, IPlatformDetector detector) { var missing = result.GetMissingDependencies(); From a2d76b6e154af469881f3c8e0361799223e4b4e8 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 17:56:06 -0400 Subject: [PATCH 23/30] feat: Rebuild MCP server installation with embedded source Refactored the server repair logic to implement a full rebuild of the MCP server installation using the embedded source. The new RebuildMcpServer method now: - Uses embedded server source instead of attempting repair of existing installation - Deletes the entire existing server directory before re-copying - Handles UV process cleanup for the target path - Simplifies the installation flow by removing the complex Python environment repair logic - Maintains the same installation behavior but with a cleaner, more reliable rebuild approach This change improves reliability of server installations by ensuring a clean slate rebuild rather than attempting to repair potentially corrupted environments. 
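Editor's note: for readers who want to reproduce the clean-slate rebuild outside the editor window, the approach described here reduces to deleting the installed copy and re-copying the embedded source. A rough Python sketch with placeholder paths; the installer derives the real locations itself, and any running `uv` process holding files under the install directory should be stopped first:

```python
import shutil
from pathlib import Path

# Placeholder locations for the example; not the exact paths the installer uses.
EMBEDDED_ROOT = Path("UnityMcpBridge/UnityMcpServer~")
INSTALL_ROOT = Path.home() / "UnityMCP" / "UnityMcpServer"


def rebuild_server() -> None:
    """Delete the installed server and re-copy it from the embedded source."""
    if INSTALL_ROOT.exists():
        shutil.rmtree(INSTALL_ROOT)  # clean slate, including any stale .venv
    shutil.copytree(EMBEDDED_ROOT, INSTALL_ROOT)


if __name__ == "__main__":
    rebuild_server()
    print(f"Server rebuilt at {INSTALL_ROOT}")
```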
--- .../Editor/Helpers/ServerInstaller.cs | 110 ++++++------------ .../Editor/Windows/MCPForUnityEditorWindow.cs | 12 +- UnityMcpBridge/README.md | 2 +- 3 files changed, 40 insertions(+), 84 deletions(-) diff --git a/UnityMcpBridge/Editor/Helpers/ServerInstaller.cs b/UnityMcpBridge/Editor/Helpers/ServerInstaller.cs index 26c0fbb2..f41e03c3 100644 --- a/UnityMcpBridge/Editor/Helpers/ServerInstaller.cs +++ b/UnityMcpBridge/Editor/Helpers/ServerInstaller.cs @@ -423,105 +423,61 @@ private static void CopyDirectoryRecursive(string sourceDir, string destinationD } } - public static bool RepairPythonEnvironment() + public static bool RebuildMcpServer() { try { - string serverSrc = GetServerPath(); - bool hasServer = File.Exists(Path.Combine(serverSrc, "server.py")); - if (!hasServer) - { - // In dev mode or if not installed yet, try the embedded/dev source - if (TryGetEmbeddedServerSource(out string embeddedSrc) && File.Exists(Path.Combine(embeddedSrc, "server.py"))) - { - serverSrc = embeddedSrc; - hasServer = true; - } - else - { - // Attempt to install then retry - EnsureServerInstalled(); - serverSrc = GetServerPath(); - hasServer = File.Exists(Path.Combine(serverSrc, "server.py")); - } - } - - if (!hasServer) - { - Debug.LogWarning("RepairPythonEnvironment: server.py not found; ensure server is installed first."); - return false; - } - - // Remove stale venv and pinned version file if present - string venvPath = Path.Combine(serverSrc, ".venv"); - if (Directory.Exists(venvPath)) - { - try { Directory.Delete(venvPath, recursive: true); } catch (Exception ex) { Debug.LogWarning($"Failed to delete .venv: {ex.Message}"); } - } - string pyPin = Path.Combine(serverSrc, ".python-version"); - if (File.Exists(pyPin)) - { - try { File.Delete(pyPin); } catch (Exception ex) { Debug.LogWarning($"Failed to delete .python-version: {ex.Message}"); } - } - - string uvPath = FindUvPath(); - if (uvPath == null) + // Find embedded source + if (!TryGetEmbeddedServerSource(out string embeddedSrc)) { - Debug.LogError("UV not found. 
Please install uv (https://docs.astral.sh/uv/)."); + Debug.LogError("RebuildMcpServer: Could not find embedded server source."); return false; } - var psi = new System.Diagnostics.ProcessStartInfo - { - FileName = uvPath, - Arguments = "sync", - WorkingDirectory = serverSrc, - UseShellExecute = false, - RedirectStandardOutput = true, - RedirectStandardError = true, - CreateNoWindow = true - }; + string saveLocation = GetSaveLocation(); + string destRoot = Path.Combine(saveLocation, ServerFolder); + string destSrc = Path.Combine(destRoot, "src"); - using var proc = new System.Diagnostics.Process { StartInfo = psi }; - var sbOut = new StringBuilder(); - var sbErr = new StringBuilder(); - proc.OutputDataReceived += (_, e) => { if (e.Data != null) sbOut.AppendLine(e.Data); }; - proc.ErrorDataReceived += (_, e) => { if (e.Data != null) sbErr.AppendLine(e.Data); }; + // Kill any running uv processes for this server + TryKillUvForPath(destSrc); - if (!proc.Start()) + // Delete the entire installed server directory + if (Directory.Exists(destRoot)) { - Debug.LogError("Failed to start uv process."); - return false; + try + { + Directory.Delete(destRoot, recursive: true); + Debug.Log($"MCP-FOR-UNITY: Deleted existing server at {destRoot}"); + } + catch (Exception ex) + { + Debug.LogError($"Failed to delete existing server: {ex.Message}"); + return false; + } } - proc.BeginOutputReadLine(); - proc.BeginErrorReadLine(); + // Re-copy from embedded source + string embeddedRoot = Path.GetDirectoryName(embeddedSrc) ?? embeddedSrc; + Directory.CreateDirectory(destRoot); + CopyDirectoryRecursive(embeddedRoot, destRoot); - if (!proc.WaitForExit(60000)) + // Write version file + string embeddedVer = ReadVersionFile(Path.Combine(embeddedSrc, VersionFileName)) ?? "unknown"; + try { - try { proc.Kill(); } catch { } - Debug.LogError("uv sync timed out."); - return false; + File.WriteAllText(Path.Combine(destSrc, VersionFileName), embeddedVer); } - - // Ensure async buffers flushed - proc.WaitForExit(); - - string stdout = sbOut.ToString(); - string stderr = sbErr.ToString(); - - if (proc.ExitCode != 0) + catch (Exception ex) { - Debug.LogError($"uv sync failed: {stderr}\n{stdout}"); - return false; + Debug.LogWarning($"Failed to write version file: {ex.Message}"); } - Debug.Log("MCP-FOR-UNITY: Python environment repaired successfully."); + Debug.Log($"MCP-FOR-UNITY: Server rebuilt successfully at {destRoot} (version {embeddedVer})"); return true; } catch (Exception ex) { - Debug.LogError($"RepairPythonEnvironment failed: {ex.Message}"); + Debug.LogError($"RebuildMcpServer failed: {ex.Message}"); return false; } } diff --git a/UnityMcpBridge/Editor/Windows/MCPForUnityEditorWindow.cs b/UnityMcpBridge/Editor/Windows/MCPForUnityEditorWindow.cs index ed70181b..98a5295e 100644 --- a/UnityMcpBridge/Editor/Windows/MCPForUnityEditorWindow.cs +++ b/UnityMcpBridge/Editor/Windows/MCPForUnityEditorWindow.cs @@ -368,25 +368,25 @@ private void DrawServerStatusSection() } EditorGUILayout.Space(4); - // Repair Python Env button with tooltip tag + // Rebuild MCP Server button with tooltip tag using (new EditorGUILayout.HorizontalScope()) { GUILayout.FlexibleSpace(); GUIContent repairLabel = new GUIContent( - "Repair Python Env", - "Deletes the server's .venv and runs 'uv sync' to rebuild a clean environment. Use this if modules are missing or Python upgraded." + "Rebuild MCP Server", + "Deletes the installed server and re-copies it from the package. 
Use this to update the server after making source code changes or if the installation is corrupted." ); if (GUILayout.Button(repairLabel, GUILayout.Width(160), GUILayout.Height(22))) { - bool ok = global::MCPForUnity.Editor.Helpers.ServerInstaller.RepairPythonEnvironment(); + bool ok = global::MCPForUnity.Editor.Helpers.ServerInstaller.RebuildMcpServer(); if (ok) { - EditorUtility.DisplayDialog("MCP For Unity", "Python environment repaired.", "OK"); + EditorUtility.DisplayDialog("MCP For Unity", "Server rebuilt successfully.", "OK"); UpdatePythonServerInstallationStatus(); } else { - EditorUtility.DisplayDialog("MCP For Unity", "Repair failed. Please check Console for details.", "OK"); + EditorUtility.DisplayDialog("MCP For Unity", "Rebuild failed. Please check Console for details.", "OK"); } } } diff --git a/UnityMcpBridge/README.md b/UnityMcpBridge/README.md index b073a5fc..b26b9f19 100644 --- a/UnityMcpBridge/README.md +++ b/UnityMcpBridge/README.md @@ -29,7 +29,7 @@ The window has four areas: Server Status, Unity Bridge, MCP Client Configuration - Ports: Unity (varies; shown in UI), MCP 6500. - Actions: - Auto-Setup: Registers/updates your selected MCP client(s), ensures bridge connectivity. Shows “Connected ✓” after success. - - Repair Python Env: Rebuilds a clean Python environment (deletes `.venv`, runs `uv sync`). + - Rebuild MCP Server: Rebuilds the Python based MCP server - Select server folder…: Choose the folder containing `server.py`. - Verify again: Re-checks server presence. - If Python isn’t detected, use “Open Install Instructions”. From 8e7b202444caecb89f73dcef088d31f6809b9797 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 17:57:42 -0400 Subject: [PATCH 24/30] Add the rebuild server step --- docs/CUSTOM_TOOLS.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md index c6074f0d..743237bc 100644 --- a/docs/CUSTOM_TOOLS.md +++ b/docs/CUSTOM_TOOLS.md @@ -48,6 +48,8 @@ def my_custom_tool( - Auto-generates the tool name from the function name (e.g., `my_custom_tool`) - Registers the tool with FastMCP during module import +4. **Rebuild the server** in the MCP for Unity window (in the Unity Editor) to apply the changes. + ### Decorator Options ```python From 1d5291e9e243720454f3462542b8e69788ba9631 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:27:10 -0400 Subject: [PATCH 25/30] docs: clarify tool description field requirements and client compatibility --- docs/CUSTOM_TOOLS.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/CUSTOM_TOOLS.md b/docs/CUSTOM_TOOLS.md index 743237bc..1b988e1c 100644 --- a/docs/CUSTOM_TOOLS.md +++ b/docs/CUSTOM_TOOLS.md @@ -59,7 +59,9 @@ def my_custom_tool( ) ``` -You can use all options available in FastMCP's `mcp.tool` function decorator: . The `description` is the only required field. +You can use all options available in FastMCP's `mcp.tool` function decorator: . + +**Note:** All tools should have the `description` field. It's not strictly required, however, that parameter is the best place to define a description so that most MCP clients can read it. See [issue #289](https://github.com/CoplayDev/unity-mcp/issues/289). 
### Auto-Discovery From 7db81f1d3d9b450868902c45826292d3d88b8d8d Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:32:29 -0400 Subject: [PATCH 26/30] fix: move initialization flag after tool discovery to prevent race conditions --- UnityMcpBridge/Editor/Tools/CommandRegistry.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs index 21f13e2b..ccca57e0 100644 --- a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs +++ b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs @@ -22,9 +22,9 @@ public static class CommandRegistry public static void Initialize() { if (_initialized) return; - _initialized = true; AutoDiscoverTools(); + _initialized = true; } /// From 3d107e596f8c862bccfc365a16f9ed6ed94be3ad Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:33:59 -0400 Subject: [PATCH 27/30] refactor: remove redundant TryParseVersion overrides in platform detectors --- .../Dependencies/PlatformDetectors/LinuxPlatformDetector.cs | 5 ----- .../Dependencies/PlatformDetectors/MacOSPlatformDetector.cs | 5 ----- .../PlatformDetectors/WindowsPlatformDetector.cs | 5 ----- 3 files changed, 15 deletions(-) diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs index 09fded14..ab91c955 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs @@ -244,10 +244,5 @@ private bool TryFindInPath(string executable, out string fullPath) return false; } - - private bool TryParseVersion(string version, out int major, out int minor) - { - return base.TryParseVersion(version, out major, out minor); - } } } diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs index 715338ce..08d8ffb5 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs @@ -244,10 +244,5 @@ private bool TryFindInPath(string executable, out string fullPath) return false; } - - private bool TryParseVersion(string version, out int major, out int minor) - { - return base.TryParseVersion(version, out major, out minor); - } } } diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs index ea57d5ef..7489ca50 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs @@ -223,10 +223,5 @@ private bool TryFindInPath(string executable, out string fullPath) return false; } - - private bool TryParseVersion(string version, out int major, out int minor) - { - return base.TryParseVersion(version, out major, out minor); - } } } From 4314e2aef69407530e5cafd7d81d5e175805201e Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:36:38 -0400 Subject: [PATCH 28/30] refactor: remove duplicate UV validation code from platform detectors --- .../LinuxPlatformDetector.cs | 36 ------------------- .../MacOSPlatformDetector.cs | 36 ------------------- .../WindowsPlatformDetector.cs | 36 ------------------- 3 files changed, 108 
deletions(-) diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs index ab91c955..4ace9756 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/LinuxPlatformDetector.cs @@ -159,42 +159,6 @@ private bool TryValidatePython(string pythonPath, out string version, out string return false; } - private bool TryValidateUV(string uvPath, out string version) - { - version = null; - - try - { - var psi = new ProcessStartInfo - { - FileName = uvPath, - Arguments = "--version", - UseShellExecute = false, - RedirectStandardOutput = true, - RedirectStandardError = true, - CreateNoWindow = true - }; - - using var process = Process.Start(psi); - if (process == null) return false; - - string output = process.StandardOutput.ReadToEnd().Trim(); - process.WaitForExit(5000); - - if (process.ExitCode == 0 && output.StartsWith("uv ")) - { - version = output.Substring(3); // Remove "uv " prefix - return true; - } - } - catch - { - // Ignore validation errors - } - - return false; - } - private bool TryFindInPath(string executable, out string fullPath) { fullPath = null; diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs index 08d8ffb5..c89e7cb9 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/MacOSPlatformDetector.cs @@ -159,42 +159,6 @@ private bool TryValidatePython(string pythonPath, out string version, out string return false; } - private bool TryValidateUV(string uvPath, out string version) - { - version = null; - - try - { - var psi = new ProcessStartInfo - { - FileName = uvPath, - Arguments = "--version", - UseShellExecute = false, - RedirectStandardOutput = true, - RedirectStandardError = true, - CreateNoWindow = true - }; - - using var process = Process.Start(psi); - if (process == null) return false; - - string output = process.StandardOutput.ReadToEnd().Trim(); - process.WaitForExit(5000); - - if (process.ExitCode == 0 && output.StartsWith("uv ")) - { - version = output.Substring(3); // Remove "uv " prefix - return true; - } - } - catch - { - // Ignore validation errors - } - - return false; - } - private bool TryFindInPath(string executable, out string fullPath) { fullPath = null; diff --git a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs index 7489ca50..bd9c6f03 100644 --- a/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs +++ b/UnityMcpBridge/Editor/Dependencies/PlatformDetectors/WindowsPlatformDetector.cs @@ -147,42 +147,6 @@ private bool TryValidatePython(string pythonPath, out string version, out string return false; } - private bool TryValidateUV(string uvPath, out string version) - { - version = null; - - try - { - var psi = new ProcessStartInfo - { - FileName = uvPath, - Arguments = "--version", - UseShellExecute = false, - RedirectStandardOutput = true, - RedirectStandardError = true, - CreateNoWindow = true - }; - - using var process = Process.Start(psi); - if (process == null) return false; - - string output = process.StandardOutput.ReadToEnd().Trim(); - process.WaitForExit(5000); - - if (process.ExitCode == 0 && 
output.StartsWith("uv ")) - { - version = output.Substring(3); // Remove "uv " prefix - return true; - } - } - catch - { - // Ignore validation errors - } - - return false; - } - private bool TryFindInPath(string executable, out string fullPath) { fullPath = null; From fe1326007e9645d018216a21cdf449d227cf35b4 Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:40:50 -0400 Subject: [PATCH 29/30] Update UnityMcpBridge/Editor/Tools/CommandRegistry.cs Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> --- UnityMcpBridge/Editor/Tools/CommandRegistry.cs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs index ccca57e0..79003d55 100644 --- a/UnityMcpBridge/Editor/Tools/CommandRegistry.cs +++ b/UnityMcpBridge/Editor/Tools/CommandRegistry.cs @@ -80,6 +80,15 @@ private static void RegisterToolType(Type type) commandName = ToSnakeCase(type.Name); } + // Check for duplicate command names + if (_handlers.ContainsKey(commandName)) + { + McpLog.Warn( + $"Duplicate command name '{commandName}' detected. " + + $"Tool {type.Name} will override previously registered handler." + ); + } + // Find HandleCommand method var method = type.GetMethod( "HandleCommand", From 2af34131640249553be27cd830651cb3b236318d Mon Sep 17 00:00:00 2001 From: Marcus Sanatan Date: Fri, 3 Oct 2025 18:50:36 -0400 Subject: [PATCH 30/30] refactor: replace WriteToConfig reflection with direct McpConfigurationHelper call --- .../WriteToConfigTests.cs | 39 +++++++------------ .../WriteToConfigTests.cs.meta | 0 2 files changed, 13 insertions(+), 26 deletions(-) rename TestProjects/UnityMCPTests/Assets/Tests/EditMode/{Windows => Helpers}/WriteToConfigTests.cs (87%) rename TestProjects/UnityMCPTests/Assets/Tests/EditMode/{Windows => Helpers}/WriteToConfigTests.cs.meta (100%) diff --git a/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Windows/WriteToConfigTests.cs b/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Helpers/WriteToConfigTests.cs similarity index 87% rename from TestProjects/UnityMCPTests/Assets/Tests/EditMode/Windows/WriteToConfigTests.cs rename to TestProjects/UnityMCPTests/Assets/Tests/EditMode/Helpers/WriteToConfigTests.cs index 3fd77088..88f4118d 100644 --- a/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Windows/WriteToConfigTests.cs +++ b/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Helpers/WriteToConfigTests.cs @@ -1,17 +1,14 @@ using System; using System.Diagnostics; using System.IO; -using System.Reflection; using System.Runtime.InteropServices; using Newtonsoft.Json.Linq; using NUnit.Framework; using UnityEditor; -using UnityEngine; -using MCPForUnity.Editor.Data; +using MCPForUnity.Editor.Helpers; using MCPForUnity.Editor.Models; -using MCPForUnity.Editor.Windows; -namespace MCPForUnityTests.Editor.Windows +namespace MCPForUnityTests.Editor.Helpers { public class WriteToConfigTests { @@ -68,7 +65,7 @@ public void TearDown() public void AddsEnvAndDisabledFalse_ForWindsurf() { var configPath = Path.Combine(_tempRoot, "windsurf.json"); - WriteInitialConfig(configPath, isVSCode:false, command:_fakeUvPath, directory:"/old/path"); + WriteInitialConfig(configPath, isVSCode: false, command: _fakeUvPath, directory: "/old/path"); var client = new McpClient { name = "Windsurf", mcpType = McpTypes.Windsurf }; InvokeWriteToConfig(configPath, client); @@ -85,7 +82,7 @@ public void AddsEnvAndDisabledFalse_ForWindsurf() public void AddsEnvAndDisabledFalse_ForKiro() { 
var configPath = Path.Combine(_tempRoot, "kiro.json"); - WriteInitialConfig(configPath, isVSCode:false, command:_fakeUvPath, directory:"/old/path"); + WriteInitialConfig(configPath, isVSCode: false, command: _fakeUvPath, directory: "/old/path"); var client = new McpClient { name = "Kiro", mcpType = McpTypes.Kiro }; InvokeWriteToConfig(configPath, client); @@ -102,7 +99,7 @@ public void AddsEnvAndDisabledFalse_ForKiro() public void DoesNotAddEnvOrDisabled_ForCursor() { var configPath = Path.Combine(_tempRoot, "cursor.json"); - WriteInitialConfig(configPath, isVSCode:false, command:_fakeUvPath, directory:"/old/path"); + WriteInitialConfig(configPath, isVSCode: false, command: _fakeUvPath, directory: "/old/path"); var client = new McpClient { name = "Cursor", mcpType = McpTypes.Cursor }; InvokeWriteToConfig(configPath, client); @@ -118,7 +115,7 @@ public void DoesNotAddEnvOrDisabled_ForCursor() public void DoesNotAddEnvOrDisabled_ForVSCode() { var configPath = Path.Combine(_tempRoot, "vscode.json"); - WriteInitialConfig(configPath, isVSCode:true, command:_fakeUvPath, directory:"/old/path"); + WriteInitialConfig(configPath, isVSCode: true, command: _fakeUvPath, directory: "/old/path"); var client = new McpClient { name = "VSCode", mcpType = McpTypes.VSCode }; InvokeWriteToConfig(configPath, client); @@ -219,25 +216,15 @@ private static void WriteInitialConfig(string configPath, bool isVSCode, string File.WriteAllText(configPath, root.ToString()); } - private static MCPForUnityEditorWindow CreateWindow() - { - return ScriptableObject.CreateInstance(); - } - private static void InvokeWriteToConfig(string configPath, McpClient client) { - var window = CreateWindow(); - var mi = typeof(MCPForUnityEditorWindow).GetMethod("WriteToConfig", BindingFlags.Instance | BindingFlags.NonPublic); - Assert.NotNull(mi, "Could not find WriteToConfig via reflection"); - - // pythonDir is unused by WriteToConfig, but pass server src to keep it consistent - var result = (string)mi!.Invoke(window, new object[] { - /* pythonDir */ string.Empty, - /* configPath */ configPath, - /* mcpClient */ client - }); - - Assert.AreEqual("Configured successfully", result, "WriteToConfig should return success"); + var result = McpConfigurationHelper.WriteMcpConfiguration( + pythonDir: string.Empty, + configPath: configPath, + mcpClient: client + ); + + Assert.AreEqual("Configured successfully", result, "WriteMcpConfiguration should return success"); } } } diff --git a/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Windows/WriteToConfigTests.cs.meta b/TestProjects/UnityMCPTests/Assets/Tests/EditMode/Helpers/WriteToConfigTests.cs.meta similarity index 100% rename from TestProjects/UnityMCPTests/Assets/Tests/EditMode/Windows/WriteToConfigTests.cs.meta rename to TestProjects/UnityMCPTests/Assets/Tests/EditMode/Helpers/WriteToConfigTests.cs.meta
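Editor's note: the C# tests above now call the configuration helper directly instead of reaching through reflection, and the Python tool registry introduced earlier in this series can be exercised just as directly. A small pytest-style sketch, assuming the server's `src` directory is on `sys.path`:

```python
import pytest

from registry import clear_registry, get_registered_tools, mcp_for_unity_tool


@pytest.fixture(autouse=True)
def clean_registry():
    # Keep each test isolated from tools registered elsewhere.
    clear_registry()
    yield
    clear_registry()


def test_decorated_tool_is_collected():
    @mcp_for_unity_tool(description="Example tool used only by this test")
    async def example_tool(ctx):
        return {"ok": True}

    tools = get_registered_tools()
    assert len(tools) == 1
    assert tools[0]["name"] == "example_tool"
    assert tools[0]["description"] == "Example tool used only by this test"
```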