diff --git a/src/basic_memory/cli/commands/cloud/api_client.py b/src/basic_memory/cli/commands/cloud/api_client.py
index 4de527d33..4d8a2a351 100644
--- a/src/basic_memory/cli/commands/cloud/api_client.py
+++ b/src/basic_memory/cli/commands/cloud/api_client.py
@@ -52,7 +52,7 @@ async def get_authenticated_headers(auth: CLIAuth | None = None) -> dict[str, st
     auth_obj = auth or CLIAuth(client_id=client_id, authkit_domain=domain)
     token = await auth_obj.get_valid_token()
     if not token:
-        console.print("[red]Not authenticated. Please run 'basic-memory cloud login' first.[/red]")
+        console.print("[red]Not authenticated. Please run 'bm cloud login' first.[/red]")
         raise typer.Exit(1)
     return {"Authorization": f"Bearer {token}"}
diff --git a/src/basic_memory/cli/commands/cloud/core_commands.py b/src/basic_memory/cli/commands/cloud/core_commands.py
index 142d26b7f..b3f6bb089 100644
--- a/src/basic_memory/cli/commands/cloud/core_commands.py
+++ b/src/basic_memory/cli/commands/cloud/core_commands.py
@@ -151,7 +151,7 @@ def setup() -> None:
     """Set up cloud sync by installing rclone and configuring credentials.

     After setup, use project commands for syncing:
-      bm project add --local-path ~/projects/
+      bm project add --cloud --local-path ~/projects/
       bm project bisync --name --resync  # First time
       bm project bisync --name  # Subsequent syncs
     """
@@ -183,7 +183,7 @@ def setup() -> None:
     console.print("\n[bold green]Cloud setup completed successfully![/bold green]")
     console.print("\n[bold]Next steps:[/bold]")
     console.print("1. Add a project with local sync path:")
-    console.print("   bm project add research --local-path ~/Documents/research")
+    console.print("   bm project add research --cloud --local-path ~/Documents/research")
     console.print("\n   Or configure sync for an existing project:")
     console.print("   bm project sync-setup research ~/Documents/research")
     console.print("\n2. Preview the initial sync (recommended):")
diff --git a/src/basic_memory/cli/commands/cloud/upload_command.py b/src/basic_memory/cli/commands/cloud/upload_command.py
index eeb51786e..b27c83ffa 100644
--- a/src/basic_memory/cli/commands/cloud/upload_command.py
+++ b/src/basic_memory/cli/commands/cloud/upload_command.py
@@ -13,6 +13,7 @@
     sync_project,
 )
 from basic_memory.cli.commands.cloud.upload import upload_path
+from basic_memory.mcp.async_client import get_cloud_control_plane_client

 console = Console()
@@ -86,7 +87,7 @@ async def _upload():
             console.print(
                 f"[red]Project '{project}' does not exist.[/red]\n"
                 f"[yellow]Options:[/yellow]\n"
-                f"  1. Create it first: bm project add {project}\n"
+                f"  1. Create it first: bm project add {project} --cloud\n"
                 f"  2. Use --create-project flag to create automatically"
             )
             raise typer.Exit(1)
@@ -100,7 +101,12 @@ async def _upload():
         console.print(f"[blue]Uploading {path} to project '{project}'...[/blue]")

         success = await upload_path(
-            path, project, verbose=verbose, use_gitignore=not no_gitignore, dry_run=dry_run
+            path,
+            project,
+            verbose=verbose,
+            use_gitignore=not no_gitignore,
+            dry_run=dry_run,
+            client_cm_factory=get_cloud_control_plane_client,
         )
         if not success:
             console.print("[red]Upload failed[/red]")
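Reviewer note: a minimal sketch of the injection pattern this change introduces — `upload_path` now receives an async context-manager factory (`client_cm_factory`) instead of constructing its own client. The `example_client` base URL and the `put_file` helper below are illustrative stand-ins, not code from this PR:

```python
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from typing import AsyncIterator, Callable

import httpx

# Factory type: a zero-arg callable returning an async context manager that
# yields a ready-to-use client (the role get_cloud_control_plane_client plays).
ClientFactory = Callable[[], AbstractAsyncContextManager[httpx.AsyncClient]]


@asynccontextmanager
async def example_client() -> AsyncIterator[httpx.AsyncClient]:
    # Illustrative stand-in; the real factory configures auth and base URL.
    async with httpx.AsyncClient(base_url="https://cloud.example.test") as client:
        yield client


async def put_file(data: bytes, dest: str, client_cm_factory: ClientFactory) -> bool:
    # Each call opens a client via the injected factory, so tests can swap in
    # a fake factory (as tests/cli/cloud/test_upload_command_routing.py does).
    async with client_cm_factory() as client:
        response = await client.put(dest, content=data)
        return response.is_success
```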
diff --git a/src/basic_memory/cli/commands/command_utils.py b/src/basic_memory/cli/commands/command_utils.py
index 9cfa46b58..a60021788 100644
--- a/src/basic_memory/cli/commands/command_utils.py
+++ b/src/basic_memory/cli/commands/command_utils.py
@@ -97,5 +97,19 @@ async def get_project_info(project: str):
         response = await call_get(client, f"/v2/projects/{project_item.external_id}/info")
         return ProjectInfoResponse.model_validate(response.json())
     except (ToolError, ValueError) as e:
-        console.print(f"[red]Sync failed: {e}[/red]")
+        error_text = str(e)
+        if "internal proxy error" in error_text.lower() and "not found in configuration" in (
+            error_text.lower()
+        ):
+            console.print(
+                "[red]Project info failed: cloud returned an internal configuration error for "
+                "this project.[/red]"
+            )
+            console.print(
+                "[yellow]This is a cloud backend issue for detailed info lookups. "
+                "Use `bm project list --cloud` for project metadata until the service is updated."
+                "[/yellow]"
+            )
+        else:
+            console.print(f"[red]Project info failed: {e}[/red]")
         raise typer.Exit(1)
diff --git a/src/basic_memory/cli/commands/format.py b/src/basic_memory/cli/commands/format.py
index d55abcbf1..fb59b38c3 100644
--- a/src/basic_memory/cli/commands/format.py
+++ b/src/basic_memory/cli/commands/format.py
@@ -183,10 +183,10 @@ def format(
     By default, formats all .md, .json, and .canvas files in the current project.

     Examples:
-        basic-memory format                      # Format all files in current project
-        basic-memory format --project research   # Format files in specific project
-        basic-memory format notes/meeting.md     # Format a specific file
-        basic-memory format notes/               # Format all files in directory
+        bm format                      # Format all files in current project
+        bm format --project research   # Format files in specific project
+        bm format notes/meeting.md     # Format a specific file
+        bm format notes/               # Format all files in directory
     """
     try:
         run_with_cleanup(run_format(path, project))
diff --git a/src/basic_memory/cli/commands/import_chatgpt.py b/src/basic_memory/cli/commands/import_chatgpt.py
index c9d7a8592..1fac9d9a3 100644
--- a/src/basic_memory/cli/commands/import_chatgpt.py
+++ b/src/basic_memory/cli/commands/import_chatgpt.py
@@ -44,7 +44,7 @@ def import_chatgpt(
     2. Convert them to linear markdown conversations
     3. Save as clean, readable markdown files

-    After importing, run 'basic-memory sync' to index the new files.
+    After importing, run 'bm reindex --search' to index the new files.
""" try: @@ -81,7 +81,7 @@ def import_chatgpt( ) ) - console.print("\nRun 'basic-memory sync' to index the new files.") + console.print("\nRun 'bm reindex --search' to index the new files.") except Exception as e: logger.error("Import failed") diff --git a/src/basic_memory/cli/commands/import_claude_conversations.py b/src/basic_memory/cli/commands/import_claude_conversations.py index 4dee5e0ee..c852ae799 100644 --- a/src/basic_memory/cli/commands/import_claude_conversations.py +++ b/src/basic_memory/cli/commands/import_claude_conversations.py @@ -44,7 +44,7 @@ def import_claude( 2. Create markdown files for each conversation 3. Format content in clean, readable markdown - After importing, run 'basic-memory sync' to index the new files. + After importing, run 'bm reindex --search' to index the new files. """ config = get_project_config() @@ -84,7 +84,7 @@ def import_claude( ) ) - console.print("\nRun 'basic-memory sync' to index the new files.") + console.print("\nRun 'bm reindex --search' to index the new files.") except Exception as e: logger.error("Import failed") diff --git a/src/basic_memory/cli/commands/import_claude_projects.py b/src/basic_memory/cli/commands/import_claude_projects.py index 94a8665b3..4b8de75dd 100644 --- a/src/basic_memory/cli/commands/import_claude_projects.py +++ b/src/basic_memory/cli/commands/import_claude_projects.py @@ -44,7 +44,7 @@ def import_projects( 2. Store docs in a docs/ subdirectory 3. Place prompt template in project root - After importing, run 'basic-memory sync' to index the new files. + After importing, run 'bm reindex --search' to index the new files. """ config = get_project_config() try: @@ -83,7 +83,7 @@ def import_projects( ) ) - console.print("\nRun 'basic-memory sync' to index the new files.") + console.print("\nRun 'bm reindex --search' to index the new files.") except Exception as e: logger.error("Import failed") diff --git a/src/basic_memory/cli/commands/mcp.py b/src/basic_memory/cli/commands/mcp.py index 731db9723..49655ca8b 100644 --- a/src/basic_memory/cli/commands/mcp.py +++ b/src/basic_memory/cli/commands/mcp.py @@ -36,7 +36,7 @@ def mcp( This command starts an MCP server using one of three transport options: - stdio: Standard I/O (good for local usage) - - streamable-http: Recommended for web deployments (default) + - streamable-http: Recommended for web deployments - sse: Server-Sent Events (for compatibility with existing clients) Initialization, file sync, and cleanup are handled by the MCP server's lifespan. diff --git a/src/basic_memory/cli/commands/project.py b/src/basic_memory/cli/commands/project.py index 36f5a8a3c..9e003e008 100644 --- a/src/basic_memory/cli/commands/project.py +++ b/src/basic_memory/cli/commands/project.py @@ -1149,6 +1149,8 @@ def display_project_info( ) console.print(f"\nTimestamp: [cyan]{current_time.strftime('%Y-%m-%d %H:%M:%S')}[/cyan]") + except typer.Exit: + raise except Exception as e: # pragma: no cover typer.echo(f"Error getting project info: {e}", err=True) raise typer.Exit(1) diff --git a/src/basic_memory/cli/commands/tool.py b/src/basic_memory/cli/commands/tool.py index 875bd0d92..dc4d257a1 100644 --- a/src/basic_memory/cli/commands/tool.py +++ b/src/basic_memory/cli/commands/tool.py @@ -298,7 +298,7 @@ def write_note( content: Annotated[ Optional[str], typer.Option( - help="The content of the note. If not provided, content will be read from stdin. This allows piping content from other commands, e.g.: cat file.md | basic-memory tools write-note" + help="The content of the note. 
diff --git a/src/basic_memory/cli/commands/tool.py b/src/basic_memory/cli/commands/tool.py
index 875bd0d92..dc4d257a1 100644
--- a/src/basic_memory/cli/commands/tool.py
+++ b/src/basic_memory/cli/commands/tool.py
@@ -298,7 +298,7 @@ def write_note(
     content: Annotated[
         Optional[str],
         typer.Option(
-            help="The content of the note. If not provided, content will be read from stdin. This allows piping content from other commands, e.g.: cat file.md | basic-memory tools write-note"
+            help="The content of the note. If not provided, content will be read from stdin. This allows piping content from other commands, e.g.: cat file.md | bm tool write-note"
         ),
     ] = None,
     tags: Annotated[
@@ -322,13 +322,13 @@ def write_note(
     Examples:

         # Using content parameter
-        basic-memory tools write-note --title "My Note" --folder "notes" --content "Note content"
+        bm tool write-note --title "My Note" --folder "notes" --content "Note content"

         # Using stdin pipe
-        echo "# My Note Content" | basic-memory tools write-note --title "My Note" --folder "notes"
+        echo "# My Note Content" | bm tool write-note --title "My Note" --folder "notes"

         # Using heredoc
-        cat << EOF | basic-memory tools write-note --title "My Note" --folder "notes"
+        cat << EOF | bm tool write-note --title "My Note" --folder "notes"
         # My Document

         This is my document content.
@@ -338,10 +338,10 @@
         EOF

         # Reading from a file
-        cat document.md | basic-memory tools write-note --title "Document" --folder "docs"
+        cat document.md | bm tool write-note --title "Document" --folder "docs"

         # Force local routing in cloud mode
-        basic-memory tools write-note --title "My Note" --folder "notes" --content "..." --local
+        bm tool write-note --title "My Note" --folder "notes" --content "..." --local
     """
     try:
         validate_routing_flags(local, cloud)
diff --git a/src/basic_memory/db.py b/src/basic_memory/db.py
index e12b8a76b..26aa0a379 100644
--- a/src/basic_memory/db.py
+++ b/src/basic_memory/db.py
@@ -476,13 +476,14 @@ async def run_migrations(
     so it's safe to call this multiple times - it will only run pending migrations.
     """
     logger.info("Running database migrations...")
+    temp_engine: AsyncEngine | None = None
     try:
         revisions_before_upgrade: set[str] = set()
         # Trigger: run_migrations() can be invoked before module-level session maker is set.
         # Why: we still need reliable before/after revision detection for one-time backfill.
         # Outcome: create a short-lived session maker when needed, then dispose it immediately.
         if _session_maker is None:
-            temp_engine, temp_session_maker = _create_engine_and_session(
+            precheck_engine, temp_session_maker = _create_engine_and_session(
                 app_config.database_path,
                 database_type,
                 app_config,
@@ -490,7 +491,7 @@ async def run_migrations(
            )
            try:
                revisions_before_upgrade = await _load_applied_alembic_revisions(temp_session_maker)
            finally:
-                await temp_engine.dispose()
+                await precheck_engine.dispose()
        else:
            revisions_before_upgrade = await _load_applied_alembic_revisions(_session_maker)
@@ -517,7 +518,9 @@ async def run_migrations(

         # Get session maker - ensure we don't trigger recursive migration calls
         if _session_maker is None:
-            _, session_maker = _create_engine_and_session(app_config.database_path, database_type)
+            temp_engine, session_maker = _create_engine_and_session(
+                app_config.database_path, database_type, app_config
+            )
         else:
             session_maker = _session_maker
@@ -542,3 +545,11 @@ async def run_migrations(
     except Exception as e:  # pragma: no cover
         logger.error(f"Error running migrations: {e}")
         raise
+    finally:
+        # Trigger: run_migrations() created a temporary engine while module-level
+        # session maker was not initialized.
+        # Why: temporary aiosqlite worker threads can outlive CLI command execution
+        # and block process shutdown if the engine is not disposed.
+        # Outcome: always dispose temporary engines after migration work completes.
+        if temp_engine is not None:
+            await temp_engine.dispose()
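A condensed sketch of the disposal pattern `run_migrations` adopts here: track the temporary engine in a variable visible to `finally` and always dispose it, since an undisposed aiosqlite engine keeps a worker thread alive that can block process exit. The path and placeholder query are illustrative, not from this PR:

```python
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine


async def migrate_with_temporary_engine(db_path: str) -> None:
    temp_engine: AsyncEngine | None = None
    try:
        temp_engine = create_async_engine(f"sqlite+aiosqlite:///{db_path}")
        async with temp_engine.connect() as conn:
            # Placeholder for real migration work.
            await conn.execute(text("SELECT 1"))
    finally:
        # Dispose even on failure so aiosqlite's worker thread exits and the
        # CLI process can terminate promptly.
        if temp_engine is not None:
            await temp_engine.dispose()
```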
diff --git a/src/basic_memory/mcp/tools/project_management.py b/src/basic_memory/mcp/tools/project_management.py
index f4323bf93..677124241 100644
--- a/src/basic_memory/mcp/tools/project_management.py
+++ b/src/basic_memory/mcp/tools/project_management.py
@@ -65,9 +65,7 @@ async def list_memory_projects(
     result = "Available projects:\n"
     for project in project_list.projects:
         label = (
-            f"{project.display_name} ({project.name})"
-            if project.display_name
-            else project.name
+            f"{project.display_name} ({project.name})" if project.display_name else project.name
         )
         result += f"• {label}\n"
diff --git a/src/basic_memory/mcp/tools/utils.py b/src/basic_memory/mcp/tools/utils.py
index f892cedcc..fd526548b 100644
--- a/src/basic_memory/mcp/tools/utils.py
+++ b/src/basic_memory/mcp/tools/utils.py
@@ -23,6 +23,8 @@
 from loguru import logger
 from mcp.server.fastmcp.exceptions import ToolError

+from basic_memory.config import ConfigManager
+

 def get_error_message(
     status_code: int, url: URL | str, method: str, msg: Optional[str] = None
@@ -74,6 +76,65 @@ def get_error_message(
     return f"HTTP error {status_code}: {method} request to '{path}' failed"


+def _extract_response_data(response: Response) -> typing.Any:
+    """Safely decode response payload for error reporting."""
+    try:
+        return response.json()
+    except Exception:
+        return None
+
+
+def _response_detail_text(response_data: typing.Any) -> str | None:
+    """Extract textual error detail from API payloads."""
+    if isinstance(response_data, dict):
+        detail = response_data.get("detail")
+        if isinstance(detail, str):
+            return detail
+        if isinstance(detail, dict):
+            nested_message = detail.get("message")
+            if isinstance(nested_message, str):
+                return nested_message
+            return str(detail)
+        if detail is not None:
+            return str(detail)
+    return None
+
+
+def _has_configured_cloud_api_key() -> bool:
+    """Check whether a cloud API key is currently configured."""
+    try:
+        return bool(ConfigManager().config.cloud_api_key)
+    except Exception:
+        return False
+
+
+def _resolve_error_message(
+    status_code: int, url: URL | str, method: str, response_data: typing.Any
+) -> str:
+    """Resolve a user-facing error message with cloud auth remediation when relevant."""
+    detail_text = _response_detail_text(response_data)
+
+    if status_code == 401 and _has_configured_cloud_api_key():
+        detail_lower = detail_text.lower() if detail_text else ""
+        if (
+            "invalid jwt" in detail_lower
+            or "invalid token" in detail_lower
+            or "authentication required" in detail_lower
+            or not detail_lower
+        ):
+            return (
+                "Authentication failed: the configured cloud API key was rejected by the server. "
+                "Basic Memory prioritizes cloud_api_key over OAuth for cloud routing. "
+                "Fix by running `bm cloud set-key ` "
+                "or remove `cloud_api_key` and use `bm cloud login`."
+            )
+
+    if detail_text:
+        return detail_text
+
+    return get_error_message(status_code, url, method)
+
+
 async def call_get(
     client: AsyncClient,
     url: URL | str,
@@ -125,12 +186,8 @@ async def call_get(
             # Handle different status codes differently
             status_code = response.status_code

-            # get the message if available
-            response_data = response.json()
-            if isinstance(response_data, dict) and "detail" in response_data:
-                error_message = response_data["detail"]
-            else:
-                error_message = get_error_message(status_code, url, "PUT")
+            response_data = _extract_response_data(response)
+            error_message = _resolve_error_message(status_code, url, "GET", response_data)

             # Log at appropriate level based on status code
             if 400 <= status_code < 500:
@@ -215,12 +272,8 @@ async def call_put(
             # Handle different status codes differently
             status_code = response.status_code

-            # get the message if available
-            response_data = response.json()
-            if isinstance(response_data, dict) and "detail" in response_data:
-                error_message = response_data["detail"]  # pragma: no cover
-            else:
-                error_message = get_error_message(status_code, url, "PUT")
+            response_data = _extract_response_data(response)
+            error_message = _resolve_error_message(status_code, url, "PUT", response_data)

             # Log at appropriate level based on status code
             if 400 <= status_code < 500:
@@ -304,15 +357,8 @@ async def call_patch(
             # Handle different status codes differently
             status_code = response.status_code

-            # Try to extract specific error message from response body
-            try:
-                response_data = response.json()
-                if isinstance(response_data, dict) and "detail" in response_data:
-                    error_message = response_data["detail"]
-                else:
-                    error_message = get_error_message(status_code, url, "PATCH")  # pragma: no cover
-            except Exception:  # pragma: no cover
-                error_message = get_error_message(status_code, url, "PATCH")  # pragma: no cover
+            response_data = _extract_response_data(response)
+            error_message = _resolve_error_message(status_code, url, "PATCH", response_data)

             # Log at appropriate level based on status code
             if 400 <= status_code < 500:
@@ -332,15 +378,8 @@ async def call_patch(
     except HTTPStatusError as e:
         status_code = e.response.status_code

-        # Try to extract specific error message from response body
-        try:
-            response_data = e.response.json()
-            if isinstance(response_data, dict) and "detail" in response_data:
-                error_message = response_data["detail"]
-            else:
-                error_message = get_error_message(status_code, url, "PATCH")  # pragma: no cover
-        except Exception:  # pragma: no cover
-            error_message = get_error_message(status_code, url, "PATCH")  # pragma: no cover
+        response_data = _extract_response_data(e.response)
+        error_message = _resolve_error_message(status_code, url, "PATCH", response_data)

         raise ToolError(error_message) from e
@@ -409,12 +448,8 @@ async def call_post(
             # Handle different status codes differently
             status_code = response.status_code

-            # get the message if available
-            response_data = response.json()
-            if isinstance(response_data, dict) and "detail" in response_data:
-                error_message = response_data["detail"]
-            else:
-                error_message = get_error_message(status_code, url, "POST")
+            response_data = _extract_response_data(response)
+            error_message = _resolve_error_message(status_code, url, "POST", response_data)

             # Log at appropriate level based on status code
             if 400 <= status_code < 500:
@@ -518,12 +553,8 @@ async def call_delete(
             # Handle different status codes differently
             status_code = response.status_code

-            # get the message if available
-            response_data = response.json()
-            if isinstance(response_data, dict) and "detail" in response_data:
-                error_message = response_data["detail"]  # pragma: no cover
-            else:
-                error_message = get_error_message(status_code, url, "DELETE")
+            response_data = _extract_response_data(response)
+            error_message = _resolve_error_message(status_code, url, "DELETE", response_data)

             # Log at appropriate level based on status code
             if 400 <= status_code < 500:
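To make the new helper's contract concrete, these are the payload shapes `_response_detail_text` distinguishes, with outputs derived from the implementation above (importing a private helper is for illustration only, and assumes this module layout):

```python
from basic_memory.mcp.tools.utils import _response_detail_text

assert _response_detail_text({"detail": "Project not found"}) == "Project not found"
assert _response_detail_text({"detail": {"message": "Invalid JWT"}}) == "Invalid JWT"
assert _response_detail_text({"detail": {"code": 401}}) == "{'code': 401}"  # str() fallback
assert _response_detail_text({"detail": 404}) == "404"                      # non-str, non-dict detail
assert _response_detail_text({"error": "no detail key"}) is None
assert _response_detail_text(None) is None
```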
diff --git a/src/basic_memory/repository/sqlite_search_repository.py b/src/basic_memory/repository/sqlite_search_repository.py
index b63ebdc9c..437652155 100644
--- a/src/basic_memory/repository/sqlite_search_repository.py
+++ b/src/basic_memory/repository/sqlite_search_repository.py
@@ -607,6 +607,7 @@ async def search(
         # --- FTS mode (SQLite-specific) ---
         conditions = []
+        match_conditions = []
         params = {}
         order_by_clause = ""
         from_clause = "search_index"
@@ -621,7 +622,7 @@
             # Use _prepare_search_term to handle both Boolean and non-Boolean queries
             processed_text = self._prepare_search_term(search_text.strip())
             params["text"] = processed_text
-            conditions.append(
+            match_conditions.append(
                 "(search_index.title MATCH :text OR search_index.content_stems MATCH :text)"
             )
@@ -629,7 +630,7 @@
         # Handle title search
         if title:
             title_text = self._prepare_search_term(title.strip(), is_prefix=False)
             params["title_text"] = title_text
-            conditions.append("search_index.title MATCH :title_text")
+            match_conditions.append("search_index.title MATCH :title_text")

         # Handle permalink exact search
         if permalink:
@@ -652,7 +653,7 @@
             else:
                 permalink_text = self._prepare_search_term(permalink_text, is_prefix=False)
                 params["permalink"] = permalink_text
-                conditions.append("search_index.permalink MATCH :permalink")
+                match_conditions.append("search_index.permalink MATCH :permalink")

         # Handle entity type filter
         if search_item_types:
@@ -756,6 +757,18 @@
                 conditions.append(f"{compare_expr} {operator} :{value_param}")
                 continue

+        # Trigger: SQLite FTS MATCH predicates combined with JOINs can fail with
+        # "unable to use function MATCH in the requested context".
+        # Why: MATCH needs to run in an FTS-valid context.
+        # Outcome: evaluate MATCH clauses in an FTS subquery and filter outer rows by rowid.
+        if metadata_filters and match_conditions:
+            match_where = " AND ".join(match_conditions)
+            conditions.append(
+                f"search_index.rowid IN (SELECT rowid FROM search_index WHERE {match_where})"
+            )
+        else:
+            conditions.extend(match_conditions)
+
         # Always filter by project_id
         params["project_id"] = self.project_id
         conditions.append("search_index.project_id = :project_id")
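A self-contained reproduction of the subquery shape this change adopts, using the stdlib `sqlite3` with FTS5 (assumed available in the build) and illustrative table names rather than Basic Memory's real schema — MATCH runs in a subquery over the FTS table alone, and the outer, JOIN-bearing query filters by rowid:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE VIRTUAL TABLE search_index USING fts5(title, content_stems);
    CREATE TABLE meta (rowid_ref INTEGER, status TEXT);
    INSERT INTO search_index (rowid, title, content_stems) VALUES (1, 'Active', 'token here');
    INSERT INTO search_index (rowid, title, content_stems) VALUES (2, 'Inactive', 'token here');
    INSERT INTO meta VALUES (1, 'active'), (2, 'inactive');
    """
)

# MATCH is evaluated in a subquery against the FTS table alone, keeping it in
# an FTS-valid context even though the outer query joins another table.
rows = conn.execute(
    """
    SELECT search_index.rowid, meta.status
    FROM search_index
    JOIN meta ON meta.rowid_ref = search_index.rowid
    WHERE search_index.rowid IN (
        SELECT rowid FROM search_index WHERE content_stems MATCH 'token'
    )
    AND meta.status = :status
    """,
    {"status": "active"},
).fetchall()

print(rows)  # expected: [(1, 'active')]
```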
diff --git a/src/basic_memory/services/project_service.py b/src/basic_memory/services/project_service.py
index d45b405da..93d4fb99a 100644
--- a/src/basic_memory/services/project_service.py
+++ b/src/basic_memory/services/project_service.py
@@ -39,9 +39,7 @@ class ProjectService:

     repository: ProjectRepository

-    def __init__(
-        self, repository: ProjectRepository, file_service: Optional["FileService"] = None
-    ):
+    def __init__(self, repository: ProjectRepository, file_service: Optional["FileService"] = None):
         """Initialize the project service."""
         super().__init__()
         self.repository = repository
@@ -574,19 +572,24 @@ async def get_project_info(self, project_name: Optional[str] = None) -> ProjectI
             raise ValueError("Repository is required for get_project_info")

         # Use specified project or fall back to config project
-        project_name = project_name or self.config.project
-        # Get project path from configuration
-        name, project_path = self.config_manager.get_project(project_name)
-        if not name:  # pragma: no cover
-            raise ValueError(f"Project '{project_name}' not found in configuration")
-
-        assert project_path is not None
-        project_permalink = generate_permalink(project_name)
+        requested_project_name = project_name or self.config.project
+        project_permalink = generate_permalink(requested_project_name)

         # Get project from database to get project_id
         db_project = await self.repository.get_by_permalink(project_permalink)
         if not db_project:  # pragma: no cover
-            raise ValueError(f"Project '{project_name}' not found in database")
+            raise ValueError(f"Project '{requested_project_name}' not found in database")
+
+        # Trigger: cloud-only projects may exist in DB but not in local config.
+        # Why: cloud routing should not require local config entries for project info.
+        # Outcome: prefer config path when available, otherwise use DB path.
+        config_name, config_path = self.config_manager.get_project(db_project.name)
+        if config_name and config_path:
+            resolved_project_name = config_name
+            resolved_project_path = config_path
+        else:
+            resolved_project_name = db_project.name
+            resolved_project_path = db_project.path

         # Get statistics for the specified project
         statistics = await self.get_statistics(db_project.id)
@@ -603,24 +606,45 @@ async def get_project_info(self, project_name: Optional[str] = None) -> ProjectI

         # Get default project info
         default_project = self.config_manager.default_project
+        if default_project is None:
+            for project in db_projects:
+                if project.is_default:
+                    default_project = project.name
+                    break

         # Convert config projects to include database info
         enhanced_projects = {}
-        for name, path in self.config_manager.projects.items():
-            config_permalink = generate_permalink(name)
-            db_project = db_projects_by_permalink.get(config_permalink)
-            enhanced_projects[name] = {
-                "path": path,
-                "active": db_project.is_active if db_project else True,
-                "id": db_project.id if db_project else None,
-                "is_default": (name == default_project),
-                "permalink": db_project.permalink if db_project else name.lower().replace(" ", "-"),
+        for config_project_name, config_project_path in self.config_manager.projects.items():
+            config_permalink = generate_permalink(config_project_name)
+            config_db_project = db_projects_by_permalink.get(config_permalink)
+            enhanced_projects[config_project_name] = {
+                "path": config_project_path,
+                "active": config_db_project.is_active if config_db_project else True,
+                "id": config_db_project.id if config_db_project else None,
+                "is_default": (config_project_name == default_project),
+                "permalink": (
+                    config_db_project.permalink
+                    if config_db_project
+                    else config_project_name.lower().replace(" ", "-")
+                ),
             }

+        # Include active DB projects that are not present in local config (cloud-only).
+        for active_db_project in db_projects:
+            if active_db_project.name in enhanced_projects:
+                continue
+            enhanced_projects[active_db_project.name] = {
+                "path": active_db_project.path,
+                "active": active_db_project.is_active,
+                "id": active_db_project.id,
+                "is_default": bool(active_db_project.is_default),
+                "permalink": active_db_project.permalink,
+            }
+
         # Construct the response
         return ProjectInfoResponse(
-            project_name=project_name,
-            project_path=project_path,
+            project_name=resolved_project_name,
+            project_path=resolved_project_path,
             available_projects=enhanced_projects,
             default_project=default_project,
             statistics=statistics,
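The resolution rule introduced above, reduced to its core as a sketch — plain tuples stand in for `ConfigManager.get_project()` and the repository result, and the sample values are illustrative:

```python
from typing import Optional, Tuple


def resolve_project(
    config_entry: Tuple[Optional[str], Optional[str]], db_name: str, db_path: str
) -> Tuple[str, str]:
    """Prefer the local config entry; fall back to DB values for cloud-only projects."""
    config_name, config_path = config_entry
    if config_name and config_path:
        return config_name, config_path
    return db_name, db_path


# Local project: the config entry wins.
assert resolve_project(
    ("research", "~/Documents/research"), "research", "/app/data/research"
) == ("research", "~/Documents/research")

# Cloud-only project: no config entry, so DB values are used.
assert resolve_project((None, None), "cloud-only", "/app/data/cloud-only") == (
    "cloud-only",
    "/app/data/cloud-only",
)
```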
diff --git a/test-int/cli/test_search_notes_meta_integration.py b/test-int/cli/test_search_notes_meta_integration.py
new file mode 100644
index 000000000..4f3e029cd
--- /dev/null
+++ b/test-int/cli/test_search_notes_meta_integration.py
@@ -0,0 +1,71 @@
+"""Integration coverage for tool search-notes with metadata filters."""
+
+import json
+
+from typer.testing import CliRunner
+
+from basic_memory.cli.main import app as cli_app
+
+runner = CliRunner()
+
+
+def test_search_notes_query_plus_meta_filter(app, app_config, test_project, config_manager):
+    """`bm tool search-notes` should support query + metadata filter together."""
+    active_content = "---\nstatus: active\n---\n# Active Meta Note\n\nMetaFilterToken"
+    inactive_content = "---\nstatus: inactive\n---\n# Inactive Meta Note\n\nMetaFilterToken"
+
+    active_write = runner.invoke(
+        cli_app,
+        [
+            "tool",
+            "write-note",
+            "--title",
+            "Active Meta Note",
+            "--folder",
+            "meta-tests",
+            "--content",
+            active_content,
+            "--format",
+            "json",
+        ],
+    )
+    assert active_write.exit_code == 0, active_write.output
+    active_data = json.loads(active_write.stdout)
+
+    inactive_write = runner.invoke(
+        cli_app,
+        [
+            "tool",
+            "write-note",
+            "--title",
+            "Inactive Meta Note",
+            "--folder",
+            "meta-tests",
+            "--content",
+            inactive_content,
+            "--format",
+            "json",
+        ],
+    )
+    assert inactive_write.exit_code == 0, inactive_write.output
+    inactive_data = json.loads(inactive_write.stdout)
+
+    search = runner.invoke(
+        cli_app,
+        [
+            "tool",
+            "search-notes",
+            "MetaFilterToken",
+            "--meta",
+            "status=active",
+            "--local",
+            "--page-size",
+            "20",
+        ],
+    )
+    assert search.exit_code == 0, search.output
+
+    payload = json.loads(search.stdout)
+    permalinks = {item["permalink"] for item in payload["results"]}
+    assert active_data["permalink"] in permalinks
+    assert inactive_data["permalink"] not in permalinks
diff --git a/tests/cli/cloud/test_upload_command_routing.py b/tests/cli/cloud/test_upload_command_routing.py
new file mode 100644
index 000000000..6bb0115b0
--- /dev/null
+++ b/tests/cli/cloud/test_upload_command_routing.py
@@ -0,0 +1,55 @@
+"""Tests for cloud upload command routing behavior."""
+
+from contextlib import asynccontextmanager
+
+import httpx
+from typer.testing import CliRunner
+
+from basic_memory.cli.app import app
+
+runner = CliRunner()
+
+
+def test_cloud_upload_uses_control_plane_client(monkeypatch, tmp_path):
+    """Upload command should use control-plane cloud client for WebDAV PUT operations."""
+    import basic_memory.cli.commands.cloud.upload_command as upload_command
+
+    upload_dir = tmp_path / "upload"
+    upload_dir.mkdir()
+    (upload_dir / "note.md").write_text("hello", encoding="utf-8")
+
+    seen: dict[str, str] = {}
+
+    async def fake_project_exists(_project_name: str) -> bool:
+        return True
+
+    @asynccontextmanager
+    async def fake_get_client():
+        async with httpx.AsyncClient(base_url="https://cloud.example.test") as client:
+            yield client
+
+    async def fake_upload_path(*args, **kwargs):
+        client_cm_factory = kwargs.get("client_cm_factory")
+        assert client_cm_factory is not None
+        async with client_cm_factory() as client:
+            seen["base_url"] = str(client.base_url).rstrip("/")
+        return True
+
+    monkeypatch.setattr(upload_command, "project_exists", fake_project_exists)
+    monkeypatch.setattr(upload_command, "get_cloud_control_plane_client", fake_get_client)
+    monkeypatch.setattr(upload_command, "upload_path", fake_upload_path)
+
+    result = runner.invoke(
+        app,
+        [
+            "cloud",
+            "upload",
+            str(upload_dir),
+            "--project",
+            "routing-test",
+            "--no-sync",
+        ],
+    )
+
+    assert result.exit_code == 0, result.output
+    assert seen["base_url"] == "https://cloud.example.test"
diff --git a/tests/cli/test_db_reset_exit.py b/tests/cli/test_db_reset_exit.py
new file mode 100644
index 000000000..f9fdae9d2
--- /dev/null
+++ b/tests/cli/test_db_reset_exit.py
@@ -0,0 +1,56 @@
+"""Regression tests for bm reset command process exit behavior."""
+
+import os
+import platform
+import subprocess
+from pathlib import Path
+
+import pytest
+
+
+IS_WINDOWS = platform.system() == "Windows"
+skip_on_windows = pytest.mark.skipif(
+    IS_WINDOWS, reason="Subprocess cleanup tests are unreliable on Windows CI"
+)
+
+
+def _isolated_env(tmp_path: Path) -> dict[str, str]:
+    env = dict(os.environ)
+    env["HOME"] = str(tmp_path)
+    if os.name == "nt":
+        env["USERPROFILE"] = str(tmp_path)
+    env["BASIC_MEMORY_HOME"] = str(tmp_path / "basic-memory")
+    env["BASIC_MEMORY_CONFIG_DIR"] = str(tmp_path / ".basic-memory")
+    return env
+
+
+@skip_on_windows
+def test_bm_reset_exits_cleanly(tmp_path: Path):
+    """bm reset should finish and exit cleanly with non-interactive confirmation."""
+    result = subprocess.run(
+        ["uv", "run", "bm", "reset"],
+        input="y\n",
+        capture_output=True,
+        text=True,
+        timeout=20,
+        cwd=Path(__file__).parent.parent.parent,
+        env=_isolated_env(tmp_path),
+    )
+    assert result.returncode == 0, result.stderr
+    assert "Database reset complete" in result.stdout
+
+
+@skip_on_windows
+def test_bm_reset_reindex_exits_cleanly(tmp_path: Path):
+    """bm reset --reindex should finish and exit cleanly with non-interactive confirmation."""
+    result = subprocess.run(
+        ["uv", "run", "bm", "reset", "--reindex"],
+        input="y\n",
+        capture_output=True,
+        text=True,
+        timeout=30,
+        cwd=Path(__file__).parent.parent.parent,
+        env=_isolated_env(tmp_path),
+    )
+    assert result.returncode == 0, result.stderr
+    assert "Reindex complete" in result.stdout
diff --git a/tests/cli/test_project_info_errors.py b/tests/cli/test_project_info_errors.py
new file mode 100644
index 000000000..19b00e487
--- /dev/null
+++ b/tests/cli/test_project_info_errors.py
@@ -0,0 +1,58 @@
+"""Regression tests for project info error handling."""
+
+from contextlib import asynccontextmanager
+from types import SimpleNamespace
+
+import pytest
+import typer
+from mcp.server.fastmcp.exceptions import ToolError
+from typer.testing import CliRunner
+
+from basic_memory.cli.app import app
+import basic_memory.cli.commands.command_utils as command_utils
+import basic_memory.cli.commands.project as project_cmd  # noqa: F401
+
+runner = CliRunner()
+
+
+@pytest.mark.asyncio
+async def test_get_project_info_cloud_config_error_has_clear_message(monkeypatch, capsys):
+    """Cloud internal proxy config failures should surface actionable guidance."""
+
+    @asynccontextmanager
+    async def fake_get_client(project_name=None):
+        yield object()
+
+    async def fake_get_active_project(client, project, context):
+        return SimpleNamespace(external_id="proj-123")
+
+    async def fake_call_get(client, url):
+        raise ToolError("Internal proxy error: Project 'demo' not found in configuration")
+
+    monkeypatch.setattr(command_utils, "get_client", fake_get_client)
+    monkeypatch.setattr(command_utils, "get_active_project", fake_get_active_project)
+    monkeypatch.setattr(command_utils, "call_get", fake_call_get)
+
+    with pytest.raises(typer.Exit) as exc:
+        await command_utils.get_project_info("demo")
+
+    assert exc.value.exit_code == 1
+    captured = capsys.readouterr()
+    combined_output = captured.out + captured.err
+    assert "Project info failed: cloud returned an internal configuration error" in combined_output
+    assert "bm project list" in combined_output
+    assert "--cloud" in combined_output
+
+
+def test_project_info_does_not_print_wrapper_exit_code(monkeypatch):
+    """project info should not append a secondary 'Error getting project info: 1' line."""
+
+    async def fake_get_project_info(_project_name: str):
+        raise typer.Exit(1)
+
+    monkeypatch.setattr(project_cmd, "get_project_info", fake_get_project_info)
+
+    result = runner.invoke(app, ["project", "info", "demo"])
+
+    assert result.exit_code == 1
+    assert "Error getting project info" not in result.output
diff --git a/tests/mcp/test_tool_utils_cloud_auth.py b/tests/mcp/test_tool_utils_cloud_auth.py
new file mode 100644
index 000000000..4ecd52ffa
--- /dev/null
+++ b/tests/mcp/test_tool_utils_cloud_auth.py
@@ -0,0 +1,56 @@
+"""Cloud auth error translation tests for MCP tool HTTP helpers."""
+
+import pytest
+from httpx import HTTPStatusError
+from mcp.server.fastmcp.exceptions import ToolError
+
+from basic_memory.mcp.tools.utils import call_post
+
+
+class _MockResponse:
+    def __init__(self, status_code: int, payload: dict):
+        self.status_code = status_code
+        self._payload = payload
+        self.is_success = status_code < 400
+
+    def json(self):
+        return self._payload
+
+    def raise_for_status(self):
+        if self.status_code >= 400:
+            raise HTTPStatusError(
+                message=f"HTTP Error {self.status_code}",
+                request=None,
+                response=self,
+            )
+
+
+class _PostClient:
+    def __init__(self, response: _MockResponse):
+        self._response = response
+
+    async def post(self, *args, **kwargs):
+        return self._response
+
+
+@pytest.mark.asyncio
+async def test_call_post_401_with_cloud_key_shows_actionable_remediation(config_manager):
+    """401 auth failures with configured cloud_api_key should provide clear remediation."""
+    config = config_manager.load_config()
+    config.cloud_api_key = "bmc_invalid_test_key"
+    config_manager.save_config(config)
+
+    client = _PostClient(
+        _MockResponse(
+            401,
+            {"detail": "Invalid JWT token. Authentication required."},
+        )
+    )
+
+    with pytest.raises(ToolError) as exc:
+        await call_post(client, "/v2/projects/", json={"name": "test"})
+
+    message = str(exc.value)
+    assert "configured cloud API key was rejected" in message
+    assert "bm cloud set-key " in message
+    assert "cloud_api_key" in message
diff --git a/tests/repository/test_search_text_with_metadata_filters.py b/tests/repository/test_search_text_with_metadata_filters.py
new file mode 100644
index 000000000..0d746562a
--- /dev/null
+++ b/tests/repository/test_search_text_with_metadata_filters.py
@@ -0,0 +1,65 @@
+"""Regression coverage for combined text + metadata filtering."""
+
+from datetime import datetime, timezone
+
+import pytest
+
+from basic_memory import db
+from basic_memory.models.knowledge import Entity
+from basic_memory.repository.search_index_row import SearchIndexRow
+from basic_memory.schemas.search import SearchItemType
+
+
+async def _index_entity(search_repository, session_maker, title: str, status: str) -> Entity:
+    slug = "-".join(title.lower().split())
+    now = datetime.now(timezone.utc)
+    file_path = f"notes/{slug}.md"
+    permalink = f"notes/{slug}"
+
+    async with db.scoped_session(session_maker) as session:
+        entity = Entity(
+            project_id=search_repository.project_id,
+            title=title,
+            entity_type="note",
+            permalink=permalink,
+            file_path=file_path,
+            content_type="text/markdown",
+            entity_metadata={"status": status},
+            created_at=now,
+            updated_at=now,
+        )
+        session.add(entity)
+        await session.flush()
+
+    await search_repository.index_item(
+        SearchIndexRow(
+            project_id=search_repository.project_id,
+            id=entity.id,
+            type=SearchItemType.ENTITY.value,
+            title=entity.title,
+            content_stems="CLI metadata filter regression",
+            content_snippet="CLI metadata filter regression",
+            permalink=entity.permalink,
+            file_path=entity.file_path,
+            entity_id=entity.id,
+            metadata={"entity_type": entity.entity_type},
+            created_at=entity.created_at,
+            updated_at=entity.updated_at,
+        )
+    )
+
+    return entity
+
+
+@pytest.mark.asyncio
+async def test_search_text_and_metadata_filters_work_together(search_repository, session_maker):
+    """Combined text + metadata filters should work without MATCH context errors."""
+    active = await _index_entity(search_repository, session_maker, "CLI Active Result", "active")
+    await _index_entity(search_repository, session_maker, "CLI Inactive Result", "inactive")
+
+    results = await search_repository.search(
+        search_text="CLI",
+        metadata_filters={"status": "active"},
+    )
+
+    assert {row.id for row in results} == {active.id}
diff --git a/tests/services/test_project_service_cloud_info.py b/tests/services/test_project_service_cloud_info.py
new file mode 100644
index 000000000..c672a9468
--- /dev/null
+++ b/tests/services/test_project_service_cloud_info.py
@@ -0,0 +1,38 @@
+"""Regression tests for cloud-only project info handling."""
+
+import os
+
+import pytest
+
+
+@pytest.mark.asyncio
+async def test_get_project_info_supports_db_only_project(
+    project_service,
+    project_repository,
+    config_manager,
+):
+    """Project info should work when project exists in DB but not local config."""
+    suffix = os.urandom(4).hex()
+    project_name = f"cloud-only-{suffix}"
+    project_path = f"/app/data/{project_name}"
+
+    # Ensure the project is not present in local config.
+    config = config_manager.load_config()
+    config.projects.pop(project_name, None)
+    config_manager.save_config(config)
+
+    await project_repository.create(
+        {
+            "name": project_name,
+            "path": project_path,
+            "is_active": True,
+            "is_default": False,
+        }
+    )
+
+    info = await project_service.get_project_info(project_name)
+
+    assert info.project_name == project_name
+    assert info.project_path == project_path
+    assert project_name in info.available_projects
+    assert info.available_projects[project_name]["path"] == project_path