From 3d5bafa2c73d9afeb17a97ab71d522dd96692c40 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Tue, 7 Apr 2026 17:30:10 -0500 Subject: [PATCH 01/32] fix(sync[hang]) Prevent indefinite blocking on credential prompts and slow fetches why: vcspull sync hangs indefinitely when a repo needs credentials (git waits on stdin) or when fetching very large repos (e.g. 15GB rust-lang/rust). Sequential processing means one stuck repo blocks the entire batch. what: - Set GIT_TERMINAL_PROMPT=0 at sync() entry to make git fail fast on auth - Add 120s timeout to _maybe_fetch subprocess.run() call - Handle subprocess.TimeoutExpired with descriptive error message - Pass no-prompt env to _maybe_fetch subprocess calls - Add tests for timeout handling, env propagation, and prompt suppression Refs: #543 --- src/vcspull/cli/sync.py | 20 +++++ tests/cli/test_sync_plan_helpers.py | 120 +++++++++++++++++++++++++++- 2 files changed, 138 insertions(+), 2 deletions(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index fadf6e93d..b86154f5f 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -159,6 +159,19 @@ def clamp(n: int, _min: int, _max: int) -> int: NO_REPOS_FOR_TERM_MSG = 'No repo found in config(s) for "{name}"' +_FETCH_TIMEOUT_SECONDS = 120 + +_NO_PROMPT_ENV: dict[str, str] | None = None + + +def _get_no_prompt_env() -> dict[str, str]: + """Return an environment dict that prevents git from prompting on stdin.""" + global _NO_PROMPT_ENV + if _NO_PROMPT_ENV is None: + _NO_PROMPT_ENV = {**os.environ, "GIT_TERMINAL_PROMPT": "0"} + return _NO_PROMPT_ENV + + def _maybe_fetch( repo_path: pathlib.Path, *, @@ -177,7 +190,11 @@ def _maybe_fetch( capture_output=True, text=True, check=False, + timeout=_FETCH_TIMEOUT_SECONDS, + env=_get_no_prompt_env(), ) + except subprocess.TimeoutExpired: + return False, f"git fetch timed out after {_FETCH_TIMEOUT_SECONDS}s" except FileNotFoundError: return False, "git executable not found" except OSError as exc: @@ -642,6 
+659,9 @@ def sync( include_worktrees: bool = False, ) -> None: """Entry point for ``vcspull sync``.""" + # Prevent git from blocking on credential prompts during batch sync + os.environ.setdefault("GIT_TERMINAL_PROMPT", "0") + # Show help if no patterns and --all not specified if not repo_patterns and not sync_all: if parser is not None: diff --git a/tests/cli/test_sync_plan_helpers.py b/tests/cli/test_sync_plan_helpers.py index a9a3a9a1c..5d097dee3 100644 --- a/tests/cli/test_sync_plan_helpers.py +++ b/tests/cli/test_sync_plan_helpers.py @@ -2,13 +2,14 @@ from __future__ import annotations +import os import subprocess import typing as t import pytest from vcspull.cli._output import PlanAction -from vcspull.cli.sync import SyncPlanConfig, _determine_plan_action, _maybe_fetch +from vcspull.cli.sync import SyncPlanConfig, _determine_plan_action, _maybe_fetch, sync if t.TYPE_CHECKING: import pathlib @@ -81,6 +82,15 @@ class MaybeFetchFixture(t.NamedTuple): subprocess_behavior="non-zero", expected_result=(True, None), ), + MaybeFetchFixture( + test_id="fetch-timeout", + fetch=True, + offline=False, + create_repo=True, + create_git_dir=True, + subprocess_behavior="timeout", + expected_result=(False, None), # message checked separately via startswith + ), ] @@ -119,6 +129,8 @@ def _patched_run( if subprocess_behavior == "os-error": error_message = "Permission denied" raise OSError(error_message) + if subprocess_behavior == "timeout": + raise subprocess.TimeoutExpired(cmd=args[0], timeout=120) if subprocess_behavior == "non-zero": return subprocess.CompletedProcess( args=args[0], @@ -140,7 +152,13 @@ def _patched_run( config=SyncPlanConfig(fetch=fetch, offline=offline), ) - assert result == expected_result + ok, message = result + assert ok == expected_result[0] + if subprocess_behavior == "timeout": + assert message is not None + assert "timed out" in message + else: + assert result == expected_result class DeterminePlanActionFixture(t.NamedTuple): @@ -248,3 +266,101 @@ 
def test_determine_plan_action( action, detail = _determine_plan_action(status, config=config) assert action is expected_action assert detail == expected_detail + + +def test_maybe_fetch_passes_no_prompt_env( + tmp_path: pathlib.Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify _maybe_fetch sets GIT_TERMINAL_PROMPT=0 to prevent hangs.""" + repo_path = tmp_path / "repo" + repo_path.mkdir() + (repo_path / ".git").mkdir() + + captured_kwargs: dict[str, t.Any] = {} + + def _spy_run( + *args: t.Any, + **kwargs: t.Any, + ) -> subprocess.CompletedProcess[str]: + captured_kwargs.update(kwargs) + return subprocess.CompletedProcess( + args=args[0], + returncode=0, + stdout="", + stderr="", + ) + + monkeypatch.setattr("subprocess.run", _spy_run) + + _maybe_fetch( + repo_path=repo_path, + config=SyncPlanConfig(fetch=True, offline=False), + ) + + assert "env" in captured_kwargs, "subprocess.run must receive env kwarg" + assert captured_kwargs["env"].get("GIT_TERMINAL_PROMPT") == "0" + + +def test_maybe_fetch_passes_timeout( + tmp_path: pathlib.Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify _maybe_fetch sets a timeout to prevent indefinite blocking.""" + repo_path = tmp_path / "repo" + repo_path.mkdir() + (repo_path / ".git").mkdir() + + captured_kwargs: dict[str, t.Any] = {} + + def _spy_run( + *args: t.Any, + **kwargs: t.Any, + ) -> subprocess.CompletedProcess[str]: + captured_kwargs.update(kwargs) + return subprocess.CompletedProcess( + args=args[0], + returncode=0, + stdout="", + stderr="", + ) + + monkeypatch.setattr("subprocess.run", _spy_run) + + _maybe_fetch( + repo_path=repo_path, + config=SyncPlanConfig(fetch=True, offline=False), + ) + + assert "timeout" in captured_kwargs, "subprocess.run must receive timeout kwarg" + assert captured_kwargs["timeout"] > 0 + + +def test_sync_sets_git_terminal_prompt( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify sync() sets GIT_TERMINAL_PROMPT=0 to prevent credential hangs.""" + # Remove 
GIT_TERMINAL_PROMPT if already set so setdefault takes effect + monkeypatch.delenv("GIT_TERMINAL_PROMPT", raising=False) + + sync( + repo_patterns=[], + config=None, + workspace_root=None, + dry_run=False, + output_json=False, + output_ndjson=False, + color="never", + exit_on_error=False, + show_unchanged=False, + summary_only=False, + long_view=False, + relative_paths=False, + fetch=False, + offline=False, + verbosity=0, + sync_all=False, + # No parser and no --all means sync() returns early, but env is set first + ) + + assert os.environ.get("GIT_TERMINAL_PROMPT") == "0" From e2ed5eca60aa405967bdb7e7d1cd0b8ca37d31d8 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 17:24:08 -0500 Subject: [PATCH 02/32] deps(libvcs) Bump to 0.40.0a0 for timeout + fast set_remotes why: This branch needs both of the libvcs 0.40.0a0 sync-hang fixes to do its job: the `timeout=` kwarg on `libvcs._internal.run.run` (for per-subprocess deadlines) and the faster `GitSync.remote()` path that no longer enumerates every remote ref via `git remote show`. The pin is widened to allow any 0.40.x pre-release so test environments can pick up further patches without a follow-up bump. what: - Widen libvcs pin from `~=0.39.0` to `>=0.40.0a0,<0.41.0`. - Rename the libvcs pytest fixture type from `CreateRepoPytestFixtureFn` to the new `CreateRepoFn` alias across tests/test_sync.py and tests/test_cli.py. - Add a CHANGES entry describing the dependency bump and why it matters. 
--- CHANGES | 6 ++++++ pyproject.toml | 2 +- tests/test_cli.py | 6 +++--- tests/test_sync.py | 12 ++++++------ 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/CHANGES b/CHANGES index ec5e0b58f..c02e91cd7 100644 --- a/CHANGES +++ b/CHANGES @@ -37,6 +37,12 @@ $ uvx --from 'vcspull' --prerelease allow vcspull _Notes on upcoming releases will be added here_ +### Dependencies + +- Bump `libvcs` to `>=0.40.0a0,<0.41.0` for timeout + fast `set_remotes` fixes + that let `vcspull sync` recover from a single stuck repository without + hanging the whole batch. + ### Documentation - Visual improvements to API docs from [gp-sphinx](https://gp-sphinx.git-pull.com)-based Sphinx packages (#542) diff --git a/pyproject.toml b/pyproject.toml index 05941d52d..5adcca451 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ keywords = [ ] homepage = "https://vcspull.git-pull.com" dependencies = [ - "libvcs~=0.39.0", + "libvcs>=0.40.0a0,<0.41.0", "colorama>=0.3.9", "PyYAML>=6.0" ] diff --git a/tests/test_cli.py b/tests/test_cli.py index 3c932e45c..90a558bcd 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -34,7 +34,7 @@ if t.TYPE_CHECKING: from typing import TypeAlias - from libvcs.pytest_plugin import CreateRepoPytestFixtureFn + from libvcs.pytest_plugin import CreateRepoFn from libvcs.sync.git import GitSync ExpectedOutput: TypeAlias = str | list[str] | None @@ -1051,7 +1051,7 @@ def test_sync_errored_svn_repo( monkeypatch: pytest.MonkeyPatch, user_path: pathlib.Path, config_path: pathlib.Path, - create_svn_remote_repo: CreateRepoPytestFixtureFn, + create_svn_remote_repo: CreateRepoFn, test_id: str, sync_args: list[str], expected_in_out: ExpectedOutput, @@ -1168,7 +1168,7 @@ def test_sync_errored_hg_repo( monkeypatch: pytest.MonkeyPatch, user_path: pathlib.Path, config_path: pathlib.Path, - create_hg_remote_repo: CreateRepoPytestFixtureFn, + create_hg_remote_repo: CreateRepoFn, test_id: str, sync_args: list[str], expected_in_out: ExpectedOutput, 
diff --git a/tests/test_sync.py b/tests/test_sync.py index b61567d7c..4172e7bbb 100644 --- a/tests/test_sync.py +++ b/tests/test_sync.py @@ -29,7 +29,7 @@ if t.TYPE_CHECKING: import pathlib - from libvcs.pytest_plugin import CreateRepoPytestFixtureFn + from libvcs.pytest_plugin import CreateRepoFn from vcspull.types import ConfigDict @@ -154,7 +154,7 @@ class ConfigVariationTest(t.NamedTuple): def test_config_variations( tmp_path: pathlib.Path, capsys: pytest.CaptureFixture[str], - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: CreateRepoFn, test_id: str, config_tpl: str, remote_list: list[str], @@ -262,7 +262,7 @@ class UpdatingRemoteFixture(t.NamedTuple): ) def test_updating_remote( tmp_path: pathlib.Path, - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: CreateRepoFn, test_id: str, config_tpl: str, has_extra_remotes: bool, @@ -340,7 +340,7 @@ def test_updating_remote( def test_sync_deduplicates_repos_matched_by_multiple_patterns( tmp_path: pathlib.Path, capsys: pytest.CaptureFixture[str], - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: CreateRepoFn, ) -> None: """Repos matched by multiple patterns should only sync once.""" dummy_repo = create_git_remote_repo( @@ -392,7 +392,7 @@ def test_sync_deduplicates_repos_matched_by_multiple_patterns( @skip_if_svn_missing def test_update_repo_svn( tmp_path: pathlib.Path, - create_svn_remote_repo: CreateRepoPytestFixtureFn, + create_svn_remote_repo: CreateRepoFn, ) -> None: """update_repo should handle SVN repositories and return SvnSync.""" svn_remote = create_svn_remote_repo( @@ -415,7 +415,7 @@ def test_update_repo_svn( @skip_if_hg_missing def test_update_repo_hg( tmp_path: pathlib.Path, - create_hg_remote_repo: CreateRepoPytestFixtureFn, + create_hg_remote_repo: CreateRepoFn, ) -> None: """update_repo should handle Mercurial repositories and return HgSync.""" hg_remote = create_hg_remote_repo( From 
9376e9d6dd5fd08c9aaf9a2dd430c21bc4f3211c Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 17:46:29 -0500 Subject: [PATCH 03/32] feat(sync[timeout]) Per-repo watchdog with copyable rerun recipe why: Before this change, a single stuck repository -- e.g. openai/codex under ~/study/rust/, which pipes thousands of remote-show lines through libvcs's busy-poll loop -- would block the entire batch of `vcspull sync` commands. The prior branch commit only armed git's credential-prompt env vars and timed out the *planning* fetch; the main `update_repo()` call still ran without a deadline. This wires a wall-clock watchdog around every repository so the batch keeps moving when one repo is slow or wedged, and surfaces an actionable rerun recipe so the user can diagnose without hunting for CLI flags. what: - Add `_SyncOutcome` + `_TimedOutRepo` dataclasses and a `_sync_repo_with_watchdog` primitive that runs `update_repo` on a single worker and enforces the timeout via `future.result(timeout=...)`. The executor is shut down with `wait=False` on every return path so a wedged libvcs subprocess is not allowed to block the main thread. - Add `--timeout SECONDS` CLI flag and `VCSPULL_SYNC_TIMEOUT_SECONDS` env override. Default is 10s, intentionally aggressive so hangs are surfaced fast. - Track timed-out repositories separately in the summary and emit a copy-pasteable rerun recipe grouped by workspace root. The suggested replacement timeout is `max(120, current * 10)` so a user who keeps the default gets a meaningful headroom on retry. - Plumb the new kwarg through `vcspull.cli.__init__:sync(...)`. - New `tests/cli/test_sync_watchdog.py` covering timeout resolution precedence, the synced/failed/timed-out outcome matrix, the rerun-recipe contents (workspace grouping, suggested timeout, verbose variant, manual git probe, log-file path), and the no-op branch when nothing timed out. - CHANGES entry under 1.59.x. 
--- CHANGES | 16 ++ src/vcspull/cli/__init__.py | 1 + src/vcspull/cli/sync.py | 317 +++++++++++++++++++++++++++++--- tests/cli/test_sync_watchdog.py | 258 ++++++++++++++++++++++++++ 4 files changed, 562 insertions(+), 30 deletions(-) create mode 100644 tests/cli/test_sync_watchdog.py diff --git a/CHANGES b/CHANGES index c02e91cd7..a55162ca0 100644 --- a/CHANGES +++ b/CHANGES @@ -37,6 +37,22 @@ $ uvx --from 'vcspull' --prerelease allow vcspull _Notes on upcoming releases will be added here_ +### New features + +#### `vcspull sync`: per-repository watchdog and copyable rerun recipe + +A single stuck repository no longer freezes the whole batch. Every repository +sync now runs under a wall-clock watchdog. The default deadline is 10 seconds +-- a healthy fetch/pull against a warm remote finishes well under that, so +anything slower is surfaced rather than waited on. Override with `--timeout +N` or the `VCSPULL_SYNC_TIMEOUT_SECONDS` environment variable. + +When one or more repositories time out, the summary is followed by a +copy-pasteable rerun recipe that retries just those repositories with a +larger timeout, plus a `-vv` variant for diagnosis and a manual +`git fetch --dry-run` probe per repo. Timed-out repositories are counted in +the summary as "N timed out" alongside synced / failed. 
+ ### Dependencies - Bump `libvcs` to `>=0.40.0a0,<0.41.0` for timeout + fast `set_remotes` fixes diff --git a/src/vcspull/cli/__init__.py b/src/vcspull/cli/__init__.py index 2f0633436..d79c827cb 100644 --- a/src/vcspull/cli/__init__.py +++ b/src/vcspull/cli/__init__.py @@ -466,6 +466,7 @@ def cli(_args: list[str] | None = None) -> None: sync_all=getattr(args, "sync_all", False), parser=sync_parser, include_worktrees=getattr(args, "include_worktrees", False), + timeout=getattr(args, "timeout", None), ) elif args.subparser_name == "list": list_repos( diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index b86154f5f..e36425844 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -4,11 +4,13 @@ import argparse import asyncio +import concurrent.futures import contextlib import logging import os import pathlib import re +import shlex import subprocess import sys import typing as t @@ -17,7 +19,7 @@ from dataclasses import dataclass from datetime import datetime from io import StringIO -from time import perf_counter +from time import monotonic, perf_counter from libvcs._internal.shortcuts import create_project from libvcs._internal.types import VCSLiteral @@ -161,6 +163,14 @@ def clamp(n: int, _min: int, _max: int) -> int: _FETCH_TIMEOUT_SECONDS = 120 +#: Default wall-clock deadline for each repository in ``vcspull sync``. +#: +#: Kept aggressive on purpose -- a healthy fetch/pull against a warm remote is +#: almost always under 10 seconds. Anything over that is "suspect" and should +#: surface as an actionable timeout rather than a silent hang. Callers can +#: override via ``--timeout`` or the ``VCSPULL_SYNC_TIMEOUT_SECONDS`` env var. 
+_DEFAULT_REPO_TIMEOUT_SECONDS = 10 + _NO_PROMPT_ENV: dict[str, str] | None = None @@ -627,6 +637,19 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP dest="include_worktrees", help="also sync configured worktrees for each repository", ) + parser.add_argument( + "--timeout", + dest="timeout", + type=int, + default=None, + metavar="SECONDS", + help=( + "per-repository wall-clock deadline in seconds " + f"(default: {_DEFAULT_REPO_TIMEOUT_SECONDS}; env: " + "VCSPULL_SYNC_TIMEOUT_SECONDS). Repos that exceed the deadline " + "are skipped and the rest of the batch continues." + ), + ) try: import shtab @@ -637,6 +660,198 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP return parser +@dataclass +class _TimedOutRepo: + """Metadata about a repository that exceeded its per-repo timeout.""" + + name: str + path: str + workspace_root: str + duration: float + + +@dataclass +class _SyncOutcome: + """Result of attempting to sync a single repository.""" + + status: t.Literal["synced", "failed", "timed_out"] + captured_output: str | None = None + error: BaseException | None = None + duration: float = 0.0 + + +def _resolve_repo_timeout(cli_timeout: int | None) -> int: + """Resolve the repo timeout from CLI flag / env var / built-in default.""" + if cli_timeout is not None and cli_timeout > 0: + return cli_timeout + env_value = os.environ.get("VCSPULL_SYNC_TIMEOUT_SECONDS") + if env_value: + try: + parsed = int(env_value) + except ValueError: + log.warning( + "Ignoring non-integer VCSPULL_SYNC_TIMEOUT_SECONDS=%s", + env_value, + ) + else: + if parsed > 0: + return parsed + return _DEFAULT_REPO_TIMEOUT_SECONDS + + +def _sync_repo_with_watchdog( + repo: ConfigDict, + *, + progress_callback: ProgressCallback, + timeout: int, + is_human: bool, +) -> _SyncOutcome: + """Run :func:`update_repo` under a wall-clock watchdog. 
+ + The libvcs call is executed on a daemon worker so the main thread can + enforce a timeout via :meth:`concurrent.futures.Future.result`. On a + timeout the worker is left to drain (the libvcs 0.40.0a0 ``timeout`` + kwarg will eventually reach down through the subprocess layer, but until + every call site plumbs it through we rely on the OS to reap abandoned + children when vcspull exits). + """ + buffer: StringIO | None = None if is_human else StringIO() + + def _run() -> None: + if buffer is None: + update_repo(repo, progress_callback=progress_callback) + return + # Non-human output modes capture everything so the NDJSON/JSON payload + # contains the per-repo details without polluting stdout. + with ( + contextlib.redirect_stdout(buffer), + contextlib.redirect_stderr(buffer), + ): + update_repo(repo, progress_callback=progress_callback) + + started = monotonic() + # A dedicated single-worker pool is cheaper than spinning a raw Thread; + # the future API also makes cancellation vs. completion explicit. We + # intentionally do NOT use the executor as a context manager: its + # ``__exit__`` calls ``shutdown(wait=True)`` which would block behind a + # wedged worker and defeat the whole point of the watchdog. Instead, we + # call ``shutdown(wait=False)`` on every return path so the daemon thread + # is abandoned and cleaned up by the OS when vcspull exits. 
+ executor = concurrent.futures.ThreadPoolExecutor( + max_workers=1, + thread_name_prefix="vcspull-sync", + ) + future = executor.submit(_run) + try: + future.result(timeout=timeout) + except concurrent.futures.TimeoutError: + duration = monotonic() - started + future.cancel() + executor.shutdown(wait=False, cancel_futures=True) + return _SyncOutcome( + status="timed_out", + captured_output=buffer.getvalue() if buffer else None, + duration=duration, + ) + except BaseException as exc_obj: + duration = monotonic() - started + executor.shutdown(wait=False, cancel_futures=True) + return _SyncOutcome( + status="failed", + captured_output=buffer.getvalue() if buffer else None, + error=exc_obj, + duration=duration, + ) + + executor.shutdown(wait=False, cancel_futures=True) + return _SyncOutcome( + status="synced", + captured_output=buffer.getvalue() if buffer else None, + duration=monotonic() - started, + ) + + +def _emit_rerun_recipe( + formatter: OutputFormatter, + colors: Colors, + *, + timed_out_repos: list[_TimedOutRepo], + timeout: int, + log_file_path: pathlib.Path | None, +) -> None: + """Print a copyable recipe for rerunning just the timed-out repositories. + + Modelled on the way ``cargo``/``npm`` surface actionable next steps after + a failure. The suggested timeout is ``max(120, timeout * 10)`` so a user + who kept the aggressive default still gets a meaningful headroom when + they retry. + """ + if not timed_out_repos: + return + + suggested_timeout = max(120, timeout * 10) + + # Group by workspace so the user can paste one line per workspace. 
+ grouped: dict[str, list[_TimedOutRepo]] = {} + for repo in timed_out_repos: + grouped.setdefault(repo.workspace_root, []).append(repo) + + formatter.emit_text("") + formatter.emit_text( + f"{colors.warning('⏱')} Timed out: " + f"{colors.warning(str(len(timed_out_repos)))} " + f"repositor{'y' if len(timed_out_repos) == 1 else 'ies'} " + f"exceeded {timeout}s.", + ) + for repo in timed_out_repos: + display_path = str(PrivatePath(repo.path)) + formatter.emit_text( + f" {colors.warning('⏱')} {colors.info(repo.name):<32} " + f"{colors.muted('→')} {display_path} " + f"({colors.muted(f'{repo.duration:.1f}s elapsed')})", + ) + + formatter.emit_text("") + formatter.emit_text( + f"{colors.info('→')} Rerun just the affected repositories with a " + f"larger timeout:", + ) + for workspace_root, repos in grouped.items(): + names = " ".join(shlex.quote(r.name) for r in repos) + workspace_arg = f"--workspace {shlex.quote(str(PrivatePath(workspace_root)))}" + formatter.emit_text( + f" vcspull sync {workspace_arg} --timeout {suggested_timeout} {names}", + ) + + formatter.emit_text( + f"{colors.info('→')} Or with verbose logging to diagnose the hang:", + ) + for workspace_root, repos in grouped.items(): + names = " ".join(shlex.quote(r.name) for r in repos) + workspace_arg = f"--workspace {shlex.quote(str(PrivatePath(workspace_root)))}" + formatter.emit_text( + f" vcspull sync {workspace_arg} " + f"--timeout {suggested_timeout} -vv {names}", + ) + + formatter.emit_text( + f"{colors.info('→')} Probe each repository manually " + f"({colors.muted('these should return in under 10s')}):", + ) + for repo in timed_out_repos: + display_path = str(PrivatePath(repo.path)) + formatter.emit_text( + f" GIT_TERMINAL_PROMPT=0 timeout 10 " + f"git -C {shlex.quote(display_path)} fetch --dry-run", + ) + + if log_file_path is not None: + formatter.emit_text("") + formatter.emit_text( + f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", + ) + + def sync( repo_patterns: 
list[str], config: pathlib.Path | None, @@ -657,11 +872,14 @@ def sync( parser: argparse.ArgumentParser | None = None, # optional so sync can be unit tested include_worktrees: bool = False, + timeout: int | None = None, ) -> None: """Entry point for ``vcspull sync``.""" # Prevent git from blocking on credential prompts during batch sync os.environ.setdefault("GIT_TERMINAL_PROMPT", "0") + repo_timeout = _resolve_repo_timeout(timeout) + # Show help if no patterns and --all not specified if not repo_patterns and not sync_all: if parser is not None: @@ -831,8 +1049,10 @@ def sync( "synced": 0, "previewed": 0, "failed": 0, + "timed_out": 0, "unmatched": unmatched_count, } + timed_out_repos: list[_TimedOutRepo] = [] progress_callback: ProgressCallback if is_human: @@ -860,49 +1080,74 @@ def silent_progress(output: str, timestamp: datetime) -> None: "workspace_root": str(workspace_label), } - buffer: StringIO | None = None - captured_output: str | None = None - try: - if is_human: - update_repo(repo, progress_callback=progress_callback) - else: - buffer = StringIO() - with ( - contextlib.redirect_stdout(buffer), - contextlib.redirect_stderr( - buffer, - ), - ): - update_repo(repo, progress_callback=progress_callback) - captured_output = buffer.getvalue() - except Exception as e: + outcome = _sync_repo_with_watchdog( + repo, + progress_callback=progress_callback, + timeout=repo_timeout, + is_human=is_human, + ) + + if outcome.status == "timed_out": + summary["timed_out"] += 1 + summary["failed"] += 1 + timed_out_repos.append( + _TimedOutRepo( + name=repo_name, + path=str(repo_path), + workspace_root=str(workspace_label), + duration=outcome.duration, + ), + ) + event["status"] = "timed_out" + event["duration_ms"] = int(outcome.duration * 1000) + if outcome.captured_output: + event["details"] = outcome.captured_output.strip() + formatter.emit(event) + formatter.emit_text( + f"{colors.warning('⏱')} Timed out {colors.info(repo_name)} " + f"after 
{colors.warning(f'{outcome.duration:.1f}s')} " + f"{colors.muted('→')} {display_repo_path}", + ) + if exit_on_error: + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out_repos, + timeout=repo_timeout, + log_file_path=None, + ) + _emit_summary(formatter, colors, summary) + formatter.finalize() + if parser is not None: + parser.exit(status=1, message=EXIT_ON_ERROR_MSG) + raise SystemExit(EXIT_ON_ERROR_MSG) + continue + + if outcome.status == "failed": summary["failed"] += 1 + err = outcome.error + err_msg = str(err) if err is not None else "unknown error" event["status"] = "error" - event["error"] = str(e) - if not is_human and buffer is not None and not captured_output: - captured_output = buffer.getvalue() - if captured_output: - event["details"] = captured_output.strip() + event["error"] = err_msg + if outcome.captured_output: + event["details"] = outcome.captured_output.strip() formatter.emit(event) if is_human: - log.debug( - "Failed syncing %s", - repo_name, - ) - if log.isEnabledFor(logging.DEBUG): + log.debug("Failed syncing %s", repo_name) + if log.isEnabledFor(logging.DEBUG) and err is not None: import traceback - traceback.print_exc() + traceback.print_exception(type(err), err, err.__traceback__) formatter.emit_text( f"{colors.error('✗')} Failed syncing {colors.info(repo_name)}: " - f"{colors.error(str(e))}", + f"{colors.error(err_msg)}", ) if exit_on_error: _emit_summary(formatter, colors, summary) formatter.finalize() if parser is not None: parser.exit(status=1, message=EXIT_ON_ERROR_MSG) - raise SystemExit(EXIT_ON_ERROR_MSG) from e + raise SystemExit(EXIT_ON_ERROR_MSG) from err continue summary["synced"] += 1 @@ -977,6 +1222,13 @@ def silent_progress(output: str, timestamp: datetime) -> None: raise SystemExit(EXIT_ON_ERROR_MSG) _emit_summary(formatter, colors, summary) + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out_repos, + timeout=repo_timeout, + log_file_path=None, + ) if exit_on_error and unmatched_count 
> 0: formatter.finalize() @@ -997,12 +1249,17 @@ def _emit_summary( if formatter.mode == OutputMode.HUMAN: previewed = summary.get("previewed", 0) unmatched = summary.get("unmatched", 0) + timed_out = summary.get("timed_out", 0) parts = [ f"\n{colors.info('Summary:')} " f"{colors.info(str(summary['total']))} repos, " f"{colors.success(str(summary['synced']))} synced, " f"{colors.error(str(summary['failed']))} failed", ] + if timed_out > 0: + parts.append( + f", {colors.warning(str(timed_out))} timed out", + ) if previewed > 0: parts.append( f", {colors.warning(str(previewed))} previewed", diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py new file mode 100644 index 000000000..8d99b86bc --- /dev/null +++ b/tests/cli/test_sync_watchdog.py @@ -0,0 +1,258 @@ +"""Tests for the per-repo sync watchdog and rerun-recipe emitter.""" + +from __future__ import annotations + +import importlib +import time +import typing as t + +import pytest + +from vcspull.cli._colors import ColorMode, Colors +from vcspull.cli._output import OutputFormatter, OutputMode +from vcspull.cli.sync import ( + _DEFAULT_REPO_TIMEOUT_SECONDS, + _emit_rerun_recipe, + _resolve_repo_timeout, + _sync_repo_with_watchdog, + _TimedOutRepo, +) + +# ``vcspull.cli.__init__`` re-exports the ``sync`` function, which shadows the +# submodule of the same name in normal attribute access. Grab the module +# object directly so monkeypatch.setattr can install stubs on it. 
+sync_module = importlib.import_module("vcspull.cli.sync") + +if t.TYPE_CHECKING: + import pathlib + + +def _noop_progress(output: str, timestamp: t.Any) -> None: + """Swallow libvcs progress output in tests.""" + return + + +def test_resolve_repo_timeout_prefers_cli_flag( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """CLI ``--timeout`` should win over the env var and the default.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "99") + + assert _resolve_repo_timeout(5) == 5 + + +def test_resolve_repo_timeout_falls_back_to_env_var( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Without a CLI flag, ``VCSPULL_SYNC_TIMEOUT_SECONDS`` takes over.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "42") + + assert _resolve_repo_timeout(None) == 42 + + +def test_resolve_repo_timeout_uses_default_when_unset( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """The module-level default applies when neither override is present.""" + monkeypatch.delenv("VCSPULL_SYNC_TIMEOUT_SECONDS", raising=False) + + assert _resolve_repo_timeout(None) == _DEFAULT_REPO_TIMEOUT_SECONDS + + +def test_resolve_repo_timeout_ignores_bogus_env_value( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A non-integer env value is logged and ignored; default applies.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "forever") + + assert _resolve_repo_timeout(None) == _DEFAULT_REPO_TIMEOUT_SECONDS + + +def test_watchdog_returns_synced_outcome_on_success( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A fast ``update_repo`` returns ``status='synced'`` with no error.""" + # Replace update_repo with a stub so we can exercise the watchdog without + # touching libvcs. The stub is fast, so the timeout branch never fires. 
+ calls: list[dict[str, t.Any]] = [] + + def _stub_update_repo(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: + calls.append(repo) + + monkeypatch.setattr(sync_module, "update_repo", _stub_update_repo) + + outcome = _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "ok"}), + progress_callback=_noop_progress, + timeout=5, + is_human=True, + ) + + assert outcome.status == "synced" + assert outcome.error is None + assert calls == [{"name": "ok"}] + + +def test_watchdog_returns_timed_out_on_slow_update( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A slow ``update_repo`` is abandoned with ``status='timed_out'``.""" + + def _slow_update_repo(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: + # Far longer than the 0.2 s timeout below -- the watchdog must fire. + time.sleep(10) + + monkeypatch.setattr(sync_module, "update_repo", _slow_update_repo) + + started = time.monotonic() + outcome = _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "slow"}), + progress_callback=_noop_progress, + timeout=1, + is_human=True, + ) + elapsed = time.monotonic() - started + + assert outcome.status == "timed_out" + # The watchdog should fire near the timeout -- give generous slack so CI + # scheduling jitter doesn't flake the test. 
+ assert elapsed < 5.0 + assert outcome.duration >= 0.5 + + +def test_watchdog_preserves_failed_outcome( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Synchronous exceptions from ``update_repo`` surface as ``failed``.""" + + class _Boom(RuntimeError): + """Sentinel used to trace exception propagation.""" + + def _raising_update_repo( + repo: dict[str, t.Any], *, progress_callback: t.Any + ) -> None: + msg = "remote exploded" + raise _Boom(msg) + + monkeypatch.setattr(sync_module, "update_repo", _raising_update_repo) + + outcome = _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "boom"}), + progress_callback=_noop_progress, + timeout=5, + is_human=True, + ) + + assert outcome.status == "failed" + assert isinstance(outcome.error, _Boom) + assert "remote exploded" in str(outcome.error) + + +def test_rerun_recipe_emits_one_line_per_workspace( + capsys: pytest.CaptureFixture[str], + tmp_path: pathlib.Path, +) -> None: + """Repositories are grouped by workspace root in the suggested rerun.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + rust_workspace = tmp_path / "rust" + otel_workspace = tmp_path / "otel" + rust_workspace.mkdir() + otel_workspace.mkdir() + + timed_out = [ + _TimedOutRepo( + name="codex", + path=str(rust_workspace / "codex"), + workspace_root=str(rust_workspace), + duration=10.2, + ), + _TimedOutRepo( + name="rust", + path=str(rust_workspace / "rust"), + workspace_root=str(rust_workspace), + duration=10.5, + ), + _TimedOutRepo( + name="opentelemetry-rust", + path=str(otel_workspace / "opentelemetry-rust"), + workspace_root=str(otel_workspace), + duration=10.1, + ), + ] + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out, + timeout=10, + log_file_path=tmp_path / "vcspull-debug.log", + ) + formatter.finalize() + + captured = capsys.readouterr().out + # One rerun command per distinct workspace root, with the repo names + # appended as positional args -- this is what the user 
copy-pastes. + assert "vcspull sync --workspace" in captured + assert "codex" in captured and "rust" in captured + assert "opentelemetry-rust" in captured + # Suggest 10x the current timeout, clamped to 120 s minimum. + assert "--timeout 120" in captured + # Include a verbose-logging variant for diagnosis. + assert "-vv" in captured + # Include a manual git probe so the user can isolate the failure mode. + assert "GIT_TERMINAL_PROMPT=0" in captured + assert "git -C" in captured + # Log-file path is surfaced when provided. + assert "vcspull-debug.log" in captured + + +def test_rerun_recipe_is_noop_when_no_timeouts( + capsys: pytest.CaptureFixture[str], +) -> None: + """A clean run with no timeouts emits nothing extra.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=[], + timeout=10, + log_file_path=None, + ) + formatter.finalize() + + captured = capsys.readouterr().out + assert "Timed out" not in captured + assert "vcspull sync" not in captured + + +def test_rerun_recipe_scales_timeout_suggestion( + capsys: pytest.CaptureFixture[str], + tmp_path: pathlib.Path, +) -> None: + """When the user already passed a long timeout, we suggest 10x it.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=[ + _TimedOutRepo( + name="huge", + path=str(tmp_path / "huge"), + workspace_root=str(tmp_path), + duration=60.0, + ), + ], + timeout=30, + log_file_path=None, + ) + formatter.finalize() + + captured = capsys.readouterr().out + # max(120, 30 * 10) = 300 + assert "--timeout 300" in captured From a45cfd8b1f5c67ef5c8a9c27ac47e55fd99c3e5b Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 17:58:51 -0500 Subject: [PATCH 04/32] feat(sync[log-file]) Persist debug log to tempdir, npm/pnpm-style surfacing why: When vcspull sync stalled on a repo and the watchdog abandoned 
it, the user had no post-mortem trail -- the only signal was the terminal line that said "timed out". npm/pnpm/yarn solve this by always writing a debug log to the temp dir and only surfacing the path on failure, so clean runs stay quiet but failed ones give the user something to grep. This wires the same pattern into vcspull sync. what: - Add `default_debug_log_path`, `setup_file_logger`, and `teardown_file_logger` in vcspull.log. The file handler attaches to both the vcspull and libvcs loggers at DEBUG level, so libvcs's per-repo progress output is captured alongside vcspull's own structured events. Duplicate registration is deduped by baseFilename so multiple setups for the same path do not write every record twice. - Default path is `$TMPDIR/vcspull-debug--.log`, matching the npm/pnpm/yarn convention. - Add `--log-file PATH` and `--no-log-file` CLI flags. Thread both through from `vcspull.cli.__init__:sync(...)`. - Extract the existing body of `sync()` into `_sync_impl` so the file handler setup/teardown can wrap the whole sync in a `try/finally` and still cover every exit path (including argparse's SystemExit). - Surface the log path to the terminal on any failure (timeout block via `_emit_rerun_recipe`, and a new "Full debug log: ..." line for plain failures). Clean runs leave the file on disk but never mention it. - Tests in tests/cli/test_sync_log_file.py covering default path shape, attachment at DEBUG, end-to-end record capture, idempotent attachment, and clean teardown. - CHANGES entry under 1.59.x. 
--- CHANGES | 10 +++ src/vcspull/cli/__init__.py | 2 + src/vcspull/cli/sync.py | 112 +++++++++++++++++++++++- src/vcspull/log.py | 94 ++++++++++++++++++++ tests/cli/test_sync_log_file.py | 150 ++++++++++++++++++++++++++++++++ 5 files changed, 366 insertions(+), 2 deletions(-) create mode 100644 tests/cli/test_sync_log_file.py diff --git a/CHANGES b/CHANGES index a55162ca0..66accc046 100644 --- a/CHANGES +++ b/CHANGES @@ -53,6 +53,16 @@ larger timeout, plus a `-vv` variant for diagnosis and a manual `git fetch --dry-run` probe per repo. Timed-out repositories are counted in the summary as "N timed out" alongside synced / failed. +#### `vcspull sync`: per-invocation debug log (npm/pnpm style) + +Every `vcspull sync` invocation now writes a debug log to +`$TMPDIR/vcspull-debug--.log`. Clean runs stay quiet -- the +log path is only surfaced to the terminal when at least one repository +failed or timed out. Override the destination with `--log-file PATH`, or +disable entirely with `--no-log-file`. The log captures the full libvcs +per-repo activity (progress callback output, exit codes, errors) so a hang +can be post-mortemed even after the CLI has moved on. 
+ ### Dependencies - Bump `libvcs` to `>=0.40.0a0,<0.41.0` for timeout + fast `set_remotes` fixes diff --git a/src/vcspull/cli/__init__.py b/src/vcspull/cli/__init__.py index d79c827cb..c63efbd1d 100644 --- a/src/vcspull/cli/__init__.py +++ b/src/vcspull/cli/__init__.py @@ -467,6 +467,8 @@ def cli(_args: list[str] | None = None) -> None: parser=sync_parser, include_worktrees=getattr(args, "include_worktrees", False), timeout=getattr(args, "timeout", None), + log_file=getattr(args, "log_file", None), + no_log_file=getattr(args, "no_log_file", False), ) elif args.subparser_name == "list": list_repos( diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index e36425844..b92499747 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -36,6 +36,7 @@ sync_all_worktrees, ) from vcspull.config import expand_dir, filter_repos, find_config_files, load_configs +from vcspull.log import default_debug_log_path, setup_file_logger, teardown_file_logger from vcspull.types import ConfigDict from ._colors import Colors, get_color_mode @@ -650,6 +651,23 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP "are skipped and the rest of the batch continues." ), ) + parser.add_argument( + "--log-file", + dest="log_file", + metavar="PATH", + default=None, + help=( + "write a debug log to PATH (default: $TMPDIR/vcspull-debug--" + ".log). The path is only printed to the terminal when a sync " + "fails or times out, matching npm/pnpm/yarn." 
+ ), + ) + parser.add_argument( + "--no-log-file", + dest="no_log_file", + action="store_true", + help="disable the debug log file entirely", + ) try: import shtab @@ -873,6 +891,8 @@ def sync( | None = None, # optional so sync can be unit tested include_worktrees: bool = False, timeout: int | None = None, + log_file: str | pathlib.Path | None = None, + no_log_file: bool = False, ) -> None: """Entry point for ``vcspull sync``.""" # Prevent git from blocking on credential prompts during batch sync @@ -880,6 +900,84 @@ def sync( repo_timeout = _resolve_repo_timeout(timeout) + # Set up the debug log file early so libvcs's per-repo activity is also + # captured. The path is surfaced to the user only when something failed + # (or on an explicit path override), matching npm/pnpm/yarn UX. + log_file_path: pathlib.Path | None = None + log_file_handler: logging.FileHandler | None = None + if not no_log_file: + log_file_path = ( + pathlib.Path(log_file).expanduser() + if log_file is not None + else default_debug_log_path() + ) + try: + log_file_handler = setup_file_logger(log_file_path) + except OSError as exc_obj: + log.warning( + "Could not open debug log at %s: %s", + log_file_path, + exc_obj, + ) + log_file_path = None + log_file_handler = None + + try: + _sync_impl( + repo_patterns=repo_patterns, + config=config, + workspace_root=workspace_root, + output_json=output_json, + output_ndjson=output_ndjson, + color=color, + exit_on_error=exit_on_error, + show_unchanged=show_unchanged, + summary_only=summary_only, + long_view=long_view, + relative_paths=relative_paths, + fetch=fetch, + offline=offline, + verbosity=verbosity, + sync_all=sync_all, + parser=parser, + include_worktrees=include_worktrees, + repo_timeout=repo_timeout, + log_file_path=log_file_path, + dry_run=dry_run, + ) + finally: + if log_file_handler is not None: + teardown_file_logger(log_file_handler) + + +def _sync_impl( + *, + repo_patterns: list[str], + config: pathlib.Path | None, + workspace_root: str | 
None, + dry_run: bool, + output_json: bool, + output_ndjson: bool, + color: str, + exit_on_error: bool, + show_unchanged: bool, + summary_only: bool, + long_view: bool, + relative_paths: bool, + fetch: bool, + offline: bool, + verbosity: int, + sync_all: bool, + parser: argparse.ArgumentParser | None, + include_worktrees: bool, + repo_timeout: int, + log_file_path: pathlib.Path | None, +) -> None: + """Run the core body of :func:`sync`. + + Kept separate so log-file teardown runs through ``finally`` regardless of + where the caller exits the sync. + """ # Show help if no patterns and --all not specified if not repo_patterns and not sync_all: if parser is not None: @@ -1114,7 +1212,7 @@ def silent_progress(output: str, timestamp: datetime) -> None: colors, timed_out_repos=timed_out_repos, timeout=repo_timeout, - log_file_path=None, + log_file_path=log_file_path, ) _emit_summary(formatter, colors, summary) formatter.finalize() @@ -1227,9 +1325,19 @@ def silent_progress(output: str, timestamp: datetime) -> None: colors, timed_out_repos=timed_out_repos, timeout=repo_timeout, - log_file_path=None, + log_file_path=log_file_path, ) + # When there were plain failures (no timeout) surface the debug log path + # anyway so the user can post-mortem without re-running with --log-file. 
+ if ( + summary.get("failed", 0) - summary.get("timed_out", 0) > 0 + and log_file_path is not None + ): + formatter.emit_text( + f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", + ) + if exit_on_error and unmatched_count > 0: formatter.finalize() if parser is not None: diff --git a/src/vcspull/log.py b/src/vcspull/log.py index 7acdc8026..44893d264 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -12,10 +12,14 @@ import contextlib import importlib import logging +import os +import pathlib import pkgutil import sys +import tempfile import time import typing as t +from datetime import datetime from functools import lru_cache from colorama import Fore, Style @@ -274,3 +278,93 @@ class RepoFilter(logging.Filter): def filter(self, record: logging.LogRecord) -> bool: """Only return a record if a keyword object.""" return "keyword" in record.__dict__ + + +#: Format string for the debug log file. Verbose enough to trace a hang: +#: level, timestamp (to millisecond), logger, module:line, and the message. +_DEBUG_FILE_FORMAT = ( + "%(asctime)s.%(msecs)03d [%(levelname)s] %(name)s " + "%(module)s:%(lineno)d -- %(message)s" +) + +_DEBUG_FILE_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S" + + +def default_debug_log_path() -> pathlib.Path: + """Return the path where vcspull writes its per-invocation debug log. + + Mirrors the ``npm`` / ``pnpm`` convention of dropping a timestamped log + file in the system temp directory on every run. The file is always created + but its path is only surfaced to the user when something went wrong + (failure or timeout), so clean runs stay quiet. + """ + stamp = datetime.now().strftime("%Y%m%dT%H%M%S") + pid = os.getpid() + return pathlib.Path(tempfile.gettempdir()) / f"vcspull-debug-{stamp}-{pid}.log" + + +def setup_file_logger( + path: pathlib.Path, + *, + level: int = logging.DEBUG, +) -> logging.FileHandler: + """Attach a file handler to the ``vcspull`` and ``libvcs`` loggers. 
+ + Unlike :func:`setup_logger`, this handler is not tied to stdout -- it + captures the full debug trace so a post-mortem (npm/pnpm/yarn style) has + enough context to diagnose a timeout even when the CLI output was + aggressively summarised. + + Parameters + ---------- + path : pathlib.Path + Destination file. Parent directories are created as needed. An + existing file is appended to so a single session that sets up logging + multiple times does not clobber earlier context. + level : int + Threshold for the file handler. Defaults to :data:`logging.DEBUG` + because the file is consulted only when diagnosing a failure. + """ + path.parent.mkdir(parents=True, exist_ok=True) + + handler = logging.FileHandler(path, mode="a", encoding="utf-8") + handler.setLevel(level) + handler.setFormatter( + logging.Formatter(_DEBUG_FILE_FORMAT, datefmt=_DEBUG_FILE_DATE_FORMAT), + ) + + # Attach to both loggers so libvcs's per-repo activity is captured alongside + # vcspull's own structured events. Duplicate handlers are avoided by + # checking the baseFilename before attaching. + for logger_name in ("vcspull", "libvcs"): + logger = logging.getLogger(logger_name) + existing = next( + ( + h + for h in logger.handlers + if isinstance(h, logging.FileHandler) + and getattr(h, "baseFilename", None) == str(path) + ), + None, + ) + if existing is None: + logger.addHandler(handler) + # Ensure the logger itself lets DEBUG records through to the handler. + if logger.level == logging.NOTSET or logger.level > level: + logger.setLevel(level) + + return handler + + +def teardown_file_logger(handler: logging.FileHandler) -> None: + """Flush and detach ``handler`` from the vcspull/libvcs loggers.""" + try: + handler.flush() + handler.close() + except ValueError: + # Handler already closed; nothing to do. 
+ pass + for logger_name in ("vcspull", "libvcs"): + logger = logging.getLogger(logger_name) + if handler in logger.handlers: + logger.removeHandler(handler) diff --git a/tests/cli/test_sync_log_file.py b/tests/cli/test_sync_log_file.py new file mode 100644 index 000000000..9a0005751 --- /dev/null +++ b/tests/cli/test_sync_log_file.py @@ -0,0 +1,150 @@ +"""Tests for the per-invocation debug log file (npm/pnpm style).""" + +from __future__ import annotations + +import logging +import pathlib +import typing as t + +import pytest + +from vcspull.log import ( + default_debug_log_path, + setup_file_logger, + teardown_file_logger, +) + + +def test_default_debug_log_path_lives_in_tempdir( + monkeypatch: pytest.MonkeyPatch, + tmp_path: pathlib.Path, +) -> None: + """``TMPDIR`` is honoured and the file name encodes timestamp + PID.""" + monkeypatch.setenv("TMPDIR", str(tmp_path)) + # On platforms that read TMPDIR via ``tempfile.gettempdir`` this is honoured + # on first access per-process; the default path must reside under it. + + path = default_debug_log_path() + + # The file name must start with the prefix that downstream tooling (CI log + # collection, grep for "npm-debug"-style artefacts) can pattern-match on. + assert path.name.startswith("vcspull-debug-") + assert path.suffix == ".log" + + +def test_setup_file_logger_attaches_debug_handler( + tmp_path: pathlib.Path, +) -> None: + """The handler attaches at DEBUG level to both vcspull and libvcs loggers.""" + log_path = tmp_path / "nested" / "vcspull-debug.log" + assert not log_path.exists() + + handler = setup_file_logger(log_path) + try: + # setup_file_logger must create missing parent directories so callers + # can point to a path that does not yet exist. 
+ assert log_path.parent.is_dir() + + vcspull_logger = logging.getLogger("vcspull") + libvcs_logger = logging.getLogger("libvcs") + assert handler in vcspull_logger.handlers + assert handler in libvcs_logger.handlers + assert handler.level == logging.DEBUG + finally: + teardown_file_logger(handler) + + +def test_setup_file_logger_captures_subsequent_log_records( + tmp_path: pathlib.Path, +) -> None: + """Log records emitted after setup end up inside the on-disk file.""" + log_path = tmp_path / "vcspull-debug.log" + handler = setup_file_logger(log_path) + try: + logger = logging.getLogger("vcspull.test-log-file") + logger.debug("debug crumb %s", "apple") + logger.warning("timed out syncing %s", "codex") + finally: + teardown_file_logger(handler) + + text = log_path.read_text(encoding="utf-8") + assert "debug crumb apple" in text + assert "timed out syncing codex" in text + # The formatter must include level + logger name for post-mortem searching. + assert "DEBUG" in text + assert "WARNING" in text + assert "vcspull.test-log-file" in text + + +def test_setup_file_logger_does_not_double_attach( + tmp_path: pathlib.Path, +) -> None: + """Calling setup twice with the same path attaches only one handler.""" + log_path = tmp_path / "vcspull-debug.log" + + handler_a = setup_file_logger(log_path) + handler_b = setup_file_logger(log_path) + try: + vcspull_logger = logging.getLogger("vcspull") + # The second call returns a *new* handler object for the outer caller, + # but the logger must not end up with duplicate file handlers pointing + # at the same file -- that would write every record twice. 
+ matching = [ + h + for h in vcspull_logger.handlers + if isinstance(h, logging.FileHandler) + and getattr(h, "baseFilename", None) == str(log_path) + ] + assert len(matching) == 1 + finally: + teardown_file_logger(handler_a) + teardown_file_logger(handler_b) + + +def test_teardown_file_logger_detaches_and_closes( + tmp_path: pathlib.Path, +) -> None: + """Tearing down removes the handler from both loggers and closes the file.""" + log_path = tmp_path / "vcspull-debug.log" + handler = setup_file_logger(log_path) + teardown_file_logger(handler) + + vcspull_logger = logging.getLogger("vcspull") + libvcs_logger = logging.getLogger("libvcs") + assert handler not in vcspull_logger.handlers + assert handler not in libvcs_logger.handlers + # Closing is idempotent per Python logging convention; a second call must + # not raise so the outer sync()'s ``finally`` can be defensively + # re-invoked. + teardown_file_logger(handler) + + +def test_default_debug_log_path_encodes_distinct_invocations( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Distinct (timestamp, pid) pairs produce distinct file names.""" + + class _Fake: + value = 0 + + @classmethod + def now(cls) -> t.Any: + cls.value += 1 + + class _Stamp: + @staticmethod + def strftime(_fmt: str) -> str: + return f"2026042400000{_Fake.value}" + + return _Stamp() + + import vcspull.log as log_mod + + monkeypatch.setattr(log_mod, "datetime", _Fake) + + first = default_debug_log_path() + second = default_debug_log_path() + + assert first != second + assert first.name.startswith("vcspull-debug-20260424") + assert second.name.startswith("vcspull-debug-20260424") From 8c3defc69d489261833c8095cc0519af442b6ed1 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:18:13 -0500 Subject: [PATCH 05/32] feat(sync[indicator]) Spinner in TTY, heartbeat line in pipes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Before this change, users running `vcspull sync` had no 
live signal of which repository was currently processing -- the only output was a post-factum success/failure line per repo. When a slow or wedged repo was running, stdout just appeared frozen. That pattern is what led to the "codex is hanging, is it hung?" confusion that motivated this whole branch. Providing a lightweight indicator resolves that without reaching for a heavyweight dep like `rich`. what: - Add src/vcspull/cli/_progress.py with a threaded SyncStatusIndicator that uses only stdlib + ANSI escape codes (pattern borrowed from tmuxp's cli/_progress.py): a 100ms braille-frame spinner in a TTY, a single "· syncing " line on start plus periodic "still syncing" lines in pipes. - atexit hook restores the cursor when the interpreter shuts down mid-sync so kitty/tmux don't leak a hidden cursor after a crash. - `build_indicator` disables the spinner for --json / --ndjson and for --color=never so machine-readable output stays clean. - Extract the inner repo loop of _sync_impl into _run_sync_loop so the indicator can be cleanly started, torn down via finally, and not leak into the summary emission. - Register the new module in tests/test_log.py expected CLI-logger list so get_cli_logger_names still matches. - Add tests/cli/test_sync_progress.py covering: disabled/no-op, start+ heartbeat, throttled heartbeat under interval, TTY spinner frames + cursor-hide/show, idempotent close, context-manager form, and build_indicator's mode-aware gating. - CHANGES entry under 1.59.x. 
--- CHANGES | 10 ++ src/vcspull/cli/_progress.py | 301 ++++++++++++++++++++++++++++++++ src/vcspull/cli/sync.py | 110 ++++++++---- tests/cli/test_sync_progress.py | 141 +++++++++++++++ tests/test_log.py | 1 + 5 files changed, 530 insertions(+), 33 deletions(-) create mode 100644 src/vcspull/cli/_progress.py create mode 100644 tests/cli/test_sync_progress.py diff --git a/CHANGES b/CHANGES index 66accc046..ab87e5831 100644 --- a/CHANGES +++ b/CHANGES @@ -53,6 +53,16 @@ larger timeout, plus a `-vv` variant for diagnosis and a manual `git fetch --dry-run` probe per repo. Timed-out repositories are counted in the summary as "N timed out" alongside synced / failed. +#### `vcspull sync`: live status indicator (spinner + heartbeat) + +Every `vcspull sync` run now shows which repository is currently running +and how long it has been. In a terminal the output is a single-line braille +spinner that refreshes in place; when output is piped (CI, `tee`, log +capture), the indicator degrades to a `· syncing ` line on start plus +a periodic `… still syncing (45s elapsed)` heartbeat. The indicator +is disabled under machine-readable output (`--json` / `--ndjson`) or when +colours are turned off. + #### `vcspull sync`: per-invocation debug log (npm/pnpm style) Every `vcspull sync` invocation now writes a debug log to diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py new file mode 100644 index 000000000..dff9e3060 --- /dev/null +++ b/src/vcspull/cli/_progress.py @@ -0,0 +1,301 @@ +"""Live status indicator for ``vcspull sync``. + +Shows the user which repository is currently being synced and how long it has +been running. In a TTY a single-line braille spinner refreshes every ~100 ms; +elsewhere (pipes, CI logs) a once-on-start line is followed by periodic +"still syncing" heartbeats so the output stream keeps moving without flooding +the log. + +Inspired by ``tmuxp``'s spinner module -- stdlib + ANSI only, no ``rich`` +dependency. 
+""" + +from __future__ import annotations + +import atexit +import io +import itertools +import logging +import sys +import threading +import time +import typing as t + +log = logging.getLogger(__name__) + + +# ANSI escape sequences +_HIDE_CURSOR = "\033[?25l" +_SHOW_CURSOR = "\033[?25h" +_ERASE_LINE = "\033[2K" +_CURSOR_TO_COL0 = "\r" + +#: Braille spinner frames -- 100 ms tick produces a smooth rotation. +_SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" + +#: How often to refresh the spinner line in the TTY path. +_TTY_REFRESH_INTERVAL = 0.1 + +#: How often to emit a "still syncing" heartbeat line in the non-TTY path. +_HEARTBEAT_INTERVAL = 30.0 + +# Track indicators that have hidden the cursor so we can restore it on atexit +# even if the interpreter crashes mid-sync and no ``finally`` block fires. +_ACTIVE_INDICATORS: set[SyncStatusIndicator] = set() + + +def _restore_cursors_on_exit() -> None: + """Restore the cursor for every indicator that still has it hidden.""" + for indicator in list(_ACTIVE_INDICATORS): + # atexit handlers that raise are swallowed by the interpreter, so we + # log-and-swallow here to leave a breadcrumb for debugging without + # masking other shutdown tasks. + _close_indicator_quietly(indicator) + + +def _close_indicator_quietly(indicator: SyncStatusIndicator) -> None: + try: + indicator.close() + except Exception as exc_obj: + log.debug("Error restoring spinner cursor: %s", exc_obj) + + +atexit.register(_restore_cursors_on_exit) + + +class SyncStatusIndicator: + """Owns the "which repo is running now" UI for a sync session. + + Exactly one repo is considered "active" at a time -- this matches the + current sequential sync loop. Callers drive the indicator with + :meth:`start_repo` / :meth:`stop_repo`, or via the context manager + returned by :meth:`repo`. 
+ """ + + def __init__( + self, + *, + enabled: bool = True, + stream: t.TextIO | None = None, + tty: bool | None = None, + ) -> None: + self._stream = stream if stream is not None else sys.stdout + # Respect the explicit tty override (tests, ``--color=never``) but + # default to whatever the stream actually reports. + self._tty = ( + tty + if tty is not None + else bool(getattr(self._stream, "isatty", lambda: False)()) + ) + # A disabled indicator is a no-op -- it still honours the public API so + # callers don't need if/else ladders around every sync. + self._enabled = enabled + + self._lock = threading.Lock() + self._thread: threading.Thread | None = None + self._stop_event = threading.Event() + self._active_repo: str | None = None + self._repo_started_at: float | None = None + self._last_heartbeat_at: float | None = None + self._last_line_len = 0 + self._cursor_hidden = False + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + + def start_repo(self, name: str) -> None: + """Mark ``name`` as the currently-running repository.""" + if not self._enabled: + return + + with self._lock: + self._active_repo = name + now = time.monotonic() + self._repo_started_at = now + self._last_heartbeat_at = now + + if self._tty: + self._ensure_tty_thread() + else: + # Emit a single "· syncing" line so even log-collecting CI shows + # which repo is in flight. 
+ self._emit_line(f"· syncing {name}") + + def stop_repo(self) -> None: + """Stop showing any active-repo indicator.""" + if not self._enabled: + return + + with self._lock: + self._active_repo = None + self._repo_started_at = None + self._last_heartbeat_at = None + + if self._tty: + self._clear_line() + + def repo(self, name: str) -> _RepoContext: + """Context manager form of :meth:`start_repo` / :meth:`stop_repo`.""" + return _RepoContext(self, name) + + def close(self) -> None: + """Stop the background thread and release the TTY cursor. + + Safe to call multiple times; calls after the first are no-ops. Always + invoked on interpreter shutdown via :mod:`atexit`. + """ + if not self._enabled: + return + + self._stop_event.set() + thread = self._thread + self._thread = None + if thread is not None: + thread.join(timeout=1.0) + self._clear_line() + if self._cursor_hidden: + try: + self._stream.write(_SHOW_CURSOR) + self._stream.flush() + except (OSError, ValueError): + pass + self._cursor_hidden = False + _ACTIVE_INDICATORS.discard(self) + + # ------------------------------------------------------------------ + # Internals + # ------------------------------------------------------------------ + + def _ensure_tty_thread(self) -> None: + if self._thread is not None and self._thread.is_alive(): + return + + if not self._cursor_hidden: + try: + self._stream.write(_HIDE_CURSOR) + self._stream.flush() + except (OSError, ValueError): + return + self._cursor_hidden = True + _ACTIVE_INDICATORS.add(self) + + self._stop_event.clear() + self._thread = threading.Thread( + target=self._tty_loop, + daemon=True, + name="vcspull-sync-indicator", + ) + self._thread.start() + + def _tty_loop(self) -> None: + frames = itertools.cycle(_SPINNER_FRAMES) + while not self._stop_event.is_set(): + frame = next(frames) + with self._lock: + name = self._active_repo + started = self._repo_started_at + if name is not None and started is not None: + elapsed = time.monotonic() - started + 
self._render_tty(frame, name, elapsed) + else: + self._clear_line() + self._stop_event.wait(_TTY_REFRESH_INTERVAL) + + def _render_tty(self, frame: str, name: str, elapsed: float) -> None: + line = f"{frame} syncing {name} … {elapsed:4.1f}s" + try: + pad = max(self._last_line_len - len(line), 0) + self._stream.write(_CURSOR_TO_COL0 + line + (" " * pad)) + self._stream.flush() + self._last_line_len = len(line) + except (OSError, ValueError): + pass + + def _emit_line(self, line: str) -> None: + try: + self._stream.write(line + "\n") + self._stream.flush() + except (OSError, ValueError): + pass + + def _clear_line(self) -> None: + if not self._tty or self._last_line_len == 0: + return + try: + self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + self._stream.flush() + except (OSError, ValueError): + pass + self._last_line_len = 0 + + def heartbeat(self) -> None: + """Emit a non-TTY heartbeat if enough time has passed. + + Called from the main sync loop whenever the caller wants to give the + user a "still working" signal (e.g. between repositories, or during + the watchdog poll). The TTY path ignores this because the spinner + refresh already provides the signal. 
+ """ + if not self._enabled or self._tty: + return + + with self._lock: + name = self._active_repo + started = self._repo_started_at + last = self._last_heartbeat_at + if name is None or started is None or last is None: + return + now = time.monotonic() + if now - last < _HEARTBEAT_INTERVAL: + return + + elapsed = now - started + self._emit_line(f"… still syncing {name} ({elapsed:.0f}s elapsed)") + with self._lock: + self._last_heartbeat_at = now + + +class _RepoContext: + """Context-manager helper returned by :meth:`SyncStatusIndicator.repo`.""" + + __slots__ = ("_indicator", "_name") + + def __init__(self, indicator: SyncStatusIndicator, name: str) -> None: + self._indicator = indicator + self._name = name + + def __enter__(self) -> _RepoContext: + self._indicator.start_repo(self._name) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: t.Any, + ) -> None: + self._indicator.stop_repo() + + +def build_indicator( + *, + human: bool, + color: str, + stream: t.TextIO | io.StringIO | None = None, + tty: bool | None = None, +) -> SyncStatusIndicator: + """Return a ``SyncStatusIndicator`` configured for the current session. + + Disabled when output is non-human (JSON/NDJSON) or when colours are turned + off -- the latter implies the user wants quiet, machine-friendly output. + """ + enabled = human and color != "never" + # io.StringIO satisfies the TextIO protocol at runtime; tests use this to + # capture spinner output without wrestling with subclassing. 
+ concrete_stream: t.Any = stream + return SyncStatusIndicator( + enabled=enabled, + stream=concrete_stream, + tty=tty, + ) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index b92499747..8c9a5bc8f 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -50,6 +50,7 @@ PlanSummary, get_output_mode, ) +from ._progress import SyncStatusIndicator, build_indicator from ._workspaces import filter_by_workspace from .status import check_repo_status @@ -1163,6 +1164,74 @@ def silent_progress(output: str, timestamp: datetime) -> None: progress_callback = silent_progress + indicator = build_indicator(human=is_human, color=color) + + try: + _run_sync_loop( + found_repos=found_repos, + formatter=formatter, + colors=colors, + summary=summary, + timed_out_repos=timed_out_repos, + progress_callback=progress_callback, + is_human=is_human, + repo_timeout=repo_timeout, + exit_on_error=exit_on_error, + include_worktrees=include_worktrees, + dry_run=dry_run, + parser=parser, + log_file_path=log_file_path, + indicator=indicator, + ) + finally: + indicator.close() + + _emit_summary(formatter, colors, summary) + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out_repos, + timeout=repo_timeout, + log_file_path=log_file_path, + ) + + # When there were plain failures (no timeout) surface the debug log path + # anyway so the user can post-mortem without re-running with --log-file. 
+ if ( + summary.get("failed", 0) - summary.get("timed_out", 0) > 0 + and log_file_path is not None + ): + formatter.emit_text( + f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", + ) + + if exit_on_error and unmatched_count > 0: + formatter.finalize() + if parser is not None: + parser.exit(status=1, message=EXIT_ON_ERROR_MSG) + raise SystemExit(EXIT_ON_ERROR_MSG) + + formatter.finalize() + + +def _run_sync_loop( + *, + found_repos: list[ConfigDict], + formatter: OutputFormatter, + colors: Colors, + summary: dict[str, int], + timed_out_repos: list[_TimedOutRepo], + progress_callback: ProgressCallback, + is_human: bool, + repo_timeout: int, + exit_on_error: bool, + include_worktrees: bool, + dry_run: bool, + parser: argparse.ArgumentParser | None, + log_file_path: pathlib.Path | None, + indicator: SyncStatusIndicator, +) -> None: + """Iterate the repositories and drive the watchdog + indicator.""" for repo in found_repos: repo_name = repo.get("name", "unknown") repo_path = repo.get("path", "unknown") @@ -1170,6 +1239,7 @@ def silent_progress(output: str, timestamp: datetime) -> None: display_repo_path = str(PrivatePath(repo_path)) summary["total"] += 1 + indicator.heartbeat() event: dict[str, t.Any] = { "reason": "sync", @@ -1178,12 +1248,13 @@ def silent_progress(output: str, timestamp: datetime) -> None: "workspace_root": str(workspace_label), } - outcome = _sync_repo_with_watchdog( - repo, - progress_callback=progress_callback, - timeout=repo_timeout, - is_human=is_human, - ) + with indicator.repo(repo_name): + outcome = _sync_repo_with_watchdog( + repo, + progress_callback=progress_callback, + timeout=repo_timeout, + is_human=is_human, + ) if outcome.status == "timed_out": summary["timed_out"] += 1 @@ -1319,33 +1390,6 @@ def silent_progress(output: str, timestamp: datetime) -> None: parser.exit(status=1, message=EXIT_ON_ERROR_MSG) raise SystemExit(EXIT_ON_ERROR_MSG) - _emit_summary(formatter, colors, summary) - _emit_rerun_recipe( - 
formatter, - colors, - timed_out_repos=timed_out_repos, - timeout=repo_timeout, - log_file_path=log_file_path, - ) - - # When there were plain failures (no timeout) surface the debug log path - # anyway so the user can post-mortem without re-running with --log-file. - if ( - summary.get("failed", 0) - summary.get("timed_out", 0) > 0 - and log_file_path is not None - ): - formatter.emit_text( - f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", - ) - - if exit_on_error and unmatched_count > 0: - formatter.finalize() - if parser is not None: - parser.exit(status=1, message=EXIT_ON_ERROR_MSG) - raise SystemExit(EXIT_ON_ERROR_MSG) - - formatter.finalize() - def _emit_summary( formatter: OutputFormatter, diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py new file mode 100644 index 000000000..3f92dd28f --- /dev/null +++ b/tests/cli/test_sync_progress.py @@ -0,0 +1,141 @@ +"""Tests for ``vcspull.cli._progress.SyncStatusIndicator``.""" + +from __future__ import annotations + +import io +import time +import typing as t + +import pytest + +from vcspull.cli._progress import ( + SyncStatusIndicator, + build_indicator, +) + +if t.TYPE_CHECKING: + pass + + +def test_disabled_indicator_is_silent_noop() -> None: + """An indicator with ``enabled=False`` writes nothing to the stream.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=False, stream=stream, tty=True) + + indicator.start_repo("codex") + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + assert stream.getvalue() == "" + + +def test_non_tty_emits_once_on_start_and_honours_heartbeat() -> None: + """Non-TTY path prints a single start line then a "still syncing" line.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.start_repo("codex") + assert "syncing codex" in stream.getvalue() + + # Pretend the repo has been running longer than the heartbeat interval so + # the next 
heartbeat call emits. We go under the hood to avoid a slow + # sleep in the test. After start_repo both timestamps are populated, so + # the asserts below satisfy the type checker. + assert indicator._last_heartbeat_at is not None + assert indicator._repo_started_at is not None + indicator._last_heartbeat_at -= 60.0 + indicator._repo_started_at -= 60.0 + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + assert "still syncing codex" in out + # Heartbeat must not duplicate the start line; the only lines are + # start + one heartbeat. + assert out.count("syncing codex") == 2 + + +def test_non_tty_heartbeat_throttles_below_interval() -> None: + """Heartbeat does nothing if the interval hasn't elapsed yet.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.start_repo("codex") + initial_out = stream.getvalue() + # Call heartbeat a bunch without advancing time -- nothing new should + # land in the stream. + for _ in range(5): + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + # Still exactly one line: the start-of-repo notification. + assert stream.getvalue() == initial_out + assert stream.getvalue().count("\n") == 1 + + +def test_tty_spinner_renders_active_repo(monkeypatch: pytest.MonkeyPatch) -> None: + """In a TTY the spinner background thread writes to the stream.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + indicator.start_repo("codex") + # Give the spinner thread a slice of real time to tick at least once. + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # The spinner line mentions the repo and has at least one braille frame. + assert "syncing codex" in out + assert any(f in out for f in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") + # Cursor hide / show sequences bracket the spinner so tmux / kitty don't + # leak a missing cursor after vcspull exits. 
+ assert "\x1b[?25l" in out # hide + assert "\x1b[?25h" in out # show + + +def test_close_is_idempotent() -> None: + """Calling ``close()`` more than once is safe.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + indicator.start_repo("codex") + indicator.close() + indicator.close() # must not raise + indicator.close() # still must not raise + + +def test_repo_context_manager_starts_and_stops() -> None: + """The ``repo(name)`` context manager bookends the indicator state.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + with indicator.repo("codex"): + # While inside, the indicator considers 'codex' the active repo. + # We peek at the internal slot rather than waiting for a render. + assert indicator._active_repo == "codex" + + assert indicator._active_repo is None + indicator.close() + + +def test_build_indicator_disables_in_machine_output_mode() -> None: + """``build_indicator(human=False, ...)`` returns a disabled indicator.""" + indicator = build_indicator(human=False, color="auto") + assert indicator._enabled is False + + +def test_build_indicator_disables_when_color_is_never() -> None: + """``color=never`` implies quiet output -- the spinner stays off.""" + indicator = build_indicator(human=True, color="never") + assert indicator._enabled is False + + +def test_build_indicator_enabled_in_human_mode() -> None: + """Human output + auto/always colour turns the indicator on.""" + indicator = build_indicator(human=True, color="auto") + assert indicator._enabled is True + indicator.close() diff --git a/tests/test_log.py b/tests/test_log.py index 8fe6972b7..f114a7d66 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -428,6 +428,7 @@ def test_get_cli_logger_names_includes_base() -> None: "vcspull.cli", "vcspull.cli._colors", "vcspull.cli._output", + "vcspull.cli._progress", "vcspull.cli._workspaces", "vcspull.cli.add", "vcspull.cli.discover", From 
1bce93611a01287552cdcb3888a5185fde515243 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:22:36 -0500 Subject: [PATCH 06/32] fix(sync[output]) Deduplicate log-file hint; drop emojis from new UI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Two regressions in the post-summary output reported by the user: 1. The "Full debug log: ..." line printed twice whenever a batch had both timed-out repos AND plain failures -- _emit_rerun_recipe emitted it, and the separate "surface-on-failure" block after the summary emitted it again. Single source of truth beats two coordinated emitters. 2. The new timeout / rerun-recipe / spinner output used emoji-like glyphs (⏱ stopwatch, · middle dot, … ellipsis, Braille spinner frames). The user asked to avoid emojis while keeping the semantic colours, which already carry the warning signal on their own. what: - Remove the log-file emit from _emit_rerun_recipe. The sole emit site is now after _emit_summary, gated on summary['failed'] > 0 -- timeouts are already counted there so the hint fires exactly once on any failure. - Drop the now-unused log_file_path kwarg from _emit_rerun_recipe. Mirror the callers (exit-on-error branch in the sync loop + post-summary in _sync_impl). - Replace emoji/glyph decorations in the rerun recipe with ASCII: ⏱ -> plain "Timed out:" label (warning colour), -, ->. - Replace the in-loop timeout row prefix ⏱ -> - and → -> ->. - Progress indicator: swap Braille spinner frames (⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏) for ASCII |/-\; drop the · start-line prefix; replace … with ... in the spinner line and the non-TTY heartbeat. - Tests: drop the log-file-path assertion from the rerun-recipe test (the helper no longer takes the kwarg); add a regression guarding against the ⏱ glyph leaking back into the recipe; update the spinner test to assert ASCII frames and to forbid the previous Unicode decorations (Braille frames, ellipsis, middle dot). 
--- src/vcspull/cli/_progress.py | 16 +++++++------ src/vcspull/cli/sync.py | 41 ++++++++++++++------------------- tests/cli/test_sync_progress.py | 7 ++++-- tests/cli/test_sync_watchdog.py | 8 +++---- 4 files changed, 34 insertions(+), 38 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index dff9e3060..d2033e15f 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -30,8 +30,10 @@ _ERASE_LINE = "\033[2K" _CURSOR_TO_COL0 = "\r" -#: Braille spinner frames -- 100 ms tick produces a smooth rotation. -_SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" +#: ASCII spinner frames -- chosen over Braille/emoji so the output stays +#: plain in logs, minimal terminals, and remote sessions where Unicode +#: glyphs can render as tofu. +_SPINNER_FRAMES = "|/-\\" #: How often to refresh the spinner line in the TTY path. _TTY_REFRESH_INTERVAL = 0.1 @@ -118,9 +120,9 @@ def start_repo(self, name: str) -> None: if self._tty: self._ensure_tty_thread() else: - # Emit a single "· syncing" line so even log-collecting CI shows - # which repo is in flight. - self._emit_line(f"· syncing {name}") + # Emit a single start line so even log-collecting CI shows which + # repo is in flight. + self._emit_line(f"syncing {name}") def stop_repo(self) -> None: """Stop showing any active-repo indicator.""" @@ -203,7 +205,7 @@ def _tty_loop(self) -> None: self._stop_event.wait(_TTY_REFRESH_INTERVAL) def _render_tty(self, frame: str, name: str, elapsed: float) -> None: - line = f"{frame} syncing {name} … {elapsed:4.1f}s" + line = f"{frame} syncing {name} ... {elapsed:4.1f}s" try: pad = max(self._last_line_len - len(line), 0) self._stream.write(_CURSOR_TO_COL0 + line + (" " * pad)) @@ -251,7 +253,7 @@ def heartbeat(self) -> None: return elapsed = now - started - self._emit_line(f"… still syncing {name} ({elapsed:.0f}s elapsed)") + self._emit_line(f"... 
still syncing {name} ({elapsed:.0f}s elapsed)") with self._lock: self._last_heartbeat_at = now diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 8c9a5bc8f..2f984bc63 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -796,7 +796,6 @@ def _emit_rerun_recipe( *, timed_out_repos: list[_TimedOutRepo], timeout: int, - log_file_path: pathlib.Path | None, ) -> None: """Print a copyable recipe for rerunning just the timed-out repositories. @@ -817,7 +816,7 @@ def _emit_rerun_recipe( formatter.emit_text("") formatter.emit_text( - f"{colors.warning('⏱')} Timed out: " + f"{colors.warning('Timed out:')} " f"{colors.warning(str(len(timed_out_repos)))} " f"repositor{'y' if len(timed_out_repos) == 1 else 'ies'} " f"exceeded {timeout}s.", @@ -825,14 +824,14 @@ def _emit_rerun_recipe( for repo in timed_out_repos: display_path = str(PrivatePath(repo.path)) formatter.emit_text( - f" {colors.warning('⏱')} {colors.info(repo.name):<32} " - f"{colors.muted('→')} {display_path} " + f" {colors.warning('-')} {colors.info(repo.name):<32} " + f"{colors.muted('->')} {display_path} " f"({colors.muted(f'{repo.duration:.1f}s elapsed')})", ) formatter.emit_text("") formatter.emit_text( - f"{colors.info('→')} Rerun just the affected repositories with a " + f"{colors.info('->')} Rerun just the affected repositories with a " f"larger timeout:", ) for workspace_root, repos in grouped.items(): @@ -843,7 +842,7 @@ def _emit_rerun_recipe( ) formatter.emit_text( - f"{colors.info('→')} Or with verbose logging to diagnose the hang:", + f"{colors.info('->')} Or with verbose logging to diagnose the hang:", ) for workspace_root, repos in grouped.items(): names = " ".join(shlex.quote(r.name) for r in repos) @@ -854,7 +853,7 @@ def _emit_rerun_recipe( ) formatter.emit_text( - f"{colors.info('→')} Probe each repository manually " + f"{colors.info('->')} Probe each repository manually " f"({colors.muted('these should return in under 10s')}):", ) for repo in timed_out_repos: @@ 
-864,12 +863,6 @@ def _emit_rerun_recipe( f"git -C {shlex.quote(display_path)} fetch --dry-run", ) - if log_file_path is not None: - formatter.emit_text("") - formatter.emit_text( - f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", - ) - def sync( repo_patterns: list[str], @@ -1192,17 +1185,13 @@ def silent_progress(output: str, timestamp: datetime) -> None: colors, timed_out_repos=timed_out_repos, timeout=repo_timeout, - log_file_path=log_file_path, ) - # When there were plain failures (no timeout) surface the debug log path - # anyway so the user can post-mortem without re-running with --log-file. - if ( - summary.get("failed", 0) - summary.get("timed_out", 0) > 0 - and log_file_path is not None - ): + # Surface the debug log path once, only when something went wrong, so the + # user has a single post-mortem trail regardless of the failure mode. + if summary.get("failed", 0) > 0 and log_file_path is not None: formatter.emit_text( - f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", + f"{colors.info('->')} Full debug log: {colors.muted(str(log_file_path))}", ) if exit_on_error and unmatched_count > 0: @@ -1273,9 +1262,9 @@ def _run_sync_loop( event["details"] = outcome.captured_output.strip() formatter.emit(event) formatter.emit_text( - f"{colors.warning('⏱')} Timed out {colors.info(repo_name)} " + f"{colors.warning('-')} Timed out {colors.info(repo_name)} " f"after {colors.warning(f'{outcome.duration:.1f}s')} " - f"{colors.muted('→')} {display_repo_path}", + f"{colors.muted('->')} {display_repo_path}", ) if exit_on_error: _emit_rerun_recipe( @@ -1283,9 +1272,13 @@ def _run_sync_loop( colors, timed_out_repos=timed_out_repos, timeout=repo_timeout, - log_file_path=log_file_path, ) _emit_summary(formatter, colors, summary) + if log_file_path is not None: + formatter.emit_text( + f"{colors.info('->')} Full debug log: " + f"{colors.muted(str(log_file_path))}", + ) formatter.finalize() if parser is not None: 
parser.exit(status=1, message=EXIT_ON_ERROR_MSG) diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 3f92dd28f..6a42423cd 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -88,13 +88,16 @@ def test_tty_spinner_renders_active_repo(monkeypatch: pytest.MonkeyPatch) -> Non indicator.close() out = stream.getvalue() - # The spinner line mentions the repo and has at least one braille frame. + # The spinner line mentions the repo and has at least one ASCII frame. assert "syncing codex" in out - assert any(f in out for f in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") + assert any(f in out for f in "|/-\\") # Cursor hide / show sequences bracket the spinner so tmux / kitty don't # leak a missing cursor after vcspull exits. assert "\x1b[?25l" in out # hide assert "\x1b[?25h" in out # show + # No emoji / non-ASCII decoration leaks out of the spinner path. + for glyph in ("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏", "…", "·"): + assert glyph not in out def test_close_is_idempotent() -> None: diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py index 8d99b86bc..32bb43d79 100644 --- a/tests/cli/test_sync_watchdog.py +++ b/tests/cli/test_sync_watchdog.py @@ -187,7 +187,6 @@ def test_rerun_recipe_emits_one_line_per_workspace( colors, timed_out_repos=timed_out, timeout=10, - log_file_path=tmp_path / "vcspull-debug.log", ) formatter.finalize() @@ -204,8 +203,9 @@ def test_rerun_recipe_emits_one_line_per_workspace( # Include a manual git probe so the user can isolate the failure mode. assert "GIT_TERMINAL_PROMPT=0" in captured assert "git -C" in captured - # Log-file path is surfaced when provided. - assert "vcspull-debug.log" in captured + # The rerun recipe itself must stay emoji-free -- plain ASCII markers + # only. Clock/stopwatch emoji had shipped as the prior prefix; guard it. 
+ assert "⏱" not in captured def test_rerun_recipe_is_noop_when_no_timeouts( @@ -220,7 +220,6 @@ def test_rerun_recipe_is_noop_when_no_timeouts( colors, timed_out_repos=[], timeout=10, - log_file_path=None, ) formatter.finalize() @@ -249,7 +248,6 @@ def test_rerun_recipe_scales_timeout_suggestion( ), ], timeout=30, - log_file_path=None, ) formatter.finalize() From 5d425b2c080388b0a8711be8c50751acd6c6229d Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:39:04 -0500 Subject: [PATCH 07/32] fix(sync[output]) Restore Braille spinner, fix line-bleed and Ctrl-C MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Three regressions reported against the prior polish commit during a real `vcspull sync --workspace ~/study/rust --all` run: 1. Braille spinner was the right glyph set; `|/-\` was an over-correction of the "avoid emojis" feedback. The user pointed to tmuxp -- which also uses Braille -- as the model. 2. The spinner's \r redraws collided with libvcs's concurrent stdout writes (log lines via the `libvcs` StreamHandler, subprocess output via `progress_cb`). The cursor sat mid-spinner when those writes landed and produced artefacts like `Already on 'main'ec ... 1.1s`. 3. Ctrl-C during the batch was silently downgraded to a per-repo "failed" outcome (the watchdog caught `BaseException`), so the loop moved on to the next repo. Worse, `ThreadPoolExecutor` registers its workers in the atexit join list, so Python's interpreter shutdown blocked on the wedged libvcs subprocess thread -- a second Ctrl-C was needed and a `concurrent/futures/thread.py` traceback printed. what: - Restore `_SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"`. - Add `SyncStatusIndicator.write(text)` which, under the same lock the redraw loop holds, emits `\r\033[2K` before the caller's text when a spinner frame is in flight, then flushes. Zero `_last_line_len` so the next tick redraws cleanly. 
- Wrap each TTY frame in ANSI synchronized-output markers (`\x1b[?2026h` / `\x1b[?2026l`) so terminals flip the spinner line atomically; the bracket is ignored by terminals that don't implement the sequence. - Install a narrow stdout diverter while the spinner is enabled: `_IndicatorStreamProxy` adapts the `libvcs` and `vcspull` StreamHandlers to call `indicator.write(...)`. FileHandlers (the debug log) are left alone so `/tmp/vcspull-debug-*.log` keeps its full trace regardless. libvcs's `progress_callback` is rewired to `indicator.write(output)` for the duration of the loop. - Replace `concurrent.futures.ThreadPoolExecutor` with a raw daemon `threading.Thread` + `threading.Event` in `_sync_repo_with_watchdog`. Daemon threads are forcibly terminated at interpreter shutdown; no `_python_exit` join, no second-Ctrl-C traceback. Narrow the catch to `Exception` so `KeyboardInterrupt` and `SystemExit` propagate; the worker stashes a `BaseException` and the main thread re-raises it. - Top-level `KeyboardInterrupt` handler in `_sync_impl` prints "Interrupted by user.", emits the partial summary, surfaces the debug log path, and raises `SystemExit(130)` (conventional SIGINT exit code). - Tests: re-assert Braille frames and the synchronized-output escape sequences; add `test_write_clears_spinner_line_before_emitting_in_tty` as a regression for artefact 2; add `test_watchdog_propagates_keyboard_interrupt_from_worker` for the narrowed catch. 
--- src/vcspull/cli/_progress.py | 67 +++++++++--- src/vcspull/cli/sync.py | 184 ++++++++++++++++++++++++-------- tests/cli/test_sync_progress.py | 55 +++++++++- tests/cli/test_sync_watchdog.py | 28 +++++ 4 files changed, 271 insertions(+), 63 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index d2033e15f..8bcc673d5 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -29,11 +29,19 @@ _SHOW_CURSOR = "\033[?25h" _ERASE_LINE = "\033[2K" _CURSOR_TO_COL0 = "\r" - -#: ASCII spinner frames -- chosen over Braille/emoji so the output stays -#: plain in logs, minimal terminals, and remote sessions where Unicode -#: glyphs can render as tofu. -_SPINNER_FRAMES = "|/-\\" +#: Synchronized-output bracket -- modern terminals (kitty, iTerm2, WezTerm, +#: recent xterm) buffer everything between these markers and flip to the +#: new state atomically. Terminals that don't recognise the sequence ignore +#: it, so this is a safe belt-and-braces against mid-frame tearing when the +#: spinner redraws while ``progress_cb`` writes from another thread. +_SYNC_START = "\x1b[?2026h" +_SYNC_END = "\x1b[?2026l" + +#: Braille spinner frames -- same glyphs tmuxp uses; modern terminals +#: render them crisply and the rotation reads cleaner than the +#: ASCII ``|/-\`` set. On exotic terminals that can't render Unicode the +#: cells fall back to their Braille block without breaking output. +_SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" #: How often to refresh the spinner line in the TTY path. _TTY_REFRESH_INTERVAL = 0.1 @@ -141,6 +149,35 @@ def repo(self, name: str) -> _RepoContext: """Context manager form of :meth:`start_repo` / :meth:`stop_repo`.""" return _RepoContext(self, name) + @property + def enabled(self) -> bool: + """Whether the indicator is drawing (TTY + human output + colour).""" + return self._enabled + + def write(self, text: str) -> None: + """Emit ``text`` without clobbering (or being clobbered by) the spinner. 
+ + When the spinner's TTY loop has drawn a frame, the cursor sits at the + end of that line and a raw write from another thread would either + tack onto the spinner line or race the next redraw. We hold the same + lock the redraw loop does, clear any in-flight line, write, then let + the next tick redraw cleanly. + + Used by the sync loop to route libvcs log output and libvcs's + progress callback through a single coordinated channel. + """ + if not text: + return + with self._lock: + try: + if self._tty and self._last_line_len: + self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + self._stream.write(text) + self._stream.flush() + self._last_line_len = 0 + except (OSError, ValueError): + pass + def close(self) -> None: """Stop the background thread and release the TTY cursor. @@ -206,13 +243,19 @@ def _tty_loop(self) -> None: def _render_tty(self, frame: str, name: str, elapsed: float) -> None: line = f"{frame} syncing {name} ... {elapsed:4.1f}s" - try: - pad = max(self._last_line_len - len(line), 0) - self._stream.write(_CURSOR_TO_COL0 + line + (" " * pad)) - self._stream.flush() - self._last_line_len = len(line) - except (OSError, ValueError): - pass + pad = max(self._last_line_len - len(line), 0) + # Holding the lock around the actual write ensures a concurrent + # ``write()`` (called by the stdout diverter on the main thread) + # can't begin mid-frame and end up fighting with the ``\r`` redraw. 
+ with self._lock: + try: + self._stream.write( + _SYNC_START + _CURSOR_TO_COL0 + line + (" " * pad) + _SYNC_END, + ) + self._stream.flush() + self._last_line_len = len(line) + except (OSError, ValueError): + pass def _emit_line(self, line: str) -> None: try: diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 2f984bc63..5b3cc4cd1 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -4,7 +4,6 @@ import argparse import asyncio -import concurrent.futures import contextlib import logging import os @@ -13,6 +12,7 @@ import shlex import subprocess import sys +import threading import typing as t from collections.abc import Callable from copy import deepcopy @@ -718,6 +718,57 @@ def _resolve_repo_timeout(cli_timeout: int | None) -> int: return _DEFAULT_REPO_TIMEOUT_SECONDS +class _IndicatorStreamProxy: + r"""File-like adapter that routes writes through a ``SyncStatusIndicator``. + + Installed on the ``libvcs`` / ``vcspull`` logger ``StreamHandler`` while + the spinner is active so logged lines clear the spinner before they + print, instead of appending to its in-flight ``\r``-redrawn line. + """ + + def __init__(self, indicator: SyncStatusIndicator) -> None: + self._indicator = indicator + + def write(self, text: str) -> int: + self._indicator.write(text) + return len(text) + + def flush(self) -> None: + # ``SyncStatusIndicator.write`` flushes on every call; nothing extra + # to do here, but ``logging.StreamHandler.emit`` calls ``flush()`` + # unconditionally so we must accept the no-op. + return + + +def _install_indicator_log_diverter( + indicator: SyncStatusIndicator, +) -> Callable[[], None]: + """Rebind the ``libvcs`` / ``vcspull`` stream handlers through ``indicator``. + + Returns a callable that restores the original streams. Only touches + handlers that are :class:`logging.StreamHandler` but not + :class:`logging.FileHandler` -- the debug log file keeps writing + straight to disk regardless of the spinner. 
+ """ + proxy = _IndicatorStreamProxy(indicator) + patched: list[tuple[logging.StreamHandler[t.Any], t.Any]] = [] + for logger_name in ("libvcs", "vcspull"): + logger = logging.getLogger(logger_name) + for handler in logger.handlers: + if not isinstance(handler, logging.StreamHandler): + continue + if isinstance(handler, logging.FileHandler): + continue + patched.append((handler, handler.stream)) + handler.stream = proxy + + def _restore() -> None: + for handler, original in patched: + handler.stream = original + + return _restore + + def _sync_repo_with_watchdog( repo: ConfigDict, *, @@ -727,62 +778,70 @@ def _sync_repo_with_watchdog( ) -> _SyncOutcome: """Run :func:`update_repo` under a wall-clock watchdog. - The libvcs call is executed on a daemon worker so the main thread can - enforce a timeout via :meth:`concurrent.futures.Future.result`. On a - timeout the worker is left to drain (the libvcs 0.40.0a0 ``timeout`` - kwarg will eventually reach down through the subprocess layer, but until - every call site plumbs it through we rely on the OS to reap abandoned - children when vcspull exits). + The libvcs call runs on a daemon :class:`threading.Thread`; the main + thread uses a completion :class:`threading.Event` as its deadline. Raw + threads are deliberate -- :class:`concurrent.futures.ThreadPoolExecutor` + registers its workers in ``concurrent.futures.thread._threads_queues``, + whose ``atexit`` hook ``_python_exit`` joins every worker on interpreter + shutdown. If the user hits Ctrl-C while a libvcs subprocess is wedged, + that join hangs the process forever. Daemon threads skip the join + entirely: they're forcibly terminated at shutdown. 
""" buffer: StringIO | None = None if is_human else StringIO() + done = threading.Event() + worker_error: list[BaseException] = [] def _run() -> None: - if buffer is None: - update_repo(repo, progress_callback=progress_callback) - return - # Non-human output modes capture everything so the NDJSON/JSON payload - # contains the per-repo details without polluting stdout. - with ( - contextlib.redirect_stdout(buffer), - contextlib.redirect_stderr(buffer), - ): - update_repo(repo, progress_callback=progress_callback) + try: + if buffer is None: + update_repo(repo, progress_callback=progress_callback) + return + # Non-human output modes capture everything so the NDJSON/JSON + # payload contains the per-repo details without polluting stdout. + with ( + contextlib.redirect_stdout(buffer), + contextlib.redirect_stderr(buffer), + ): + update_repo(repo, progress_callback=progress_callback) + except BaseException as exc_obj: + # Keep ``BaseException`` here so a worker-side KeyboardInterrupt + # (rare but possible via ``PyThreadState_SetAsyncExc``) is still + # reported back up. The main thread decides how to handle it. + worker_error.append(exc_obj) + finally: + done.set() started = monotonic() - # A dedicated single-worker pool is cheaper than spinning a raw Thread; - # the future API also makes cancellation vs. completion explicit. We - # intentionally do NOT use the executor as a context manager: its - # ``__exit__`` calls ``shutdown(wait=True)`` which would block behind a - # wedged worker and defeat the whole point of the watchdog. Instead, we - # call ``shutdown(wait=False)`` on every return path so the daemon thread - # is abandoned and cleaned up by the OS when vcspull exits. 
- executor = concurrent.futures.ThreadPoolExecutor( - max_workers=1, - thread_name_prefix="vcspull-sync", + worker = threading.Thread( + target=_run, + name=f"vcspull-sync-{repo.get('name', 'repo')}", + daemon=True, ) - future = executor.submit(_run) - try: - future.result(timeout=timeout) - except concurrent.futures.TimeoutError: - duration = monotonic() - started - future.cancel() - executor.shutdown(wait=False, cancel_futures=True) + worker.start() + + finished = done.wait(timeout=timeout) + if not finished: + # The worker is still busy; abandon it. Because it's a daemon it'll + # die with the interpreter or when libvcs's subprocess finally exits. return _SyncOutcome( status="timed_out", captured_output=buffer.getvalue() if buffer else None, - duration=duration, - ) - except BaseException as exc_obj: - duration = monotonic() - started - executor.shutdown(wait=False, cancel_futures=True) - return _SyncOutcome( - status="failed", - captured_output=buffer.getvalue() if buffer else None, - error=exc_obj, - duration=duration, + duration=monotonic() - started, ) - executor.shutdown(wait=False, cancel_futures=True) + if worker_error: + err = worker_error[0] + if isinstance(err, Exception): + return _SyncOutcome( + status="failed", + captured_output=buffer.getvalue() if buffer else None, + error=err, + duration=monotonic() - started, + ) + # ``BaseException`` that is not ``Exception`` (KeyboardInterrupt, + # SystemExit): propagate -- main thread will tear the batch down. + raise err + return _SyncOutcome( status="synced", captured_output=buffer.getvalue() if buffer else None, @@ -1146,8 +1205,17 @@ def _sync_impl( } timed_out_repos: list[_TimedOutRepo] = [] + indicator = build_indicator(human=is_human, color=color) + progress_callback: ProgressCallback - if is_human: + if is_human and indicator.enabled: + # Route libvcs's streaming subprocess output through the indicator so + # it shares the spinner's lock and can't tear the frame mid-redraw. 
+ def _indicator_progress(output: str, timestamp: datetime) -> None: + indicator.write(output) + + progress_callback = _indicator_progress + elif is_human: progress_callback = progress_cb else: @@ -1157,8 +1225,11 @@ def silent_progress(output: str, timestamp: datetime) -> None: progress_callback = silent_progress - indicator = build_indicator(human=is_human, color=color) + restore_log_streams: Callable[[], None] | None = None + if indicator.enabled: + restore_log_streams = _install_indicator_log_diverter(indicator) + interrupted = False try: _run_sync_loop( found_repos=found_repos, @@ -1176,9 +1247,28 @@ def silent_progress(output: str, timestamp: datetime) -> None: log_file_path=log_file_path, indicator=indicator, ) + except KeyboardInterrupt: + # Ctrl-C during the loop: stop the indicator cleanly, print a partial + # summary, and exit with the conventional SIGINT code (128 + 2 = 130) + # so shells and CI treat us as interrupted rather than crashed. + interrupted = True finally: + if restore_log_streams is not None: + restore_log_streams() indicator.close() + if interrupted: + formatter.emit_text("") + formatter.emit_text(colors.warning("Interrupted by user.")) + _emit_summary(formatter, colors, summary) + if log_file_path is not None: + formatter.emit_text( + f"{colors.info('->')} Full debug log: " + f"{colors.muted(str(log_file_path))}", + ) + formatter.finalize() + raise SystemExit(130) + _emit_summary(formatter, colors, summary) _emit_rerun_recipe( formatter, diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 6a42423cd..3c657d1df 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -88,15 +88,20 @@ def test_tty_spinner_renders_active_repo(monkeypatch: pytest.MonkeyPatch) -> Non indicator.close() out = stream.getvalue() - # The spinner line mentions the repo and has at least one ASCII frame. + # The spinner line mentions the repo and has at least one Braille frame. 
assert "syncing codex" in out - assert any(f in out for f in "|/-\\") + assert any(f in out for f in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") # Cursor hide / show sequences bracket the spinner so tmux / kitty don't # leak a missing cursor after vcspull exits. assert "\x1b[?25l" in out # hide assert "\x1b[?25h" in out # show - # No emoji / non-ASCII decoration leaks out of the spinner path. - for glyph in ("⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏", "…", "·"): + # Frames are wrapped in ANSI synchronized-output markers so terminals + # flip the spinner line atomically, preventing mid-frame tearing when a + # concurrent writer hits stdout. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + # Decorative ornaments dropped in the prior polish pass must stay gone. + for glyph in ("…", "·", "⏱"): assert glyph not in out @@ -142,3 +147,45 @@ def test_build_indicator_enabled_in_human_mode() -> None: indicator = build_indicator(human=True, color="auto") assert indicator._enabled is True indicator.close() + + +def test_write_clears_spinner_line_before_emitting_in_tty() -> None: + r"""A concurrent writer clears the spinner's in-flight line first. + + Models the real-world interleave: libvcs emits a log record while the + spinner is mid-draw. Without the clear-then-write sequence the log + message would tack onto the spinner line and produce artefacts like + ``Already on 'main'ec ... 1.1s``. The guard: every ``write()`` in a + TTY must emit ``\r\033[2K`` when the spinner has a drawn line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + # Pretend the spinner has rendered a frame -- that's the state a + # concurrent writer actually observes mid-sync. + indicator._last_line_len = 40 + + indicator.write("|git| (codex) Updating to 'main'\n") + + out = stream.getvalue() + # CR + ERASE_LINE must appear before the message, not after it. 
+ clear_idx = out.find("\r\x1b[2K") + msg_idx = out.find("Updating") + assert clear_idx != -1, "expected clear-line sequence before the write" + assert msg_idx > clear_idx, "clear must precede the actual text" + # After a concurrent write, the spinner should redraw from scratch; the + # lingering line-length counter is reset so the next frame does a full + # render. + assert indicator._last_line_len == 0 + + +def test_write_non_tty_path_passes_through_untouched() -> None: + """Non-TTY streams never see the clear-line escape sequence.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.write("hello world\n") + + out = stream.getvalue() + assert out == "hello world\n" + assert "\x1b[2K" not in out diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py index 32bb43d79..eb66e7baf 100644 --- a/tests/cli/test_sync_watchdog.py +++ b/tests/cli/test_sync_watchdog.py @@ -254,3 +254,31 @@ def test_rerun_recipe_scales_timeout_suggestion( captured = capsys.readouterr().out # max(120, 30 * 10) = 300 assert "--timeout 300" in captured + + +def test_watchdog_propagates_keyboard_interrupt_from_worker( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A ``KeyboardInterrupt`` in the worker bubbles out of the watchdog. + + ``_sync_repo_with_watchdog`` runs the libvcs call on a daemon thread. If + the main thread receives Ctrl-C (normal case) it never reaches this code + path, but a worker-side ``KeyboardInterrupt`` (rare, via + ``PyThreadState_SetAsyncExc``) must NOT be laundered into a per-repo + "failed" outcome -- it has to propagate so the outer loop can tear the + batch down cleanly. This locks down the narrowed catch (``Exception``, + not ``BaseException``). 
+ """ + + def _raising(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: + raise KeyboardInterrupt + + monkeypatch.setattr(sync_module, "update_repo", _raising) + + with pytest.raises(KeyboardInterrupt): + _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "kb"}), + progress_callback=_noop_progress, + timeout=5, + is_human=True, + ) From b561d7f150fdc60740d241f432b4f33586e81920 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:43:21 -0500 Subject: [PATCH 08/32] feat(sync[indicator]) Colourise the Braille spinner cell MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: tmuxp's spinner renders each Braille frame through ``self.colors.info(frame)``, which reads brightly against the surrounding dim log stream without tinting the repo name / elapsed suffix. The user called that out as the model and the plain-ANSI version we'd shipped looked visually flat next to libvcs's coloured ``|git| (repo)`` lines. what: - Accept a ``Colors`` instance on ``SyncStatusIndicator`` (defaulting to a NEVER-colour palette so ad-hoc callers and tests don't leak ANSI codes). - Wrap only the Braille cell in ``self._colors.info(frame)`` inside ``_render_tty``; keep the ``syncing ... 1.1s`` suffix in the terminal default so it doesn't compete with the permanent-line colours (✓ Synced green, - Timed out yellow). - Track visible-cell length (not escape-inclusive string length) in ``_last_line_len`` so the pad-clear arithmetic still aligns the next redraw even though the line has grown with ANSI bytes. - Thread the shared ``Colors`` from ``_sync_impl`` into ``build_indicator`` so the spinner honours ``--color=always/auto/never`` exactly like the rest of the CLI. - Test: ``test_tty_spinner_colours_the_frame_cell`` regex-matches an ANSI colour sequence immediately followed by a Braille frame, locking in the tmuxp-parity and guarding against future "colour the whole line" drift. 
--- src/vcspull/cli/_progress.py | 29 +++++++++++++++++++++++++--- src/vcspull/cli/sync.py | 2 +- tests/cli/test_sync_progress.py | 34 +++++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 4 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 8bcc673d5..ec573a8bf 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -21,6 +21,8 @@ import time import typing as t +from ._colors import ColorMode, Colors, get_color_mode + log = logging.getLogger(__name__) @@ -88,6 +90,7 @@ def __init__( enabled: bool = True, stream: t.TextIO | None = None, tty: bool | None = None, + colors: Colors | None = None, ) -> None: self._stream = stream if stream is not None else sys.stdout # Respect the explicit tty override (tests, ``--color=never``) but @@ -100,6 +103,11 @@ def __init__( # A disabled indicator is a no-op -- it still honours the public API so # callers don't need if/else ladders around every sync. self._enabled = enabled + # Default to a NEVER-colour palette so a caller that builds an + # indicator without wiring in the shared ``Colors`` still gets sane + # plain-text output -- nothing worse than ANSI codes leaking into + # captured streams in tests. + self._colors = colors if colors is not None else Colors(ColorMode.NEVER) self._lock = threading.Lock() self._thread: threading.Thread | None = None @@ -242,8 +250,14 @@ def _tty_loop(self) -> None: self._stop_event.wait(_TTY_REFRESH_INTERVAL) def _render_tty(self, frame: str, name: str, elapsed: float) -> None: - line = f"{frame} syncing {name} ... {elapsed:4.1f}s" - pad = max(self._last_line_len - len(line), 0) + # Colour just the spinner cell -- matches tmuxp's + # ``{self.colors.info(frame)} {msg}`` pattern and keeps the repo + # name / elapsed segment neutral so it doesn't fight the ``✓`` / + # timed-out colouring emitted on the permanent line. + coloured_frame = self._colors.info(frame) + visible = f"{frame} syncing {name} ... 
{elapsed:4.1f}s" + line = f"{coloured_frame} syncing {name} ... {elapsed:4.1f}s" + pad = max(self._last_line_len - len(visible), 0) # Holding the lock around the actual write ensures a concurrent # ``write()`` (called by the stdout diverter on the main thread) # can't begin mid-frame and end up fighting with the ``\r`` redraw. @@ -253,7 +267,10 @@ def _render_tty(self, frame: str, name: str, elapsed: float) -> None: _SYNC_START + _CURSOR_TO_COL0 + line + (" " * pad) + _SYNC_END, ) self._stream.flush() - self._last_line_len = len(line) + # Track the *visible* column count (not the string length with + # ANSI codes), so the next frame's padding calculation clears + # exactly the on-screen cells the previous frame occupied. + self._last_line_len = len(visible) except (OSError, ValueError): pass @@ -329,6 +346,7 @@ def build_indicator( color: str, stream: t.TextIO | io.StringIO | None = None, tty: bool | None = None, + colors: Colors | None = None, ) -> SyncStatusIndicator: """Return a ``SyncStatusIndicator`` configured for the current session. @@ -339,8 +357,13 @@ def build_indicator( # io.StringIO satisfies the TextIO protocol at runtime; tests use this to # capture spinner output without wrestling with subclassing. concrete_stream: t.Any = stream + # Resolve the palette from the CLI ``--color`` string when the caller + # didn't hand us a shared ``Colors`` instance -- keeps ``build_indicator`` + # usable without threading colours through every call site. 
+ resolved_colors = colors if colors is not None else Colors(get_color_mode(color)) return SyncStatusIndicator( enabled=enabled, stream=concrete_stream, tty=tty, + colors=resolved_colors, ) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 5b3cc4cd1..dc03607d0 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -1205,7 +1205,7 @@ def _sync_impl( } timed_out_repos: list[_TimedOutRepo] = [] - indicator = build_indicator(human=is_human, color=color) + indicator = build_indicator(human=is_human, color=color, colors=colors) progress_callback: ProgressCallback if is_human and indicator.enabled: diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 3c657d1df..866ec9c03 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -189,3 +189,37 @@ def test_write_non_tty_path_passes_through_untouched() -> None: out = stream.getvalue() assert out == "hello world\n" assert "\x1b[2K" not in out + + +def test_tty_spinner_colours_the_frame_cell() -> None: + """Just the Braille cell is colourised (tmuxp-style), not the whole line. + + The repo name and elapsed suffix stay in the terminal's default + foreground so they don't collide with the ``✓ Synced`` / ``- Timed out`` + colouring emitted on the permanent line when a repo completes. + """ + from vcspull.cli._colors import ColorMode, Colors + + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + colors=Colors(ColorMode.ALWAYS), + ) + + indicator.start_repo("codex") + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # A colour sequence appears immediately before at least one Braille frame. 
+ import re as _re + + pattern = _re.compile(r"\x1b\[[0-9;]*m[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]") + assert pattern.search(out), "spinner frame must be wrapped in ANSI colour" + # The repo name itself must not be inside a colour run -- look for the + # literal "syncing codex" preceded by a reset (the info() helper closes + # the colour right after the frame). + assert "syncing codex" in out From d48ca426f7a1eda3e4403834f37a29f5c3fa2f5a Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:47:48 -0500 Subject: [PATCH 09/32] fix(sync[interrupt]) Catch Ctrl-C during config load, not just the loop why: The prior interrupt handler lived inside ``_sync_impl`` wrapped around ``_run_sync_loop``. Ctrl-C landing BEFORE the loop -- notably during ``load_configs`` (libyaml parse on a large ~/.vcspull.yaml), or inside ``find_config_files``, or during the dry-run branch -- propagated all the way out as an unhandled ``KeyboardInterrupt`` and the user saw the full yaml parser stack dumped to the terminal. Observed in the reporter's chained ``vcspull sync; vcspull sync; ...`` invocation. what: - Add an outer ``except KeyboardInterrupt`` in ``sync()`` wrapping the existing ``try: _sync_impl(...) finally: teardown_file_logger(...)``. This handler fires only for interrupts outside the repo loop (the in-loop handler inside ``_sync_impl`` still runs first when applicable and emits the partial summary via its ``SystemExit(130)``, which is not caught here). - Handler writes "Interrupted by user." and the debug log path straight to ``sys.stderr`` -- the formatter may not yet exist, or may have been partially emitted, so a plain message is the safe contract. - Exits with ``SystemExit(130)`` (SIGINT convention) so shells / CI distinguish "user stopped this" from "crashed". - Regression test ``test_sync_handles_keyboard_interrupt_during_config_load`` monkeypatches ``load_configs`` to raise ``KeyboardInterrupt`` and asserts exit code 130 + stderr message. 
--- src/vcspull/cli/sync.py | 13 ++++++++++ tests/cli/test_sync_watchdog.py | 45 +++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index dc03607d0..a59b4529b 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -998,6 +998,19 @@ def sync( log_file_path=log_file_path, dry_run=dry_run, ) + except KeyboardInterrupt: + # Catch Ctrl-C from ANY phase of the sync -- not just the repo loop + # (where ``_sync_impl`` handles it with a partial summary) but also + # long pre-loop work like ``load_configs`` (YAML parse on big vcspull + # files) and post-loop emission. We write straight to stderr here + # because the outer scope is the narrowest spot where we can be sure + # the formatter either already finalised or was never built. + sys.stderr.write("\nInterrupted by user.\n") + sys.stderr.flush() + if log_file_path is not None: + sys.stderr.write(f"Full debug log: {log_file_path}\n") + sys.stderr.flush() + raise SystemExit(130) from None finally: if log_file_handler is not None: teardown_file_logger(log_file_handler) diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py index eb66e7baf..6e08aa8f3 100644 --- a/tests/cli/test_sync_watchdog.py +++ b/tests/cli/test_sync_watchdog.py @@ -282,3 +282,48 @@ def _raising(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: timeout=5, is_human=True, ) + + +def test_sync_handles_keyboard_interrupt_during_config_load( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], +) -> None: + """Ctrl-C during pre-loop work (e.g. YAML parse) exits cleanly with 130. + + Regression for the observed traceback where a KeyboardInterrupt raised + inside ``load_configs`` escaped all the way through ``cli.sync`` and out + to the top-level ``sys.exit`` as an unhandled exception, dumping the + entire yaml parser stack to the terminal. 
The outer ``sync()`` entry + point must catch the interrupt, emit a short notice, and raise + ``SystemExit(130)``. + """ + from vcspull.cli.sync import sync as sync_fn + + def _raising_load(*_args: t.Any, **_kwargs: t.Any) -> t.Any: + raise KeyboardInterrupt + + monkeypatch.setattr(sync_module, "load_configs", _raising_load) + + with pytest.raises(SystemExit) as excinfo: + sync_fn( + repo_patterns=[], + config=None, + workspace_root=None, + dry_run=False, + output_json=False, + output_ndjson=False, + color="never", + exit_on_error=False, + show_unchanged=False, + summary_only=False, + long_view=False, + relative_paths=False, + fetch=False, + offline=False, + verbosity=0, + sync_all=True, + ) + + assert excinfo.value.code == 130 + err = capsys.readouterr().err + assert "Interrupted by user." in err From 3e484ddc5d843ee51da8deea5fa7cc3d2a5d4383 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Fri, 24 Apr 2026 18:59:33 -0500 Subject: [PATCH 10/32] cli(sync[sigint]) Re-raise SIGINT under SIG_DFL so shell chains abort MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: bash/zsh abort ``cmd1; cmd2; cmd3`` sequential lists only when the child was killed by a signal (WIFSIGNALED(SIGINT)). A clean SystemExit(130) leaves the shell no reason to stop, so chained ``vcspull sync ...; vcspull sync ...`` required per-command Ctrl-C -- the reporter hit Ctrl-C four times in a four-invocation chain. Git solves this with ``sigchain_pop(sig); raise(sig)`` (sigchain.h:20-34, builtin/clone.c:416). Translate the pattern to Python so vcspull composes with ``;``-chains like git clone / git fetch. what: - Add ``_exit_on_sigint()``: POSIX does ``SIG_IGN`` (guard the gap) → ``SIG_DFL`` → ``signal.raise_signal(SIGINT)`` so the kernel exits with WIFSIGNALED(SIGINT); Windows falls back to ``SystemExit(130)``. Precondition: main thread (documented; the ValueError fails loud rather than silently defeating the change off-thread). 
- Add ``_SyncInterruptedAfterSummary(KeyboardInterrupt)`` marker so the outer ``sync()`` catch skips the plain stderr fallback when the inner ``_sync_impl`` already printed a coloured partial summary. - Inline log-file teardown in the outer ``except`` (atexit and ``finally`` do not run after ``SIG_DFL`` terminates at C level) and null the handle so the surrounding ``finally`` is a single-close no-op on the Windows fallback path. - Shield the inner ``if interrupted:`` summary emission against ``OSError`` / ``ValueError`` (e.g. ``BrokenPipeError`` when stdout is piped to ``head``) so the marker exception *always* propagates -- otherwise bash sees exit 1 and the ``;`` chain continues. - Tests: opt-in ``_fake_sigint_escalation`` fixture for in-process ``SystemExit(130)`` assertions (applied to the existing ``test_sync_handles_keyboard_interrupt_during_config_load``); mock-based unit tests for ``_exit_on_sigint`` POSIX ordering and Windows fallback; new subprocess-based ``test_exit_on_sigint_produces_wifsignaled_sigint`` asserting ``proc.returncode == -signal.SIGINT`` so the real WIFSIGNALED path is pinned down (POSIX only). - CHANGES entry under 1.59.x. Matches the design pass Claude/Gemini/GPT converged on via two rounds of weave:ask (see ``$XDG_STATE_HOME/ai-aip/repos/vcspull--*/sessions/ask/latest/``). --- CHANGES | 14 ++++ src/vcspull/cli/sync.py | 124 +++++++++++++++++++++++++------- tests/cli/test_sync_sigint.py | 71 ++++++++++++++++++ tests/cli/test_sync_watchdog.py | 104 +++++++++++++++++++++++++-- 4 files changed, 284 insertions(+), 29 deletions(-) create mode 100644 tests/cli/test_sync_sigint.py diff --git a/CHANGES b/CHANGES index ab87e5831..8e7ac770e 100644 --- a/CHANGES +++ b/CHANGES @@ -39,6 +39,20 @@ _Notes on upcoming releases will be added here_ ### New features +#### `vcspull sync`: Ctrl-C aborts the whole shell chain, not one command + +`vcspull sync` now terminates with `WIFSIGNALED(SIGINT)` on Ctrl-C instead +of exiting cleanly with status `130`. 
Bash/zsh sequential lists +(`cmd1; cmd2; cmd3`) abort on the first signal-terminated child, so +chained invocations like +`vcspull sync --workspace ~/study/rust/ --all; vcspull sync --workspace +~/study/otel/ --all` now stop on a single Ctrl-C — matching how `git +clone; git fetch` compose. Behaviour is implemented via the canonical +git pattern (`sigchain.h:20-34`): flush output, close the debug log +handle, install `SIG_DFL`, self-deliver SIGINT. Windows falls back to +the conventional `SystemExit(130)` because it has no `WIFSIGNALED` +analogue. + #### `vcspull sync`: per-repository watchdog and copyable rerun recipe A single stuck repository no longer freezes the whole batch. Every repository diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index a59b4529b..c05aaeeae 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -10,6 +10,7 @@ import pathlib import re import shlex +import signal import subprocess import sys import threading @@ -718,6 +719,57 @@ def _resolve_repo_timeout(cli_timeout: int | None) -> int: return _DEFAULT_REPO_TIMEOUT_SECONDS +class _SyncInterruptedAfterSummary(KeyboardInterrupt): + """Internal marker: ``_sync_impl`` already emitted the interrupt summary. + + Lets the outer ``sync()`` handler skip the plain-stderr fallback when the + inner layer has already printed a coloured partial summary through the + formatter. Subclassing ``KeyboardInterrupt`` keeps the exception in the + same taxonomy for the outer ``except KeyboardInterrupt`` catch. + """ + + +def _exit_on_sigint() -> t.NoReturn: + """Terminate so the parent shell sees ``WIFSIGNALED(SIGINT)``. + + Interactive shells (bash, zsh) abort ``cmd1; cmd2`` sequential lists + only when the child was killed by a signal -- a clean + ``SystemExit(130)`` leaves the shell no reason to stop. 
Match git's + pattern (``sigchain.h:20-34``; ``builtin/clone.c:416``): cleanup first, + install ``SIG_DFL``, self-deliver SIGINT so the kernel reports + ``WIFSIGNALED`` to the parent. + + Callers MUST complete cleanup before invoking this. Python ``atexit`` + hooks and ``finally`` blocks do NOT run once ``SIG_DFL`` terminates the + process at the C level. + + Windows has no ``WIFSIGNALED`` analogue and its ``raise_signal(SIGINT)`` + under ``SIG_DFL`` raises ``KeyboardInterrupt`` back at the caller + instead of exiting -- fall back to the conventional ``SystemExit(130)``. + + Precondition: must be called from the main thread of the main + interpreter. ``signal.signal`` raises ``ValueError`` otherwise. The CLI + dispatcher at ``vcspull/cli/__init__.py`` always invokes ``sync()`` on + the main thread, so this is a callsite contract rather than a runtime + check -- a silent non-main-thread fallback would only kill that thread, + not the process, and bash would still see a clean exit and continue + the ``;`` chain, defeating the whole change. + """ + if sys.platform == "win32": + raise SystemExit(130) + # The two-line swap here is deliberate: a SIGINT delivered during the + # gap either hits SIG_IGN (dropped) or SIG_DFL (terminates) -- both + # outcomes let bash see WIFSIGNALED(SIGINT), so we don't need the + # heavier ``pthread_sigmask`` route. + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGINT, signal.SIG_DFL) + signal.raise_signal(signal.SIGINT) + # raise_signal does not return under SIG_DFL on POSIX; the SystemExit + # is defence-in-depth only, satisfying ``NoReturn`` and covering exotic + # kernels where the default disposition doesn't terminate. + raise SystemExit(130) # pragma: no cover + + class _IndicatorStreamProxy: r"""File-like adapter that routes writes through a ``SyncStatusIndicator``. 
@@ -998,19 +1050,31 @@ def sync( log_file_path=log_file_path, dry_run=dry_run, ) - except KeyboardInterrupt: - # Catch Ctrl-C from ANY phase of the sync -- not just the repo loop - # (where ``_sync_impl`` handles it with a partial summary) but also - # long pre-loop work like ``load_configs`` (YAML parse on big vcspull - # files) and post-loop emission. We write straight to stderr here - # because the outer scope is the narrowest spot where we can be sure - # the formatter either already finalised or was never built. - sys.stderr.write("\nInterrupted by user.\n") - sys.stderr.flush() - if log_file_path is not None: - sys.stderr.write(f"Full debug log: {log_file_path}\n") + except KeyboardInterrupt as err: + # Catch Ctrl-C from ANY phase of the sync -- the repo loop (where + # ``_sync_impl`` handles it with a partial summary and re-raises + # ``_SyncInterruptedAfterSummary``) and also long pre-loop work + # like ``load_configs`` (YAML parse on big vcspull configs) and + # post-loop emission, where the formatter may not be built yet. + # The plain-stderr fallback below is the only safe surface in the + # pre-loop case; skip it when the inner layer already printed a + # coloured summary through the formatter. + if not isinstance(err, _SyncInterruptedAfterSummary): + sys.stderr.write("\nInterrupted by user.\n") sys.stderr.flush() - raise SystemExit(130) from None + if log_file_path is not None: + sys.stderr.write(f"Full debug log: {log_file_path}\n") + sys.stderr.flush() + # Python ``atexit`` and ``finally`` do NOT run once + # ``_exit_on_sigint`` terminates via ``SIG_DFL`` at the C level, so + # close the debug log inline. Nulling the handle makes the outer + # ``finally`` a no-op for the POSIX path AND a correct single-close + # for the Windows fallback path (where ``_exit_on_sigint`` does + # raise a regular ``SystemExit`` and the ``finally`` does run). 
+ if log_file_handler is not None: + teardown_file_logger(log_file_handler) + log_file_handler = None + _exit_on_sigint() finally: if log_file_handler is not None: teardown_file_logger(log_file_handler) @@ -1261,9 +1325,10 @@ def silent_progress(output: str, timestamp: datetime) -> None: indicator=indicator, ) except KeyboardInterrupt: - # Ctrl-C during the loop: stop the indicator cleanly, print a partial - # summary, and exit with the conventional SIGINT code (128 + 2 = 130) - # so shells and CI treat us as interrupted rather than crashed. + # Ctrl-C during the loop: stop the indicator cleanly, print a + # partial summary via the formatter, then hand termination + # semantics to the outer ``sync()`` catch. We re-raise a typed + # marker so the outer layer doesn't double-print. interrupted = True finally: if restore_log_streams is not None: @@ -1271,16 +1336,25 @@ def silent_progress(output: str, timestamp: datetime) -> None: indicator.close() if interrupted: - formatter.emit_text("") - formatter.emit_text(colors.warning("Interrupted by user.")) - _emit_summary(formatter, colors, summary) - if log_file_path is not None: - formatter.emit_text( - f"{colors.info('->')} Full debug log: " - f"{colors.muted(str(log_file_path))}", - ) - formatter.finalize() - raise SystemExit(130) + # Shield the summary emission against late-breaking ``OSError`` + # (e.g. ``BrokenPipeError`` when stdout is piped to ``head``) + # so the marker exception *always* propagates. If an OSError + # escaped here the outer layer would see a non-KeyboardInterrupt + # and bash would see a plain exit 1, letting the ``;`` chain + # continue -- exactly what this change is designed to prevent. 
+ try: + formatter.emit_text("") + formatter.emit_text(colors.warning("Interrupted by user.")) + _emit_summary(formatter, colors, summary) + if log_file_path is not None: + formatter.emit_text( + f"{colors.info('->')} Full debug log: " + f"{colors.muted(str(log_file_path))}", + ) + formatter.finalize() + except (OSError, ValueError): + pass + raise _SyncInterruptedAfterSummary from None _emit_summary(formatter, colors, summary) _emit_rerun_recipe( diff --git a/tests/cli/test_sync_sigint.py b/tests/cli/test_sync_sigint.py new file mode 100644 index 000000000..1e3c9955f --- /dev/null +++ b/tests/cli/test_sync_sigint.py @@ -0,0 +1,71 @@ +"""Subprocess-based tests asserting real ``WIFSIGNALED(SIGINT)`` termination.""" + +from __future__ import annotations + +import os +import signal +import subprocess +import sys + +import pytest + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="POSIX signal semantics only; Windows uses exit-code 130", +) +def test_exit_on_sigint_produces_wifsignaled_sigint() -> None: + """``_exit_on_sigint`` makes the process terminate via ``WIFSIGNALED(SIGINT)``. + + Bash's ``cmd1; cmd2`` sequential-list abort only kicks in when the + child was killed by a signal -- a clean ``SystemExit(130)`` leaves + the shell no reason to stop and the chain keeps running. This + assertion locks in the actual kernel signal bit, not just the exit + code. + + Why a subprocess: the production helper ends with + ``signal.raise_signal(SIGINT)`` under ``SIG_DFL``, which would take + the pytest runner down with us if we called it in-process. The only + reliable observation point is ``subprocess.Popen.returncode``, + which CPython stores as the *negative* of the terminating signal + number when ``os.waitstatus_to_exitcode`` reports ``WIFSIGNALED``. 
+ + Scope: we target ``_exit_on_sigint`` directly in a fresh interpreter + rather than driving ``vcspull sync`` end-to-end, because getting the + sync loop into a deterministic mid-flight state where SIGINT lands + on the watchdog's ``done.wait(...)`` requires either a blocking fake + remote or a monkey-patched ``update_repo`` -- both reintroduce + flake and add nothing over the existing in-process control-flow + tests in ``test_sync_watchdog.py`` (which use the + ``_fake_sigint_escalation`` fixture). The one thing those tests + can't show is that the kernel actually marks the exit as + WIFSIGNALED(SIGINT); that's what this subprocess proves. + """ + # Simulate Ctrl-C in a fresh interpreter, then route it through the + # real helper. We install ``default_int_handler`` explicitly because + # pytest's parent may have fiddled with SIGINT -- this child is a + # fresh Python process, but being explicit removes one variable. + runner = ( + "import signal\n" + "signal.signal(signal.SIGINT, signal.default_int_handler)\n" + "from vcspull.cli.sync import _exit_on_sigint\n" + "try:\n" + " signal.raise_signal(signal.SIGINT)\n" + "except KeyboardInterrupt:\n" + " _exit_on_sigint()\n" + ) + + env = {**os.environ, "PYTHONDONTWRITEBYTECODE": "1"} + + proc = subprocess.run( + [sys.executable, "-c", runner], + env=env, + capture_output=True, + check=False, + timeout=10, + ) + + assert proc.returncode == -signal.SIGINT, ( + f"expected WIFSIGNALED(SIGINT) (-{int(signal.SIGINT)}), " + f"got returncode={proc.returncode}; stderr={proc.stderr!r}" + ) diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py index 6e08aa8f3..9faf7bfda 100644 --- a/tests/cli/test_sync_watchdog.py +++ b/tests/cli/test_sync_watchdog.py @@ -3,6 +3,8 @@ from __future__ import annotations import importlib +import signal +import sys import time import typing as t @@ -284,17 +286,41 @@ def _raising(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: ) +@pytest.fixture +def 
_fake_sigint_escalation( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Convert ``_exit_on_sigint`` into ``SystemExit(130)`` for in-process tests. + + The real implementation re-raises SIGINT under ``SIG_DFL`` so the + parent shell sees ``WIFSIGNALED(SIGINT)``. Running that in-process + would kill the pytest runner. Unit tests that only want to verify the + control flow through the ``except KeyboardInterrupt`` clauses opt in + to this fixture; the real signal semantics live in a subprocess test + (``tests/cli/test_sync_sigint.py``). + """ + + def _fake() -> t.NoReturn: + raise SystemExit(130) + + monkeypatch.setattr(sync_module, "_exit_on_sigint", _fake) + + def test_sync_handles_keyboard_interrupt_during_config_load( monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str], + _fake_sigint_escalation: None, ) -> None: """Ctrl-C during pre-loop work (e.g. YAML parse) exits cleanly with 130. Regression for the observed traceback where a KeyboardInterrupt raised - inside ``load_configs`` escaped all the way through ``cli.sync`` and out - to the top-level ``sys.exit`` as an unhandled exception, dumping the - entire yaml parser stack to the terminal. The outer ``sync()`` entry - point must catch the interrupt, emit a short notice, and raise + inside ``load_configs`` escaped all the way through ``cli.sync`` and + out to the top-level ``sys.exit`` as an unhandled exception, dumping + the entire yaml parser stack to the terminal. The outer ``sync()`` + entry point must catch the interrupt, emit a short notice, and exit + via ``_exit_on_sigint()``. The ``_fake_sigint_escalation`` fixture + swaps the real ``_exit_on_sigint`` (which re-raises SIGINT under + ``SIG_DFL`` and would kill the test runner) for a plain ``SystemExit(130)``. """ from vcspull.cli.sync import sync as sync_fn @@ -327,3 +353,73 @@ def _raising_load(*_args: t.Any, **_kwargs: t.Any) -> t.Any: assert excinfo.value.code == 130 err = capsys.readouterr().err assert "Interrupted by user." 
in err + + +def test_exit_on_sigint_posix_installs_sig_dfl_and_raises( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``_exit_on_sigint`` installs SIG_IGN, then SIG_DFL, then self-SIGINTs. + + Locks in the order from git's ``sigchain.h:20-34`` pattern and proves + the helper would make the kernel deliver SIGINT to ourselves -- which + is what the parent shell's ``WIFSIGNALED`` check keys off to abort a + ``;`` chain. We can't let ``raise_signal`` actually fire (it would + take pytest down with us), so we intercept both stdlib calls and + assert on the recorded sequence. + """ + monkeypatch.setattr(sys, "platform", "linux") + + calls: list[tuple[str, t.Any, t.Any]] = [] + + def _fake_signal(sig: int, handler: t.Any) -> t.Any: + calls.append(("signal", sig, handler)) + return None + + def _fake_raise(sig: int) -> None: + calls.append(("raise_signal", sig, None)) + # Simulate ``SIG_DFL`` termination by exiting -- the real call + # never returns on POSIX; here we just prove the ordering. + raise SystemExit(130) + + monkeypatch.setattr(signal, "signal", _fake_signal) + monkeypatch.setattr(signal, "raise_signal", _fake_raise) + + with pytest.raises(SystemExit) as excinfo: + sync_module._exit_on_sigint() + + assert excinfo.value.code == 130 + assert calls == [ + ("signal", signal.SIGINT, signal.SIG_IGN), + ("signal", signal.SIGINT, signal.SIG_DFL), + ("raise_signal", signal.SIGINT, None), + ] + + +def test_exit_on_sigint_windows_fallback( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Windows falls back to ``SystemExit(130)`` without touching signals. + + ``signal.raise_signal(SIGINT)`` under ``SIG_DFL`` on Windows raises + ``KeyboardInterrupt`` back at the caller instead of terminating, so + the POSIX re-raise would get us nowhere. The helper must short-circuit + on ``sys.platform == "win32"`` and leave the signal handlers + completely untouched. 
+ """ + monkeypatch.setattr(sys, "platform", "win32") + + def _fail_if_called(sig: int) -> None: + msg = "raise_signal must not be called on the Windows fallback path" + raise AssertionError(msg) + + def _fail_signal(sig: int, handler: t.Any) -> t.Any: + msg = "signal.signal must not be called on the Windows fallback path" + raise AssertionError(msg) + + monkeypatch.setattr(signal, "raise_signal", _fail_if_called) + monkeypatch.setattr(signal, "signal", _fail_signal) + + with pytest.raises(SystemExit) as excinfo: + sync_module._exit_on_sigint() + + assert excinfo.value.code == 130 From 89649a8675ab64464910c651861a03547abfd12a Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 05:23:37 -0500 Subject: [PATCH 11/32] fix(log[verbosity]) Stop libvcs DEBUG leaking through the terminal handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: After shipping the npm-style debug log file, users saw libvcs's chatty ``|git| (repo) head_sha: ...`` / ``show_ref_output: ...`` / ``git_remote_name: ...`` lines on the terminal even at the default ``--log-level=INFO``. Root cause: ``setup_file_logger`` raises the ``vcspull`` and ``libvcs`` loggers to DEBUG so its FileHandler captures the full trace, but ``setup_logger`` left the terminal StreamHandlers with no per-handler level filter -- so a logger-level bump opened the floodgate to the terminal too. Reporter confirmed the same noise across 4 chained ``vcspull sync`` invocations today. what: - Add ``verbosity: int = 0`` kwarg to ``setup_logger`` (sync subcommand passes ``args.verbosity``; everything else gets 0). Drives the libvcs StreamHandler level via ``_libvcs_stream_level``: 0 → WARNING (chatty INFO + DEBUG suppressed); 1 (``-v``) → INFO; 2+ (``-vv``) → DEBUG. - Pin per-handler levels on every vcspull and libvcs StreamHandler. 
The per-handler filter is what protects the terminal from ``setup_file_logger`` raising the logger level to DEBUG; the file handler still receives DEBUG records because libvcs's *logger* level is DEBUG, but the terminal handler drops them via its own filter. - ``cli/__init__.py``: thread ``args.verbosity`` (defaults to 0 when the subcommand doesn't define it) into ``setup_logger``. - Tests: 5 new in ``tests/test_log.py``: default WARNING; ``-v`` INFO; ``-vv`` DEBUG; vcspull StreamHandler pinned to resolved level; ``setup_file_logger`` does NOT change StreamHandler levels (the floodgate regression guard). --- src/vcspull/cli/__init__.py | 9 ++- src/vcspull/log.py | 62 ++++++++++++++++++++- tests/test_log.py | 107 ++++++++++++++++++++++++++++++++++++ 3 files changed, 175 insertions(+), 3 deletions(-) diff --git a/src/vcspull/cli/__init__.py b/src/vcspull/cli/__init__.py index c63efbd1d..b78e93716 100644 --- a/src/vcspull/cli/__init__.py +++ b/src/vcspull/cli/__init__.py @@ -440,7 +440,14 @@ def cli(_args: list[str] | None = None) -> None: ) = subparsers args = parser.parse_args(_args) - setup_logger(log=log, level=args.log_level.upper()) + # ``args.verbosity`` is only set by the sync subcommand; default 0 + # everywhere else. The sync ``-v`` ladder (0 → libvcs WARNING; 1 → INFO; + # 2+ → DEBUG) is documented in :func:`vcspull.log.setup_logger`. + setup_logger( + log=log, + level=args.log_level.upper(), + verbosity=getattr(args, "verbosity", 0), + ) if args.subparser_name is None: parser.print_help() diff --git a/src/vcspull/log.py b/src/vcspull/log.py index 44893d264..00044d5c2 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -54,12 +54,43 @@ def get_cli_logger_names(include_self: bool = True) -> list[str]: return sorted(names) +def _libvcs_stream_level(verbosity: int) -> int: + """Return the libvcs StreamHandler level for a given ``-v`` count. 
+ + Reporter expectation: at the default verbosity, libvcs's chatty + ``log.info("Updating to '%s'.")`` and ``log.debug("git_tag: ...")`` + crumb lines should NOT print on the terminal -- only WARNING+ does. + ``-v`` lifts that to INFO; ``-vv`` to DEBUG. The debug log file + (``setup_file_logger``) is always DEBUG regardless of this knob. + """ + if verbosity >= 2: + return logging.DEBUG + if verbosity >= 1: + return logging.INFO + return logging.WARNING + + def setup_logger( log: logging.Logger | None = None, level: t.Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "INFO", + verbosity: int = 0, ) -> None: - """Configure the vcspull logging hierarchy once and reuse it everywhere.""" + """Configure the vcspull logging hierarchy once and reuse it everywhere. + + The ``level`` argument governs the *vcspull* logger and its CLI + submodules; existing callers and ``--log-level`` users see no change. + + The ``verbosity`` argument (``-v`` count from the sync subcommand) drives + the *libvcs* StreamHandler level via :func:`_libvcs_stream_level`. + Setting it as a *per-handler* filter -- not a logger-level filter -- is + deliberate: ``setup_file_logger`` later raises both loggers to DEBUG so + the debug file captures the full trace, and without per-handler levels + on the StreamHandlers that bump would also open the terminal floodgate. + See ``tests/test_log.py::test_setup_file_logger_does_not_open_stream_floodgate`` + for the regression guard. + """ resolved_level = getattr(logging, level.upper(), logging.INFO) + libvcs_stream_level = _libvcs_stream_level(verbosity) vcspull_logger = logging.getLogger("vcspull") @@ -100,6 +131,17 @@ def setup_logger( handler.stream = sys.stdout handler.setFormatter(formatter) + # Pin a per-handler level on every vcspull StreamHandler. 
This is what + # keeps ``setup_file_logger``'s logger-level bump to DEBUG from leaking + # into the terminal: even when the logger accepts DEBUG records, the + # handler's own filter still drops them. + for handler in existing_handlers: + if isinstance(handler, logging.StreamHandler) and not isinstance( + handler, + logging.FileHandler, + ): + handler.setLevel(resolved_level) + vcspull_logger.setLevel(resolved_level) vcspull_logger.propagate = True @@ -124,7 +166,23 @@ def setup_logger( repo_channel.setFormatter(RepoLogFormatter()) repo_channel.addFilter(RepoFilter()) repo_logger.addHandler(repo_channel) - repo_logger.setLevel(resolved_level) + # Pin the libvcs StreamHandler's per-handler level using the verbosity + # ladder. Default WARNING means the chatty ``|git| (repo) Updating to + # 'main'.`` traffic stays out of the terminal; ``-v`` opens INFO, ``-vv`` + # opens DEBUG. FileHandlers (debug log) keep their own level (DEBUG). + for handler in repo_logger.handlers: + if isinstance(handler, logging.StreamHandler) and not isinstance( + handler, + logging.FileHandler, + ): + handler.setLevel(libvcs_stream_level) + # The libvcs *logger* level is the floor for everything attached to it, + # including any FileHandler we install later. Keep it low enough that + # both the StreamHandler and the eventual FileHandler still see records + # at their own per-handler thresholds. ``min(libvcs_stream_level, DEBUG)`` + # is just DEBUG, which means the file logger always wins -- which is the + # behaviour we want. 
+ repo_logger.setLevel(min(libvcs_stream_level, logging.DEBUG)) repo_logger.propagate = True target_logger = log or vcspull_logger diff --git a/tests/test_log.py b/tests/test_log.py index f114a7d66..eb4a3461e 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -3,6 +3,7 @@ from __future__ import annotations import logging +import pathlib import typing as t import pytest @@ -611,3 +612,109 @@ def test_repo_filter_integration(caplog: LogCaptureFixture) -> None: exc_info=None, ) assert repo_filter.filter(regular_record) is False + + +def test_setup_logger_default_libvcs_stream_level_is_warning() -> None: + """At ``-v`` count 0, the libvcs StreamHandler drops INFO and DEBUG. + + Regression for the user-reported flood of ``|git| (repo) ...`` lines + that should not appear unless the user opts into ``-v``. + """ + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers, "libvcs logger should have a StreamHandler" + assert all(h.level == logging.WARNING for h in stream_handlers) + + +def test_setup_logger_v_lifts_libvcs_stream_to_info() -> None: + """``-v`` (verbosity=1) opens libvcs INFO on the terminal.""" + setup_logger(level="INFO", verbosity=1) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.INFO for h in stream_handlers) + + +def test_setup_logger_vv_lifts_libvcs_stream_to_debug() -> None: + """``-vv`` (verbosity>=2) opens libvcs DEBUG on the terminal.""" + setup_logger(level="INFO", verbosity=2) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not 
isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.DEBUG for h in stream_handlers) + + +def test_setup_logger_pins_vcspull_stream_to_resolved_level() -> None: + """The vcspull StreamHandler gets a per-handler level (not just logger).""" + setup_logger(level="INFO", verbosity=0) + vcspull_logger = logging.getLogger("vcspull") + stream_handlers = [ + h + for h in vcspull_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.INFO for h in stream_handlers) + + +def test_setup_file_logger_does_not_open_stream_floodgate( + tmp_path: pathlib.Path, +) -> None: + """``setup_file_logger`` must not open the terminal StreamHandler floodgate. + + Regression for the floodgate bug: prior versions of ``setup_logger`` + left the StreamHandler with no per-handler level, so when the file + logger bumped the parent logger to DEBUG (so the FileHandler could + capture full traces), the terminal handler also accepted DEBUG and + spammed ``|git| (repo) head_sha: ...`` style lines into the user's + terminal. + """ + import vcspull.log as log_mod + + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers_before = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + levels_before = [h.level for h in stream_handlers_before] + + handler = log_mod.setup_file_logger(tmp_path / "vcspull-debug.log") + try: + # The file handler must accept DEBUG so the npm-style log captures + # the full trace. + assert handler.level == logging.DEBUG + # The libvcs logger may now sit at DEBUG (so DEBUG records reach the + # FileHandler) -- but the StreamHandler levels MUST be unchanged. 
+ stream_handlers_after = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + levels_after = [h.level for h in stream_handlers_after] + assert levels_after == levels_before, ( + f"setup_file_logger must not change StreamHandler levels: " + f"before={levels_before} after={levels_after}" + ) + finally: + log_mod.teardown_file_logger(handler) From 79fd4c3ca5c96ad4ede02f72a76f9eb41c4b0ea1 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 05:39:15 -0500 Subject: [PATCH 12/32] feat(sync[panel]) 3-line collapsing live-trail for git progress output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Reporter wanted the *interesting* git stderr (``From ``, ``* [new branch]``, ``Already on 'main'``, ``Successfully rebased``) visible while a repo is syncing, but NOT lingering in scrollback once the repo completes. Today those lines stack as plain rows that the ``✓ Synced`` summary then has to compete with. tmuxp's ``before_script`` panel gives exactly the desired shape: a fixed-height region above the spinner that rolls under as fresh lines arrive and collapses on completion. Translate that pattern into vcspull's ``SyncStatusIndicator``. what: - Extend ``SyncStatusIndicator`` (``cli/_progress.py``) with an ``output_lines: int = 3`` constructor arg, a ``collections.deque(maxlen=output_lines)`` panel buffer, a ``_panel_visible_lines`` counter for redraw geometry, and a public ``add_output_line(text)`` API. ``0`` hides the panel and falls back to plain ``write()``; ``-1`` is unbounded. - Extend ``_render_tty`` to draw panel rows above the spinner inside one synchronized-output bracket: walk the cursor up by ``_panel_visible_lines`` rows from the previous frame, erase + emit each panel line, then write the spinner cell -- the whole frame is one atomic write so concurrent ``add_output_line`` calls can't tear it. 
- Add ``_clear_block`` for the spinner+panel teardown path used by ``stop_repo`` and ``close``: walks back up the rendered rows and erases each, leaving the cursor on a clean row for the next ``formatter.emit_text("✓ Synced ...")``. ``write()`` (the log diverter path) gains the same up-walk so a logger record clears the panel along with the spinner before printing. - ``start_repo`` clears the panel buffer (fresh trail per repo); ``stop_repo`` clears + collapses; ``close`` falls back to ``_clear_block`` so a Ctrl-C mid-repo doesn't leave the panel sticky. - ``add_output_line`` gates the panel path on ``self._tty``: in non-TTY mode the spinner thread never starts to render the deque, so the method falls back to plain ``write()``. Without this gate the bytes would silently disappear under capsys / pipes / CI. - ``cli/sync.py``: new ``--panel-lines N`` flag with metavar/help; new ``_resolve_panel_lines`` helper (CLI > env > default), mirroring ``_resolve_repo_timeout``; thread the resolved value through ``sync()`` -> ``_sync_impl()`` -> ``build_indicator()``. Switch ``_indicator_progress`` from ``indicator.write(output)`` to ``indicator.add_output_line(output)`` so libvcs's chunked progress callback feeds the deque instead of the raw stream. - ``cli/__init__.py``: forward ``args.panel_lines`` (defaulting to ``None``) into ``sync(...)``. - 11 new tests across ``tests/cli/test_sync_progress.py`` (deque bounds, ``output_lines=0`` fallback, non-TTY fallback, atomic panel render with synchronized-output bracket, ``stop_repo`` collapse, fresh-trail-per-repo) and ``tests/cli/test_sync_watchdog.py`` (5 ``_resolve_panel_lines`` precedence tests). - ``CHANGES`` entry under 1.59.x. 
--- CHANGES | 33 +++++++ src/vcspull/cli/__init__.py | 1 + src/vcspull/cli/_progress.py | 169 ++++++++++++++++++++++++++++---- src/vcspull/cli/sync.py | 62 +++++++++++- tests/cli/test_sync_progress.py | 146 +++++++++++++++++++++++++++ tests/cli/test_sync_watchdog.py | 51 ++++++++++ 6 files changed, 441 insertions(+), 21 deletions(-) diff --git a/CHANGES b/CHANGES index 8e7ac770e..8471ae7e0 100644 --- a/CHANGES +++ b/CHANGES @@ -39,6 +39,39 @@ _Notes on upcoming releases will be added here_ ### New features +#### `vcspull sync`: collapsing 3-line live-trail above the spinner + +Streaming subprocess output (git's `From `, `* [new branch]`, +`Already on 'main'`, `Successfully rebased`, etc.) now flows into a +fixed-height live-trail region above the per-repo spinner. The trail +shows the most recent 3 lines, rolls under as new ones arrive, and +**collapses** when the repo finishes -- leaving only the permanent +`✓ Synced ` line. Modeled on tmuxp's `before_script` +panel. + +Tweak with `--panel-lines N` (`0` hides; `-1` unbounded; default 3) or +the `VCSPULL_PROGRESS_LINES` environment variable. The panel is only +active when the indicator itself is (TTY + human + colour mode); pipes, +`--json`, `--ndjson`, and `--color=never` keep their existing line- +oriented output. + +#### `vcspull`: libvcs is quiet by default; `-v` / `-vv` opens it back up + +The chatty `|git| (repo) head_sha: ...` / `git_remote_name: ...` / +`tag_sha: ...` crumb lines no longer appear at the default verbosity. +Pass `-v` to see libvcs `INFO` (`Updating to 'main'.`, +`Already up-to-date.`); pass `-vv` to see `DEBUG` (the full crumb +trail). The npm/pnpm-style debug log file always captures `DEBUG` +regardless of verbosity. 
+ +Root cause for the regression that prompted this: `setup_file_logger` +raised both the `vcspull` and `libvcs` loggers to `DEBUG` so the +`FileHandler` could capture full traces, but the terminal `StreamHandler` +had no per-handler level filter -- so the bump opened the floodgate to +the terminal too. `setup_logger` now pins explicit per-handler levels +on every `StreamHandler` so file-logger and terminal-logger filters +are independent. + #### `vcspull sync`: Ctrl-C aborts the whole shell chain, not one command `vcspull sync` now terminates with `WIFSIGNALED(SIGINT)` on Ctrl-C instead diff --git a/src/vcspull/cli/__init__.py b/src/vcspull/cli/__init__.py index b78e93716..6cf53da44 100644 --- a/src/vcspull/cli/__init__.py +++ b/src/vcspull/cli/__init__.py @@ -476,6 +476,7 @@ def cli(_args: list[str] | None = None) -> None: timeout=getattr(args, "timeout", None), log_file=getattr(args, "log_file", None), no_log_file=getattr(args, "no_log_file", False), + panel_lines=getattr(args, "panel_lines", None), ) elif args.subparser_name == "list": list_repos( diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index ec573a8bf..5e2aafa4d 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -13,6 +13,7 @@ from __future__ import annotations import atexit +import collections import io import itertools import logging @@ -45,6 +46,13 @@ #: cells fall back to their Braille block without breaking output. _SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" +#: Default panel height for the live trail above the spinner. Same value +#: tmuxp uses for ``before_script`` script output (see +#: ``~/work/python/tmuxp/src/tmuxp/cli/_progress.py:42``). 3 rows is enough +#: to give the user "what just happened" + a couple of lines of context +#: without dominating the screen. ``0`` hides the panel entirely. +_DEFAULT_OUTPUT_LINES = 3 + #: How often to refresh the spinner line in the TTY path. 
_TTY_REFRESH_INTERVAL = 0.1 @@ -91,6 +99,7 @@ def __init__( stream: t.TextIO | None = None, tty: bool | None = None, colors: Colors | None = None, + output_lines: int = _DEFAULT_OUTPUT_LINES, ) -> None: self._stream = stream if stream is not None else sys.stdout # Respect the explicit tty override (tests, ``--color=never``) but @@ -109,6 +118,25 @@ def __init__( # captured streams in tests. self._colors = colors if colors is not None else Colors(ColorMode.NEVER) + # Live-trail panel above the spinner. ``0`` disables the panel and + # ``add_output_line`` falls back to plain ``write()`` semantics. + # ``-1`` means unbounded -- rare; useful when piping ``-vv`` output + # through a pager. + self._output_lines = output_lines + if output_lines > 0: + self._panel_buffer: collections.deque[str] = collections.deque( + maxlen=output_lines, + ) + elif output_lines < 0: + self._panel_buffer = collections.deque() + else: + # Zero-capacity sentinel so ``maxlen == 0`` is the disabled signal. + self._panel_buffer = collections.deque(maxlen=0) + # Number of panel rows physically rendered last tick. The next + # render walks back this many lines (plus one for the spinner) to + # erase the previous frame. + self._panel_visible_lines = 0 + self._lock = threading.Lock() self._thread: threading.Thread | None = None self._stop_event = threading.Event() @@ -132,6 +160,10 @@ def start_repo(self, name: str) -> None: now = time.monotonic() self._repo_started_at = now self._last_heartbeat_at = now + # Fresh trail per repo: drop any leftover panel lines from + # the previous repository so the user only sees activity for + # the active one. 
+ self._panel_buffer.clear() if self._tty: self._ensure_tty_thread() @@ -141,7 +173,7 @@ def start_repo(self, name: str) -> None: self._emit_line(f"syncing {name}") def stop_repo(self) -> None: - """Stop showing any active-repo indicator.""" + """Stop showing any active-repo indicator and collapse the panel.""" if not self._enabled: return @@ -149,9 +181,45 @@ def stop_repo(self) -> None: self._active_repo = None self._repo_started_at = None self._last_heartbeat_at = None + self._panel_buffer.clear() if self._tty: - self._clear_line() + # Erase the panel + spinner row entirely; the main sync loop + # then prints the permanent ``✓ Synced ...`` line on a clean + # row, giving the "trail collapses" effect. + self._clear_block() + + def add_output_line(self, text: str) -> None: + """Push streamed subprocess output into the live trail panel. + + ``text`` may contain multiple newline-separated lines (libvcs's + progress callback delivers chunks, not whole lines). We split, + drop blank-only fragments, and append each non-blank line to the + bounded deque. The spinner thread redraws on its own cadence, so + a chatty subprocess doesn't pace itself against the terminal. + + When the panel is disabled (``output_lines=0``) or the indicator + is disabled (non-TTY, JSON output, ``--color=never``), fall back + to :meth:`write` so the bytes still appear -- just without the + in-place rewriting. + """ + if not text: + return + # The panel only makes sense in a TTY where the spinner thread + # actually renders the deque. When the indicator is disabled + # (JSON / NDJSON / colour=never), or running headless (pipe, CI, + # capsys), or when the panel is explicitly hidden + # (``output_lines=0``), fall through to the plain ``write()`` + # path so the bytes still reach the user / test capture. 
+ if not self._enabled or not self._tty or self._panel_buffer.maxlen == 0: + payload = text if text.endswith("\n") else text + "\n" + self.write(payload) + return + with self._lock: + for line in text.splitlines(): + stripped = line.rstrip() + if stripped: + self._panel_buffer.append(stripped) def repo(self, name: str) -> _RepoContext: """Context manager form of :meth:`start_repo` / :meth:`stop_repo`.""" @@ -178,11 +246,18 @@ def write(self, text: str) -> None: return with self._lock: try: - if self._tty and self._last_line_len: + if self._tty and (self._last_line_len or self._panel_visible_lines): + # Walk back over the panel + spinner before printing + # so the log record lands on a fresh row instead of + # over-writing the trail. The next render redraws the + # whole frame from scratch. self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + for _ in range(self._panel_visible_lines): + self._stream.write("\x1b[1A" + _ERASE_LINE) self._stream.write(text) self._stream.flush() self._last_line_len = 0 + self._panel_visible_lines = 0 except (OSError, ValueError): pass @@ -200,7 +275,11 @@ def close(self) -> None: self._thread = None if thread is not None: thread.join(timeout=1.0) - self._clear_line() + # Erase the whole frame (panel + spinner) on close, not just the + # spinner row -- otherwise leftover panel rows linger in + # scrollback when the indicator is closed mid-repo (e.g. on + # KeyboardInterrupt before ``stop_repo``). + self._clear_block() if self._cursor_hidden: try: self._stream.write(_SHOW_CURSOR) @@ -259,20 +338,47 @@ def _render_tty(self, frame: str, name: str, elapsed: float) -> None: line = f"{coloured_frame} syncing {name} ... {elapsed:4.1f}s" pad = max(self._last_line_len - len(visible), 0) # Holding the lock around the actual write ensures a concurrent - # ``write()`` (called by the stdout diverter on the main thread) - # can't begin mid-frame and end up fighting with the ``\r`` redraw. 
+ # ``write()`` / ``add_output_line()`` (called by the stdout + # diverter on the main thread) can't begin mid-frame and end up + # fighting with the ``\r`` redraw. Also: snapshot the panel buffer + # under the lock so the deque can't mutate mid-render. with self._lock: - try: - self._stream.write( - _SYNC_START + _CURSOR_TO_COL0 + line + (" " * pad) + _SYNC_END, - ) - self._stream.flush() - # Track the *visible* column count (not the string length with - # ANSI codes), so the next frame's padding calculation clears - # exactly the on-screen cells the previous frame occupied. - self._last_line_len = len(visible) - except (OSError, ValueError): - pass + panel_lines = list(self._panel_buffer) + # Cap the rendered panel at its declared height so a concurrent + # ``add_output_line`` racing the deque can't make us write more + # rows than ``stop_repo`` will later erase. + cap = self._panel_buffer.maxlen + if cap and cap > 0 and len(panel_lines) > cap: + panel_lines = panel_lines[-cap:] + new_panel_height = len(panel_lines) + + # Build the frame as a single string so the synchronized-output + # bracket wraps the whole region atomically. Layout: + # + # + # \n + # \n + # ... + # (no trailing newline; cursor stays here) + parts: list[str] = [_SYNC_START] + if self._panel_visible_lines: + # Cursor sits at the spinner row; walk up to the first panel + # row of the previous frame. + parts.append(f"\x1b[{self._panel_visible_lines}A") + parts.append(_CURSOR_TO_COL0) + parts.extend(_ERASE_LINE + panel_line + "\n" for panel_line in panel_lines) + parts.append(_ERASE_LINE + line + (" " * pad)) + parts.append(_SYNC_END) + try: + self._stream.write("".join(parts)) + self._stream.flush() + # Track the *visible* column count (not the string length with + # ANSI codes), so the next frame's padding calculation clears + # exactly the on-screen cells the previous frame occupied. 
+ self._last_line_len = len(visible) + self._panel_visible_lines = new_panel_height + except (OSError, ValueError): + pass def _emit_line(self, line: str) -> None: try: @@ -282,6 +388,7 @@ def _emit_line(self, line: str) -> None: pass def _clear_line(self) -> None: + """Erase only the spinner row (legacy; ``write()`` calls this).""" if not self._tty or self._last_line_len == 0: return try: @@ -291,6 +398,28 @@ def _clear_line(self) -> None: pass self._last_line_len = 0 + def _clear_block(self) -> None: + """Erase the spinner row AND any rendered panel rows above it. + + Used by :meth:`stop_repo` (and :meth:`close`) to collapse the + live trail so the next ``formatter.emit_text("✓ Synced ...")`` + call lands on a clean row. Without this, the panel rows would + stay sticky in scrollback and the ``stop_repo`` -> permanent-line + transition would scroll-leak history. + """ + if not self._tty: + return + try: + # Walk up panel rows + the spinner row, erasing each. + self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + for _ in range(self._panel_visible_lines): + self._stream.write("\x1b[1A" + _ERASE_LINE) + self._stream.flush() + except (OSError, ValueError): + pass + self._last_line_len = 0 + self._panel_visible_lines = 0 + def heartbeat(self) -> None: """Emit a non-TTY heartbeat if enough time has passed. @@ -347,11 +476,16 @@ def build_indicator( stream: t.TextIO | io.StringIO | None = None, tty: bool | None = None, colors: Colors | None = None, + output_lines: int = _DEFAULT_OUTPUT_LINES, ) -> SyncStatusIndicator: """Return a ``SyncStatusIndicator`` configured for the current session. Disabled when output is non-human (JSON/NDJSON) or when colours are turned off -- the latter implies the user wants quiet, machine-friendly output. + + ``output_lines`` controls the live-trail panel above the spinner: + ``3`` (default) shows the last 3 streamed lines; ``0`` hides the + panel entirely; ``-1`` is unbounded. 
""" enabled = human and color != "never" # io.StringIO satisfies the TextIO protocol at runtime; tests use this to @@ -366,4 +500,5 @@ def build_indicator( stream=concrete_stream, tty=tty, colors=resolved_colors, + output_lines=output_lines, ) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index c05aaeeae..24c60604f 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -653,6 +653,19 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP "are skipped and the rest of the batch continues." ), ) + parser.add_argument( + "--panel-lines", + dest="panel_lines", + type=int, + default=None, + metavar="N", + help=( + "live-trail panel height for streaming subprocess output " + "above the spinner (default: 3; 0 hides; -1 unbounded; env: " + "VCSPULL_PROGRESS_LINES). The panel collapses when each repo " + "finishes, leaving only the permanent ✓ Synced line." + ), + ) parser.add_argument( "--log-file", dest="log_file", @@ -719,6 +732,34 @@ def _resolve_repo_timeout(cli_timeout: int | None) -> int: return _DEFAULT_REPO_TIMEOUT_SECONDS +#: Mirrors the default in ``vcspull.cli._progress`` -- duplicated here so +#: the ``--help`` text can interpolate the real number without importing +#: the private ``_DEFAULT_OUTPUT_LINES`` symbol. +_DEFAULT_PANEL_LINES = 3 + + +def _resolve_panel_lines(cli_value: int | None) -> int: + """Resolve the live-trail panel height: CLI flag > env > default. + + The flag accepts ``0`` (hide panel) and ``-1`` (unbounded), so we can't + short-circuit on ``> 0`` like :func:`_resolve_repo_timeout`. We do + accept any integer the user passes via the CLI; only the env var + override is validated, with a warning on garbage values. 
+ """ + if cli_value is not None: + return cli_value + env_value = os.environ.get("VCSPULL_PROGRESS_LINES") + if env_value: + try: + return int(env_value) + except ValueError: + log.warning( + "Ignoring non-integer VCSPULL_PROGRESS_LINES=%s", + env_value, + ) + return _DEFAULT_PANEL_LINES + + class _SyncInterruptedAfterSummary(KeyboardInterrupt): """Internal marker: ``_sync_impl`` already emitted the interrupt summary. @@ -998,12 +1039,14 @@ def sync( timeout: int | None = None, log_file: str | pathlib.Path | None = None, no_log_file: bool = False, + panel_lines: int | None = None, ) -> None: """Entry point for ``vcspull sync``.""" # Prevent git from blocking on credential prompts during batch sync os.environ.setdefault("GIT_TERMINAL_PROMPT", "0") repo_timeout = _resolve_repo_timeout(timeout) + resolved_panel_lines = _resolve_panel_lines(panel_lines) # Set up the debug log file early so libvcs's per-repo activity is also # captured. The path is surfaced to the user only when something failed @@ -1049,6 +1092,7 @@ def sync( repo_timeout=repo_timeout, log_file_path=log_file_path, dry_run=dry_run, + panel_lines=resolved_panel_lines, ) except KeyboardInterrupt as err: # Catch Ctrl-C from ANY phase of the sync -- the repo loop (where @@ -1102,6 +1146,7 @@ def _sync_impl( include_worktrees: bool, repo_timeout: int, log_file_path: pathlib.Path | None, + panel_lines: int, ) -> None: """Run the core body of :func:`sync`. @@ -1282,14 +1327,23 @@ def _sync_impl( } timed_out_repos: list[_TimedOutRepo] = [] - indicator = build_indicator(human=is_human, color=color, colors=colors) + indicator = build_indicator( + human=is_human, + color=color, + colors=colors, + output_lines=panel_lines, + ) progress_callback: ProgressCallback if is_human and indicator.enabled: - # Route libvcs's streaming subprocess output through the indicator so - # it shares the spinner's lock and can't tear the frame mid-redraw. 
+ # Route libvcs's streaming subprocess output into the indicator's + # live-trail panel: ``add_output_line`` appends to a bounded deque + # that the spinner thread renders above the spinner row, then + # collapses on ``stop_repo``. With panel_lines=0 the indicator + # falls back to plain ``write()`` semantics (no panel, no + # collapse). def _indicator_progress(output: str, timestamp: datetime) -> None: - indicator.write(output) + indicator.add_output_line(output) progress_callback = _indicator_progress elif is_human: diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 866ec9c03..09acecce3 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -223,3 +223,149 @@ def test_tty_spinner_colours_the_frame_cell() -> None: # literal "syncing codex" preceded by a reset (the info() helper closes # the colour right after the frame). assert "syncing codex" in out + + +def test_add_output_line_appends_and_bounds_panel_deque() -> None: + """Panel buffer keeps only the last ``output_lines`` entries. + + Reporter's request: a 3-line live trail above the spinner that + rolls under -- not an unbounded scroll. Locks in + ``collections.deque(maxlen=output_lines)`` semantics: the 4th push + drops the oldest line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.add_output_line("first\n") + indicator.add_output_line("second\n") + indicator.add_output_line("third\n") + indicator.add_output_line("fourth\n") + + assert list(indicator._panel_buffer) == ["second", "third", "fourth"] + + +def test_add_output_line_panel_disabled_falls_back_to_write() -> None: + """``output_lines=0`` makes ``add_output_line`` plain ``write()``. 
+ + The 0-panel mode is the escape hatch for users who don't want a + rolling region above the spinner: bytes still reach the terminal, + just on their own row, with the same clear-the-spinner-first + behaviour ``write()`` already provides. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=0, + ) + + indicator.add_output_line("From github.com/foo/bar\n") + + assert "From github.com/foo/bar" in stream.getvalue() + # No panel deque ever populated. + assert indicator._panel_buffer.maxlen == 0 + + +def test_add_output_line_non_tty_writes_directly() -> None: + """Non-TTY mode bypasses the panel entirely. + + The spinner thread never starts in non-TTY mode (pipes, CI, pytest + capture). If ``add_output_line`` buffered into the deque on this + path the bytes would never be rendered. Guard against that + regression: lines must reach the stream synchronously. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=False, + output_lines=3, + ) + + indicator.add_output_line("From github.com/foo/bar\n") + + assert "From github.com/foo/bar" in stream.getvalue() + # Deque stays empty -- the line went straight to the stream. + assert list(indicator._panel_buffer) == [] + + +def test_render_tty_writes_panel_above_spinner_atomically() -> None: + r"""Spinner thread writes panel rows + spinner inside one sync bracket. + + The ANSI ``\x1b[?2026h`` / ``\x1b[?2026l`` pair tells modern + terminals to buffer the whole region and flip it atomically. Locks + in the contract for terminals that honour the bracket + (kitty/iTerm2/WezTerm/recent xterm) and is benign elsewhere. 
+ """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("codex") + indicator.add_output_line("From github.com/openai/codex\n") + indicator.add_output_line(" abc..def main -> origin/main\n") + # Let the spinner thread tick at least once. + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # Both panel lines made it to the stream during a render tick. + assert "From github.com/openai/codex" in out + assert "abc..def" in out + # The synchronized-output bracket wraps each frame. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + + +def test_stop_repo_collapses_the_panel() -> None: + """``stop_repo`` resets the deque + visible-lines counter. + + The "trail collapses on completion" UX requirement: when the + permanent ``✓ Synced`` line is about to print, the panel must be + drained so it doesn't linger in scrollback. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("codex") + indicator.add_output_line("a\nb\nc\n") + # Pretend a render happened. + indicator._panel_visible_lines = 3 + indicator.stop_repo() + + assert list(indicator._panel_buffer) == [] + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_panel_clears_between_repos() -> None: + """``start_repo`` empties any leftover deque from the prior repo.""" + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("alpha") + indicator.add_output_line("alpha-one\n") + indicator.add_output_line("alpha-two\n") + indicator.start_repo("beta") + + # Fresh trail per repo: alpha's panel must NOT bleed into beta's. 
+ assert list(indicator._panel_buffer) == [] + indicator.close() diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py index 9faf7bfda..695cb376e 100644 --- a/tests/cli/test_sync_watchdog.py +++ b/tests/cli/test_sync_watchdog.py @@ -423,3 +423,54 @@ def _fail_signal(sig: int, handler: t.Any) -> t.Any: sync_module._exit_on_sigint() assert excinfo.value.code == 130 + + +def test_resolve_panel_lines_prefers_cli_flag( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``--panel-lines N`` wins over the env var and the default.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.setenv("VCSPULL_PROGRESS_LINES", "9") + assert _resolve_panel_lines(5) == 5 + + +def test_resolve_panel_lines_falls_back_to_env( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Without a flag, ``VCSPULL_PROGRESS_LINES`` is honoured.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.setenv("VCSPULL_PROGRESS_LINES", "5") + assert _resolve_panel_lines(None) == 5 + + +def test_resolve_panel_lines_accepts_zero_and_negative( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``0`` (hide panel) and ``-1`` (unbounded) must round-trip cleanly.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.delenv("VCSPULL_PROGRESS_LINES", raising=False) + assert _resolve_panel_lines(0) == 0 + assert _resolve_panel_lines(-1) == -1 + + +def test_resolve_panel_lines_default_is_three( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Default panel height matches tmuxp's ``DEFAULT_OUTPUT_LINES``.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.delenv("VCSPULL_PROGRESS_LINES", raising=False) + assert _resolve_panel_lines(None) == 3 + + +def test_resolve_panel_lines_ignores_bogus_env_value( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A non-integer env value is logged and ignored; default applies.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.setenv("VCSPULL_PROGRESS_LINES", 
"many") + assert _resolve_panel_lines(None) == 3 From 0eb04f2d2f24306c8ce2bb93cac87a3da164a72f Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 05:44:10 -0500 Subject: [PATCH 13/32] fix(sync[indicator]) Match permanent line: Syncing + cyan name in spinner MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Reporter showed two adjacent lines ✓ Synced fish-shell → ~/study/rust/fish-shell ⠸ syncing flume ... 0.9s and called out two inconsistencies: the in-flight repo name (``flume``) is plain text while the permanent line's name (``fish-shell``) is ``colors.info`` cyan, and the verb is lowercase ``syncing`` while the permanent line uses uppercase ``Synced`` / ``Timed out``. Both lines are status badges and should read as a family. what: - ``_render_tty``: wrap ``name`` in ``self._colors.info(name)`` so the in-flight repo name matches the cyan ``colors.info(repo_name)`` used on the permanent ``✓ Synced `` line in ``cli/sync.py``. - Capitalise the verb: ``Syncing`` (not ``syncing``) in the spinner line, mirroring ``Synced``/``Timed out``. - Non-TTY ``start_repo`` line gets the same capitalisation (``Syncing ``). The ``... still syncing `` heartbeat stays lowercase since it's a sentence continuation after the ellipsis, not a status badge. - Tests updated accordingly: assert ``Syncing codex`` (capital) on the start line; case-insensitive count for the heartbeat-throttle test (``Syncing`` start + ``... still syncing`` heartbeat). ``test_tty_spinner_colours_the_frame_cell`` is renamed to ``test_tty_spinner_colours_the_frame_cell_and_name`` and now also asserts a colour escape immediately precedes the repo name. 
--- src/vcspull/cli/_progress.py | 21 +++++++++------ tests/cli/test_sync_progress.py | 48 ++++++++++++++++++++++----------- 2 files changed, 45 insertions(+), 24 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 5e2aafa4d..523badd05 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -169,8 +169,9 @@ def start_repo(self, name: str) -> None: self._ensure_tty_thread() else: # Emit a single start line so even log-collecting CI shows which - # repo is in flight. - self._emit_line(f"syncing {name}") + # repo is in flight. Capital ``Syncing`` matches the permanent + # ``Synced``/``Timed out`` leading-cap pattern. + self._emit_line(f"Syncing {name}") def stop_repo(self) -> None: """Stop showing any active-repo indicator and collapse the panel.""" @@ -329,13 +330,17 @@ def _tty_loop(self) -> None: self._stop_event.wait(_TTY_REFRESH_INTERVAL) def _render_tty(self, frame: str, name: str, elapsed: float) -> None: - # Colour just the spinner cell -- matches tmuxp's - # ``{self.colors.info(frame)} {msg}`` pattern and keeps the repo - # name / elapsed segment neutral so it doesn't fight the ``✓`` / - # timed-out colouring emitted on the permanent line. + # Colour the spinner cell AND the repo name so the in-flight line + # matches the visual rhythm of the permanent ``✓ Synced `` + # line emitted on completion (which uses ``colors.info(name)``). + # The ``Syncing`` verb stays capitalised for the same reason: on + # screen ``Syncing flume`` reads as a status badge alongside + # ``Synced fish-shell`` and ``Timed out codex`` -- consistent + # leading-cap on the status word. coloured_frame = self._colors.info(frame) - visible = f"{frame} syncing {name} ... {elapsed:4.1f}s" - line = f"{coloured_frame} syncing {name} ... {elapsed:4.1f}s" + coloured_name = self._colors.info(name) + visible = f"{frame} Syncing {name} ... {elapsed:4.1f}s" + line = f"{coloured_frame} Syncing {coloured_name} ... 
{elapsed:4.1f}s" pad = max(self._last_line_len - len(visible), 0) # Holding the lock around the actual write ensures a concurrent # ``write()`` / ``add_output_line()`` (called by the stdout diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 09acecce3..8ae7ba4f8 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -36,7 +36,10 @@ def test_non_tty_emits_once_on_start_and_honours_heartbeat() -> None: indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) indicator.start_repo("codex") - assert "syncing codex" in stream.getvalue() + # Capital ``Syncing`` matches the permanent ``Synced``/``Timed out`` + # leading-cap pattern so the in-flight + completion lines read as a + # consistent badge family. + assert "Syncing codex" in stream.getvalue() # Pretend the repo has been running longer than the heartbeat interval so # the next heartbeat call emits. We go under the hood to avoid a slow @@ -51,10 +54,13 @@ def test_non_tty_emits_once_on_start_and_honours_heartbeat() -> None: indicator.close() out = stream.getvalue() + # The heartbeat line begins with ``...`` so ``still syncing`` stays + # lowercase as a sentence continuation, not a status badge. assert "still syncing codex" in out # Heartbeat must not duplicate the start line; the only lines are - # start + one heartbeat. - assert out.count("syncing codex") == 2 + # start (``Syncing codex``) + one heartbeat (``... still syncing codex``). + # Match case-insensitively to count both spellings together. + assert out.lower().count("syncing codex") == 2 def test_non_tty_heartbeat_throttles_below_interval() -> None: @@ -89,7 +95,10 @@ def test_tty_spinner_renders_active_repo(monkeypatch: pytest.MonkeyPatch) -> Non out = stream.getvalue() # The spinner line mentions the repo and has at least one Braille frame. - assert "syncing codex" in out + # Capital ``Syncing`` matches the permanent ``Synced`` leading-cap + # pattern; locks in that consistency. 
+ assert "Syncing" in out + assert "codex" in out assert any(f in out for f in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") # Cursor hide / show sequences bracket the spinner so tmux / kitty don't # leak a missing cursor after vcspull exits. @@ -191,12 +200,15 @@ def test_write_non_tty_path_passes_through_untouched() -> None: assert "\x1b[2K" not in out -def test_tty_spinner_colours_the_frame_cell() -> None: - """Just the Braille cell is colourised (tmuxp-style), not the whole line. +def test_tty_spinner_colours_the_frame_cell_and_name() -> None: + """Spinner cell AND repo name are colourised; ``Syncing`` is plain. - The repo name and elapsed suffix stay in the terminal's default - foreground so they don't collide with the ``✓ Synced`` / ``- Timed out`` - colouring emitted on the permanent line when a repo completes. + Reporter pointed out that the in-flight line ``Syncing flume`` had a + plain repo name while the permanent ``✓ Synced fish-shell`` line + had ``fish-shell`` in cyan. Match the pattern: colour the Braille + cell (info / cyan) and the repo name (also info / cyan), leaving + the verb and elapsed-time suffix in the terminal's default + foreground. """ from vcspull.cli._colors import ColorMode, Colors @@ -214,15 +226,19 @@ def test_tty_spinner_colours_the_frame_cell() -> None: indicator.close() out = stream.getvalue() - # A colour sequence appears immediately before at least one Braille frame. import re as _re - pattern = _re.compile(r"\x1b\[[0-9;]*m[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]") - assert pattern.search(out), "spinner frame must be wrapped in ANSI colour" - # The repo name itself must not be inside a colour run -- look for the - # literal "syncing codex" preceded by a reset (the info() helper closes - # the colour right after the frame). - assert "syncing codex" in out + # A colour sequence appears immediately before at least one Braille frame. 
+ frame_pattern = _re.compile(r"\x1b\[[0-9;]*m[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]") + assert frame_pattern.search(out), "spinner frame must be wrapped in ANSI colour" + # The repo name is also wrapped in a colour run (matches the + # permanent ``✓ Synced `` style); search for any colour escape + # immediately before ``codex``. + name_pattern = _re.compile(r"\x1b\[[0-9;]*mcodex") + assert name_pattern.search(out), "spinner repo name must be wrapped in ANSI colour" + # The status verb (``Syncing``) stays in the terminal default and is + # capitalised to match ``Synced`` / ``Timed out`` on permanent lines. + assert "Syncing" in out def test_add_output_line_appends_and_bounds_panel_deque() -> None: From d650c9780b248f922b69040904142e7c77fc031e Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 06:31:17 -0500 Subject: [PATCH 14/32] =?UTF-8?q?fix(sync[indicator])=20Atomic=20spinner?= =?UTF-8?q?=E2=86=92permanent=20transition;=20close=20render=20race?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Reporter saw a flicker (and occasionally a doubled ``✓ Synced clap`` line) when a repo finished. Two interacting causes: 1. ``_render_tty`` snapshotted state under ``_lock`` then released the lock before the actual ``stream.write``. A concurrent ``stop_repo`` could clear the screen between the snapshot and the write, letting the spinner thread paint a stale frame on top of the new state. Rich avoids this by holding ``self._lock`` across every refresh tick *and* re-checking ``done.is_set()`` inside the lock (``rich/live.py:_RefreshThread.run``); the same pattern applies here. 2. ``stop_repo`` cleared the spinner, then the caller printed the permanent line in a *separate* stream call. Even on a fast terminal the spinner row blinks to blank between the two writes, which reads as flicker. The atomic transition pattern (single ``\x1b[?2026h ... 
\x1b[?2026l`` block that erases the panel and replaces the spinner row with the permanent line) eliminates the flash. what: - ``_render_tty``: hold ``_lock`` from snapshot through the stream write. Re-check ``self._active_repo`` under the lock at the top -- if it changed (``stop_repo`` ran), skip the tick entirely so a stale frame never lands on screen. - ``stop_repo`` gains an optional ``final_line: str | None`` argument and now returns ``bool``. When the indicator is actively rendering on a TTY *and* a ``final_line`` is given, build one atomic ANSI block under the lock that walks up the panel rows, erases each going down, and replaces the spinner row with ``final_line + \n``. Returns ``True`` to tell the caller "I owned the print; skip your ``formatter.emit_text``" -- which is what kills the doubled ``✓ Synced clap`` artefact. - Headless / disabled-indicator paths still return ``False`` so the caller's ``formatter.emit_text`` keeps running unchanged. - ``cli/sync.py``: replace the ``with indicator.repo(...)`` context manager (which fired ``stop_repo()`` with no args before the outcome was known) with manual ``start_repo`` + post-outcome ``stop_repo(final_line=permanent if is_human else None)``. Skip the separate ``formatter.emit_text(permanent)`` when ``stop_repo`` reported it owned the print. Applies uniformly to the ``synced``, ``failed``, and ``timed_out`` branches. - ``cli/sync.py`` exception path: any ``BaseException`` (incl. the KeyboardInterrupt the SIGINT handler relies on) tears the indicator down with ``stop_repo()`` (no replacement line) and re-raises so the surrounding ``except KeyboardInterrupt`` in ``_sync_impl`` still owns the partial-summary print. 
- 4 new tests in ``tests/cli/test_sync_progress.py``: ``stop_repo(final_line=...)`` writes inside one synchronized-output bracket and returns ``True``; the no-arg path returns ``False``; non-TTY stays caller-owned even with a ``final_line``; ``_render_tty`` skips writes when the active repo was cleared (the lock-protected stale-tick guard). --- src/vcspull/cli/_progress.py | 163 ++++++++++++++++++++++---------- src/vcspull/cli/sync.py | 54 ++++++++--- tests/cli/test_sync_progress.py | 121 ++++++++++++++++++++++++ 3 files changed, 278 insertions(+), 60 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 523badd05..116c04913 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -173,22 +173,79 @@ def start_repo(self, name: str) -> None: # ``Synced``/``Timed out`` leading-cap pattern. self._emit_line(f"Syncing {name}") - def stop_repo(self) -> None: - """Stop showing any active-repo indicator and collapse the panel.""" + def stop_repo(self, final_line: str | None = None) -> bool: + """Stop the active-repo indicator and collapse the panel. + + When ``final_line`` is provided AND the indicator is actively + rendering on a TTY, write ``final_line`` *as the spinner-erase* + in one atomic ANSI write (under the same lock the render loop + holds). The spinner row morphs in place into the permanent + line; panel rows above are erased; cursor advances to the row + below. Returns ``True`` when the line was written by the + indicator, so the caller can skip its own ``formatter.emit_text`` + and avoid the double-print artefact reporters have seen. + + Returns ``False`` when: + - the indicator is disabled (``--color=never``, JSON output); + - we're running headless (non-TTY pipe / CI / capsys); + - no ``final_line`` was provided (caller will emit_text itself). + + In every False case the caller is responsible for printing its + own permanent line through the formatter as before. 
+ """ if not self._enabled: - return + return False with self._lock: self._active_repo = None self._repo_started_at = None self._last_heartbeat_at = None + panel_visible = self._panel_visible_lines + had_render = self._last_line_len > 0 or panel_visible > 0 self._panel_buffer.clear() - if self._tty: - # Erase the panel + spinner row entirely; the main sync loop - # then prints the permanent ``✓ Synced ...`` line on a clean - # row, giving the "trail collapses" effect. - self._clear_block() + if not self._tty: + # Headless: nothing was drawn to erase, and the caller + # still needs to surface ``final_line`` themselves. + self._panel_visible_lines = 0 + self._last_line_len = 0 + return False + + # Build the atomic clear (and optional replacement) under the + # lock so a concurrent spinner tick can't squeeze a stale + # frame in. Layout: + # + # + # + # x panel_visible # erase panel rows + # # spinner row replaced + # + # When ``final_line`` is None we still walk + erase so the + # caller's ``formatter.emit_text`` lands on a fresh row, but + # we do NOT advance the cursor with a trailing ``\n`` -- the + # caller's own ``\n`` (via ``print``) does that. + parts: list[str] = [_SYNC_START] + if had_render: + if panel_visible > 0: + parts.append(f"\x1b[{panel_visible}A") + parts.append(_CURSOR_TO_COL0) + # Erase panel rows row-by-row, descending. + parts.extend(_ERASE_LINE + "\n" for _ in range(panel_visible)) + # We're now at the original spinner row. + if final_line is not None: + parts.append(_ERASE_LINE + final_line + "\n") + else: + parts.append(_ERASE_LINE) + parts.append(_SYNC_END) + try: + self._stream.write("".join(parts)) + self._stream.flush() + self._last_line_len = 0 + self._panel_visible_lines = 0 + except (OSError, ValueError): + return False + + return final_line is not None and had_render def add_output_line(self, text: str) -> None: """Push streamed subprocess output into the live trail panel. 
@@ -341,49 +398,57 @@ def _render_tty(self, frame: str, name: str, elapsed: float) -> None: coloured_name = self._colors.info(name) visible = f"{frame} Syncing {name} ... {elapsed:4.1f}s" line = f"{coloured_frame} Syncing {coloured_name} ... {elapsed:4.1f}s" - pad = max(self._last_line_len - len(visible), 0) - # Holding the lock around the actual write ensures a concurrent - # ``write()`` / ``add_output_line()`` (called by the stdout - # diverter on the main thread) can't begin mid-frame and end up - # fighting with the ``\r`` redraw. Also: snapshot the panel buffer - # under the lock so the deque can't mutate mid-render. + + # Hold the lock for the entire render -- snapshot through write -- + # so a concurrent ``stop_repo`` can't tear the frame mid-flight. + # Rich does the same (see ``rich/live.py`` ``_RefreshThread.run`` + # which acquires ``self.live._lock`` around every refresh and + # re-checks ``done.is_set()`` inside the lock). The race we're + # closing: spinner thread captures ``name`` outside the lock, then + # main thread enters ``stop_repo`` and clears the screen, then the + # spinner thread (still mid-build) writes a stale frame on top of + # the new state -- producing the duplicate ``✓ Synced `` + # artefact reporters have seen. with self._lock: + # Re-check the active repo under the lock. If ``stop_repo`` + # already cleared it, this tick is stale; skip the write so + # the next tick starts from a clean state. + if self._active_repo != name: + return panel_lines = list(self._panel_buffer) - # Cap the rendered panel at its declared height so a concurrent - # ``add_output_line`` racing the deque can't make us write more - # rows than ``stop_repo`` will later erase. - cap = self._panel_buffer.maxlen - if cap and cap > 0 and len(panel_lines) > cap: - panel_lines = panel_lines[-cap:] - new_panel_height = len(panel_lines) - - # Build the frame as a single string so the synchronized-output - # bracket wraps the whole region atomically. 
Layout: - # - # - # \n - # \n - # ... - # (no trailing newline; cursor stays here) - parts: list[str] = [_SYNC_START] - if self._panel_visible_lines: - # Cursor sits at the spinner row; walk up to the first panel - # row of the previous frame. - parts.append(f"\x1b[{self._panel_visible_lines}A") - parts.append(_CURSOR_TO_COL0) - parts.extend(_ERASE_LINE + panel_line + "\n" for panel_line in panel_lines) - parts.append(_ERASE_LINE + line + (" " * pad)) - parts.append(_SYNC_END) - try: - self._stream.write("".join(parts)) - self._stream.flush() - # Track the *visible* column count (not the string length with - # ANSI codes), so the next frame's padding calculation clears - # exactly the on-screen cells the previous frame occupied. - self._last_line_len = len(visible) - self._panel_visible_lines = new_panel_height - except (OSError, ValueError): - pass + cap = self._panel_buffer.maxlen + if cap and cap > 0 and len(panel_lines) > cap: + panel_lines = panel_lines[-cap:] + new_panel_height = len(panel_lines) + pad = max(self._last_line_len - len(visible), 0) + + # Build the frame as a single string so the synchronized-output + # bracket wraps the whole region atomically. Layout: + # + # + # \n + # \n + # ... + # (no trailing newline; cursor stays) + parts: list[str] = [_SYNC_START] + if self._panel_visible_lines: + # Cursor sits at the spinner row; walk up to the first + # panel row of the previous frame. + parts.append(f"\x1b[{self._panel_visible_lines}A") + parts.append(_CURSOR_TO_COL0) + parts.extend(_ERASE_LINE + panel_line + "\n" for panel_line in panel_lines) + parts.append(_ERASE_LINE + line + (" " * pad)) + parts.append(_SYNC_END) + try: + self._stream.write("".join(parts)) + self._stream.flush() + # Track the *visible* column count (not the string length + # with ANSI codes), so the next frame's padding clears + # exactly the on-screen cells the previous frame occupied. 
+ self._last_line_len = len(visible) + self._panel_visible_lines = new_panel_height + except (OSError, ValueError): + pass def _emit_line(self, line: str) -> None: try: diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 24c60604f..6f6b44ebf 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -1468,13 +1468,30 @@ def _run_sync_loop( "workspace_root": str(workspace_label), } - with indicator.repo(repo_name): + # Manual ``start_repo`` / ``stop_repo`` instead of the + # ``with indicator.repo(...)`` context manager: we want + # ``stop_repo`` to receive the permanent line so the spinner + # collapse + completion print happen as ONE atomic ANSI write + # under the lock. The ``with`` form fires ``stop_repo()`` (no + # args) on exit, before we know the outcome -- which means the + # spinner clears, then the formatter writes the permanent line + # in a separate stream call. That two-step is the source of the + # flicker reporters have called out. + indicator.start_repo(repo_name) + try: outcome = _sync_repo_with_watchdog( repo, progress_callback=progress_callback, timeout=repo_timeout, is_human=is_human, ) + except BaseException: + # Any exception (KeyboardInterrupt, runtime crash) tears the + # indicator down with no replacement line; the surrounding + # ``except KeyboardInterrupt`` in ``_sync_impl`` still owns + # the partial-summary print. 
+ indicator.stop_repo() + raise if outcome.status == "timed_out": summary["timed_out"] += 1 @@ -1491,12 +1508,17 @@ def _run_sync_loop( event["duration_ms"] = int(outcome.duration * 1000) if outcome.captured_output: event["details"] = outcome.captured_output.strip() - formatter.emit(event) - formatter.emit_text( + permanent = ( f"{colors.warning('-')} Timed out {colors.info(repo_name)} " f"after {colors.warning(f'{outcome.duration:.1f}s')} " - f"{colors.muted('->')} {display_repo_path}", + f"{colors.muted('->')} {display_repo_path}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, ) + formatter.emit(event) + if not wrote_final: + formatter.emit_text(permanent) if exit_on_error: _emit_rerun_recipe( formatter, @@ -1524,6 +1546,13 @@ def _run_sync_loop( event["error"] = err_msg if outcome.captured_output: event["details"] = outcome.captured_output.strip() + permanent = ( + f"{colors.error('✗')} Failed syncing {colors.info(repo_name)}: " + f"{colors.error(err_msg)}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, + ) formatter.emit(event) if is_human: log.debug("Failed syncing %s", repo_name) @@ -1531,10 +1560,8 @@ def _run_sync_loop( import traceback traceback.print_exception(type(err), err, err.__traceback__) - formatter.emit_text( - f"{colors.error('✗')} Failed syncing {colors.info(repo_name)}: " - f"{colors.error(err_msg)}", - ) + if not wrote_final: + formatter.emit_text(permanent) if exit_on_error: _emit_summary(formatter, colors, summary) formatter.finalize() @@ -1545,11 +1572,16 @@ def _run_sync_loop( summary["synced"] += 1 event["status"] = "synced" - formatter.emit(event) - formatter.emit_text( + permanent = ( f"{colors.success('✓')} Synced {colors.info(repo_name)} " - f"{colors.muted('→')} {display_repo_path}", + f"{colors.muted('→')} {display_repo_path}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, ) + formatter.emit(event) + if not 
wrote_final: + formatter.emit_text(permanent) # Sync worktrees if enabled and configured worktrees_config = repo.get("worktrees") diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 8ae7ba4f8..6084755ba 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -385,3 +385,124 @@ def test_panel_clears_between_repos() -> None: # Fresh trail per repo: alpha's panel must NOT bleed into beta's. assert list(indicator._panel_buffer) == [] indicator.close() + + +def test_stop_repo_with_final_line_writes_atomically_and_returns_true() -> None: + r"""``stop_repo(final_line=...)`` writes one atomic clear+replace. + + Regression for the user-reported flicker where ``Syncing flume`` + transitioning to ``✓ Synced flume`` flashed through a blank state. + Today's fix: ``stop_repo`` accepts the permanent line and emits the + panel-erase + spinner-replacement inside a single ``\x1b[?2026h ... + \x1b[?2026l`` synchronized-output bracket so the terminal flips the + whole region atomically. + + The method returns ``True`` to tell the caller it has owned the + print, so the sync loop skips its own ``formatter.emit_text`` and + avoids the double-emit that produced ``✓ Synced clap`` on two rows. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + # Pretend a render happened so ``had_render`` is true. + indicator._last_line_len = 25 + indicator._panel_visible_lines = 2 + + wrote = indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + assert wrote is True + out = stream.getvalue() + # The atomic write block contains both the synchronized-output + # bracket and the permanent line. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + assert "✓ Synced clap" in out + # Internal counters reset so the next ``start_repo`` starts clean. 
+ assert indicator._panel_visible_lines == 0 + assert indicator._last_line_len == 0 + indicator.close() + + +def test_stop_repo_without_final_line_returns_false() -> None: + """No ``final_line`` means caller still owns the permanent print. + + The non-atomic path stays available for callers that don't have a + completion line to provide -- e.g. an early teardown on + KeyboardInterrupt. ``stop_repo()`` returns False so the existing + ``formatter.emit_text`` flow keeps working. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 10 + indicator._panel_visible_lines = 1 + + wrote = indicator.stop_repo() + assert wrote is False + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_stop_repo_non_tty_always_returns_false() -> None: + """Non-TTY indicators leave printing to the caller's formatter. + + In headless mode (pipes, CI, capsys), nothing was drawn to erase + and the spinner thread never started; the caller is responsible + for printing the permanent line via ``formatter.emit_text``. Even + when ``final_line`` is passed in, ``stop_repo`` returns False so + the caller fires its own emit_text and the headless capture + receives the line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=False, + output_lines=3, + ) + + indicator.start_repo("clap") + wrote = indicator.stop_repo(final_line="✓ Synced clap → ~/") + assert wrote is False + # The non-TTY path emitted the start line on ``start_repo``; it + # does NOT emit the permanent line here -- the caller owns that. + assert "Syncing clap" in stream.getvalue() + assert "✓ Synced clap" not in stream.getvalue() + + +def test_render_tty_skips_stale_tick_when_active_repo_changed() -> None: + """``_render_tty`` must skip writes for a stale (now-cleared) repo. 
+ + Reproduces the race where the spinner thread captured ``name`` = + "clap" before ``stop_repo`` cleared it; the prior render path then + wrote a stale frame on top of the new state. The fix: re-check + ``self._active_repo`` under the lock at the start of ``_render_tty``. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + # Pretend the spinner thread captured ``name="clap"`` and is about + # to render, but ``stop_repo`` has just cleared ``_active_repo``. + indicator._active_repo = None + before = stream.getvalue() + indicator._render_tty(frame="⠋", name="clap", elapsed=0.5) + after = stream.getvalue() + # Stale tick: nothing should land on the stream. + assert after == before From ae7bb7a81d5c7613f9df8b9d8760113adea94975 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 11:55:54 -0500 Subject: [PATCH 15/32] fix(sync[indicator]) Collapse panel rows via DL instead of erase MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Reporter saw blank-line gaps between successive ``✓ Synced ...`` lines when chaining ``vcspull sync --workspace ~/study/otel/ --all; vcspull sync --workspace ~/study/rust/ --all``. The panel feature added in 79fd4c3c promised "collapses when each repo finishes -- leaving only the permanent ``✓ Synced `` line", but the implementation in d650c978 only erased panel-row content with ``\x1b[2K\n``. ``\x1b[2K`` clears the row's visible characters; the trailing ``\n`` advances the cursor down. After ``panel_visible`` iterations the cursor is back at the spinner row, but the panel rows above are blank-but-present in scrollback. Replacing the spinner with the permanent line then leaves orphan blank rows above it; the count varies per repo (whatever the previous repo's panel height was at completion time), exactly matching the reporter's "variable gaps" symptom. Fix: use DL (``\x1b[Pn M``, Delete Line) instead. 
After walking up to the first panel row, ``\x1b[{panel_visible}M`` deletes those rows from the buffer; subsequent rows scroll up to fill, so the spinner row's content lands at the cursor row. The follow-up ``erase + final_line + \n`` morphs that row into the permanent line in place. No orphan blanks. DL is a VT100 sequence supported by every terminal that implements the synchronized-output bracket already in use here. what: - ``stop_repo`` atomic transition (``_progress.py``): replace the ``\x1b[2K\n`` x panel_visible loop with a single ``\x1b[{panel_visible}M`` after the upward walk. Restructure the conditional so DL only fires when ``panel_visible > 0``. - ``_clear_block`` (``_progress.py``): same defect in the ``close()`` teardown path -- walked up + erased per row, leaving blanks. Replace with a single DL of ``panel + 1`` rows after walking up to the first panel row. The close path is preventive (normal operation hits ``stop_repo`` first and zeroes ``_panel_visible_lines``) but the parallel pattern was wrong for the same reason and would surface under a KeyboardInterrupt close-before-stop_repo. - 4 new regression tests in ``tests/cli/test_sync_progress.py``: DL escape lands when ``_panel_visible_lines > 0`` (with negative assertion that the regressed ``\x1b[2K\n`` per-row pattern is NOT present); DL omitted when zero panel rows; ``stop_repo()`` without ``final_line`` still DL's the panel; ``_clear_block`` deletes panel + spinner via one DL. --- src/vcspull/cli/_progress.py | 43 +++++----- tests/cli/test_sync_progress.py | 135 ++++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+), 18 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 116c04913..49f199ffe 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -217,21 +217,24 @@ def stop_repo(self, final_line: str | None = None) -> bool: # # # - # x panel_visible # erase panel rows - # # spinner row replaced + #
# physically remove panel rows + # # spinner row replaced # - # When ``final_line`` is None we still walk + erase so the - # caller's ``formatter.emit_text`` lands on a fresh row, but - # we do NOT advance the cursor with a trailing ``\n`` -- the - # caller's own ``\n`` (via ``print``) does that. + # DL (``\x1b[Pn M``) deletes lines and scrolls subsequent + # rows up to fill, so the spinner row's content lands at the + # cursor row (where the first panel row used to be) and we + # erase+replace it with ``final_line``. The earlier + # ``\x1b[2K\n`` per-panel-row pattern only erased *content*; + # the rows stayed in scrollback as blanks, leaving visible + # gaps between successive ``✓ Synced ...`` lines. parts: list[str] = [_SYNC_START] if had_render: if panel_visible > 0: parts.append(f"\x1b[{panel_visible}A") - parts.append(_CURSOR_TO_COL0) - # Erase panel rows row-by-row, descending. - parts.extend(_ERASE_LINE + "\n" for _ in range(panel_visible)) - # We're now at the original spinner row. + parts.append(_CURSOR_TO_COL0) + parts.append(f"\x1b[{panel_visible}M") + else: + parts.append(_CURSOR_TO_COL0) if final_line is not None: parts.append(_ERASE_LINE + final_line + "\n") else: @@ -469,21 +472,25 @@ def _clear_line(self) -> None: self._last_line_len = 0 def _clear_block(self) -> None: - """Erase the spinner row AND any rendered panel rows above it. + r"""Delete the spinner row AND any rendered panel rows above it. Used by :meth:`stop_repo` (and :meth:`close`) to collapse the live trail so the next ``formatter.emit_text("✓ Synced ...")`` - call lands on a clean row. Without this, the panel rows would - stay sticky in scrollback and the ``stop_repo`` -> permanent-line - transition would scroll-leak history. + call lands on a clean row. 
We use DL (``\x1b[Pn M``) to *remove* + the rows from the buffer; an earlier implementation walked up + and only erased their content with ``\x1b[2K``, which left the + rows in scrollback as blanks and produced visible gaps between + successive permanent lines. """ if not self._tty: return + total = self._panel_visible_lines + 1 # panel rows + spinner row try: - # Walk up panel rows + the spinner row, erasing each. - self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) - for _ in range(self._panel_visible_lines): - self._stream.write("\x1b[1A" + _ERASE_LINE) + if self._panel_visible_lines: + # Walk up to the first panel row before deleting so DL + # consumes the panel + spinner together. + self._stream.write(f"\x1b[{self._panel_visible_lines}A") + self._stream.write(f"{_CURSOR_TO_COL0}\x1b[{total}M") self._stream.flush() except (OSError, ValueError): pass diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py index 6084755ba..af92f5aa1 100644 --- a/tests/cli/test_sync_progress.py +++ b/tests/cli/test_sync_progress.py @@ -482,6 +482,141 @@ def test_stop_repo_non_tty_always_returns_false() -> None: assert "✓ Synced clap" not in stream.getvalue() +def test_stop_repo_collapses_panel_rows_via_dl() -> None: + r"""``stop_repo(final_line=...)`` deletes panel rows with DL, not erase. + + Regression: an earlier implementation walked up to the first panel + row and emitted ``\x1b[2K\n`` per row to "clear" the panel. That + only erased the row content -- the rows stayed in the buffer as + blanks, so successive ``✓ Synced ...`` lines were separated by + visible gaps proportional to whatever the previous repo's panel + height was at completion time (reported when chaining + ``vcspull sync ...; vcspull sync ...``). 
+ + Today's fix: after walking up, emit ``\x1b[Pn M`` (DL) once to + physically delete the panel rows from the buffer; the spinner + row's content scrolls up to the cursor and the subsequent + ``erase + final_line + \n`` morphs that row into the permanent + line in place. This test asserts the DL escape lands in the + output AND that the regressed ``\x1b[2K\n`` per-row pattern is + NOT present. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 3 + + indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + out = stream.getvalue() + # Walk up panel_visible rows, then DL panel_visible rows. + assert "\x1b[3A" in out + assert "\x1b[3M" in out + # The regressed pattern: ``\x1b[2K\n`` per panel row would appear + # at least 3 times back-to-back. The new path uses DL once. + assert out.count("\x1b[2K\n") <= 1, ( + "panel rows are being erased+newlined instead of DL'd; this " + "leaves blanks in scrollback" + ) + indicator.close() + + +def test_stop_repo_no_panel_skips_dl() -> None: + r"""``stop_repo`` omits DL when there are no panel rows to delete. + + With ``_panel_visible_lines == 0`` only the spinner row needs to + be replaced; emitting ``\x1b[0M`` is meaningless (DL of zero + rows) and even ``\x1b[A`` would walk above the spinner, so the + walk-up + DL pair is skipped entirely. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 0 + + indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + out = stream.getvalue() + # No panel-collapse machinery should appear when there's nothing + # to collapse: no upward walk, no DL. 
+ assert "\x1b[0A" not in out + assert "\x1b[0M" not in out + # The spinner-row replacement still lands. + assert "✓ Synced clap" in out + indicator.close() + + +def test_stop_repo_panel_with_no_final_line_still_collapses() -> None: + r"""``stop_repo()`` (no ``final_line``) still collapses panel rows. + + The early-teardown path (e.g. KeyboardInterrupt before the outcome + is known) calls ``stop_repo()`` with no replacement line. The + panel must still be DL'd so the caller's subsequent + ``formatter.emit_text`` lands without a stack of blank rows above + it. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 2 + + wrote = indicator.stop_repo() + + out = stream.getvalue() + assert wrote is False # caller still owns the permanent print + assert "\x1b[2A" in out + assert "\x1b[2M" in out + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_clear_block_uses_dl_to_remove_panel_and_spinner() -> None: + r"""``_clear_block`` deletes panel rows + spinner via a single DL. + + Used from ``close()`` (and historically from teardown paths). + Walks up to the first panel row, then DLs ``panel + 1`` rows so + the entire frame is removed from the buffer in one operation. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator._panel_visible_lines = 3 + indicator._clear_block() + + out = stream.getvalue() + # Walk up panel rows, then DL panel + spinner together. + assert "\x1b[3A" in out + assert "\x1b[4M" in out + assert indicator._panel_visible_lines == 0 + indicator.close() + + def test_render_tty_skips_stale_tick_when_active_repo_changed() -> None: """``_render_tty`` must skip writes for a stale (now-cleared) repo. 
From 4986a07ec84e68023bcd5fe8803853f8d95b03f5 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 14:36:31 -0500 Subject: [PATCH 16/32] deps(libvcs) Pin to released 0.40.0 stable why: libvcs 0.40.0 was tagged and published to PyPI (2026-04-25T18:59Z, sha256 5f3340b6...). The earlier ``>=0.40.0a0`` pin was a placeholder while the prerelease was being prepared; with the stable release available, drop the ``a0`` suffix so the lower bound matches the actual minimum-supported version. what: - ``pyproject.toml``: ``libvcs>=0.40.0a0,<0.41.0`` -> ``libvcs>=0.40.0,<0.41.0``. - ``uv.lock``: relock to libvcs 0.40.0 from PyPI (sdist + wheel hashes match the published release). The lock carries ``[options.exclude-newer-package] libvcs = "2026-04-26..."`` to override the global 3-day cooldown configured in ``~/.config/uv/uv.toml`` -- libvcs 0.40.0 was published hours before this lock and would otherwise have been filtered out. --- pyproject.toml | 2 +- uv.lock | 15 +++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5adcca451..dce09e5bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ keywords = [ ] homepage = "https://vcspull.git-pull.com" dependencies = [ - "libvcs>=0.40.0a0,<0.41.0", + "libvcs>=0.40.0,<0.41.0", "colorama>=0.3.9", "PyYAML>=6.0" ] diff --git a/uv.lock b/uv.lock index 3d736f607..88f9c9fde 100644 --- a/uv.lock +++ b/uv.lock @@ -6,6 +6,13 @@ resolution-markers = [ "python_full_version < '3.11'", ] +[options] +exclude-newer = "2026-04-22T19:30:48.995337295Z" +exclude-newer-span = "P3D" + +[options.exclude-newer-package] +libvcs = "2026-04-26T00:00:00Z" + [[package]] name = "accessible-pygments" version = "0.0.5" @@ -552,14 +559,14 @@ wheels = [ [[package]] name = "libvcs" -version = "0.39.0" +version = "0.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/eb/a4/66ccfdb47f225c60e254476e4dfec2cb248ff1ee018bccce05a01fd5ae28/libvcs-0.39.0.tar.gz", hash = "sha256:2c175c98cef75f3228e8c859f4af6d22b6cfe5843a2a29317e35c3745fd86a45", size = 597772, upload-time = "2026-02-07T19:31:35.307Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/a1/080912d310e86f8f597ab6033e5fd4d3e798aac3f5ec5cd992cfbffd0ecd/libvcs-0.40.0.tar.gz", hash = "sha256:a6dcfce94d3a7ed7212a5830ebe90f3383e7038606fbf9cd39db35d87fb02ea8", size = 617387, upload-time = "2026-04-25T18:59:13.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6f/60/960cc0fa9b7df6dd9e6f89340629a902f068d25406f8828369d2bcac20f4/libvcs-0.39.0-py3-none-any.whl", hash = "sha256:5b20514e57b53c02ce07d57c708b4b7828a632479b9ea393ebeba31e0377a5f0", size = 96997, upload-time = "2026-02-07T19:31:34.021Z" }, + { url = "https://files.pythonhosted.org/packages/cb/eb/ece616f90f807de6fd77e1680daa4fd11913b5ec7c6a77c09fe372675eda/libvcs-0.40.0-py3-none-any.whl", hash = "sha256:5f3340b64211a971d033b7358a669a9a68bb7e800f3413dd8cef668ec1226c5a", size = 101669, upload-time = "2026-04-25T18:59:11.847Z" }, ] [[package]] @@ -1671,7 +1678,7 @@ typings = [ [package.metadata] requires-dist = [ { name = "colorama", specifier = ">=0.3.9" }, - { name = "libvcs", specifier = "~=0.39.0" }, + { name = "libvcs", specifier = ">=0.40.0,<0.41.0" }, { name = "pyyaml", specifier = ">=6.0" }, ] From fa654a4c00da01a6966487dc9154c3a9f77db479 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 14:36:37 -0500 Subject: [PATCH 17/32] docs(CHANGES) Drop ``a0`` from the libvcs dependency-bump line why: The ``### Dependencies`` entry in the unreleased section was written when the pin was ``>=0.40.0a0,<0.41.0``. With libvcs 0.40.0 released and the pin tightened to ``>=0.40.0,<0.41.0``, the changelog text now describes a stale bound; align it with reality so users reading the next release notes see the actual lower bound. 
what: - ``CHANGES``: ``Bump libvcs to >=0.40.0a0,<0.41.0...`` -> ``Bump libvcs to >=0.40.0,<0.41.0...``. --- CHANGES | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES b/CHANGES index 8471ae7e0..407776388 100644 --- a/CHANGES +++ b/CHANGES @@ -122,7 +122,7 @@ can be post-mortemed even after the CLI has moved on. ### Dependencies -- Bump `libvcs` to `>=0.40.0a0,<0.41.0` for timeout + fast `set_remotes` fixes +- Bump `libvcs` to `>=0.40.0,<0.41.0` for timeout + fast `set_remotes` fixes that let `vcspull sync` recover from a single stuck repository without hanging the whole batch. From 832b35fb12afb830e1e08d854fa6f5387e475d70 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:01:28 -0500 Subject: [PATCH 18/32] fix(sync) Lazy-format the no-repo-found debug message why: ``log.debug(NO_REPOS_FOR_TERM_MSG.format(name=search_term))`` runs ``str.format`` even when DEBUG is filtered out -- wasted work on the hot ``vcspull sync`` path that resolves search terms for every repo pattern. CLAUDE.md mandates lazy ``%``-style log args precisely because eager formatting defeats the level filter. what: - ``cli/sync.py``: rewrite the debug call as ``log.debug('No repo found in config(s) for "%s"', search_term)``. - The user-facing ``formatter.emit_text`` f-string a few lines below is *not* a log call; leave it alone. 
--- src/vcspull/cli/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 6f6b44ebf..0afe45776 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -1206,7 +1206,7 @@ def _sync_impl( found = filter_repos(configs, path=path, vcs_url=vcs_url, name=name) if not found: search_term = name or path or vcs_url or repo_pattern - log.debug(NO_REPOS_FOR_TERM_MSG.format(name=search_term)) + log.debug('No repo found in config(s) for "%s"', search_term) if not summary_only: formatter.emit_text( f"{colors.error('✗')} " From c8247fb2af6a0bedafb2df9b6447b71afb6d9f1d Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:05:31 -0500 Subject: [PATCH 19/32] fix(sync) Swap StreamHandler.stream under handler.lock why: ``_install_indicator_log_diverter`` rebound ``handler.stream`` with a plain attribute assignment while the spinner thread and the worker thread were both emitting libvcs records through the same handler. ``Handler.handle`` (stdlib ``logging/__init__.py:1011``) takes ``self.lock`` around every ``emit()`` precisely so that modifications to the handler's mutable state can serialise with in-flight writes; an unlocked swap technically violates that contract. On CPython the GIL hides the race in practice (single STORE_ATTR is atomic, no torn pointer), but the contract is still wrong and would matter on a non-GIL'd build. what: - Acquire ``handler.lock`` (via ``handler.acquire()/release()``) around the install swap and the restore swap. Same lock the emit path holds. - Don't go through ``StreamHandler.setStream``: its first action is to ``flush()`` the existing stream, which raises ``ValueError`` when the current stream is a finalized pytest ``capsys``/``CaptureIO`` (a prior test ran while ``setup_logger`` captured ``sys.stdout``, the test ended, the CaptureIO closed, the handler still holds the dead reference, then this test calls ``setStream``). 
Skipping the flush is safe -- we aren't closing the stream, only redirecting future writes -- and matches the handler-internal locking the contract actually requires. - Inline the previous-stream capture (no longer needs the ``setStream`` return-value plumbing). --- src/vcspull/cli/sync.py | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 0afe45776..3b038d4d4 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -842,6 +842,20 @@ def _install_indicator_log_diverter( handlers that are :class:`logging.StreamHandler` but not :class:`logging.FileHandler` -- the debug log file keeps writing straight to disk regardless of the spinner. + + Both the install and the restore swap ``handler.stream`` while + holding ``handler.lock`` -- the same lock ``Handler.handle`` (stdlib + ``logging/__init__.py:1011``) takes around every ``emit()``. Direct + ``handler.stream = X`` without the lock technically violates that + contract even if CPython's GIL hides the race in practice. + + We don't go through :meth:`logging.StreamHandler.setStream` because + its first action is to ``flush()`` the old stream. Under pytest, the + handler may already point at a finalized ``capsys``/``CaptureIO`` + from a prior test (``setup_logger`` set ``handler.stream = sys.stdout`` + when stdout *was* the live capture); flushing a closed file there + raises ``ValueError``. Skipping the flush is safe -- we are not + *closing* the stream, only redirecting future writes. 
""" proxy = _IndicatorStreamProxy(indicator) patched: list[tuple[logging.StreamHandler[t.Any], t.Any]] = [] @@ -852,12 +866,21 @@ def _install_indicator_log_diverter( continue if isinstance(handler, logging.FileHandler): continue - patched.append((handler, handler.stream)) - handler.stream = proxy + handler.acquire() + try: + previous = handler.stream + handler.stream = proxy + finally: + handler.release() + patched.append((handler, previous)) def _restore() -> None: for handler, original in patched: - handler.stream = original + handler.acquire() + try: + handler.stream = original + finally: + handler.release() return _restore From 477e0cd7b6768dd194fa6c51cddbcde72ea50310 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:06:23 -0500 Subject: [PATCH 20/32] fix(sync) Drop the no-prompt env cache so monkeypatch reaches subprocess why: ``_NO_PROMPT_ENV`` was a module-global cache that snapshotted ``os.environ`` on first call. Tests using ``monkeypatch.setenv`` after the cache populated never saw the change in ``_get_no_prompt_env`` -- the subprocess inherited a stale env. The cache also has no performance justification: ``_maybe_fetch`` already runs ``subprocess.run`` (orders of magnitude more expensive than a dict copy). what: - ``cli/sync.py``: remove the ``_NO_PROMPT_ENV`` global and the ``global``-guard branch; build the env dict fresh on each ``_get_no_prompt_env`` call. - Updated docstring to call out the rationale (test isolation > 1us saved per fetch). --- src/vcspull/cli/sync.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 3b038d4d4..5e419db64 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -174,15 +174,16 @@ def clamp(n: int, _min: int, _max: int) -> int: #: override via ``--timeout`` or the ``VCSPULL_SYNC_TIMEOUT_SECONDS`` env var. 
_DEFAULT_REPO_TIMEOUT_SECONDS = 10 -_NO_PROMPT_ENV: dict[str, str] | None = None - def _get_no_prompt_env() -> dict[str, str]: - """Return an environment dict that prevents git from prompting on stdin.""" - global _NO_PROMPT_ENV - if _NO_PROMPT_ENV is None: - _NO_PROMPT_ENV = {**os.environ, "GIT_TERMINAL_PROMPT": "0"} - return _NO_PROMPT_ENV + """Return an environment dict that prevents git from prompting on stdin. + + Built fresh per call rather than cached at module scope: a cache + snapshots ``os.environ`` at first access, which means + ``monkeypatch.setenv`` calls in tests after that point never reach + the subprocess. A dict copy on every fetch is cheap. + """ + return {**os.environ, "GIT_TERMINAL_PROMPT": "0"} def _maybe_fetch( From 93338c25b415c5db14d491ce3a5efdef357e89e5 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:07:07 -0500 Subject: [PATCH 21/32] docs(sync) Reword --verbose help to mention libvcs verbosity ladder why: The help text said "increase plan verbosity (-vv for maximum detail)", which was accurate for the original dry-run plan flag but became misleading when ``setup_logger`` started using ``args.verbosity`` to drive the libvcs ``StreamHandler`` level (WARNING -> INFO -> DEBUG). A user reading ``--help`` to figure out why their terminal got chatty on ``-v`` had no breadcrumb pointing at the verbosity ladder. what: - ``cli/sync.py``: rewrite the ``--verbose`` help string to spell out the actual ladder (default WARNING; -v INFO; -vv DEBUG) and to keep the dry-run plan-detail behaviour mentioned for callers who relied on the old wording. 
--- src/vcspull/cli/sync.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 5e419db64..d7234080c 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -626,7 +626,11 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP action="count", dest="verbosity", default=0, - help="increase plan verbosity (-vv for maximum detail)", + help=( + "increase verbosity. -v opens libvcs INFO ('Updating to ...'); " + "-vv opens libvcs DEBUG (full per-repo trace) plus extra " + "dry-run plan detail. Default keeps libvcs at WARNING+." + ), ) parser.add_argument( "--all", From fd354e3c48e902b5d3563661e12b7161a393200e Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:08:52 -0500 Subject: [PATCH 22/32] style(sync) Use the Unicode arrow consistently in CLI ornament MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: The rerun-recipe banners (``-> Rerun...``, ``-> Or with verbose...``, ``-> Probe each...``), the per-repo timed-out path arrow, and the "Full debug log" hint all used ASCII ``->``, while the rest of the file (``✓ Synced clap → ~/study/...``, ``Timed out fish → ~/...``) uses the Unicode ``→``. Two ornaments coexisted in the same emit stream, which read as visual drift. what: - ``cli/sync.py``: replace ``colors.info('->')`` and ``colors.muted('->')`` with the ``→`` form across all 7 surfaces (4 in ``_emit_rerun_recipe``, 2 in the ``_sync_impl`` "Full debug log" hints, 1 in the ``timed_out`` permanent line of ``_run_sync_loop``). - The non-arrow ``colors.warning('-')`` bullet stays as-is -- it's a list bullet, not a path-or-flow ornament. 
--- src/vcspull/cli/sync.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index d7234080c..a74d4efac 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -1005,13 +1005,13 @@ def _emit_rerun_recipe( display_path = str(PrivatePath(repo.path)) formatter.emit_text( f" {colors.warning('-')} {colors.info(repo.name):<32} " - f"{colors.muted('->')} {display_path} " + f"{colors.muted('→')} {display_path} " f"({colors.muted(f'{repo.duration:.1f}s elapsed')})", ) formatter.emit_text("") formatter.emit_text( - f"{colors.info('->')} Rerun just the affected repositories with a " + f"{colors.info('→')} Rerun just the affected repositories with a " f"larger timeout:", ) for workspace_root, repos in grouped.items(): @@ -1022,7 +1022,7 @@ def _emit_rerun_recipe( ) formatter.emit_text( - f"{colors.info('->')} Or with verbose logging to diagnose the hang:", + f"{colors.info('→')} Or with verbose logging to diagnose the hang:", ) for workspace_root, repos in grouped.items(): names = " ".join(shlex.quote(r.name) for r in repos) @@ -1033,7 +1033,7 @@ def _emit_rerun_recipe( ) formatter.emit_text( - f"{colors.info('->')} Probe each repository manually " + f"{colors.info('→')} Probe each repository manually " f"({colors.muted('these should return in under 10s')}):", ) for repo in timed_out_repos: @@ -1430,7 +1430,7 @@ def silent_progress(output: str, timestamp: datetime) -> None: _emit_summary(formatter, colors, summary) if log_file_path is not None: formatter.emit_text( - f"{colors.info('->')} Full debug log: " + f"{colors.info('→')} Full debug log: " f"{colors.muted(str(log_file_path))}", ) formatter.finalize() @@ -1450,7 +1450,7 @@ def silent_progress(output: str, timestamp: datetime) -> None: # user has a single post-mortem trail regardless of the failure mode. 
if summary.get("failed", 0) > 0 and log_file_path is not None: formatter.emit_text( - f"{colors.info('->')} Full debug log: {colors.muted(str(log_file_path))}", + f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", ) if exit_on_error and unmatched_count > 0: @@ -1539,7 +1539,7 @@ def _run_sync_loop( permanent = ( f"{colors.warning('-')} Timed out {colors.info(repo_name)} " f"after {colors.warning(f'{outcome.duration:.1f}s')} " - f"{colors.muted('->')} {display_repo_path}" + f"{colors.muted('→')} {display_repo_path}" ) wrote_final = indicator.stop_repo( final_line=permanent if is_human else None, @@ -1557,7 +1557,7 @@ def _run_sync_loop( _emit_summary(formatter, colors, summary) if log_file_path is not None: formatter.emit_text( - f"{colors.info('->')} Full debug log: " + f"{colors.info('→')} Full debug log: " f"{colors.muted(str(log_file_path))}", ) formatter.finalize() From fc6f8a49ee0ea3b393df2ff24b0f79d34af9b096 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:09:45 -0500 Subject: [PATCH 23/32] perf(sync[indicator]) Skip the lock on whitespace-only panel chunks why: ``add_output_line`` took ``self._lock`` and called ``text.splitlines()`` for every chunk libvcs delivered through the progress callback, including whitespace-only fragments (a stray ``\n`` between progress frames is common). The inner ``if stripped`` loop body would skip every fragment and the deque wouldn't change, so the lock acquire was pure overhead on a hot path that fires hundreds of times per repo. what: - ``cli/_progress.py``: after the panel-disabled fallback (which *should* still surface whitespace via ``self.write``), early-return when ``text.strip()`` is empty. The fallback path comes first so panel=0 / non-TTY / JSON callers still see the bytes. 
--- src/vcspull/cli/_progress.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 49f199ffe..233281219 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -276,6 +276,13 @@ def add_output_line(self, text: str) -> None: payload = text if text.endswith("\n") else text + "\n" self.write(payload) return + # Whitespace-only chunks (e.g. a stray ``\n`` between progress + # frames) contribute nothing to the deque -- the inner loop's + # ``if stripped`` would skip every fragment. Returning here saves + # the lock acquire + ``splitlines`` call on what is a relatively + # common pattern from libvcs's progress callback. + if not text.strip(): + return with self._lock: for line in text.splitlines(): stripped = line.rstrip() From 8f3c265533407dba665d654b63ca550bfbbddb44 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:13:27 -0500 Subject: [PATCH 24/32] docs(sync) Add doctests on public-ish progress + resolver helpers why: CLAUDE.md requires working doctests on every function and method, both as documentation and as runnable examples. The sync UX overhaul introduced public-ish helpers without doctest coverage, leaving the contract for ``--panel-lines`` / ``--timeout`` resolution and the ``SyncStatusIndicator`` lifecycle described only in prose. what: - ``cli/sync.py``: add doctests on ``_resolve_repo_timeout`` (default fallback when CLI value is ``None``/``0``/negative; CLI override passes through) and ``_resolve_panel_lines`` (default 3; ``0`` hides; ``-1`` unbounded; explicit values pass through). Each doctest pops the relevant ``VCSPULL_*`` env var first so a stray export in the caller's shell doesn't flake the example. - ``log.py``: add a doctest on ``default_debug_log_path`` asserting the ``vcspull-debug-`` prefix, ``.log`` suffix, and that the parent is the system tempdir. 
- ``cli/_progress.py``: doctests on ``SyncStatusIndicator.start_repo``, ``stop_repo``, and ``add_output_line``. Each builds a non-TTY (or TTY-with-StringIO) indicator so the doctest prints nothing visible yet still exercises the state machine. ``add_output_line`` got an ``r"""`` prefix because the example payloads contain ``\n`` literals (D301). - Test count goes from 1018 to 1024 (+6 doctests). --- src/vcspull/cli/_progress.py | 44 ++++++++++++++++++++++++++++++++++-- src/vcspull/cli/sync.py | 29 +++++++++++++++++++++++- src/vcspull/log.py | 10 ++++++++ 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py index 233281219..555ac23ae 100644 --- a/src/vcspull/cli/_progress.py +++ b/src/vcspull/cli/_progress.py @@ -151,7 +151,20 @@ def __init__( # ------------------------------------------------------------------ def start_repo(self, name: str) -> None: - """Mark ``name`` as the currently-running repository.""" + """Mark ``name`` as the currently-running repository. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=False + ... ) + >>> indicator.start_repo("clap") + >>> indicator._active_repo + 'clap' + >>> indicator.close() + """ if not self._enabled: return @@ -192,6 +205,20 @@ def stop_repo(self, final_line: str | None = None) -> bool: In every False case the caller is responsible for printing its own permanent line through the formatter as before. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=False + ... 
) + >>> indicator.start_repo("clap") + >>> indicator.stop_repo() + False + >>> indicator.stop_repo(final_line="ignored on non-TTY") + False + >>> indicator.close() """ if not self._enabled: return False @@ -251,7 +278,7 @@ def stop_repo(self, final_line: str | None = None) -> bool: return final_line is not None and had_render def add_output_line(self, text: str) -> None: - """Push streamed subprocess output into the live trail panel. + r"""Push streamed subprocess output into the live trail panel. ``text`` may contain multiple newline-separated lines (libvcs's progress callback delivers chunks, not whole lines). We split, @@ -263,6 +290,19 @@ def add_output_line(self, text: str) -> None: is disabled (non-TTY, JSON output, ``--color=never``), fall back to :meth:`write` so the bytes still appear -- just without the in-place rewriting. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=True, output_lines=2 + ... ) + >>> indicator.add_output_line("From github.com/foo/bar\n") + >>> indicator.add_output_line(" abc..def main -> origin/main\n") + >>> list(indicator._panel_buffer) + ['From github.com/foo/bar', ' abc..def main -> origin/main'] + >>> indicator.close() """ if not text: return diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index a74d4efac..5e5afef35 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -719,7 +719,21 @@ class _SyncOutcome: def _resolve_repo_timeout(cli_timeout: int | None) -> int: - """Resolve the repo timeout from CLI flag / env var / built-in default.""" + """Resolve the repo timeout from CLI flag / env var / built-in default. 
+ + Examples + -------- + >>> import os + >>> _ = os.environ.pop("VCSPULL_SYNC_TIMEOUT_SECONDS", None) + >>> _resolve_repo_timeout(None) + 10 + >>> _resolve_repo_timeout(60) + 60 + >>> _resolve_repo_timeout(0) + 10 + >>> _resolve_repo_timeout(-5) + 10 + """ if cli_timeout is not None and cli_timeout > 0: return cli_timeout env_value = os.environ.get("VCSPULL_SYNC_TIMEOUT_SECONDS") @@ -750,6 +764,19 @@ def _resolve_panel_lines(cli_value: int | None) -> int: short-circuit on ``> 0`` like :func:`_resolve_repo_timeout`. We do accept any integer the user passes via the CLI; only the env var override is validated, with a warning on garbage values. + + Examples + -------- + >>> import os + >>> _ = os.environ.pop("VCSPULL_PROGRESS_LINES", None) + >>> _resolve_panel_lines(None) + 3 + >>> _resolve_panel_lines(0) + 0 + >>> _resolve_panel_lines(-1) + -1 + >>> _resolve_panel_lines(5) + 5 """ if cli_value is not None: return cli_value diff --git a/src/vcspull/log.py b/src/vcspull/log.py index 00044d5c2..efc00b2be 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -355,6 +355,16 @@ def default_debug_log_path() -> pathlib.Path: file in the system temp directory on every run. The file is always created but its path is only surfaced to the user when something went wrong (failure or timeout), so clean runs stay quiet. 
+ + Examples + -------- + >>> path = default_debug_log_path() + >>> path.name.startswith("vcspull-debug-") + True + >>> path.suffix + '.log' + >>> path.parent == pathlib.Path(tempfile.gettempdir()) + True """ stamp = datetime.now().strftime("%Y%m%dT%H%M%S") pid = os.getpid() From cc720d340d9032d3daccd6b95e80bed51a9e29ef Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:15:44 -0500 Subject: [PATCH 25/32] fix(sync) Reject --panel-lines values other than -1, 0, or positive why: ``--panel-lines`` uses ``-1`` as the unbounded sentinel and ``0`` as the hide sentinel; any other negative was meaningless but ``_resolve_panel_lines`` silently coerced -2 / -7 / -42 to "unbounded" via the permissive ``elif output_lines < 0:`` branch in ``SyncStatusIndicator.__init__``. A user typing ``--panel-lines -3`` expecting "3 lines" would get an unbounded panel with no warning. typer/click validate at parse time precisely so user typos surface where the user can fix them. what: - ``cli/sync.py``: add ``_panel_lines_arg(value: str) -> int``, an argparse ``type=`` callable that accepts -1, 0, or any positive integer; raises ``argparse.ArgumentTypeError`` otherwise. Includes a doctest covering happy-path and both rejection cases (non-int string and out-of-range). - ``--panel-lines`` argparse entry uses ``type=_panel_lines_arg`` in place of plain ``type=int``. - The ``VCSPULL_PROGRESS_LINES`` env-var path keeps its existing log-warning + fallback behaviour: env vars are trusted less strictly than CLI args (typos there are usually copy/paste, not intent), and a hard error on a stray export would lock users out of running ``vcspull sync`` until they unset it. 
--- src/vcspull/cli/sync.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 5e5afef35..ac7d682ab 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -661,7 +661,7 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP parser.add_argument( "--panel-lines", dest="panel_lines", - type=int, + type=_panel_lines_arg, default=None, metavar="N", help=( @@ -757,6 +757,44 @@ def _resolve_repo_timeout(cli_timeout: int | None) -> int: _DEFAULT_PANEL_LINES = 3 +def _panel_lines_arg(value: str) -> int: + """Validate ``--panel-lines`` accepts ``-1``, ``0``, or any positive int. + + The flag uses ``-1`` as the "unbounded panel" sentinel and ``0`` as + the "hide panel" sentinel. Any other negative value (``-2``, ``-7``, + …) is meaningless; ``argparse`` would silently coerce them to + "unbounded" via :func:`_resolve_panel_lines`'s permissive branch, + which masks user typos. Reject at parse time with a typer-style + ``ArgumentTypeError`` instead. + + Examples + -------- + >>> _panel_lines_arg("0") + 0 + >>> _panel_lines_arg("-1") + -1 + >>> _panel_lines_arg("5") + 5 + >>> _panel_lines_arg("-2") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --panel-lines must be -1, 0, or positive (got -2) + >>> _panel_lines_arg("abc") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --panel-lines must be an integer (got 'abc') + """ + try: + parsed = int(value) + except ValueError: + msg = f"--panel-lines must be an integer (got {value!r})" + raise argparse.ArgumentTypeError(msg) from None + if parsed < -1: + msg = f"--panel-lines must be -1, 0, or positive (got {parsed})" + raise argparse.ArgumentTypeError(msg) + return parsed + + def _resolve_panel_lines(cli_value: int | None) -> int: """Resolve the live-trail panel height: CLI flag > env > default. 
From ff6f02ceae9d1278e2e63c2b3d5d90510f1e0986 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:17:05 -0500 Subject: [PATCH 26/32] fix(sync) Reject --timeout values <= 0 why: Zero or negative ``--timeout`` is meaningless: the watchdog would either return immediately (deadline already passed) or never (logically invalid). The earlier permissive ``_resolve_repo_timeout`` silently fell back to the default on these inputs, masking typos like ``--timeout -10`` (intended ``10``) where the user assumed the flag took effect. typer/click validate at parse time precisely so user typos surface where the user can fix them. what: - ``cli/sync.py``: add ``_positive_int_arg(value: str) -> int``, an argparse ``type=`` callable that requires a positive integer; raises ``argparse.ArgumentTypeError`` on zero, negative, or non-numeric. Includes a doctest covering the happy path and three rejection branches. - ``--timeout`` argparse entry uses ``type=_positive_int_arg`` in place of plain ``type=int``. - ``_resolve_repo_timeout`` keeps its programmatic-caller fallback for ``cli_timeout <= 0``; doctest now notes the CLI surface is guarded by ``_positive_int_arg`` at parse time. - ``VCSPULL_SYNC_TIMEOUT_SECONDS`` env path keeps log-warning + fallback (env vars are trusted less strictly than CLI args; a hard error on a stale export would lock users out). 
--- src/vcspull/cli/sync.py | 44 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index ac7d682ab..cbcf3c3c2 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -648,7 +648,7 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP parser.add_argument( "--timeout", dest="timeout", - type=int, + type=_positive_int_arg, default=None, metavar="SECONDS", help=( @@ -718,9 +718,51 @@ class _SyncOutcome: duration: float = 0.0 +def _positive_int_arg(value: str) -> int: + """Validate ``--timeout`` accepts only positive integers. + + A timeout of zero or negative seconds is meaningless: the watchdog + would either return immediately (timed out) or never (logically + invalid). The earlier permissive ``_resolve_repo_timeout`` silently + fell back to the default on these inputs, which masked typos like + ``--timeout -10`` (intended ``10``) where the user assumed the + flag had taken effect. + + Examples + -------- + >>> _positive_int_arg("60") + 60 + >>> _positive_int_arg("0") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be a positive integer (got 0) + >>> _positive_int_arg("-5") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be a positive integer (got -5) + >>> _positive_int_arg("abc") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be an integer (got 'abc') + """ + try: + parsed = int(value) + except ValueError: + msg = f"--timeout must be an integer (got {value!r})" + raise argparse.ArgumentTypeError(msg) from None + if parsed <= 0: + msg = f"--timeout must be a positive integer (got {parsed})" + raise argparse.ArgumentTypeError(msg) + return parsed + + def _resolve_repo_timeout(cli_timeout: int | None) -> int: """Resolve the repo timeout from CLI flag / env var / built-in default. 
+ Programmatic callers passing non-positive values still fall back to + the env/default ladder; the CLI surface enforces positive-int + semantics via :func:`_positive_int_arg` at parse time. + Examples -------- >>> import os From cf3f8058863eda2207caf178e9e5aa666325492f Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:18:02 -0500 Subject: [PATCH 27/32] docs(log) Comment the libvcs-floor invariant in setup_file_logger why: ``setup_file_logger`` raises both the ``vcspull`` and ``libvcs`` *logger* levels to DEBUG so its FileHandler captures the full trace for post-mortem. That bump would ordinarily open the same DEBUG records to every other handler attached to those loggers (notably the terminal ``StreamHandler``), but ``setup_logger`` pins per-handler levels on every StreamHandler, which absorbs the bump without leaking. The whole "libvcs DEBUG floodgate" regression that ``89649a86`` fixed was this invariant breaking. The code that relies on it had no inline note, so a future "simplification" that drops the per-handler pinning would silently re-open the floodgate and the only signal would be the regression test going red. what: - ``log.py``: add an inline comment at the ``logger.setLevel(level)`` bump in ``setup_file_logger`` explaining the precondition (the per-handler levels in ``setup_logger``) and naming the regression test (``test_setup_file_logger_does_not_open_stream_floodgate``) that catches it. --- src/vcspull/log.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/vcspull/log.py b/src/vcspull/log.py index efc00b2be..735d62302 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -418,6 +418,18 @@ def setup_file_logger( if existing is None: logger.addHandler(handler) # Ensure the logger itself lets DEBUG records through to the handler. + # + # Invariant: this raises the *logger* level to DEBUG, which would + # ordinarily open the floodgate to every attached StreamHandler too. 
+ # ``setup_logger`` pins explicit per-handler levels on every + # ``StreamHandler`` (default WARNING; ``-v`` INFO; ``-vv`` DEBUG) + # so the bump here doesn't reach the terminal -- the StreamHandler's + # own filter still drops records below its level. If anyone ever + # "simplifies" ``setup_logger`` to drop the per-handler levels, this + # block will silently re-open the libvcs DEBUG floodgate to stdout. + # Regression-guarded by + # ``test_setup_file_logger_does_not_open_stream_floodgate`` in + # ``tests/test_log.py``. if logger.level == logging.NOTSET or logger.level > level: logger.setLevel(level) From 3e2d4a20b2f3ba3e5b83df22d60cd3e4ff0d75c8 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:19:23 -0500 Subject: [PATCH 28/32] docs(sync) Comment the _SyncInterruptedAfterSummary catch invariant why: The marker subclass exists so the outer ``sync()`` ``except KeyboardInterrupt`` catch can distinguish "inner layer already printed a coloured partial summary, don't repeat it" from "Ctrl-C hit before the formatter was even built; print the plain-stderr fallback". The contract works because the only ``except KeyboardInterrupt`` between the inner ``_sync_impl`` raise and process exit is :func:`sync`'s, which subclass-checks. A future broader catch wrapping ``cli()`` would need to either re-raise the subclass unchanged or perform the same isinstance check -- otherwise the marker is swallowed, the inner partial-summary prints, AND the outer fallback prints, restoring the double-print artefact the marker was introduced to suppress. The raise site and the catch site each had no inline note. what: - ``cli/sync.py``: extend ``_SyncInterruptedAfterSummary``'s docstring with an explicit "Invariant" section spelling out the "only catcher is sync()" precondition. - Add a comment near the inner-layer ``raise _SyncInterruptedAfterSummary from None`` in ``_sync_impl`` pointing back at the docstring so the raise site is also self-documenting. 
--- src/vcspull/cli/sync.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index cbcf3c3c2..83bafc8d2 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -879,6 +879,15 @@ class _SyncInterruptedAfterSummary(KeyboardInterrupt): inner layer has already printed a coloured partial summary through the formatter. Subclassing ``KeyboardInterrupt`` keeps the exception in the same taxonomy for the outer ``except KeyboardInterrupt`` catch. + + Invariant: the only ``except KeyboardInterrupt`` between the inner + raise and process exit is :func:`sync`'s own handler, which + dispatches on ``isinstance(err, _SyncInterruptedAfterSummary)``. A + future broader catch higher up the stack must either re-raise the + marker unchanged or perform the same isinstance check -- otherwise + the inner partial-summary print AND the outer plain-stderr fallback + both fire, restoring the double-print artefact this marker exists + to suppress. """ @@ -1543,6 +1552,10 @@ def silent_progress(output: str, timestamp: datetime) -> None: formatter.finalize() except (OSError, ValueError): pass + # The ``from None`` chain-suppresses the original ``KeyboardInterrupt`` + # so the marker reaches the outer ``except KeyboardInterrupt as err`` + # in :func:`sync` cleanly. See ``_SyncInterruptedAfterSummary`` for + # the no-double-print invariant this raise relies on. 
raise _SyncInterruptedAfterSummary from None _emit_summary(formatter, colors, summary) From d2110fbe0c2d9af230a14a394b52ee9c2bee679d Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 16:37:43 -0500 Subject: [PATCH 29/32] fix(sync) Silence the libvcs terminal stream during human-mode sync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Reporter saw libvcs's ``|git| (rye) Failed to determine current branch`` warning leak in alongside vcspull's own ``✗ Failed syncing rye: Command failed with code 128: git symbolic-ref HEAD --short`` line -- two messages saying the same thing, breaking the ``✓ Synced X / ✗ Failed X / - Timed out X`` rhythm of the rest of the output. libvcs's WARNING/ERROR records emit through the StreamHandler at default verbosity (WARNING), and the indicator's stream-diverter only redirected the bytes -- it didn't filter them out -- so the libvcs prologue still printed before the vcspull summary. what: - ``cli/sync.py``: ``_install_indicator_log_diverter`` now also raises the libvcs StreamHandler level to ``logging.CRITICAL + 1`` for the duration of the sync, but only when the level is at WARNING (default verbosity). 51 sits one above ``CRITICAL`` so no standard log record reaches the handler. The ``FileHandler`` is left untouched and still captures everything at DEBUG, so a post-mortem in the debug log still shows the libvcs entry. Users at ``-v`` (libvcs INFO) or ``-vv`` (libvcs DEBUG) opted into seeing libvcs activity; the diverter leaves their level alone. - The restore closure now also walks the saved levels and resets them, mirroring the stream restore. - ``tests/test_log.py``: two regression tests next to ``test_setup_file_logger_does_not_open_stream_floodgate`` (same shape: handler-level invariant under condition X). Asserts the bump from WARNING to ``CRITICAL + 1`` and the restore at default verbosity, plus the no-op when the user is at ``-v``. 
- vcspull's StreamHandler is intentionally untouched: its WARNING records (e.g. ``Could not open debug log at ...``) all fire *before* the indicator is installed, but if any future call site emits a vcspull warning during the loop the user should still see it. --- src/vcspull/cli/sync.py | 23 +++++++++++ tests/test_log.py | 86 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index 83bafc8d2..e092fd5f8 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -977,9 +977,22 @@ def _install_indicator_log_diverter( when stdout *was* the live capture); flushing a closed file there raises ``ValueError``. Skipping the flush is safe -- we are not *closing* the stream, only redirecting future writes. + + On top of the stream swap we also raise the *libvcs* StreamHandler + level above ``CRITICAL`` for the duration of the sync, but only when + the user kept the default verbosity (handler at WARNING). vcspull's + own ``✗ Failed syncing rye: Command failed with code 128: git + symbolic-ref HEAD --short`` line carries the same content as + libvcs's ``|git| (rye) Failed to determine current branch`` warning + that fires immediately before; printing both breaks the + ``✓ Synced X / ✗ Failed X / - Timed out X`` rhythm. The + debug-log :class:`~logging.FileHandler` keeps DEBUG, so a + post-mortem still has the libvcs line for context. ``-v`` / ``-vv`` + users opted into INFO / DEBUG and keep their explicit level. 
""" proxy = _IndicatorStreamProxy(indicator) patched: list[tuple[logging.StreamHandler[t.Any], t.Any]] = [] + raised_levels: list[tuple[logging.StreamHandler[t.Any], int]] = [] for logger_name in ("libvcs", "vcspull"): logger = logging.getLogger(logger_name) for handler in logger.handlers: @@ -994,6 +1007,14 @@ def _install_indicator_log_diverter( finally: handler.release() patched.append((handler, previous)) + if logger_name == "libvcs" and handler.level == logging.WARNING: + raised_levels.append((handler, handler.level)) + # 51 sits one above ``logging.CRITICAL`` -- standard log + # records can't reach the handler at this level, so libvcs + # noise (WARNING + ERROR + CRITICAL) is dropped from the + # terminal stream. The file handler is untouched and still + # captures everything at DEBUG. + handler.setLevel(logging.CRITICAL + 1) def _restore() -> None: for handler, original in patched: @@ -1002,6 +1023,8 @@ def _restore() -> None: handler.stream = original finally: handler.release() + for handler, level in raised_levels: + handler.setLevel(level) return _restore diff --git a/tests/test_log.py b/tests/test_log.py index eb4a3461e..8fb7acd63 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -718,3 +718,89 @@ def test_setup_file_logger_does_not_open_stream_floodgate( ) finally: log_mod.teardown_file_logger(handler) + + +def test_indicator_log_diverter_silences_libvcs_at_default_verbosity() -> None: + """``_install_indicator_log_diverter`` blocks libvcs from the terminal. + + Reporter saw libvcs's ``|git| (rye) Failed to determine current + branch`` warning leak in alongside vcspull's own ``✗ Failed + syncing rye: Command failed with code 128: git symbolic-ref HEAD + --short`` line -- two messages saying the same thing, breaking + the ``✓ Synced X / ✗ Failed X / - Timed out X`` rhythm. The + diverter raises the libvcs StreamHandler level above + ``CRITICAL`` so libvcs records can't reach the terminal during + indicator-active sync. 
The file handler (debug log) keeps + DEBUG so post-mortems still have context. + """ + from vcspull.cli._progress import build_indicator + from vcspull.cli.sync import _install_indicator_log_diverter + + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert handlers, "libvcs logger should have a StreamHandler after setup" + levels_before = [h.level for h in handlers] + assert all(level == logging.WARNING for level in levels_before) + + indicator = build_indicator(human=True, color="always", tty=True) + try: + restore = _install_indicator_log_diverter(indicator) + try: + levels_during = [h.level for h in handlers] + assert all(level == logging.CRITICAL + 1 for level in levels_during), ( + f"libvcs StreamHandler must be silenced during sync; " + f"got {levels_during}" + ) + finally: + restore() + levels_after = [h.level for h in handlers] + assert levels_after == levels_before, ( + f"diverter must restore the original level; " + f"before={levels_before} after={levels_after}" + ) + finally: + indicator.close() + + +def test_indicator_log_diverter_respects_verbose_user() -> None: + """At ``-v``/``-vv``, the diverter leaves libvcs StreamHandler alone. + + Users who explicitly asked for libvcs INFO (``-v``) or DEBUG + (``-vv``) want to see what libvcs is doing; silencing them + defeats the purpose of the verbosity flag. Only the default + (WARNING) gets bumped. 
+ """ + from vcspull.cli._progress import build_indicator + from vcspull.cli.sync import _install_indicator_log_diverter + + setup_logger(level="INFO", verbosity=1) + libvcs_logger = logging.getLogger("libvcs") + handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert handlers + levels_before = [h.level for h in handlers] + assert all(level == logging.INFO for level in levels_before) + + indicator = build_indicator(human=True, color="always", tty=True) + try: + restore = _install_indicator_log_diverter(indicator) + try: + levels_during = [h.level for h in handlers] + assert levels_during == levels_before, ( + f"diverter must not change level when user is at -v/-vv; " + f"got {levels_during}" + ) + finally: + restore() + finally: + indicator.close() From 7b5c3a2172ea0e5d2cfb7436966a741072ff4eb0 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 17:08:01 -0500 Subject: [PATCH 30/32] fix(log) Group debug logs under vcspull/ (vcspull-test/ for tests) why: ``default_debug_log_path`` was dropping debug logs as bare files in the system tempdir (``/tmp/vcspull-debug-<timestamp>-<pid>.log``). The ``vcspull-`` filename prefix gets lost in a busy ``/tmp/`` next to ``vscode-IPC-...``, ``systemd-private-...``, ``kotlin-idea-...``, etc., and there is no single dir to ``rm -rf`` to clean up old vcspull logs. Reporter wanted the path grouped under a self-describing directory: ``/tmp/vcspull/`` for production runs, ``/tmp/vcspull-test/`` for tests, so a glance at ``/tmp/`` says what's whose. what: - ``log.py``: ``default_debug_log_path`` now returns ``<tmpdir>/vcspull/debug-<timestamp>-<pid>.log`` (the ``vcspull-`` prefix on the file is redundant once the directory carries the label). - Auto-detect pytest via ``PYTEST_CURRENT_TEST`` and route to ``<tmpdir>/vcspull-test/`` instead.
This is a safety net for tests that incidentally call ``default_debug_log_path`` without first redirecting ``TMPDIR`` -- they'll never pollute the production ``vcspull/`` dir even when the test author forgets to monkeypatch. ``PYTEST_CURRENT_TEST`` is set by pytest for the entire test session (collection + execution + doctest), so the doctest assertion also picks the test dir. - ``cli/sync.py``: ``--log-file`` help text mentions both production and test default paths. - ``tests/cli/test_sync_log_file.py``: update the two tests asserting on the file name prefix to match the new ``debug--.log`` pattern. ``test_default_debug_log_path_lives_in_tempdir`` also asserts the parent dir name is ``vcspull-test`` (since that's what pytest sees), pinning the auto-detect. - The doctest on ``default_debug_log_path`` accepts either ``vcspull`` or ``vcspull-test`` so the example reads correctly whether it runs under pytest or in a Python repl. - Behaviour is wholly additive: no callers change. Existing debug logs from previous vcspull runs at ``/tmp/vcspull-debug-...log`` remain on disk; users can clean them by hand once. --- src/vcspull/cli/sync.py | 7 ++++--- src/vcspull/log.py | 18 +++++++++++++++--- tests/cli/test_sync_log_file.py | 16 +++++++++++----- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index e092fd5f8..aa38b898d 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -677,9 +677,10 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP metavar="PATH", default=None, help=( - "write a debug log to PATH (default: $TMPDIR/vcspull-debug--" - ".log). The path is only printed to the terminal when a sync " - "fails or times out, matching npm/pnpm/yarn." + "write a debug log to PATH (default: $TMPDIR/vcspull/debug--" + ".log; $TMPDIR/vcspull-test/... under pytest). 
The path is " + "only printed to the terminal when a sync fails or times out, " + "matching npm/pnpm/yarn." ), ) parser.add_argument( diff --git a/src/vcspull/log.py b/src/vcspull/log.py index 735d62302..beb9b3c04 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -356,19 +356,31 @@ def default_debug_log_path() -> pathlib.Path: but its path is only surfaced to the user when something went wrong (failure or timeout), so clean runs stay quiet. + Logs land under a ``vcspull/`` subdirectory of the system tempdir + (e.g. ``/tmp/vcspull/debug--.log``) so the dir name in + ``/tmp/`` is self-describing instead of relying on a ``vcspull-`` + file prefix that gets lost in a busy tempdir. Under pytest + (``PYTEST_CURRENT_TEST`` is set) the subdirectory is + ``vcspull-test/`` instead -- an automatic safety net so tests that + incidentally invoke this helper without redirecting ``TMPDIR`` + can't pollute the production log dir. + Examples -------- >>> path = default_debug_log_path() - >>> path.name.startswith("vcspull-debug-") + >>> path.name.startswith("debug-") True >>> path.suffix '.log' - >>> path.parent == pathlib.Path(tempfile.gettempdir()) + >>> path.parent.name in {"vcspull", "vcspull-test"} + True + >>> path.parent.parent == pathlib.Path(tempfile.gettempdir()) True """ stamp = datetime.now().strftime("%Y%m%dT%H%M%S") pid = os.getpid() - return pathlib.Path(tempfile.gettempdir()) / f"vcspull-debug-{stamp}-{pid}.log" + subdir = "vcspull-test" if "PYTEST_CURRENT_TEST" in os.environ else "vcspull" + return pathlib.Path(tempfile.gettempdir()) / subdir / f"debug-{stamp}-{pid}.log" def setup_file_logger( diff --git a/tests/cli/test_sync_log_file.py b/tests/cli/test_sync_log_file.py index 9a0005751..150b34075 100644 --- a/tests/cli/test_sync_log_file.py +++ b/tests/cli/test_sync_log_file.py @@ -26,10 +26,16 @@ def test_default_debug_log_path_lives_in_tempdir( path = default_debug_log_path() - # The file name must start with the prefix that downstream tooling (CI log - 
# collection, grep for "npm-debug"-style artefacts) can pattern-match on. - assert path.name.startswith("vcspull-debug-") + # The file name pattern downstream tooling can grep for is ``debug-- + # .log``; the ``vcspull`` (or ``vcspull-test``) parent directory + # carries the project label so a glance at ``/tmp/`` is self-describing. + assert path.name.startswith("debug-") assert path.suffix == ".log" + # Pytest sets ``PYTEST_CURRENT_TEST`` during test collection AND + # execution, so ``default_debug_log_path`` always picks the test dir + # here -- the production ``vcspull/`` dir is only used outside the + # test runner. + assert path.parent.name == "vcspull-test" def test_setup_file_logger_attaches_debug_handler( @@ -146,5 +152,5 @@ def strftime(_fmt: str) -> str: second = default_debug_log_path() assert first != second - assert first.name.startswith("vcspull-debug-20260424") - assert second.name.startswith("vcspull-debug-20260424") + assert first.name.startswith("debug-20260424") + assert second.name.startswith("debug-20260424") From c69879e8843966bce9e2560fb48e1dadbb6f937c Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 17:10:50 -0500 Subject: [PATCH 31/32] docs(CHANGES) Note libvcs sync-time silencing and the new log dir layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit why: Two user-visible behaviours landed without CHANGES updates: the libvcs WARNING/ERROR silencing during human-mode sync (so the ``|git| (rye) Failed to determine ...`` line no longer competes with ``✗ Failed syncing rye: ...``), and the debug-log directory regrouping under ``$TMPDIR/vcspull/`` (and ``$TMPDIR/vcspull-test/`` under pytest). Releasing without these would leave the changelog describing behaviour that no longer matches reality. 
what: - ``CHANGES``: extend the existing "libvcs is quiet by default" unreleased section with a paragraph spelling out the sync-time StreamHandler silencing -- name the rye repro because that is the symptom users will recognise -- and stress that the debug log file still captures the entry for post-mortem. - ``CHANGES``: rewrite the debug-log section to advertise the new ``$TMPDIR/vcspull/debug-<timestamp>-<pid>.log`` path (production) and the ``$TMPDIR/vcspull-test/...`` test-mode safety net, replacing the stale ``$TMPDIR/vcspull-debug-...`` description. --- CHANGES | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 407776388..17bab1b4f 100644 --- a/CHANGES +++ b/CHANGES @@ -64,6 +64,16 @@ Pass `-v` to see libvcs `INFO` (`Updating to 'main'.`, trail). The npm/pnpm-style debug log file always captures `DEBUG` regardless of verbosity. +During `vcspull sync` itself the libvcs terminal stream is also +silenced for `WARNING` / `ERROR` records at default verbosity -- +libvcs's `|git| (rye) Failed to determine current branch` warning +was duplicating the same content vcspull's own `✗ Failed syncing +rye: Command failed with code 128: git symbolic-ref HEAD --short` +line already carried, breaking the +`✓ Synced X / ✗ Failed X / - Timed out X` rhythm. The debug log +file still captures every libvcs entry, so a post-mortem after a +failure has the full trace. + Root cause for the regression that prompted this: `setup_file_logger` raised both the `vcspull` and `libvcs` loggers to `DEBUG` so the `FileHandler` could capture full traces, but the terminal `StreamHandler` @@ -113,12 +123,15 @@ colours are turned off. #### `vcspull sync`: per-invocation debug log (npm/pnpm style) Every `vcspull sync` invocation now writes a debug log to -`$TMPDIR/vcspull-debug-<timestamp>-<pid>.log`. Clean runs stay quiet -- the -log path is only surfaced to the terminal when at least one repository -failed or timed out.
Override the destination with `--log-file PATH`, or -disable entirely with `--no-log-file`. The log captures the full libvcs -per-repo activity (progress callback output, exit codes, errors) so a hang -can be post-mortemed even after the CLI has moved on. +`$TMPDIR/vcspull/debug-<timestamp>-<pid>.log` (or +`$TMPDIR/vcspull-test/debug-<timestamp>-<pid>.log` when running under +pytest, an automatic safety net so the production log dir stays +uncontaminated by test runs). Clean runs stay quiet -- the log path is +only surfaced to the terminal when at least one repository failed or +timed out. Override the destination with `--log-file PATH`, or disable +entirely with `--no-log-file`. The log captures the full libvcs per-repo +activity (progress callback output, exit codes, errors) so a hang can be +post-mortemed even after the CLI has moved on. ### Dependencies From 2c22011e9fa23919285813314f300b946b5bbce8 Mon Sep 17 00:00:00 2001 From: Tony Narlock Date: Sat, 25 Apr 2026 17:30:34 -0500 Subject: [PATCH 32/32] docs(CHANGES) Tighten the unreleased section to product-level prose why: The unreleased CHANGES had grown to 108 lines of dense prose with implementation details (signal names, internal class names, file:line citations) and entries that read like commit summaries instead of changelog entries. The right shape for a changelog is "what is this, why does it matter to me?" answered in 2-4 lines per surface; depth belongs in autodoc, source, and the linked PR. The previous draft buried the user-visible upgrade story under mechanics. what: - Six "What's new" entries -- per-repo timeout, status indicator, streaming trail, debug log, Ctrl-C chain abort, quieter default -- each 2-4 lines and headed with the user-visible product surface (no commit-prefix headings). - One "Bug fixes" line for the original credential-prompt hang reporters opened the issue against. - libvcs minimum-version bump moved to "Breaking changes", matching the convention from v1.52.0.
- Drop signal names, syscall references, internal handler/formatter identifiers, and the floodgate-root-cause paragraph; that's the PR's job. - Each entry references PR `(#544)`. --- CHANGES | 147 ++++++++++++++++++-------------------------------------- 1 file changed, 47 insertions(+), 100 deletions(-) diff --git a/CHANGES b/CHANGES index 17bab1b4f..5963875c4 100644 --- a/CHANGES +++ b/CHANGES @@ -37,107 +37,54 @@ $ uvx --from 'vcspull' --prerelease allow vcspull _Notes on upcoming releases will be added here_ -### New features +### Breaking changes + +- Bump minimum libvcs from 0.39.0 to 0.40.0 (#544) + +### What's new + +#### `vcspull sync`: per-repo timeout + +A stuck repository no longer freezes the whole batch. Each repo runs +under a 10-second wall-clock deadline; override with `--timeout +SECONDS` or `VCSPULL_SYNC_TIMEOUT_SECONDS`. The summary lists any +timed-out repos with a copy-pasteable rerun command. (#544) + +#### `vcspull sync`: live status indicator + +The active repo is now visible while sync runs. In a terminal you get +a spinner with the repo name and elapsed time; in pipes you get a +start line plus a periodic "still syncing" heartbeat. Disabled under +`--json`, `--ndjson`, and `--color=never`. (#544) + +#### `vcspull sync`: streaming output trail + +Git's streaming output now appears in a 3-line trail above the +spinner and collapses to the permanent `✓ Synced` line when the repo +finishes. Tunable with `--panel-lines` (`0` hides, `-1` unbounded) or +`VCSPULL_PROGRESS_LINES`. (#544) + +#### `vcspull sync`: per-invocation debug log + +Every run drops a debug log under `$TMPDIR/vcspull/` (npm/pnpm style), +surfaced to the terminal only when something failed or timed out. +Override with `--log-file PATH`; disable with `--no-log-file`. 
(#544) + +#### `vcspull sync`: Ctrl-C aborts shell chains + +`vcspull sync` now exits via signal on Ctrl-C, so chained invocations +like `vcspull sync ~/a; vcspull sync ~/b` stop on the first interrupt +instead of running the whole list. Matches `git clone; git fetch`. (#544) + +#### `vcspull`: quieter default output + +libvcs's per-repo log crumbs no longer print at the default verbosity. +Pass `-v` for INFO-level libvcs activity (`Updating to 'main'.`); pass +`-vv` for DEBUG. The debug log file always captures DEBUG regardless. (#544) + +### Bug fixes -#### `vcspull sync`: collapsing 3-line live-trail above the spinner - -Streaming subprocess output (git's `From `, `* [new branch]`, -`Already on 'main'`, `Successfully rebased`, etc.) now flows into a -fixed-height live-trail region above the per-repo spinner. The trail -shows the most recent 3 lines, rolls under as new ones arrive, and -**collapses** when the repo finishes -- leaving only the permanent -`✓ Synced <repo>` line. Modeled on tmuxp's `before_script` -panel. - -Tweak with `--panel-lines N` (`0` hides; `-1` unbounded; default 3) or -the `VCSPULL_PROGRESS_LINES` environment variable. The panel is only -active when the indicator itself is (TTY + human + colour mode); pipes, -`--json`, `--ndjson`, and `--color=never` keep their existing line- -oriented output. - -#### `vcspull`: libvcs is quiet by default; `-v` / `-vv` opens it back up - -The chatty `|git| (repo) head_sha: ...` / `git_remote_name: ...` / -`tag_sha: ...` crumb lines no longer appear at the default verbosity. -Pass `-v` to see libvcs `INFO` (`Updating to 'main'.`, -`Already up-to-date.`); pass `-vv` to see `DEBUG` (the full crumb -trail). The npm/pnpm-style debug log file always captures `DEBUG` -regardless of verbosity.
- -During `vcspull sync` itself the libvcs terminal stream is also -silenced for `WARNING` / `ERROR` records at default verbosity -- -libvcs's `|git| (rye) Failed to determine current branch` warning -was duplicating the same content vcspull's own `✗ Failed syncing -rye: Command failed with code 128: git symbolic-ref HEAD --short` -line already carried, breaking the -`✓ Synced X / ✗ Failed X / - Timed out X` rhythm. The debug log -file still captures every libvcs entry, so a post-mortem after a -failure has the full trace. - -Root cause for the regression that prompted this: `setup_file_logger` -raised both the `vcspull` and `libvcs` loggers to `DEBUG` so the -`FileHandler` could capture full traces, but the terminal `StreamHandler` -had no per-handler level filter -- so the bump opened the floodgate to -the terminal too. `setup_logger` now pins explicit per-handler levels -on every `StreamHandler` so file-logger and terminal-logger filters -are independent. - -#### `vcspull sync`: Ctrl-C aborts the whole shell chain, not one command - -`vcspull sync` now terminates with `WIFSIGNALED(SIGINT)` on Ctrl-C instead -of exiting cleanly with status `130`. Bash/zsh sequential lists -(`cmd1; cmd2; cmd3`) abort on the first signal-terminated child, so -chained invocations like -`vcspull sync --workspace ~/study/rust/ --all; vcspull sync --workspace -~/study/otel/ --all` now stop on a single Ctrl-C — matching how `git -clone; git fetch` compose. Behaviour is implemented via the canonical -git pattern (`sigchain.h:20-34`): flush output, close the debug log -handle, install `SIG_DFL`, self-deliver SIGINT. Windows falls back to -the conventional `SystemExit(130)` because it has no `WIFSIGNALED` -analogue. - -#### `vcspull sync`: per-repository watchdog and copyable rerun recipe - -A single stuck repository no longer freezes the whole batch. Every repository -sync now runs under a wall-clock watchdog. 
The default deadline is 10 seconds --- a healthy fetch/pull against a warm remote finishes well under that, so -anything slower is surfaced rather than waited on. Override with `--timeout -N` or the `VCSPULL_SYNC_TIMEOUT_SECONDS` environment variable. - -When one or more repositories time out, the summary is followed by a -copy-pasteable rerun recipe that retries just those repositories with a -larger timeout, plus a `-vv` variant for diagnosis and a manual -`git fetch --dry-run` probe per repo. Timed-out repositories are counted in -the summary as "N timed out" alongside synced / failed. - -#### `vcspull sync`: live status indicator (spinner + heartbeat) - -Every `vcspull sync` run now shows which repository is currently running -and how long it has been. In a terminal the output is a single-line braille -spinner that refreshes in place; when output is piped (CI, `tee`, log -capture), the indicator degrades to a `· syncing <repo>` line on start plus -a periodic `… still syncing (45s elapsed)` heartbeat. The indicator -is disabled under machine-readable output (`--json` / `--ndjson`) or when -colours are turned off. - -#### `vcspull sync`: per-invocation debug log (npm/pnpm style) - -Every `vcspull sync` invocation now writes a debug log to -`$TMPDIR/vcspull/debug-<timestamp>-<pid>.log` (or -`$TMPDIR/vcspull-test/debug-<timestamp>-<pid>.log` when running under -pytest, an automatic safety net so the production log dir stays -uncontaminated by test runs). Clean runs stay quiet -- the log path is -only surfaced to the terminal when at least one repository failed or -timed out. Override the destination with `--log-file PATH`, or disable -entirely with `--no-log-file`. The log captures the full libvcs per-repo -activity (progress callback output, exit codes, errors) so a hang can be -post-mortemed even after the CLI has moved on.
- -### Dependencies - -- Bump `libvcs` to `>=0.40.0,<0.41.0` for timeout + fast `set_remotes` fixes - that let `vcspull sync` recover from a single stuck repository without - hanging the whole batch. +- Sync no longer hangs on credential prompts or slow fetches (#544) ### Documentation