diff --git a/CHANGES b/CHANGES index ec5e0b58f..5963875c4 100644 --- a/CHANGES +++ b/CHANGES @@ -37,6 +37,55 @@ $ uvx --from 'vcspull' --prerelease allow vcspull _Notes on upcoming releases will be added here_ +### Breaking changes + +- Bump minimum libvcs from 0.39.0 to 0.40.0 (#544) + +### What's new + +#### `vcspull sync`: per-repo timeout + +A stuck repository no longer freezes the whole batch. Each repo runs +under a 10-second wall-clock deadline; override with `--timeout +SECONDS` or `VCSPULL_SYNC_TIMEOUT_SECONDS`. The summary lists any +timed-out repos with a copy-pasteable rerun command. (#544) + +#### `vcspull sync`: live status indicator + +The active repo is now visible while sync runs. In a terminal you get +a spinner with the repo name and elapsed time; in pipes you get a +start line plus a periodic "still syncing" heartbeat. Disabled under +`--json`, `--ndjson`, and `--color=never`. (#544) + +#### `vcspull sync`: streaming output trail + +Git's streaming output now appears in a 3-line trail above the +spinner and collapses to the permanent `✓ Synced` line when the repo +finishes. Tunable with `--panel-lines` (`0` hides, `-1` unbounded) or +`VCSPULL_PROGRESS_LINES`. (#544) + +#### `vcspull sync`: per-invocation debug log + +Every run drops a debug log under `$TMPDIR/vcspull/` (npm/pnpm style), +surfaced to the terminal only when something failed or timed out. +Override with `--log-file PATH`; disable with `--no-log-file`. (#544) + +#### `vcspull sync`: Ctrl-C aborts shell chains + +`vcspull sync` now exits via signal on Ctrl-C, so chained invocations +like `vcspull sync ~/a; vcspull sync ~/b` stop on the first interrupt +instead of running the whole list. Matches `git clone; git fetch`. (#544) + +#### `vcspull`: quieter default output + +libvcs's per-repo log crumbs no longer print at the default verbosity. +Pass `-v` for INFO-level libvcs activity (`Updating to 'main'.`); pass +`-vv` for DEBUG. The debug log file always captures DEBUG regardless. 
(#544) + +### Bug fixes + +- Sync no longer hangs on credential prompts or slow fetches (#544) + ### Documentation - Visual improvements to API docs from [gp-sphinx](https://gp-sphinx.git-pull.com)-based Sphinx packages (#542) diff --git a/pyproject.toml b/pyproject.toml index 05941d52d..dce09e5bf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ keywords = [ ] homepage = "https://vcspull.git-pull.com" dependencies = [ - "libvcs~=0.39.0", + "libvcs>=0.40.0,<0.41.0", "colorama>=0.3.9", "PyYAML>=6.0" ] diff --git a/src/vcspull/cli/__init__.py b/src/vcspull/cli/__init__.py index 2f0633436..6cf53da44 100644 --- a/src/vcspull/cli/__init__.py +++ b/src/vcspull/cli/__init__.py @@ -440,7 +440,14 @@ def cli(_args: list[str] | None = None) -> None: ) = subparsers args = parser.parse_args(_args) - setup_logger(log=log, level=args.log_level.upper()) + # ``args.verbosity`` is only set by the sync subcommand; default 0 + # everywhere else. The sync ``-v`` ladder (0 → libvcs WARNING; 1 → INFO; + # 2+ → DEBUG) is documented in :func:`vcspull.log.setup_logger`. + setup_logger( + log=log, + level=args.log_level.upper(), + verbosity=getattr(args, "verbosity", 0), + ) if args.subparser_name is None: parser.print_help() @@ -466,6 +473,10 @@ def cli(_args: list[str] | None = None) -> None: sync_all=getattr(args, "sync_all", False), parser=sync_parser, include_worktrees=getattr(args, "include_worktrees", False), + timeout=getattr(args, "timeout", None), + log_file=getattr(args, "log_file", None), + no_log_file=getattr(args, "no_log_file", False), + panel_lines=getattr(args, "panel_lines", None), ) elif args.subparser_name == "list": list_repos( diff --git a/src/vcspull/cli/_progress.py b/src/vcspull/cli/_progress.py new file mode 100644 index 000000000..555ac23ae --- /dev/null +++ b/src/vcspull/cli/_progress.py @@ -0,0 +1,628 @@ +"""Live status indicator for ``vcspull sync``. + +Shows the user which repository is currently being synced and how long it has +been running. 
In a TTY a single-line braille spinner refreshes every ~100 ms; +elsewhere (pipes, CI logs) a once-on-start line is followed by periodic +"still syncing" heartbeats so the output stream keeps moving without flooding +the log. + +Inspired by ``tmuxp``'s spinner module -- stdlib + ANSI only, no ``rich`` +dependency. +""" + +from __future__ import annotations + +import atexit +import collections +import io +import itertools +import logging +import sys +import threading +import time +import typing as t + +from ._colors import ColorMode, Colors, get_color_mode + +log = logging.getLogger(__name__) + + +# ANSI escape sequences +_HIDE_CURSOR = "\033[?25l" +_SHOW_CURSOR = "\033[?25h" +_ERASE_LINE = "\033[2K" +_CURSOR_TO_COL0 = "\r" +#: Synchronized-output bracket -- modern terminals (kitty, iTerm2, WezTerm, +#: recent xterm) buffer everything between these markers and flip to the +#: new state atomically. Terminals that don't recognise the sequence ignore +#: it, so this is a safe belt-and-braces against mid-frame tearing when the +#: spinner redraws while ``progress_cb`` writes from another thread. +_SYNC_START = "\x1b[?2026h" +_SYNC_END = "\x1b[?2026l" + +#: Braille spinner frames -- same glyphs tmuxp uses; modern terminals +#: render them crisply and the rotation reads cleaner than the +#: ASCII ``|/-\`` set. On exotic terminals that can't render Unicode the +#: cells fall back to their Braille block without breaking output. +_SPINNER_FRAMES = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" + +#: Default panel height for the live trail above the spinner. Same value +#: tmuxp uses for ``before_script`` script output (see +#: ``~/work/python/tmuxp/src/tmuxp/cli/_progress.py:42``). 3 rows is enough +#: to give the user "what just happened" + a couple of lines of context +#: without dominating the screen. ``0`` hides the panel entirely. +_DEFAULT_OUTPUT_LINES = 3 + +#: How often to refresh the spinner line in the TTY path. 
+_TTY_REFRESH_INTERVAL = 0.1 + +#: How often to emit a "still syncing" heartbeat line in the non-TTY path. +_HEARTBEAT_INTERVAL = 30.0 + +# Track indicators that have hidden the cursor so we can restore it on atexit +# even if the interpreter crashes mid-sync and no ``finally`` block fires. +_ACTIVE_INDICATORS: set[SyncStatusIndicator] = set() + + +def _restore_cursors_on_exit() -> None: + """Restore the cursor for every indicator that still has it hidden.""" + for indicator in list(_ACTIVE_INDICATORS): + # atexit handlers that raise are swallowed by the interpreter, so we + # log-and-swallow here to leave a breadcrumb for debugging without + # masking other shutdown tasks. + _close_indicator_quietly(indicator) + + +def _close_indicator_quietly(indicator: SyncStatusIndicator) -> None: + try: + indicator.close() + except Exception as exc_obj: + log.debug("Error restoring spinner cursor: %s", exc_obj) + + +atexit.register(_restore_cursors_on_exit) + + +class SyncStatusIndicator: + """Owns the "which repo is running now" UI for a sync session. + + Exactly one repo is considered "active" at a time -- this matches the + current sequential sync loop. Callers drive the indicator with + :meth:`start_repo` / :meth:`stop_repo`, or via the context manager + returned by :meth:`repo`. + """ + + def __init__( + self, + *, + enabled: bool = True, + stream: t.TextIO | None = None, + tty: bool | None = None, + colors: Colors | None = None, + output_lines: int = _DEFAULT_OUTPUT_LINES, + ) -> None: + self._stream = stream if stream is not None else sys.stdout + # Respect the explicit tty override (tests, ``--color=never``) but + # default to whatever the stream actually reports. + self._tty = ( + tty + if tty is not None + else bool(getattr(self._stream, "isatty", lambda: False)()) + ) + # A disabled indicator is a no-op -- it still honours the public API so + # callers don't need if/else ladders around every sync. 
+ self._enabled = enabled + # Default to a NEVER-colour palette so a caller that builds an + # indicator without wiring in the shared ``Colors`` still gets sane + # plain-text output -- nothing worse than ANSI codes leaking into + # captured streams in tests. + self._colors = colors if colors is not None else Colors(ColorMode.NEVER) + + # Live-trail panel above the spinner. ``0`` disables the panel and + # ``add_output_line`` falls back to plain ``write()`` semantics. + # ``-1`` means unbounded -- rare; useful when piping ``-vv`` output + # through a pager. + self._output_lines = output_lines + if output_lines > 0: + self._panel_buffer: collections.deque[str] = collections.deque( + maxlen=output_lines, + ) + elif output_lines < 0: + self._panel_buffer = collections.deque() + else: + # Zero-capacity sentinel so ``maxlen == 0`` is the disabled signal. + self._panel_buffer = collections.deque(maxlen=0) + # Number of panel rows physically rendered last tick. The next + # render walks back this many lines (plus one for the spinner) to + # erase the previous frame. + self._panel_visible_lines = 0 + + self._lock = threading.Lock() + self._thread: threading.Thread | None = None + self._stop_event = threading.Event() + self._active_repo: str | None = None + self._repo_started_at: float | None = None + self._last_heartbeat_at: float | None = None + self._last_line_len = 0 + self._cursor_hidden = False + + # ------------------------------------------------------------------ + # Public API + # ------------------------------------------------------------------ + + def start_repo(self, name: str) -> None: + """Mark ``name`` as the currently-running repository. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=False + ... 
) + >>> indicator.start_repo("clap") + >>> indicator._active_repo + 'clap' + >>> indicator.close() + """ + if not self._enabled: + return + + with self._lock: + self._active_repo = name + now = time.monotonic() + self._repo_started_at = now + self._last_heartbeat_at = now + # Fresh trail per repo: drop any leftover panel lines from + # the previous repository so the user only sees activity for + # the active one. + self._panel_buffer.clear() + + if self._tty: + self._ensure_tty_thread() + else: + # Emit a single start line so even log-collecting CI shows which + # repo is in flight. Capital ``Syncing`` matches the permanent + # ``Synced``/``Timed out`` leading-cap pattern. + self._emit_line(f"Syncing {name}") + + def stop_repo(self, final_line: str | None = None) -> bool: + """Stop the active-repo indicator and collapse the panel. + + When ``final_line`` is provided AND the indicator is actively + rendering on a TTY, write ``final_line`` *as the spinner-erase* + in one atomic ANSI write (under the same lock the render loop + holds). The spinner row morphs in place into the permanent + line; panel rows above are erased; cursor advances to the row + below. Returns ``True`` when the line was written by the + indicator, so the caller can skip its own ``formatter.emit_text`` + and avoid the double-print artefact reporters have seen. + + Returns ``False`` when: + - the indicator is disabled (``--color=never``, JSON output); + - we're running headless (non-TTY pipe / CI / capsys); + - no ``final_line`` was provided (caller will emit_text itself). + + In every False case the caller is responsible for printing its + own permanent line through the formatter as before. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=False + ... 
) + >>> indicator.start_repo("clap") + >>> indicator.stop_repo() + False + >>> indicator.stop_repo(final_line="ignored on non-TTY") + False + >>> indicator.close() + """ + if not self._enabled: + return False + + with self._lock: + self._active_repo = None + self._repo_started_at = None + self._last_heartbeat_at = None + panel_visible = self._panel_visible_lines + had_render = self._last_line_len > 0 or panel_visible > 0 + self._panel_buffer.clear() + + if not self._tty: + # Headless: nothing was drawn to erase, and the caller + # still needs to surface ``final_line`` themselves. + self._panel_visible_lines = 0 + self._last_line_len = 0 + return False + + # Build the atomic clear (and optional replacement) under the + # lock so a concurrent spinner tick can't squeeze a stale + # frame in. Layout: + # + # + # + #
# physically remove panel rows + # # spinner row replaced + # + # DL (``\x1b[Pn M``) deletes lines and scrolls subsequent + # rows up to fill, so the spinner row's content lands at the + # cursor row (where the first panel row used to be) and we + # erase+replace it with ``final_line``. The earlier + # ``\x1b[2K\n`` per-panel-row pattern only erased *content*; + # the rows stayed in scrollback as blanks, leaving visible + # gaps between successive ``✓ Synced ...`` lines. + parts: list[str] = [_SYNC_START] + if had_render: + if panel_visible > 0: + parts.append(f"\x1b[{panel_visible}A") + parts.append(_CURSOR_TO_COL0) + parts.append(f"\x1b[{panel_visible}M") + else: + parts.append(_CURSOR_TO_COL0) + if final_line is not None: + parts.append(_ERASE_LINE + final_line + "\n") + else: + parts.append(_ERASE_LINE) + parts.append(_SYNC_END) + try: + self._stream.write("".join(parts)) + self._stream.flush() + self._last_line_len = 0 + self._panel_visible_lines = 0 + except (OSError, ValueError): + return False + + return final_line is not None and had_render + + def add_output_line(self, text: str) -> None: + r"""Push streamed subprocess output into the live trail panel. + + ``text`` may contain multiple newline-separated lines (libvcs's + progress callback delivers chunks, not whole lines). We split, + drop blank-only fragments, and append each non-blank line to the + bounded deque. The spinner thread redraws on its own cadence, so + a chatty subprocess doesn't pace itself against the terminal. + + When the panel is disabled (``output_lines=0``) or the indicator + is disabled (non-TTY, JSON output, ``--color=never``), fall back + to :meth:`write` so the bytes still appear -- just without the + in-place rewriting. + + Examples + -------- + >>> import io + >>> from vcspull.cli._progress import SyncStatusIndicator + >>> indicator = SyncStatusIndicator( + ... enabled=True, stream=io.StringIO(), tty=True, output_lines=2 + ... 
) + >>> indicator.add_output_line("From github.com/foo/bar\n") + >>> indicator.add_output_line(" abc..def main -> origin/main\n") + >>> list(indicator._panel_buffer) + ['From github.com/foo/bar', ' abc..def main -> origin/main'] + >>> indicator.close() + """ + if not text: + return + # The panel only makes sense in a TTY where the spinner thread + # actually renders the deque. When the indicator is disabled + # (JSON / NDJSON / colour=never), or running headless (pipe, CI, + # capsys), or when the panel is explicitly hidden + # (``output_lines=0``), fall through to the plain ``write()`` + # path so the bytes still reach the user / test capture. + if not self._enabled or not self._tty or self._panel_buffer.maxlen == 0: + payload = text if text.endswith("\n") else text + "\n" + self.write(payload) + return + # Whitespace-only chunks (e.g. a stray ``\n`` between progress + # frames) contribute nothing to the deque -- the inner loop's + # ``if stripped`` would skip every fragment. Returning here saves + # the lock acquire + ``splitlines`` call on what is a relatively + # common pattern from libvcs's progress callback. + if not text.strip(): + return + with self._lock: + for line in text.splitlines(): + stripped = line.rstrip() + if stripped: + self._panel_buffer.append(stripped) + + def repo(self, name: str) -> _RepoContext: + """Context manager form of :meth:`start_repo` / :meth:`stop_repo`.""" + return _RepoContext(self, name) + + @property + def enabled(self) -> bool: + """Whether the indicator is drawing (TTY + human output + colour).""" + return self._enabled + + def write(self, text: str) -> None: + """Emit ``text`` without clobbering (or being clobbered by) the spinner. + + When the spinner's TTY loop has drawn a frame, the cursor sits at the + end of that line and a raw write from another thread would either + tack onto the spinner line or race the next redraw. 
We hold the same + lock the redraw loop does, clear any in-flight line, write, then let + the next tick redraw cleanly. + + Used by the sync loop to route libvcs log output and libvcs's + progress callback through a single coordinated channel. + """ + if not text: + return + with self._lock: + try: + if self._tty and (self._last_line_len or self._panel_visible_lines): + # Walk back over the panel + spinner before printing + # so the log record lands on a fresh row instead of + # over-writing the trail. The next render redraws the + # whole frame from scratch. + self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + for _ in range(self._panel_visible_lines): + self._stream.write("\x1b[1A" + _ERASE_LINE) + self._stream.write(text) + self._stream.flush() + self._last_line_len = 0 + self._panel_visible_lines = 0 + except (OSError, ValueError): + pass + + def close(self) -> None: + """Stop the background thread and release the TTY cursor. + + Safe to call multiple times; calls after the first are no-ops. Always + invoked on interpreter shutdown via :mod:`atexit`. + """ + if not self._enabled: + return + + self._stop_event.set() + thread = self._thread + self._thread = None + if thread is not None: + thread.join(timeout=1.0) + # Erase the whole frame (panel + spinner) on close, not just the + # spinner row -- otherwise leftover panel rows linger in + # scrollback when the indicator is closed mid-repo (e.g. on + # KeyboardInterrupt before ``stop_repo``). 
+ self._clear_block() + if self._cursor_hidden: + try: + self._stream.write(_SHOW_CURSOR) + self._stream.flush() + except (OSError, ValueError): + pass + self._cursor_hidden = False + _ACTIVE_INDICATORS.discard(self) + + # ------------------------------------------------------------------ + # Internals + # ------------------------------------------------------------------ + + def _ensure_tty_thread(self) -> None: + if self._thread is not None and self._thread.is_alive(): + return + + if not self._cursor_hidden: + try: + self._stream.write(_HIDE_CURSOR) + self._stream.flush() + except (OSError, ValueError): + return + self._cursor_hidden = True + _ACTIVE_INDICATORS.add(self) + + self._stop_event.clear() + self._thread = threading.Thread( + target=self._tty_loop, + daemon=True, + name="vcspull-sync-indicator", + ) + self._thread.start() + + def _tty_loop(self) -> None: + frames = itertools.cycle(_SPINNER_FRAMES) + while not self._stop_event.is_set(): + frame = next(frames) + with self._lock: + name = self._active_repo + started = self._repo_started_at + if name is not None and started is not None: + elapsed = time.monotonic() - started + self._render_tty(frame, name, elapsed) + else: + self._clear_line() + self._stop_event.wait(_TTY_REFRESH_INTERVAL) + + def _render_tty(self, frame: str, name: str, elapsed: float) -> None: + # Colour the spinner cell AND the repo name so the in-flight line + # matches the visual rhythm of the permanent ``✓ Synced `` + # line emitted on completion (which uses ``colors.info(name)``). + # The ``Syncing`` verb stays capitalised for the same reason: on + # screen ``Syncing flume`` reads as a status badge alongside + # ``Synced fish-shell`` and ``Timed out codex`` -- consistent + # leading-cap on the status word. + coloured_frame = self._colors.info(frame) + coloured_name = self._colors.info(name) + visible = f"{frame} Syncing {name} ... {elapsed:4.1f}s" + line = f"{coloured_frame} Syncing {coloured_name} ... 
{elapsed:4.1f}s" + + # Hold the lock for the entire render -- snapshot through write -- + # so a concurrent ``stop_repo`` can't tear the frame mid-flight. + # Rich does the same (see ``rich/live.py`` ``_RefreshThread.run`` + # which acquires ``self.live._lock`` around every refresh and + # re-checks ``done.is_set()`` inside the lock). The race we're + # closing: spinner thread captures ``name`` outside the lock, then + # main thread enters ``stop_repo`` and clears the screen, then the + # spinner thread (still mid-build) writes a stale frame on top of + # the new state -- producing the duplicate ``✓ Synced `` + # artefact reporters have seen. + with self._lock: + # Re-check the active repo under the lock. If ``stop_repo`` + # already cleared it, this tick is stale; skip the write so + # the next tick starts from a clean state. + if self._active_repo != name: + return + panel_lines = list(self._panel_buffer) + cap = self._panel_buffer.maxlen + if cap and cap > 0 and len(panel_lines) > cap: + panel_lines = panel_lines[-cap:] + new_panel_height = len(panel_lines) + pad = max(self._last_line_len - len(visible), 0) + + # Build the frame as a single string so the synchronized-output + # bracket wraps the whole region atomically. Layout: + # + # + # \n + # \n + # ... + # (no trailing newline; cursor stays) + parts: list[str] = [_SYNC_START] + if self._panel_visible_lines: + # Cursor sits at the spinner row; walk up to the first + # panel row of the previous frame. + parts.append(f"\x1b[{self._panel_visible_lines}A") + parts.append(_CURSOR_TO_COL0) + parts.extend(_ERASE_LINE + panel_line + "\n" for panel_line in panel_lines) + parts.append(_ERASE_LINE + line + (" " * pad)) + parts.append(_SYNC_END) + try: + self._stream.write("".join(parts)) + self._stream.flush() + # Track the *visible* column count (not the string length + # with ANSI codes), so the next frame's padding clears + # exactly the on-screen cells the previous frame occupied. 
+ self._last_line_len = len(visible) + self._panel_visible_lines = new_panel_height + except (OSError, ValueError): + pass + + def _emit_line(self, line: str) -> None: + try: + self._stream.write(line + "\n") + self._stream.flush() + except (OSError, ValueError): + pass + + def _clear_line(self) -> None: + """Erase only the spinner row (legacy; ``write()`` calls this).""" + if not self._tty or self._last_line_len == 0: + return + try: + self._stream.write(_CURSOR_TO_COL0 + _ERASE_LINE) + self._stream.flush() + except (OSError, ValueError): + pass + self._last_line_len = 0 + + def _clear_block(self) -> None: + r"""Delete the spinner row AND any rendered panel rows above it. + + Used by :meth:`stop_repo` (and :meth:`close`) to collapse the + live trail so the next ``formatter.emit_text("✓ Synced ...")`` + call lands on a clean row. We use DL (``\x1b[Pn M``) to *remove* + the rows from the buffer; an earlier implementation walked up + and only erased their content with ``\x1b[2K``, which left the + rows in scrollback as blanks and produced visible gaps between + successive permanent lines. + """ + if not self._tty: + return + total = self._panel_visible_lines + 1 # panel rows + spinner row + try: + if self._panel_visible_lines: + # Walk up to the first panel row before deleting so DL + # consumes the panel + spinner together. + self._stream.write(f"\x1b[{self._panel_visible_lines}A") + self._stream.write(f"{_CURSOR_TO_COL0}\x1b[{total}M") + self._stream.flush() + except (OSError, ValueError): + pass + self._last_line_len = 0 + self._panel_visible_lines = 0 + + def heartbeat(self) -> None: + """Emit a non-TTY heartbeat if enough time has passed. + + Called from the main sync loop whenever the caller wants to give the + user a "still working" signal (e.g. between repositories, or during + the watchdog poll). The TTY path ignores this because the spinner + refresh already provides the signal. 
+ """ + if not self._enabled or self._tty: + return + + with self._lock: + name = self._active_repo + started = self._repo_started_at + last = self._last_heartbeat_at + if name is None or started is None or last is None: + return + now = time.monotonic() + if now - last < _HEARTBEAT_INTERVAL: + return + + elapsed = now - started + self._emit_line(f"... still syncing {name} ({elapsed:.0f}s elapsed)") + with self._lock: + self._last_heartbeat_at = now + + +class _RepoContext: + """Context-manager helper returned by :meth:`SyncStatusIndicator.repo`.""" + + __slots__ = ("_indicator", "_name") + + def __init__(self, indicator: SyncStatusIndicator, name: str) -> None: + self._indicator = indicator + self._name = name + + def __enter__(self) -> _RepoContext: + self._indicator.start_repo(self._name) + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + tb: t.Any, + ) -> None: + self._indicator.stop_repo() + + +def build_indicator( + *, + human: bool, + color: str, + stream: t.TextIO | io.StringIO | None = None, + tty: bool | None = None, + colors: Colors | None = None, + output_lines: int = _DEFAULT_OUTPUT_LINES, +) -> SyncStatusIndicator: + """Return a ``SyncStatusIndicator`` configured for the current session. + + Disabled when output is non-human (JSON/NDJSON) or when colours are turned + off -- the latter implies the user wants quiet, machine-friendly output. + + ``output_lines`` controls the live-trail panel above the spinner: + ``3`` (default) shows the last 3 streamed lines; ``0`` hides the + panel entirely; ``-1`` is unbounded. + """ + enabled = human and color != "never" + # io.StringIO satisfies the TextIO protocol at runtime; tests use this to + # capture spinner output without wrestling with subclassing. 
+ concrete_stream: t.Any = stream + # Resolve the palette from the CLI ``--color`` string when the caller + # didn't hand us a shared ``Colors`` instance -- keeps ``build_indicator`` + # usable without threading colours through every call site. + resolved_colors = colors if colors is not None else Colors(get_color_mode(color)) + return SyncStatusIndicator( + enabled=enabled, + stream=concrete_stream, + tty=tty, + colors=resolved_colors, + output_lines=output_lines, + ) diff --git a/src/vcspull/cli/sync.py b/src/vcspull/cli/sync.py index fadf6e93d..aa38b898d 100644 --- a/src/vcspull/cli/sync.py +++ b/src/vcspull/cli/sync.py @@ -9,15 +9,18 @@ import os import pathlib import re +import shlex +import signal import subprocess import sys +import threading import typing as t from collections.abc import Callable from copy import deepcopy from dataclasses import dataclass from datetime import datetime from io import StringIO -from time import perf_counter +from time import monotonic, perf_counter from libvcs._internal.shortcuts import create_project from libvcs._internal.types import VCSLiteral @@ -34,6 +37,7 @@ sync_all_worktrees, ) from vcspull.config import expand_dir, filter_repos, find_config_files, load_configs +from vcspull.log import default_debug_log_path, setup_file_logger, teardown_file_logger from vcspull.types import ConfigDict from ._colors import Colors, get_color_mode @@ -47,6 +51,7 @@ PlanSummary, get_output_mode, ) +from ._progress import SyncStatusIndicator, build_indicator from ._workspaces import filter_by_workspace from .status import check_repo_status @@ -159,6 +164,28 @@ def clamp(n: int, _min: int, _max: int) -> int: NO_REPOS_FOR_TERM_MSG = 'No repo found in config(s) for "{name}"' +_FETCH_TIMEOUT_SECONDS = 120 + +#: Default wall-clock deadline for each repository in ``vcspull sync``. +#: +#: Kept aggressive on purpose -- a healthy fetch/pull against a warm remote is +#: almost always under 10 seconds. 
Anything over that is "suspect" and should +#: surface as an actionable timeout rather than a silent hang. Callers can +#: override via ``--timeout`` or the ``VCSPULL_SYNC_TIMEOUT_SECONDS`` env var. +_DEFAULT_REPO_TIMEOUT_SECONDS = 10 + + +def _get_no_prompt_env() -> dict[str, str]: + """Return an environment dict that prevents git from prompting on stdin. + + Built fresh per call rather than cached at module scope: a cache + snapshots ``os.environ`` at first access, which means + ``monkeypatch.setenv`` calls in tests after that point never reach + the subprocess. A dict copy on every fetch is cheap. + """ + return {**os.environ, "GIT_TERMINAL_PROMPT": "0"} + + def _maybe_fetch( repo_path: pathlib.Path, *, @@ -177,7 +204,11 @@ def _maybe_fetch( capture_output=True, text=True, check=False, + timeout=_FETCH_TIMEOUT_SECONDS, + env=_get_no_prompt_env(), ) + except subprocess.TimeoutExpired: + return False, f"git fetch timed out after {_FETCH_TIMEOUT_SECONDS}s" except FileNotFoundError: return False, "git executable not found" except OSError as exc: @@ -595,7 +626,11 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP action="count", dest="verbosity", default=0, - help="increase plan verbosity (-vv for maximum detail)", + help=( + "increase verbosity. -v opens libvcs INFO ('Updating to ...'); " + "-vv opens libvcs DEBUG (full per-repo trace) plus extra " + "dry-run plan detail. Default keeps libvcs at WARNING+." + ), ) parser.add_argument( "--all", @@ -610,6 +645,50 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP dest="include_worktrees", help="also sync configured worktrees for each repository", ) + parser.add_argument( + "--timeout", + dest="timeout", + type=_positive_int_arg, + default=None, + metavar="SECONDS", + help=( + "per-repository wall-clock deadline in seconds " + f"(default: {_DEFAULT_REPO_TIMEOUT_SECONDS}; env: " + "VCSPULL_SYNC_TIMEOUT_SECONDS). 
Repos that exceed the deadline " + "are skipped and the rest of the batch continues." + ), + ) + parser.add_argument( + "--panel-lines", + dest="panel_lines", + type=_panel_lines_arg, + default=None, + metavar="N", + help=( + "live-trail panel height for streaming subprocess output " + "above the spinner (default: 3; 0 hides; -1 unbounded; env: " + "VCSPULL_PROGRESS_LINES). The panel collapses when each repo " + "finishes, leaving only the permanent ✓ Synced line." + ), + ) + parser.add_argument( + "--log-file", + dest="log_file", + metavar="PATH", + default=None, + help=( + "write a debug log to PATH (default: $TMPDIR/vcspull/debug--" + ".log; $TMPDIR/vcspull-test/... under pytest). The path is " + "only printed to the terminal when a sync fails or times out, " + "matching npm/pnpm/yarn." + ), + ) + parser.add_argument( + "--no-log-file", + dest="no_log_file", + action="store_true", + help="disable the debug log file entirely", + ) try: import shtab @@ -620,6 +699,491 @@ def create_sync_subparser(parser: argparse.ArgumentParser) -> argparse.ArgumentP return parser +@dataclass +class _TimedOutRepo: + """Metadata about a repository that exceeded its per-repo timeout.""" + + name: str + path: str + workspace_root: str + duration: float + + +@dataclass +class _SyncOutcome: + """Result of attempting to sync a single repository.""" + + status: t.Literal["synced", "failed", "timed_out"] + captured_output: str | None = None + error: BaseException | None = None + duration: float = 0.0 + + +def _positive_int_arg(value: str) -> int: + """Validate ``--timeout`` accepts only positive integers. + + A timeout of zero or negative seconds is meaningless: the watchdog + would either return immediately (timed out) or never (logically + invalid). The earlier permissive ``_resolve_repo_timeout`` silently + fell back to the default on these inputs, which masked typos like + ``--timeout -10`` (intended ``10``) where the user assumed the + flag had taken effect. 
+ + Examples + -------- + >>> _positive_int_arg("60") + 60 + >>> _positive_int_arg("0") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be a positive integer (got 0) + >>> _positive_int_arg("-5") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be a positive integer (got -5) + >>> _positive_int_arg("abc") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --timeout must be an integer (got 'abc') + """ + try: + parsed = int(value) + except ValueError: + msg = f"--timeout must be an integer (got {value!r})" + raise argparse.ArgumentTypeError(msg) from None + if parsed <= 0: + msg = f"--timeout must be a positive integer (got {parsed})" + raise argparse.ArgumentTypeError(msg) + return parsed + + +def _resolve_repo_timeout(cli_timeout: int | None) -> int: + """Resolve the repo timeout from CLI flag / env var / built-in default. + + Programmatic callers passing non-positive values still fall back to + the env/default ladder; the CLI surface enforces positive-int + semantics via :func:`_positive_int_arg` at parse time. + + Examples + -------- + >>> import os + >>> _ = os.environ.pop("VCSPULL_SYNC_TIMEOUT_SECONDS", None) + >>> _resolve_repo_timeout(None) + 10 + >>> _resolve_repo_timeout(60) + 60 + >>> _resolve_repo_timeout(0) + 10 + >>> _resolve_repo_timeout(-5) + 10 + """ + if cli_timeout is not None and cli_timeout > 0: + return cli_timeout + env_value = os.environ.get("VCSPULL_SYNC_TIMEOUT_SECONDS") + if env_value: + try: + parsed = int(env_value) + except ValueError: + log.warning( + "Ignoring non-integer VCSPULL_SYNC_TIMEOUT_SECONDS=%s", + env_value, + ) + else: + if parsed > 0: + return parsed + return _DEFAULT_REPO_TIMEOUT_SECONDS + + +#: Mirrors the default in ``vcspull.cli._progress`` -- duplicated here so +#: the ``--help`` text can interpolate the real number without importing +#: the private ``_DEFAULT_OUTPUT_LINES`` symbol. 
+_DEFAULT_PANEL_LINES = 3 + + +def _panel_lines_arg(value: str) -> int: + """Validate ``--panel-lines`` accepts ``-1``, ``0``, or any positive int. + + The flag uses ``-1`` as the "unbounded panel" sentinel and ``0`` as + the "hide panel" sentinel. Any other negative value (``-2``, ``-7``, + …) is meaningless; ``argparse`` would silently coerce them to + "unbounded" via :func:`_resolve_panel_lines`'s permissive branch, + which masks user typos. Reject at parse time with a typer-style + ``ArgumentTypeError`` instead. + + Examples + -------- + >>> _panel_lines_arg("0") + 0 + >>> _panel_lines_arg("-1") + -1 + >>> _panel_lines_arg("5") + 5 + >>> _panel_lines_arg("-2") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --panel-lines must be -1, 0, or positive (got -2) + >>> _panel_lines_arg("abc") + Traceback (most recent call last): + ... + argparse.ArgumentTypeError: --panel-lines must be an integer (got 'abc') + """ + try: + parsed = int(value) + except ValueError: + msg = f"--panel-lines must be an integer (got {value!r})" + raise argparse.ArgumentTypeError(msg) from None + if parsed < -1: + msg = f"--panel-lines must be -1, 0, or positive (got {parsed})" + raise argparse.ArgumentTypeError(msg) + return parsed + + +def _resolve_panel_lines(cli_value: int | None) -> int: + """Resolve the live-trail panel height: CLI flag > env > default. + + The flag accepts ``0`` (hide panel) and ``-1`` (unbounded), so we can't + short-circuit on ``> 0`` like :func:`_resolve_repo_timeout`. We do + accept any integer the user passes via the CLI; only the env var + override is validated, with a warning on garbage values. 
+ + Examples + -------- + >>> import os + >>> _ = os.environ.pop("VCSPULL_PROGRESS_LINES", None) + >>> _resolve_panel_lines(None) + 3 + >>> _resolve_panel_lines(0) + 0 + >>> _resolve_panel_lines(-1) + -1 + >>> _resolve_panel_lines(5) + 5 + """ + if cli_value is not None: + return cli_value + env_value = os.environ.get("VCSPULL_PROGRESS_LINES") + if env_value: + try: + return int(env_value) + except ValueError: + log.warning( + "Ignoring non-integer VCSPULL_PROGRESS_LINES=%s", + env_value, + ) + return _DEFAULT_PANEL_LINES + + +class _SyncInterruptedAfterSummary(KeyboardInterrupt): + """Internal marker: ``_sync_impl`` already emitted the interrupt summary. + + Lets the outer ``sync()`` handler skip the plain-stderr fallback when the + inner layer has already printed a coloured partial summary through the + formatter. Subclassing ``KeyboardInterrupt`` keeps the exception in the + same taxonomy for the outer ``except KeyboardInterrupt`` catch. + + Invariant: the only ``except KeyboardInterrupt`` between the inner + raise and process exit is :func:`sync`'s own handler, which + dispatches on ``isinstance(err, _SyncInterruptedAfterSummary)``. A + future broader catch higher up the stack must either re-raise the + marker unchanged or perform the same isinstance check -- otherwise + the inner partial-summary print AND the outer plain-stderr fallback + both fire, restoring the double-print artefact this marker exists + to suppress. + """ + + +def _exit_on_sigint() -> t.NoReturn: + """Terminate so the parent shell sees ``WIFSIGNALED(SIGINT)``. + + Interactive shells (bash, zsh) abort ``cmd1; cmd2`` sequential lists + only when the child was killed by a signal -- a clean + ``SystemExit(130)`` leaves the shell no reason to stop. Match git's + pattern (``sigchain.h:20-34``; ``builtin/clone.c:416``): cleanup first, + install ``SIG_DFL``, self-deliver SIGINT so the kernel reports + ``WIFSIGNALED`` to the parent. + + Callers MUST complete cleanup before invoking this. 
def _exit_on_sigint() -> t.NoReturn:
    """Terminate so the parent shell sees ``WIFSIGNALED(SIGINT)``.

    Interactive shells (bash, zsh) abort ``cmd1; cmd2`` sequential lists
    only when the child was killed by a signal -- a clean
    ``SystemExit(130)`` leaves the shell no reason to stop. Match git's
    pattern (``sigchain.h:20-34``; ``builtin/clone.c:416``): cleanup first,
    install ``SIG_DFL``, self-deliver SIGINT so the kernel reports
    ``WIFSIGNALED`` to the parent.

    Callers MUST complete cleanup before invoking this. Python ``atexit``
    hooks and ``finally`` blocks do NOT run once ``SIG_DFL`` terminates the
    process at the C level.

    Windows has no ``WIFSIGNALED`` analogue and its ``raise_signal(SIGINT)``
    under ``SIG_DFL`` raises ``KeyboardInterrupt`` back at the caller
    instead of exiting -- fall back to the conventional ``SystemExit(130)``.

    Precondition: must be called from the main thread of the main
    interpreter. ``signal.signal`` raises ``ValueError`` otherwise. The CLI
    dispatcher at ``vcspull/cli/__init__.py`` always invokes ``sync()`` on
    the main thread, so this is a callsite contract rather than a runtime
    check -- a silent non-main-thread fallback would only kill that thread,
    not the process, and bash would still see a clean exit and continue
    the ``;`` chain, defeating the whole change.

    Raises
    ------
    SystemExit
        On Windows (exit code 130), or as defence-in-depth on POSIX should
        the self-delivered SIGINT somehow fail to terminate the process.
    """
    if sys.platform == "win32":
        raise SystemExit(130)
    # The two-line swap here is deliberate: a SIGINT delivered during the
    # gap either hits SIG_IGN (dropped) or SIG_DFL (terminates) -- both
    # outcomes let bash see WIFSIGNALED(SIGINT), so we don't need the
    # heavier ``pthread_sigmask`` route.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.raise_signal(signal.SIGINT)
    # raise_signal does not return under SIG_DFL on POSIX; the SystemExit
    # is defence-in-depth only, satisfying ``NoReturn`` and covering exotic
    # kernels where the default disposition doesn't terminate.
    raise SystemExit(130)  # pragma: no cover
+ """ + + def __init__(self, indicator: SyncStatusIndicator) -> None: + self._indicator = indicator + + def write(self, text: str) -> int: + self._indicator.write(text) + return len(text) + + def flush(self) -> None: + # ``SyncStatusIndicator.write`` flushes on every call; nothing extra + # to do here, but ``logging.StreamHandler.emit`` calls ``flush()`` + # unconditionally so we must accept the no-op. + return + + +def _install_indicator_log_diverter( + indicator: SyncStatusIndicator, +) -> Callable[[], None]: + """Rebind the ``libvcs`` / ``vcspull`` stream handlers through ``indicator``. + + Returns a callable that restores the original streams. Only touches + handlers that are :class:`logging.StreamHandler` but not + :class:`logging.FileHandler` -- the debug log file keeps writing + straight to disk regardless of the spinner. + + Both the install and the restore swap ``handler.stream`` while + holding ``handler.lock`` -- the same lock ``Handler.handle`` (stdlib + ``logging/__init__.py:1011``) takes around every ``emit()``. Direct + ``handler.stream = X`` without the lock technically violates that + contract even if CPython's GIL hides the race in practice. + + We don't go through :meth:`logging.StreamHandler.setStream` because + its first action is to ``flush()`` the old stream. Under pytest, the + handler may already point at a finalized ``capsys``/``CaptureIO`` + from a prior test (``setup_logger`` set ``handler.stream = sys.stdout`` + when stdout *was* the live capture); flushing a closed file there + raises ``ValueError``. Skipping the flush is safe -- we are not + *closing* the stream, only redirecting future writes. + + On top of the stream swap we also raise the *libvcs* StreamHandler + level above ``CRITICAL`` for the duration of the sync, but only when + the user kept the default verbosity (handler at WARNING). 
def _install_indicator_log_diverter(
    indicator: SyncStatusIndicator,
) -> Callable[[], None]:
    """Route the ``libvcs`` / ``vcspull`` StreamHandlers through *indicator*.

    Returns a zero-argument callable that restores the original streams
    and handler levels. Only plain :class:`logging.StreamHandler`
    instances are touched; :class:`logging.FileHandler` (the debug log)
    keeps writing straight to disk regardless of the spinner.

    Every stream swap -- install and restore -- happens while holding
    ``handler.lock`` via ``acquire()``/``release()``, the same lock
    ``Handler.handle`` takes around each ``emit()``; a bare
    ``handler.stream = X`` would violate that contract even where
    CPython's GIL hides the race. :meth:`logging.StreamHandler.setStream`
    is deliberately avoided because it flushes the old stream first:
    under pytest the handler may still point at a finalized
    ``capsys``/``CaptureIO`` object from a prior test, and flushing a
    closed file raises ``ValueError``. Skipping the flush is safe -- the
    stream is redirected, not closed.

    Additionally, while the spinner runs, any *libvcs* StreamHandler
    still at its default WARNING level is raised above ``CRITICAL`` so
    libvcs's warning crumbs don't duplicate vcspull's own ``✗ Failed``
    line and break the ``✓ / ✗ / -`` rhythm. ``-v`` / ``-vv`` users
    opted into INFO / DEBUG and keep their explicit level; the debug-log
    FileHandler keeps DEBUG for post-mortems.
    """
    proxy = _IndicatorStreamProxy(indicator)
    swapped: list[tuple[logging.StreamHandler[t.Any], t.Any]] = []
    muted: list[tuple[logging.StreamHandler[t.Any], int]] = []
    for logger_name in ("libvcs", "vcspull"):
        for handler in logging.getLogger(logger_name).handlers:
            if not isinstance(handler, logging.StreamHandler):
                continue
            if isinstance(handler, logging.FileHandler):
                continue
            handler.acquire()
            try:
                swapped.append((handler, handler.stream))
                handler.stream = proxy
            finally:
                handler.release()
            if logger_name == "libvcs" and handler.level == logging.WARNING:
                muted.append((handler, handler.level))
                # One above CRITICAL: no standard record can reach the
                # handler, so libvcs WARNING/ERROR/CRITICAL noise is kept
                # off the terminal stream for the duration of the sync.
                handler.setLevel(logging.CRITICAL + 1)

    def _restore() -> None:
        for handler, original_stream in swapped:
            handler.acquire()
            try:
                handler.stream = original_stream
            finally:
                handler.release()
        for handler, original_level in muted:
            handler.setLevel(original_level)

    return _restore
def _sync_repo_with_watchdog(
    repo: ConfigDict,
    *,
    progress_callback: ProgressCallback,
    timeout: int,
    is_human: bool,
) -> _SyncOutcome:
    """Run :func:`update_repo` under a wall-clock watchdog.

    The libvcs call runs on a daemon :class:`threading.Thread`; the main
    thread uses a completion :class:`threading.Event` as its deadline. Raw
    threads are deliberate -- :class:`concurrent.futures.ThreadPoolExecutor`
    registers its workers in ``concurrent.futures.thread._threads_queues``,
    whose ``atexit`` hook ``_python_exit`` joins every worker on interpreter
    shutdown. If the user hits Ctrl-C while a libvcs subprocess is wedged,
    that join hangs the process forever. Daemon threads skip the join
    entirely: they're forcibly terminated at shutdown.

    Parameters
    ----------
    repo : ConfigDict
        Repository configuration passed through to :func:`update_repo`.
    progress_callback : ProgressCallback
        Streaming-output callback forwarded to libvcs.
    timeout : int
        Wall-clock seconds to wait before declaring the repo timed out.
    is_human : bool
        When False (JSON/NDJSON modes), stdout/stderr of the sync are
        captured into a buffer and returned in the outcome.

    Returns
    -------
    _SyncOutcome
        ``synced`` / ``failed`` / ``timed_out`` plus captured output,
        the worker's exception (for ``failed``), and elapsed seconds.
    """
    buffer: StringIO | None = None if is_human else StringIO()
    done = threading.Event()
    worker_error: list[BaseException] = []

    def _run() -> None:
        try:
            if buffer is None:
                update_repo(repo, progress_callback=progress_callback)
                return
            # Non-human output modes capture everything so the NDJSON/JSON
            # payload contains the per-repo details without polluting stdout.
            with (
                contextlib.redirect_stdout(buffer),
                contextlib.redirect_stderr(buffer),
            ):
                update_repo(repo, progress_callback=progress_callback)
        except BaseException as exc_obj:
            # Keep ``BaseException`` here so a worker-side KeyboardInterrupt
            # (rare but possible via ``PyThreadState_SetAsyncExc``) is still
            # reported back up. The main thread decides how to handle it.
            worker_error.append(exc_obj)
        finally:
            done.set()

    started = monotonic()
    worker = threading.Thread(
        target=_run,
        name=f"vcspull-sync-{repo.get('name', 'repo')}",
        daemon=True,
    )
    worker.start()

    finished = done.wait(timeout=timeout)
    if not finished:
        # The worker is still busy; abandon it. Because it's a daemon it'll
        # die with the interpreter or when libvcs's subprocess finally exits.
        return _SyncOutcome(
            status="timed_out",
            captured_output=buffer.getvalue() if buffer else None,
            duration=monotonic() - started,
        )

    if worker_error:
        err = worker_error[0]
        if isinstance(err, Exception):
            return _SyncOutcome(
                status="failed",
                captured_output=buffer.getvalue() if buffer else None,
                error=err,
                duration=monotonic() - started,
            )
        # ``BaseException`` that is not ``Exception`` (KeyboardInterrupt,
        # SystemExit): propagate -- main thread will tear the batch down.
        raise err

    return _SyncOutcome(
        status="synced",
        captured_output=buffer.getvalue() if buffer else None,
        duration=monotonic() - started,
    )
def _emit_rerun_recipe(
    formatter: OutputFormatter,
    colors: Colors,
    *,
    timed_out_repos: list[_TimedOutRepo],
    timeout: int,
) -> None:
    """Print a copyable recipe for rerunning just the timed-out repositories.

    Modelled on the way ``cargo``/``npm`` surface actionable next steps after
    a failure. The suggested timeout is ``max(120, timeout * 10)`` so a user
    who kept the aggressive default still gets a meaningful headroom when
    they retry.

    Parameters
    ----------
    formatter : OutputFormatter
        Sink for the human-readable recipe lines.
    colors : Colors
        Palette used to highlight names, paths, and arrows.
    timed_out_repos : list[_TimedOutRepo]
        Repositories that exceeded the deadline; no-op when empty.
    timeout : int
        The per-repo timeout (seconds) that was in effect for this run.
    """
    if not timed_out_repos:
        return

    suggested_timeout = max(120, timeout * 10)

    # Group by workspace so the user can paste one line per workspace.
    grouped: dict[str, list[_TimedOutRepo]] = {}
    for repo in timed_out_repos:
        grouped.setdefault(repo.workspace_root, []).append(repo)

    formatter.emit_text("")
    formatter.emit_text(
        f"{colors.warning('Timed out:')} "
        f"{colors.warning(str(len(timed_out_repos)))} "
        f"repositor{'y' if len(timed_out_repos) == 1 else 'ies'} "
        f"exceeded {timeout}s.",
    )
    for repo in timed_out_repos:
        display_path = str(PrivatePath(repo.path))
        formatter.emit_text(
            f"  {colors.warning('-')} {colors.info(repo.name):<32} "
            f"{colors.muted('→')} {display_path} "
            f"({colors.muted(f'{repo.duration:.1f}s elapsed')})",
        )

    formatter.emit_text("")
    formatter.emit_text(
        f"{colors.info('→')} Rerun just the affected repositories with a "
        f"larger timeout:",
    )
    for workspace_root, repos in grouped.items():
        # shlex.quote keeps names/paths with spaces copy-pasteable as-is.
        names = " ".join(shlex.quote(r.name) for r in repos)
        workspace_arg = f"--workspace {shlex.quote(str(PrivatePath(workspace_root)))}"
        formatter.emit_text(
            f"  vcspull sync {workspace_arg} --timeout {suggested_timeout} {names}",
        )

    formatter.emit_text(
        f"{colors.info('→')} Or with verbose logging to diagnose the hang:",
    )
    for workspace_root, repos in grouped.items():
        names = " ".join(shlex.quote(r.name) for r in repos)
        workspace_arg = f"--workspace {shlex.quote(str(PrivatePath(workspace_root)))}"
        formatter.emit_text(
            f"  vcspull sync {workspace_arg} "
            f"--timeout {suggested_timeout} -vv {names}",
        )

    formatter.emit_text(
        f"{colors.info('→')} Probe each repository manually "
        f"({colors.muted('these should return in under 10s')}):",
    )
    for repo in timed_out_repos:
        display_path = str(PrivatePath(repo.path))
        formatter.emit_text(
            f"  GIT_TERMINAL_PROMPT=0 timeout 10 "
            f"git -C {shlex.quote(display_path)} fetch --dry-run",
        )
include_worktrees: bool = False, + timeout: int | None = None, + log_file: str | pathlib.Path | None = None, + no_log_file: bool = False, + panel_lines: int | None = None, ) -> None: """Entry point for ``vcspull sync``.""" + # Prevent git from blocking on credential prompts during batch sync + os.environ.setdefault("GIT_TERMINAL_PROMPT", "0") + + repo_timeout = _resolve_repo_timeout(timeout) + resolved_panel_lines = _resolve_panel_lines(panel_lines) + + # Set up the debug log file early so libvcs's per-repo activity is also + # captured. The path is surfaced to the user only when something failed + # (or on an explicit path override), matching npm/pnpm/yarn UX. + log_file_path: pathlib.Path | None = None + log_file_handler: logging.FileHandler | None = None + if not no_log_file: + log_file_path = ( + pathlib.Path(log_file).expanduser() + if log_file is not None + else default_debug_log_path() + ) + try: + log_file_handler = setup_file_logger(log_file_path) + except OSError as exc_obj: + log.warning( + "Could not open debug log at %s: %s", + log_file_path, + exc_obj, + ) + log_file_path = None + log_file_handler = None + + try: + _sync_impl( + repo_patterns=repo_patterns, + config=config, + workspace_root=workspace_root, + output_json=output_json, + output_ndjson=output_ndjson, + color=color, + exit_on_error=exit_on_error, + show_unchanged=show_unchanged, + summary_only=summary_only, + long_view=long_view, + relative_paths=relative_paths, + fetch=fetch, + offline=offline, + verbosity=verbosity, + sync_all=sync_all, + parser=parser, + include_worktrees=include_worktrees, + repo_timeout=repo_timeout, + log_file_path=log_file_path, + dry_run=dry_run, + panel_lines=resolved_panel_lines, + ) + except KeyboardInterrupt as err: + # Catch Ctrl-C from ANY phase of the sync -- the repo loop (where + # ``_sync_impl`` handles it with a partial summary and re-raises + # ``_SyncInterruptedAfterSummary``) and also long pre-loop work + # like ``load_configs`` (YAML parse on big 
vcspull configs) and + # post-loop emission, where the formatter may not be built yet. + # The plain-stderr fallback below is the only safe surface in the + # pre-loop case; skip it when the inner layer already printed a + # coloured summary through the formatter. + if not isinstance(err, _SyncInterruptedAfterSummary): + sys.stderr.write("\nInterrupted by user.\n") + sys.stderr.flush() + if log_file_path is not None: + sys.stderr.write(f"Full debug log: {log_file_path}\n") + sys.stderr.flush() + # Python ``atexit`` and ``finally`` do NOT run once + # ``_exit_on_sigint`` terminates via ``SIG_DFL`` at the C level, so + # close the debug log inline. Nulling the handle makes the outer + # ``finally`` a no-op for the POSIX path AND a correct single-close + # for the Windows fallback path (where ``_exit_on_sigint`` does + # raise a regular ``SystemExit`` and the ``finally`` does run). + if log_file_handler is not None: + teardown_file_logger(log_file_handler) + log_file_handler = None + _exit_on_sigint() + finally: + if log_file_handler is not None: + teardown_file_logger(log_file_handler) + + +def _sync_impl( + *, + repo_patterns: list[str], + config: pathlib.Path | None, + workspace_root: str | None, + dry_run: bool, + output_json: bool, + output_ndjson: bool, + color: str, + exit_on_error: bool, + show_unchanged: bool, + summary_only: bool, + long_view: bool, + relative_paths: bool, + fetch: bool, + offline: bool, + verbosity: int, + sync_all: bool, + parser: argparse.ArgumentParser | None, + include_worktrees: bool, + repo_timeout: int, + log_file_path: pathlib.Path | None, + panel_lines: int, +) -> None: + """Run the core body of :func:`sync`. + + Kept separate so log-file teardown runs through ``finally`` regardless of + where the caller exits the sync. 
+ """ # Show help if no patterns and --all not specified if not repo_patterns and not sync_all: if parser is not None: @@ -695,7 +1374,7 @@ def sync( found = filter_repos(configs, path=path, vcs_url=vcs_url, name=name) if not found: search_term = name or path or vcs_url or repo_pattern - log.debug(NO_REPOS_FOR_TERM_MSG.format(name=search_term)) + log.debug('No repo found in config(s) for "%s"', search_term) if not summary_only: formatter.emit_text( f"{colors.error('✗')} " @@ -811,11 +1490,31 @@ def sync( "synced": 0, "previewed": 0, "failed": 0, + "timed_out": 0, "unmatched": unmatched_count, } + timed_out_repos: list[_TimedOutRepo] = [] + + indicator = build_indicator( + human=is_human, + color=color, + colors=colors, + output_lines=panel_lines, + ) progress_callback: ProgressCallback - if is_human: + if is_human and indicator.enabled: + # Route libvcs's streaming subprocess output into the indicator's + # live-trail panel: ``add_output_line`` appends to a bounded deque + # that the spinner thread renders above the spinner row, then + # collapses on ``stop_repo``. With panel_lines=0 the indicator + # falls back to plain ``write()`` semantics (no panel, no + # collapse). 
+ def _indicator_progress(output: str, timestamp: datetime) -> None: + indicator.add_output_line(output) + + progress_callback = _indicator_progress + elif is_human: progress_callback = progress_cb else: @@ -825,6 +1524,106 @@ def silent_progress(output: str, timestamp: datetime) -> None: progress_callback = silent_progress + restore_log_streams: Callable[[], None] | None = None + if indicator.enabled: + restore_log_streams = _install_indicator_log_diverter(indicator) + + interrupted = False + try: + _run_sync_loop( + found_repos=found_repos, + formatter=formatter, + colors=colors, + summary=summary, + timed_out_repos=timed_out_repos, + progress_callback=progress_callback, + is_human=is_human, + repo_timeout=repo_timeout, + exit_on_error=exit_on_error, + include_worktrees=include_worktrees, + dry_run=dry_run, + parser=parser, + log_file_path=log_file_path, + indicator=indicator, + ) + except KeyboardInterrupt: + # Ctrl-C during the loop: stop the indicator cleanly, print a + # partial summary via the formatter, then hand termination + # semantics to the outer ``sync()`` catch. We re-raise a typed + # marker so the outer layer doesn't double-print. + interrupted = True + finally: + if restore_log_streams is not None: + restore_log_streams() + indicator.close() + + if interrupted: + # Shield the summary emission against late-breaking ``OSError`` + # (e.g. ``BrokenPipeError`` when stdout is piped to ``head``) + # so the marker exception *always* propagates. If an OSError + # escaped here the outer layer would see a non-KeyboardInterrupt + # and bash would see a plain exit 1, letting the ``;`` chain + # continue -- exactly what this change is designed to prevent. 
+ try: + formatter.emit_text("") + formatter.emit_text(colors.warning("Interrupted by user.")) + _emit_summary(formatter, colors, summary) + if log_file_path is not None: + formatter.emit_text( + f"{colors.info('→')} Full debug log: " + f"{colors.muted(str(log_file_path))}", + ) + formatter.finalize() + except (OSError, ValueError): + pass + # The ``from None`` chain-suppresses the original ``KeyboardInterrupt`` + # so the marker reaches the outer ``except KeyboardInterrupt as err`` + # in :func:`sync` cleanly. See ``_SyncInterruptedAfterSummary`` for + # the no-double-print invariant this raise relies on. + raise _SyncInterruptedAfterSummary from None + + _emit_summary(formatter, colors, summary) + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out_repos, + timeout=repo_timeout, + ) + + # Surface the debug log path once, only when something went wrong, so the + # user has a single post-mortem trail regardless of the failure mode. + if summary.get("failed", 0) > 0 and log_file_path is not None: + formatter.emit_text( + f"{colors.info('→')} Full debug log: {colors.muted(str(log_file_path))}", + ) + + if exit_on_error and unmatched_count > 0: + formatter.finalize() + if parser is not None: + parser.exit(status=1, message=EXIT_ON_ERROR_MSG) + raise SystemExit(EXIT_ON_ERROR_MSG) + + formatter.finalize() + + +def _run_sync_loop( + *, + found_repos: list[ConfigDict], + formatter: OutputFormatter, + colors: Colors, + summary: dict[str, int], + timed_out_repos: list[_TimedOutRepo], + progress_callback: ProgressCallback, + is_human: bool, + repo_timeout: int, + exit_on_error: bool, + include_worktrees: bool, + dry_run: bool, + parser: argparse.ArgumentParser | None, + log_file_path: pathlib.Path | None, + indicator: SyncStatusIndicator, +) -> None: + """Iterate the repositories and drive the watchdog + indicator.""" for repo in found_repos: repo_name = repo.get("name", "unknown") repo_path = repo.get("path", "unknown") @@ -832,6 +1631,7 @@ def 
silent_progress(output: str, timestamp: datetime) -> None: display_repo_path = str(PrivatePath(repo_path)) summary["total"] += 1 + indicator.heartbeat() event: dict[str, t.Any] = { "reason": "sync", @@ -840,58 +1640,120 @@ def silent_progress(output: str, timestamp: datetime) -> None: "workspace_root": str(workspace_label), } - buffer: StringIO | None = None - captured_output: str | None = None + # Manual ``start_repo`` / ``stop_repo`` instead of the + # ``with indicator.repo(...)`` context manager: we want + # ``stop_repo`` to receive the permanent line so the spinner + # collapse + completion print happen as ONE atomic ANSI write + # under the lock. The ``with`` form fires ``stop_repo()`` (no + # args) on exit, before we know the outcome -- which means the + # spinner clears, then the formatter writes the permanent line + # in a separate stream call. That two-step is the source of the + # flicker reporters have called out. + indicator.start_repo(repo_name) try: - if is_human: - update_repo(repo, progress_callback=progress_callback) - else: - buffer = StringIO() - with ( - contextlib.redirect_stdout(buffer), - contextlib.redirect_stderr( - buffer, - ), - ): - update_repo(repo, progress_callback=progress_callback) - captured_output = buffer.getvalue() - except Exception as e: + outcome = _sync_repo_with_watchdog( + repo, + progress_callback=progress_callback, + timeout=repo_timeout, + is_human=is_human, + ) + except BaseException: + # Any exception (KeyboardInterrupt, runtime crash) tears the + # indicator down with no replacement line; the surrounding + # ``except KeyboardInterrupt`` in ``_sync_impl`` still owns + # the partial-summary print. 
+ indicator.stop_repo() + raise + + if outcome.status == "timed_out": + summary["timed_out"] += 1 summary["failed"] += 1 - event["status"] = "error" - event["error"] = str(e) - if not is_human and buffer is not None and not captured_output: - captured_output = buffer.getvalue() - if captured_output: - event["details"] = captured_output.strip() + timed_out_repos.append( + _TimedOutRepo( + name=repo_name, + path=str(repo_path), + workspace_root=str(workspace_label), + duration=outcome.duration, + ), + ) + event["status"] = "timed_out" + event["duration_ms"] = int(outcome.duration * 1000) + if outcome.captured_output: + event["details"] = outcome.captured_output.strip() + permanent = ( + f"{colors.warning('-')} Timed out {colors.info(repo_name)} " + f"after {colors.warning(f'{outcome.duration:.1f}s')} " + f"{colors.muted('→')} {display_repo_path}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, + ) formatter.emit(event) - if is_human: - log.debug( - "Failed syncing %s", - repo_name, + if not wrote_final: + formatter.emit_text(permanent) + if exit_on_error: + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out_repos, + timeout=repo_timeout, ) - if log.isEnabledFor(logging.DEBUG): - import traceback + _emit_summary(formatter, colors, summary) + if log_file_path is not None: + formatter.emit_text( + f"{colors.info('→')} Full debug log: " + f"{colors.muted(str(log_file_path))}", + ) + formatter.finalize() + if parser is not None: + parser.exit(status=1, message=EXIT_ON_ERROR_MSG) + raise SystemExit(EXIT_ON_ERROR_MSG) + continue - traceback.print_exc() - formatter.emit_text( + if outcome.status == "failed": + summary["failed"] += 1 + err = outcome.error + err_msg = str(err) if err is not None else "unknown error" + event["status"] = "error" + event["error"] = err_msg + if outcome.captured_output: + event["details"] = outcome.captured_output.strip() + permanent = ( f"{colors.error('✗')} Failed syncing 
{colors.info(repo_name)}: " - f"{colors.error(str(e))}", + f"{colors.error(err_msg)}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, ) + formatter.emit(event) + if is_human: + log.debug("Failed syncing %s", repo_name) + if log.isEnabledFor(logging.DEBUG) and err is not None: + import traceback + + traceback.print_exception(type(err), err, err.__traceback__) + if not wrote_final: + formatter.emit_text(permanent) if exit_on_error: _emit_summary(formatter, colors, summary) formatter.finalize() if parser is not None: parser.exit(status=1, message=EXIT_ON_ERROR_MSG) - raise SystemExit(EXIT_ON_ERROR_MSG) from e + raise SystemExit(EXIT_ON_ERROR_MSG) from err continue summary["synced"] += 1 event["status"] = "synced" - formatter.emit(event) - formatter.emit_text( + permanent = ( f"{colors.success('✓')} Synced {colors.info(repo_name)} " - f"{colors.muted('→')} {display_repo_path}", + f"{colors.muted('→')} {display_repo_path}" + ) + wrote_final = indicator.stop_repo( + final_line=permanent if is_human else None, ) + formatter.emit(event) + if not wrote_final: + formatter.emit_text(permanent) # Sync worktrees if enabled and configured worktrees_config = repo.get("worktrees") @@ -956,16 +1818,6 @@ def silent_progress(output: str, timestamp: datetime) -> None: parser.exit(status=1, message=EXIT_ON_ERROR_MSG) raise SystemExit(EXIT_ON_ERROR_MSG) - _emit_summary(formatter, colors, summary) - - if exit_on_error and unmatched_count > 0: - formatter.finalize() - if parser is not None: - parser.exit(status=1, message=EXIT_ON_ERROR_MSG) - raise SystemExit(EXIT_ON_ERROR_MSG) - - formatter.finalize() - def _emit_summary( formatter: OutputFormatter, @@ -977,12 +1829,17 @@ def _emit_summary( if formatter.mode == OutputMode.HUMAN: previewed = summary.get("previewed", 0) unmatched = summary.get("unmatched", 0) + timed_out = summary.get("timed_out", 0) parts = [ f"\n{colors.info('Summary:')} " f"{colors.info(str(summary['total']))} repos, " 
f"{colors.success(str(summary['synced']))} synced, " f"{colors.error(str(summary['failed']))} failed", ] + if timed_out > 0: + parts.append( + f", {colors.warning(str(timed_out))} timed out", + ) if previewed > 0: parts.append( f", {colors.warning(str(previewed))} previewed", diff --git a/src/vcspull/log.py b/src/vcspull/log.py index 7acdc8026..beb9b3c04 100644 --- a/src/vcspull/log.py +++ b/src/vcspull/log.py @@ -12,10 +12,14 @@ import contextlib import importlib import logging +import os +import pathlib import pkgutil import sys +import tempfile import time import typing as t +from datetime import datetime from functools import lru_cache from colorama import Fore, Style @@ -50,12 +54,43 @@ def get_cli_logger_names(include_self: bool = True) -> list[str]: return sorted(names) +def _libvcs_stream_level(verbosity: int) -> int: + """Return the libvcs StreamHandler level for a given ``-v`` count. + + Reporter expectation: at the default verbosity, libvcs's chatty + ``log.info("Updating to '%s'.")`` and ``log.debug("git_tag: ...")`` + crumb lines should NOT print on the terminal -- only WARNING+ does. + ``-v`` lifts that to INFO; ``-vv`` to DEBUG. The debug log file + (``setup_file_logger``) is always DEBUG regardless of this knob. + """ + if verbosity >= 2: + return logging.DEBUG + if verbosity >= 1: + return logging.INFO + return logging.WARNING + + def setup_logger( log: logging.Logger | None = None, level: t.Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "INFO", + verbosity: int = 0, ) -> None: - """Configure the vcspull logging hierarchy once and reuse it everywhere.""" + """Configure the vcspull logging hierarchy once and reuse it everywhere. + + The ``level`` argument governs the *vcspull* logger and its CLI + submodules; existing callers and ``--log-level`` users see no change. + + The ``verbosity`` argument (``-v`` count from the sync subcommand) drives + the *libvcs* StreamHandler level via :func:`_libvcs_stream_level`. 
+ Setting it as a *per-handler* filter -- not a logger-level filter -- is + deliberate: ``setup_file_logger`` later raises both loggers to DEBUG so + the debug file captures the full trace, and without per-handler levels + on the StreamHandlers that bump would also open the terminal floodgate. + See ``tests/test_log.py::test_setup_file_logger_does_not_open_stream_floodgate`` + for the regression guard. + """ resolved_level = getattr(logging, level.upper(), logging.INFO) + libvcs_stream_level = _libvcs_stream_level(verbosity) vcspull_logger = logging.getLogger("vcspull") @@ -96,6 +131,17 @@ def setup_logger( handler.stream = sys.stdout handler.setFormatter(formatter) + # Pin a per-handler level on every vcspull StreamHandler. This is what + # keeps ``setup_file_logger``'s logger-level bump to DEBUG from leaking + # into the terminal: even when the logger accepts DEBUG records, the + # handler's own filter still drops them. + for handler in existing_handlers: + if isinstance(handler, logging.StreamHandler) and not isinstance( + handler, + logging.FileHandler, + ): + handler.setLevel(resolved_level) + vcspull_logger.setLevel(resolved_level) vcspull_logger.propagate = True @@ -120,7 +166,23 @@ def setup_logger( repo_channel.setFormatter(RepoLogFormatter()) repo_channel.addFilter(RepoFilter()) repo_logger.addHandler(repo_channel) - repo_logger.setLevel(resolved_level) + # Pin the libvcs StreamHandler's per-handler level using the verbosity + # ladder. Default WARNING means the chatty ``|git| (repo) Updating to + # 'main'.`` traffic stays out of the terminal; ``-v`` opens INFO, ``-vv`` + # opens DEBUG. FileHandlers (debug log) keep their own level (DEBUG). + for handler in repo_logger.handlers: + if isinstance(handler, logging.StreamHandler) and not isinstance( + handler, + logging.FileHandler, + ): + handler.setLevel(libvcs_stream_level) + # The libvcs *logger* level is the floor for everything attached to it, + # including any FileHandler we install later. 
Keep it low enough that
+    # both the StreamHandler and the eventual FileHandler still see records
+    # at their own per-handler thresholds. ``min(libvcs_stream_level, DEBUG)``
+    # is just DEBUG, which means the file logger always wins -- which is the
+    # behaviour we want.
+    repo_logger.setLevel(min(libvcs_stream_level, logging.DEBUG))
     repo_logger.propagate = True
 
     target_logger = log or vcspull_logger
@@ -274,3 +336,127 @@ class RepoFilter(logging.Filter):
     def filter(self, record: logging.LogRecord) -> bool:
         """Only return a record if a keyword object."""
         return "keyword" in record.__dict__
+
+
+#: Format string for the debug log file. Verbose enough to trace a hang:
+#: level, timestamp (to millisecond), logger, module:line, and the message.
+_DEBUG_FILE_FORMAT = (
+    "%(asctime)s.%(msecs)03d [%(levelname)s] %(name)s "
+    "%(module)s:%(lineno)d -- %(message)s"
+)
+
+_DEBUG_FILE_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+def default_debug_log_path() -> pathlib.Path:
+    """Return the path where vcspull writes its per-invocation debug log.
+
+    Mirrors the ``npm`` / ``pnpm`` convention of dropping a timestamped log
+    file in the system temp directory on every run. The file is always created
+    but its path is only surfaced to the user when something went wrong
+    (failure or timeout), so clean runs stay quiet.
+
+    Logs land under a ``vcspull/`` subdirectory of the system tempdir
+    (e.g. ``/tmp/vcspull/debug-<timestamp>-<pid>.log``) so the dir name in
+    ``/tmp/`` is self-describing instead of relying on a ``vcspull-``
+    file prefix that gets lost in a busy tempdir. Under pytest
+    (``PYTEST_CURRENT_TEST`` is set) the subdirectory is
+    ``vcspull-test/`` instead -- an automatic safety net so tests that
+    incidentally invoke this helper without redirecting ``TMPDIR``
+    can't pollute the production log dir.
+ + Examples + -------- + >>> path = default_debug_log_path() + >>> path.name.startswith("debug-") + True + >>> path.suffix + '.log' + >>> path.parent.name in {"vcspull", "vcspull-test"} + True + >>> path.parent.parent == pathlib.Path(tempfile.gettempdir()) + True + """ + stamp = datetime.now().strftime("%Y%m%dT%H%M%S") + pid = os.getpid() + subdir = "vcspull-test" if "PYTEST_CURRENT_TEST" in os.environ else "vcspull" + return pathlib.Path(tempfile.gettempdir()) / subdir / f"debug-{stamp}-{pid}.log" + + +def setup_file_logger( + path: pathlib.Path, + *, + level: int = logging.DEBUG, +) -> logging.FileHandler: + """Attach a file handler to the ``vcspull`` and ``libvcs`` loggers. + + Unlike :func:`setup_logger`, this handler is not tied to stdout -- it + captures the full debug trace so a post-mortem (npm/pnpm/yarn style) has + enough context to diagnose a timeout even when the CLI output was + aggressively summarised. + + Parameters + ---------- + path : pathlib.Path + Destination file. Parent directories are created as needed. An + existing file is appended to so a single session that sets up logging + multiple times does not clobber earlier context. + level : int + Threshold for the file handler. Defaults to :data:`logging.DEBUG` + because the file is consulted only when diagnosing a failure. + """ + path.parent.mkdir(parents=True, exist_ok=True) + + handler = logging.FileHandler(path, mode="a", encoding="utf-8") + handler.setLevel(level) + handler.setFormatter( + logging.Formatter(_DEBUG_FILE_FORMAT, datefmt=_DEBUG_FILE_DATE_FORMAT), + ) + + # Attach to both loggers so libvcs's per-repo activity is captured alongside + # vcspull's own structured events. Duplicate handlers are avoided by + # checking the baseFilename before attaching. 
+ for logger_name in ("vcspull", "libvcs"): + logger = logging.getLogger(logger_name) + existing = next( + ( + h + for h in logger.handlers + if isinstance(h, logging.FileHandler) + and getattr(h, "baseFilename", None) == str(path) + ), + None, + ) + if existing is None: + logger.addHandler(handler) + # Ensure the logger itself lets DEBUG records through to the handler. + # + # Invariant: this raises the *logger* level to DEBUG, which would + # ordinarily open the floodgate to every attached StreamHandler too. + # ``setup_logger`` pins explicit per-handler levels on every + # ``StreamHandler`` (default WARNING; ``-v`` INFO; ``-vv`` DEBUG) + # so the bump here doesn't reach the terminal -- the StreamHandler's + # own filter still drops records below its level. If anyone ever + # "simplifies" ``setup_logger`` to drop the per-handler levels, this + # block will silently re-open the libvcs DEBUG floodgate to stdout. + # Regression-guarded by + # ``test_setup_file_logger_does_not_open_stream_floodgate`` in + # ``tests/test_log.py``. + if logger.level == logging.NOTSET or logger.level > level: + logger.setLevel(level) + + return handler + + +def teardown_file_logger(handler: logging.FileHandler) -> None: + """Flush and detach ``handler`` from the vcspull/libvcs loggers.""" + try: + handler.flush() + handler.close() + except ValueError: + # Handler already closed; nothing to do. 
+        pass
+    for logger_name in ("vcspull", "libvcs"):
+        logger = logging.getLogger(logger_name)
+        if handler in logger.handlers:
+            logger.removeHandler(handler)
diff --git a/tests/cli/test_sync_log_file.py b/tests/cli/test_sync_log_file.py
new file mode 100644
index 000000000..150b34075
--- /dev/null
+++ b/tests/cli/test_sync_log_file.py
@@ -0,0 +1,156 @@
+"""Tests for the per-invocation debug log file (npm/pnpm style)."""
+
+from __future__ import annotations
+
+import logging
+import pathlib
+import typing as t
+
+import pytest
+
+from vcspull.log import (
+    default_debug_log_path,
+    setup_file_logger,
+    teardown_file_logger,
+)
+
+
+def test_default_debug_log_path_lives_in_tempdir(
+    monkeypatch: pytest.MonkeyPatch,
+    tmp_path: pathlib.Path,
+) -> None:
+    """``TMPDIR`` is honoured and the file name encodes timestamp + PID."""
+    monkeypatch.setenv("TMPDIR", str(tmp_path))
+    # On platforms that read TMPDIR via ``tempfile.gettempdir`` this is honoured
+    # on first access per-process; the default path must reside under it.
+
+    path = default_debug_log_path()
+
+    # The file name pattern downstream tooling can grep for is ``debug-<stamp>-
+    # <pid>.log``; the ``vcspull`` (or ``vcspull-test``) parent directory
+    # carries the project label so a glance at ``/tmp/`` is self-describing.
+    assert path.name.startswith("debug-")
+    assert path.suffix == ".log"
+    # Pytest sets ``PYTEST_CURRENT_TEST`` during each test's setup, call,
+    # and teardown phases, so ``default_debug_log_path`` always picks the
+    # test dir here -- the production ``vcspull/`` dir is only used outside
+    # the test runner.
+ assert path.parent.name == "vcspull-test" + + +def test_setup_file_logger_attaches_debug_handler( + tmp_path: pathlib.Path, +) -> None: + """The handler attaches at DEBUG level to both vcspull and libvcs loggers.""" + log_path = tmp_path / "nested" / "vcspull-debug.log" + assert not log_path.exists() + + handler = setup_file_logger(log_path) + try: + # setup_file_logger must create missing parent directories so callers + # can point to a path that does not yet exist. + assert log_path.parent.is_dir() + + vcspull_logger = logging.getLogger("vcspull") + libvcs_logger = logging.getLogger("libvcs") + assert handler in vcspull_logger.handlers + assert handler in libvcs_logger.handlers + assert handler.level == logging.DEBUG + finally: + teardown_file_logger(handler) + + +def test_setup_file_logger_captures_subsequent_log_records( + tmp_path: pathlib.Path, +) -> None: + """Log records emitted after setup end up inside the on-disk file.""" + log_path = tmp_path / "vcspull-debug.log" + handler = setup_file_logger(log_path) + try: + logger = logging.getLogger("vcspull.test-log-file") + logger.debug("debug crumb %s", "apple") + logger.warning("timed out syncing %s", "codex") + finally: + teardown_file_logger(handler) + + text = log_path.read_text(encoding="utf-8") + assert "debug crumb apple" in text + assert "timed out syncing codex" in text + # The formatter must include level + logger name for post-mortem searching. 
+ assert "DEBUG" in text + assert "WARNING" in text + assert "vcspull.test-log-file" in text + + +def test_setup_file_logger_does_not_double_attach( + tmp_path: pathlib.Path, +) -> None: + """Calling setup twice with the same path attaches only one handler.""" + log_path = tmp_path / "vcspull-debug.log" + + handler_a = setup_file_logger(log_path) + handler_b = setup_file_logger(log_path) + try: + vcspull_logger = logging.getLogger("vcspull") + # The second call returns a *new* handler object for the outer caller, + # but the logger must not end up with duplicate file handlers pointing + # at the same file -- that would write every record twice. + matching = [ + h + for h in vcspull_logger.handlers + if isinstance(h, logging.FileHandler) + and getattr(h, "baseFilename", None) == str(log_path) + ] + assert len(matching) == 1 + finally: + teardown_file_logger(handler_a) + teardown_file_logger(handler_b) + + +def test_teardown_file_logger_detaches_and_closes( + tmp_path: pathlib.Path, +) -> None: + """Tearing down removes the handler from both loggers and closes the file.""" + log_path = tmp_path / "vcspull-debug.log" + handler = setup_file_logger(log_path) + teardown_file_logger(handler) + + vcspull_logger = logging.getLogger("vcspull") + libvcs_logger = logging.getLogger("libvcs") + assert handler not in vcspull_logger.handlers + assert handler not in libvcs_logger.handlers + # Closing is idempotent per Python logging convention; a second call must + # not raise so the outer sync()'s ``finally`` can be defensively + # re-invoked. 
+ teardown_file_logger(handler) + + +def test_default_debug_log_path_encodes_distinct_invocations( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Distinct (timestamp, pid) pairs produce distinct file names.""" + + class _Fake: + value = 0 + + @classmethod + def now(cls) -> t.Any: + cls.value += 1 + + class _Stamp: + @staticmethod + def strftime(_fmt: str) -> str: + return f"2026042400000{_Fake.value}" + + return _Stamp() + + import vcspull.log as log_mod + + monkeypatch.setattr(log_mod, "datetime", _Fake) + + first = default_debug_log_path() + second = default_debug_log_path() + + assert first != second + assert first.name.startswith("debug-20260424") + assert second.name.startswith("debug-20260424") diff --git a/tests/cli/test_sync_plan_helpers.py b/tests/cli/test_sync_plan_helpers.py index a9a3a9a1c..5d097dee3 100644 --- a/tests/cli/test_sync_plan_helpers.py +++ b/tests/cli/test_sync_plan_helpers.py @@ -2,13 +2,14 @@ from __future__ import annotations +import os import subprocess import typing as t import pytest from vcspull.cli._output import PlanAction -from vcspull.cli.sync import SyncPlanConfig, _determine_plan_action, _maybe_fetch +from vcspull.cli.sync import SyncPlanConfig, _determine_plan_action, _maybe_fetch, sync if t.TYPE_CHECKING: import pathlib @@ -81,6 +82,15 @@ class MaybeFetchFixture(t.NamedTuple): subprocess_behavior="non-zero", expected_result=(True, None), ), + MaybeFetchFixture( + test_id="fetch-timeout", + fetch=True, + offline=False, + create_repo=True, + create_git_dir=True, + subprocess_behavior="timeout", + expected_result=(False, None), # message checked separately via startswith + ), ] @@ -119,6 +129,8 @@ def _patched_run( if subprocess_behavior == "os-error": error_message = "Permission denied" raise OSError(error_message) + if subprocess_behavior == "timeout": + raise subprocess.TimeoutExpired(cmd=args[0], timeout=120) if subprocess_behavior == "non-zero": return subprocess.CompletedProcess( args=args[0], @@ -140,7 +152,13 @@ def 
_patched_run( config=SyncPlanConfig(fetch=fetch, offline=offline), ) - assert result == expected_result + ok, message = result + assert ok == expected_result[0] + if subprocess_behavior == "timeout": + assert message is not None + assert "timed out" in message + else: + assert result == expected_result class DeterminePlanActionFixture(t.NamedTuple): @@ -248,3 +266,101 @@ def test_determine_plan_action( action, detail = _determine_plan_action(status, config=config) assert action is expected_action assert detail == expected_detail + + +def test_maybe_fetch_passes_no_prompt_env( + tmp_path: pathlib.Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify _maybe_fetch sets GIT_TERMINAL_PROMPT=0 to prevent hangs.""" + repo_path = tmp_path / "repo" + repo_path.mkdir() + (repo_path / ".git").mkdir() + + captured_kwargs: dict[str, t.Any] = {} + + def _spy_run( + *args: t.Any, + **kwargs: t.Any, + ) -> subprocess.CompletedProcess[str]: + captured_kwargs.update(kwargs) + return subprocess.CompletedProcess( + args=args[0], + returncode=0, + stdout="", + stderr="", + ) + + monkeypatch.setattr("subprocess.run", _spy_run) + + _maybe_fetch( + repo_path=repo_path, + config=SyncPlanConfig(fetch=True, offline=False), + ) + + assert "env" in captured_kwargs, "subprocess.run must receive env kwarg" + assert captured_kwargs["env"].get("GIT_TERMINAL_PROMPT") == "0" + + +def test_maybe_fetch_passes_timeout( + tmp_path: pathlib.Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify _maybe_fetch sets a timeout to prevent indefinite blocking.""" + repo_path = tmp_path / "repo" + repo_path.mkdir() + (repo_path / ".git").mkdir() + + captured_kwargs: dict[str, t.Any] = {} + + def _spy_run( + *args: t.Any, + **kwargs: t.Any, + ) -> subprocess.CompletedProcess[str]: + captured_kwargs.update(kwargs) + return subprocess.CompletedProcess( + args=args[0], + returncode=0, + stdout="", + stderr="", + ) + + monkeypatch.setattr("subprocess.run", _spy_run) + + _maybe_fetch( + 
repo_path=repo_path, + config=SyncPlanConfig(fetch=True, offline=False), + ) + + assert "timeout" in captured_kwargs, "subprocess.run must receive timeout kwarg" + assert captured_kwargs["timeout"] > 0 + + +def test_sync_sets_git_terminal_prompt( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Verify sync() sets GIT_TERMINAL_PROMPT=0 to prevent credential hangs.""" + # Remove GIT_TERMINAL_PROMPT if already set so setdefault takes effect + monkeypatch.delenv("GIT_TERMINAL_PROMPT", raising=False) + + sync( + repo_patterns=[], + config=None, + workspace_root=None, + dry_run=False, + output_json=False, + output_ndjson=False, + color="never", + exit_on_error=False, + show_unchanged=False, + summary_only=False, + long_view=False, + relative_paths=False, + fetch=False, + offline=False, + verbosity=0, + sync_all=False, + # No parser and no --all means sync() returns early, but env is set first + ) + + assert os.environ.get("GIT_TERMINAL_PROMPT") == "0" diff --git a/tests/cli/test_sync_progress.py b/tests/cli/test_sync_progress.py new file mode 100644 index 000000000..af92f5aa1 --- /dev/null +++ b/tests/cli/test_sync_progress.py @@ -0,0 +1,643 @@ +"""Tests for ``vcspull.cli._progress.SyncStatusIndicator``.""" + +from __future__ import annotations + +import io +import time +import typing as t + +import pytest + +from vcspull.cli._progress import ( + SyncStatusIndicator, + build_indicator, +) + +if t.TYPE_CHECKING: + pass + + +def test_disabled_indicator_is_silent_noop() -> None: + """An indicator with ``enabled=False`` writes nothing to the stream.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=False, stream=stream, tty=True) + + indicator.start_repo("codex") + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + assert stream.getvalue() == "" + + +def test_non_tty_emits_once_on_start_and_honours_heartbeat() -> None: + """Non-TTY path prints a single start line then a "still syncing" line.""" + stream = io.StringIO() + indicator = 
SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.start_repo("codex") + # Capital ``Syncing`` matches the permanent ``Synced``/``Timed out`` + # leading-cap pattern so the in-flight + completion lines read as a + # consistent badge family. + assert "Syncing codex" in stream.getvalue() + + # Pretend the repo has been running longer than the heartbeat interval so + # the next heartbeat call emits. We go under the hood to avoid a slow + # sleep in the test. After start_repo both timestamps are populated, so + # the asserts below satisfy the type checker. + assert indicator._last_heartbeat_at is not None + assert indicator._repo_started_at is not None + indicator._last_heartbeat_at -= 60.0 + indicator._repo_started_at -= 60.0 + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # The heartbeat line begins with ``...`` so ``still syncing`` stays + # lowercase as a sentence continuation, not a status badge. + assert "still syncing codex" in out + # Heartbeat must not duplicate the start line; the only lines are + # start (``Syncing codex``) + one heartbeat (``... still syncing codex``). + # Match case-insensitively to count both spellings together. + assert out.lower().count("syncing codex") == 2 + + +def test_non_tty_heartbeat_throttles_below_interval() -> None: + """Heartbeat does nothing if the interval hasn't elapsed yet.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.start_repo("codex") + initial_out = stream.getvalue() + # Call heartbeat a bunch without advancing time -- nothing new should + # land in the stream. + for _ in range(5): + indicator.heartbeat() + indicator.stop_repo() + indicator.close() + + # Still exactly one line: the start-of-repo notification. 
+ assert stream.getvalue() == initial_out + assert stream.getvalue().count("\n") == 1 + + +def test_tty_spinner_renders_active_repo(monkeypatch: pytest.MonkeyPatch) -> None: + """In a TTY the spinner background thread writes to the stream.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + indicator.start_repo("codex") + # Give the spinner thread a slice of real time to tick at least once. + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # The spinner line mentions the repo and has at least one Braille frame. + # Capital ``Syncing`` matches the permanent ``Synced`` leading-cap + # pattern; locks in that consistency. + assert "Syncing" in out + assert "codex" in out + assert any(f in out for f in "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") + # Cursor hide / show sequences bracket the spinner so tmux / kitty don't + # leak a missing cursor after vcspull exits. + assert "\x1b[?25l" in out # hide + assert "\x1b[?25h" in out # show + # Frames are wrapped in ANSI synchronized-output markers so terminals + # flip the spinner line atomically, preventing mid-frame tearing when a + # concurrent writer hits stdout. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + # Decorative ornaments dropped in the prior polish pass must stay gone. 
+ for glyph in ("…", "·", "⏱"): + assert glyph not in out + + +def test_close_is_idempotent() -> None: + """Calling ``close()`` more than once is safe.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + indicator.start_repo("codex") + indicator.close() + indicator.close() # must not raise + indicator.close() # still must not raise + + +def test_repo_context_manager_starts_and_stops() -> None: + """The ``repo(name)`` context manager bookends the indicator state.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + with indicator.repo("codex"): + # While inside, the indicator considers 'codex' the active repo. + # We peek at the internal slot rather than waiting for a render. + assert indicator._active_repo == "codex" + + assert indicator._active_repo is None + indicator.close() + + +def test_build_indicator_disables_in_machine_output_mode() -> None: + """``build_indicator(human=False, ...)`` returns a disabled indicator.""" + indicator = build_indicator(human=False, color="auto") + assert indicator._enabled is False + + +def test_build_indicator_disables_when_color_is_never() -> None: + """``color=never`` implies quiet output -- the spinner stays off.""" + indicator = build_indicator(human=True, color="never") + assert indicator._enabled is False + + +def test_build_indicator_enabled_in_human_mode() -> None: + """Human output + auto/always colour turns the indicator on.""" + indicator = build_indicator(human=True, color="auto") + assert indicator._enabled is True + indicator.close() + + +def test_write_clears_spinner_line_before_emitting_in_tty() -> None: + r"""A concurrent writer clears the spinner's in-flight line first. + + Models the real-world interleave: libvcs emits a log record while the + spinner is mid-draw. Without the clear-then-write sequence the log + message would tack onto the spinner line and produce artefacts like + ``Already on 'main'ec ... 
1.1s``. The guard: every ``write()`` in a + TTY must emit ``\r\033[2K`` when the spinner has a drawn line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=True) + + # Pretend the spinner has rendered a frame -- that's the state a + # concurrent writer actually observes mid-sync. + indicator._last_line_len = 40 + + indicator.write("|git| (codex) Updating to 'main'\n") + + out = stream.getvalue() + # CR + ERASE_LINE must appear before the message, not after it. + clear_idx = out.find("\r\x1b[2K") + msg_idx = out.find("Updating") + assert clear_idx != -1, "expected clear-line sequence before the write" + assert msg_idx > clear_idx, "clear must precede the actual text" + # After a concurrent write, the spinner should redraw from scratch; the + # lingering line-length counter is reset so the next frame does a full + # render. + assert indicator._last_line_len == 0 + + +def test_write_non_tty_path_passes_through_untouched() -> None: + """Non-TTY streams never see the clear-line escape sequence.""" + stream = io.StringIO() + indicator = SyncStatusIndicator(enabled=True, stream=stream, tty=False) + + indicator.write("hello world\n") + + out = stream.getvalue() + assert out == "hello world\n" + assert "\x1b[2K" not in out + + +def test_tty_spinner_colours_the_frame_cell_and_name() -> None: + """Spinner cell AND repo name are colourised; ``Syncing`` is plain. + + Reporter pointed out that the in-flight line ``Syncing flume`` had a + plain repo name while the permanent ``✓ Synced fish-shell`` line + had ``fish-shell`` in cyan. Match the pattern: colour the Braille + cell (info / cyan) and the repo name (also info / cyan), leaving + the verb and elapsed-time suffix in the terminal's default + foreground. 
+ """ + from vcspull.cli._colors import ColorMode, Colors + + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + colors=Colors(ColorMode.ALWAYS), + ) + + indicator.start_repo("codex") + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + import re as _re + + # A colour sequence appears immediately before at least one Braille frame. + frame_pattern = _re.compile(r"\x1b\[[0-9;]*m[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]") + assert frame_pattern.search(out), "spinner frame must be wrapped in ANSI colour" + # The repo name is also wrapped in a colour run (matches the + # permanent ``✓ Synced `` style); search for any colour escape + # immediately before ``codex``. + name_pattern = _re.compile(r"\x1b\[[0-9;]*mcodex") + assert name_pattern.search(out), "spinner repo name must be wrapped in ANSI colour" + # The status verb (``Syncing``) stays in the terminal default and is + # capitalised to match ``Synced`` / ``Timed out`` on permanent lines. + assert "Syncing" in out + + +def test_add_output_line_appends_and_bounds_panel_deque() -> None: + """Panel buffer keeps only the last ``output_lines`` entries. + + Reporter's request: a 3-line live trail above the spinner that + rolls under -- not an unbounded scroll. Locks in + ``collections.deque(maxlen=output_lines)`` semantics: the 4th push + drops the oldest line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.add_output_line("first\n") + indicator.add_output_line("second\n") + indicator.add_output_line("third\n") + indicator.add_output_line("fourth\n") + + assert list(indicator._panel_buffer) == ["second", "third", "fourth"] + + +def test_add_output_line_panel_disabled_falls_back_to_write() -> None: + """``output_lines=0`` makes ``add_output_line`` plain ``write()``. 
+ + The 0-panel mode is the escape hatch for users who don't want a + rolling region above the spinner: bytes still reach the terminal, + just on their own row, with the same clear-the-spinner-first + behaviour ``write()`` already provides. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=0, + ) + + indicator.add_output_line("From github.com/foo/bar\n") + + assert "From github.com/foo/bar" in stream.getvalue() + # No panel deque ever populated. + assert indicator._panel_buffer.maxlen == 0 + + +def test_add_output_line_non_tty_writes_directly() -> None: + """Non-TTY mode bypasses the panel entirely. + + The spinner thread never starts in non-TTY mode (pipes, CI, pytest + capture). If ``add_output_line`` buffered into the deque on this + path the bytes would never be rendered. Guard against that + regression: lines must reach the stream synchronously. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=False, + output_lines=3, + ) + + indicator.add_output_line("From github.com/foo/bar\n") + + assert "From github.com/foo/bar" in stream.getvalue() + # Deque stays empty -- the line went straight to the stream. + assert list(indicator._panel_buffer) == [] + + +def test_render_tty_writes_panel_above_spinner_atomically() -> None: + r"""Spinner thread writes panel rows + spinner inside one sync bracket. + + The ANSI ``\x1b[?2026h`` / ``\x1b[?2026l`` pair tells modern + terminals to buffer the whole region and flip it atomically. Locks + in the contract for terminals that honour the bracket + (kitty/iTerm2/WezTerm/recent xterm) and is benign elsewhere. 
+ """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("codex") + indicator.add_output_line("From github.com/openai/codex\n") + indicator.add_output_line(" abc..def main -> origin/main\n") + # Let the spinner thread tick at least once. + time.sleep(0.2) + indicator.stop_repo() + indicator.close() + + out = stream.getvalue() + # Both panel lines made it to the stream during a render tick. + assert "From github.com/openai/codex" in out + assert "abc..def" in out + # The synchronized-output bracket wraps each frame. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + + +def test_stop_repo_collapses_the_panel() -> None: + """``stop_repo`` resets the deque + visible-lines counter. + + The "trail collapses on completion" UX requirement: when the + permanent ``✓ Synced`` line is about to print, the panel must be + drained so it doesn't linger in scrollback. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("codex") + indicator.add_output_line("a\nb\nc\n") + # Pretend a render happened. + indicator._panel_visible_lines = 3 + indicator.stop_repo() + + assert list(indicator._panel_buffer) == [] + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_panel_clears_between_repos() -> None: + """``start_repo`` empties any leftover deque from the prior repo.""" + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + indicator.start_repo("alpha") + indicator.add_output_line("alpha-one\n") + indicator.add_output_line("alpha-two\n") + indicator.start_repo("beta") + + # Fresh trail per repo: alpha's panel must NOT bleed into beta's. 
+ assert list(indicator._panel_buffer) == [] + indicator.close() + + +def test_stop_repo_with_final_line_writes_atomically_and_returns_true() -> None: + r"""``stop_repo(final_line=...)`` writes one atomic clear+replace. + + Regression for the user-reported flicker where ``Syncing flume`` + transitioning to ``✓ Synced flume`` flashed through a blank state. + Today's fix: ``stop_repo`` accepts the permanent line and emits the + panel-erase + spinner-replacement inside a single ``\x1b[?2026h ... + \x1b[?2026l`` synchronized-output bracket so the terminal flips the + whole region atomically. + + The method returns ``True`` to tell the caller it has owned the + print, so the sync loop skips its own ``formatter.emit_text`` and + avoids the double-emit that produced ``✓ Synced clap`` on two rows. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + # Pretend a render happened so ``had_render`` is true. + indicator._last_line_len = 25 + indicator._panel_visible_lines = 2 + + wrote = indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + assert wrote is True + out = stream.getvalue() + # The atomic write block contains both the synchronized-output + # bracket and the permanent line. + assert "\x1b[?2026h" in out + assert "\x1b[?2026l" in out + assert "✓ Synced clap" in out + # Internal counters reset so the next ``start_repo`` starts clean. + assert indicator._panel_visible_lines == 0 + assert indicator._last_line_len == 0 + indicator.close() + + +def test_stop_repo_without_final_line_returns_false() -> None: + """No ``final_line`` means caller still owns the permanent print. + + The non-atomic path stays available for callers that don't have a + completion line to provide -- e.g. an early teardown on + KeyboardInterrupt. ``stop_repo()`` returns False so the existing + ``formatter.emit_text`` flow keeps working. 
+ """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 10 + indicator._panel_visible_lines = 1 + + wrote = indicator.stop_repo() + assert wrote is False + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_stop_repo_non_tty_always_returns_false() -> None: + """Non-TTY indicators leave printing to the caller's formatter. + + In headless mode (pipes, CI, capsys), nothing was drawn to erase + and the spinner thread never started; the caller is responsible + for printing the permanent line via ``formatter.emit_text``. Even + when ``final_line`` is passed in, ``stop_repo`` returns False so + the caller fires its own emit_text and the headless capture + receives the line. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=False, + output_lines=3, + ) + + indicator.start_repo("clap") + wrote = indicator.stop_repo(final_line="✓ Synced clap → ~/") + assert wrote is False + # The non-TTY path emitted the start line on ``start_repo``; it + # does NOT emit the permanent line here -- the caller owns that. + assert "Syncing clap" in stream.getvalue() + assert "✓ Synced clap" not in stream.getvalue() + + +def test_stop_repo_collapses_panel_rows_via_dl() -> None: + r"""``stop_repo(final_line=...)`` deletes panel rows with DL, not erase. + + Regression: an earlier implementation walked up to the first panel + row and emitted ``\x1b[2K\n`` per row to "clear" the panel. That + only erased the row content -- the rows stayed in the buffer as + blanks, so successive ``✓ Synced ...`` lines were separated by + visible gaps proportional to whatever the previous repo's panel + height was at completion time (reported when chaining + ``vcspull sync ...; vcspull sync ...``). 
+ + Today's fix: after walking up, emit ``\x1b[Pn M`` (DL) once to + physically delete the panel rows from the buffer; the spinner + row's content scrolls up to the cursor and the subsequent + ``erase + final_line + \n`` morphs that row into the permanent + line in place. This test asserts the DL escape lands in the + output AND that the regressed ``\x1b[2K\n`` per-row pattern is + NOT present. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 3 + + indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + out = stream.getvalue() + # Walk up panel_visible rows, then DL panel_visible rows. + assert "\x1b[3A" in out + assert "\x1b[3M" in out + # The regressed pattern: ``\x1b[2K\n`` per panel row would appear + # at least 3 times back-to-back. The new path uses DL once. + assert out.count("\x1b[2K\n") <= 1, ( + "panel rows are being erased+newlined instead of DL'd; this " + "leaves blanks in scrollback" + ) + indicator.close() + + +def test_stop_repo_no_panel_skips_dl() -> None: + r"""``stop_repo`` omits DL when there are no panel rows to delete. + + With ``_panel_visible_lines == 0`` only the spinner row needs to + be replaced; emitting ``\x1b[0M`` is meaningless (DL of zero + rows) and even ``\x1b[A`` would walk above the spinner, so the + walk-up + DL pair is skipped entirely. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 0 + + indicator.stop_repo(final_line="✓ Synced clap → ~/study/rust/clap") + + out = stream.getvalue() + # No panel-collapse machinery should appear when there's nothing + # to collapse: no upward walk, no DL. 
+ assert "\x1b[0A" not in out + assert "\x1b[0M" not in out + # The spinner-row replacement still lands. + assert "✓ Synced clap" in out + indicator.close() + + +def test_stop_repo_panel_with_no_final_line_still_collapses() -> None: + r"""``stop_repo()`` (no ``final_line``) still collapses panel rows. + + The early-teardown path (e.g. KeyboardInterrupt before the outcome + is known) calls ``stop_repo()`` with no replacement line. The + panel must still be DL'd so the caller's subsequent + ``formatter.emit_text`` lands without a stack of blank rows above + it. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator.start_repo("clap") + indicator._last_line_len = 25 + indicator._panel_visible_lines = 2 + + wrote = indicator.stop_repo() + + out = stream.getvalue() + assert wrote is False # caller still owns the permanent print + assert "\x1b[2A" in out + assert "\x1b[2M" in out + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_clear_block_uses_dl_to_remove_panel_and_spinner() -> None: + r"""``_clear_block`` deletes panel rows + spinner via a single DL. + + Used from ``close()`` (and historically from teardown paths). + Walks up to the first panel row, then DLs ``panel + 1`` rows so + the entire frame is removed from the buffer in one operation. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + indicator._panel_visible_lines = 3 + indicator._clear_block() + + out = stream.getvalue() + # Walk up panel rows, then DL panel + spinner together. + assert "\x1b[3A" in out + assert "\x1b[4M" in out + assert indicator._panel_visible_lines == 0 + indicator.close() + + +def test_render_tty_skips_stale_tick_when_active_repo_changed() -> None: + """``_render_tty`` must skip writes for a stale (now-cleared) repo. 
+ + Reproduces the race where the spinner thread captured ``name`` = + "clap" before ``stop_repo`` cleared it; the prior render path then + wrote a stale frame on top of the new state. The fix: re-check + ``self._active_repo`` under the lock at the start of ``_render_tty``. + """ + stream = io.StringIO() + indicator = SyncStatusIndicator( + enabled=True, + stream=stream, + tty=True, + output_lines=3, + ) + + # Pretend the spinner thread captured ``name="clap"`` and is about + # to render, but ``stop_repo`` has just cleared ``_active_repo``. + indicator._active_repo = None + before = stream.getvalue() + indicator._render_tty(frame="⠋", name="clap", elapsed=0.5) + after = stream.getvalue() + # Stale tick: nothing should land on the stream. + assert after == before diff --git a/tests/cli/test_sync_sigint.py b/tests/cli/test_sync_sigint.py new file mode 100644 index 000000000..1e3c9955f --- /dev/null +++ b/tests/cli/test_sync_sigint.py @@ -0,0 +1,71 @@ +"""Subprocess-based tests asserting real ``WIFSIGNALED(SIGINT)`` termination.""" + +from __future__ import annotations + +import os +import signal +import subprocess +import sys + +import pytest + + +@pytest.mark.skipif( + sys.platform == "win32", + reason="POSIX signal semantics only; Windows uses exit-code 130", +) +def test_exit_on_sigint_produces_wifsignaled_sigint() -> None: + """``_exit_on_sigint`` makes the process terminate via ``WIFSIGNALED(SIGINT)``. + + Bash's ``cmd1; cmd2`` sequential-list abort only kicks in when the + child was killed by a signal -- a clean ``SystemExit(130)`` leaves + the shell no reason to stop and the chain keeps running. This + assertion locks in the actual kernel signal bit, not just the exit + code. + + Why a subprocess: the production helper ends with + ``signal.raise_signal(SIGINT)`` under ``SIG_DFL``, which would take + the pytest runner down with us if we called it in-process. 
The only + reliable observation point is ``subprocess.Popen.returncode``, + which CPython stores as the *negative* of the terminating signal + number when ``os.waitstatus_to_exitcode`` reports ``WIFSIGNALED``. + + Scope: we target ``_exit_on_sigint`` directly in a fresh interpreter + rather than driving ``vcspull sync`` end-to-end, because getting the + sync loop into a deterministic mid-flight state where SIGINT lands + on the watchdog's ``done.wait(...)`` requires either a blocking fake + remote or a monkey-patched ``update_repo`` -- both reintroduce + flake and add nothing over the existing in-process control-flow + tests in ``test_sync_watchdog.py`` (which use the + ``_fake_sigint_escalation`` fixture). The one thing those tests + can't show is that the kernel actually marks the exit as + WIFSIGNALED(SIGINT); that's what this subprocess proves. + """ + # Simulate Ctrl-C in a fresh interpreter, then route it through the + # real helper. We install ``default_int_handler`` explicitly because + # pytest's parent may have fiddled with SIGINT -- this child is a + # fresh Python process, but being explicit removes one variable. 
+ runner = ( + "import signal\n" + "signal.signal(signal.SIGINT, signal.default_int_handler)\n" + "from vcspull.cli.sync import _exit_on_sigint\n" + "try:\n" + " signal.raise_signal(signal.SIGINT)\n" + "except KeyboardInterrupt:\n" + " _exit_on_sigint()\n" + ) + + env = {**os.environ, "PYTHONDONTWRITEBYTECODE": "1"} + + proc = subprocess.run( + [sys.executable, "-c", runner], + env=env, + capture_output=True, + check=False, + timeout=10, + ) + + assert proc.returncode == -signal.SIGINT, ( + f"expected WIFSIGNALED(SIGINT) (-{int(signal.SIGINT)}), " + f"got returncode={proc.returncode}; stderr={proc.stderr!r}" + ) diff --git a/tests/cli/test_sync_watchdog.py b/tests/cli/test_sync_watchdog.py new file mode 100644 index 000000000..695cb376e --- /dev/null +++ b/tests/cli/test_sync_watchdog.py @@ -0,0 +1,476 @@ +"""Tests for the per-repo sync watchdog and rerun-recipe emitter.""" + +from __future__ import annotations + +import importlib +import signal +import sys +import time +import typing as t + +import pytest + +from vcspull.cli._colors import ColorMode, Colors +from vcspull.cli._output import OutputFormatter, OutputMode +from vcspull.cli.sync import ( + _DEFAULT_REPO_TIMEOUT_SECONDS, + _emit_rerun_recipe, + _resolve_repo_timeout, + _sync_repo_with_watchdog, + _TimedOutRepo, +) + +# ``vcspull.cli.__init__`` re-exports the ``sync`` function, which shadows the +# submodule of the same name in normal attribute access. Grab the module +# object directly so monkeypatch.setattr can install stubs on it. 
+sync_module = importlib.import_module("vcspull.cli.sync") + +if t.TYPE_CHECKING: + import pathlib + + +def _noop_progress(output: str, timestamp: t.Any) -> None: + """Swallow libvcs progress output in tests.""" + return + + +def test_resolve_repo_timeout_prefers_cli_flag( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """CLI ``--timeout`` should win over the env var and the default.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "99") + + assert _resolve_repo_timeout(5) == 5 + + +def test_resolve_repo_timeout_falls_back_to_env_var( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Without a CLI flag, ``VCSPULL_SYNC_TIMEOUT_SECONDS`` takes over.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "42") + + assert _resolve_repo_timeout(None) == 42 + + +def test_resolve_repo_timeout_uses_default_when_unset( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """The module-level default applies when neither override is present.""" + monkeypatch.delenv("VCSPULL_SYNC_TIMEOUT_SECONDS", raising=False) + + assert _resolve_repo_timeout(None) == _DEFAULT_REPO_TIMEOUT_SECONDS + + +def test_resolve_repo_timeout_ignores_bogus_env_value( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A non-integer env value is logged and ignored; default applies.""" + monkeypatch.setenv("VCSPULL_SYNC_TIMEOUT_SECONDS", "forever") + + assert _resolve_repo_timeout(None) == _DEFAULT_REPO_TIMEOUT_SECONDS + + +def test_watchdog_returns_synced_outcome_on_success( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A fast ``update_repo`` returns ``status='synced'`` with no error.""" + # Replace update_repo with a stub so we can exercise the watchdog without + # touching libvcs. The stub is fast, so the timeout branch never fires. 
+    calls: list[dict[str, t.Any]] = []
+
+    def _stub_update_repo(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None:
+        calls.append(repo)
+
+    monkeypatch.setattr(sync_module, "update_repo", _stub_update_repo)
+
+    outcome = _sync_repo_with_watchdog(
+        t.cast("t.Any", {"name": "ok"}),
+        progress_callback=_noop_progress,
+        timeout=5,
+        is_human=True,
+    )
+
+    assert outcome.status == "synced"
+    assert outcome.error is None
+    assert calls == [{"name": "ok"}]
+
+
+def test_watchdog_returns_timed_out_on_slow_update(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
+    """A slow ``update_repo`` is abandoned with ``status='timed_out'``."""
+
+    def _slow_update_repo(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None:
+        # Far longer than the 1 s timeout below -- the watchdog must fire.
+        time.sleep(10)
+
+    monkeypatch.setattr(sync_module, "update_repo", _slow_update_repo)
+
+    started = time.monotonic()
+    outcome = _sync_repo_with_watchdog(
+        t.cast("t.Any", {"name": "slow"}),
+        progress_callback=_noop_progress,
+        timeout=1,
+        is_human=True,
+    )
+    elapsed = time.monotonic() - started
+
+    assert outcome.status == "timed_out"
+    # The watchdog should fire near the timeout -- give generous slack so CI
+    # scheduling jitter doesn't flake the test.
+ assert elapsed < 5.0 + assert outcome.duration >= 0.5 + + +def test_watchdog_preserves_failed_outcome( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Synchronous exceptions from ``update_repo`` surface as ``failed``.""" + + class _Boom(RuntimeError): + """Sentinel used to trace exception propagation.""" + + def _raising_update_repo( + repo: dict[str, t.Any], *, progress_callback: t.Any + ) -> None: + msg = "remote exploded" + raise _Boom(msg) + + monkeypatch.setattr(sync_module, "update_repo", _raising_update_repo) + + outcome = _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "boom"}), + progress_callback=_noop_progress, + timeout=5, + is_human=True, + ) + + assert outcome.status == "failed" + assert isinstance(outcome.error, _Boom) + assert "remote exploded" in str(outcome.error) + + +def test_rerun_recipe_emits_one_line_per_workspace( + capsys: pytest.CaptureFixture[str], + tmp_path: pathlib.Path, +) -> None: + """Repositories are grouped by workspace root in the suggested rerun.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + rust_workspace = tmp_path / "rust" + otel_workspace = tmp_path / "otel" + rust_workspace.mkdir() + otel_workspace.mkdir() + + timed_out = [ + _TimedOutRepo( + name="codex", + path=str(rust_workspace / "codex"), + workspace_root=str(rust_workspace), + duration=10.2, + ), + _TimedOutRepo( + name="rust", + path=str(rust_workspace / "rust"), + workspace_root=str(rust_workspace), + duration=10.5, + ), + _TimedOutRepo( + name="opentelemetry-rust", + path=str(otel_workspace / "opentelemetry-rust"), + workspace_root=str(otel_workspace), + duration=10.1, + ), + ] + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=timed_out, + timeout=10, + ) + formatter.finalize() + + captured = capsys.readouterr().out + # One rerun command per distinct workspace root, with the repo names + # appended as positional args -- this is what the user copy-pastes. 
+ assert "vcspull sync --workspace" in captured + assert "codex" in captured and "rust" in captured + assert "opentelemetry-rust" in captured + # Suggest 10x the current timeout, clamped to 120 s minimum. + assert "--timeout 120" in captured + # Include a verbose-logging variant for diagnosis. + assert "-vv" in captured + # Include a manual git probe so the user can isolate the failure mode. + assert "GIT_TERMINAL_PROMPT=0" in captured + assert "git -C" in captured + # The rerun recipe itself must stay emoji-free -- plain ASCII markers + # only. Clock/stopwatch emoji had shipped as the prior prefix; guard it. + assert "⏱" not in captured + + +def test_rerun_recipe_is_noop_when_no_timeouts( + capsys: pytest.CaptureFixture[str], +) -> None: + """A clean run with no timeouts emits nothing extra.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=[], + timeout=10, + ) + formatter.finalize() + + captured = capsys.readouterr().out + assert "Timed out" not in captured + assert "vcspull sync" not in captured + + +def test_rerun_recipe_scales_timeout_suggestion( + capsys: pytest.CaptureFixture[str], + tmp_path: pathlib.Path, +) -> None: + """When the user already passed a long timeout, we suggest 10x it.""" + formatter = OutputFormatter(OutputMode.HUMAN) + colors = Colors(ColorMode.NEVER) + + _emit_rerun_recipe( + formatter, + colors, + timed_out_repos=[ + _TimedOutRepo( + name="huge", + path=str(tmp_path / "huge"), + workspace_root=str(tmp_path), + duration=60.0, + ), + ], + timeout=30, + ) + formatter.finalize() + + captured = capsys.readouterr().out + # max(120, 30 * 10) = 300 + assert "--timeout 300" in captured + + +def test_watchdog_propagates_keyboard_interrupt_from_worker( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A ``KeyboardInterrupt`` in the worker bubbles out of the watchdog. + + ``_sync_repo_with_watchdog`` runs the libvcs call on a daemon thread. 
If + the main thread receives Ctrl-C (normal case) it never reaches this code + path, but a worker-side ``KeyboardInterrupt`` (rare, via + ``PyThreadState_SetAsyncExc``) must NOT be laundered into a per-repo + "failed" outcome -- it has to propagate so the outer loop can tear the + batch down cleanly. This locks down the narrowed catch (``Exception``, + not ``BaseException``). + """ + + def _raising(repo: dict[str, t.Any], *, progress_callback: t.Any) -> None: + raise KeyboardInterrupt + + monkeypatch.setattr(sync_module, "update_repo", _raising) + + with pytest.raises(KeyboardInterrupt): + _sync_repo_with_watchdog( + t.cast("t.Any", {"name": "kb"}), + progress_callback=_noop_progress, + timeout=5, + is_human=True, + ) + + +@pytest.fixture +def _fake_sigint_escalation( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Convert ``_exit_on_sigint`` into ``SystemExit(130)`` for in-process tests. + + The real implementation re-raises SIGINT under ``SIG_DFL`` so the + parent shell sees ``WIFSIGNALED(SIGINT)``. Running that in-process + would kill the pytest runner. Unit tests that only want to verify the + control flow through the ``except KeyboardInterrupt`` clauses opt in + to this fixture; the real signal semantics live in a subprocess test + (``tests/cli/test_sync_sigint.py``). + """ + + def _fake() -> t.NoReturn: + raise SystemExit(130) + + monkeypatch.setattr(sync_module, "_exit_on_sigint", _fake) + + +def test_sync_handles_keyboard_interrupt_during_config_load( + monkeypatch: pytest.MonkeyPatch, + capsys: pytest.CaptureFixture[str], + _fake_sigint_escalation: None, +) -> None: + """Ctrl-C during pre-loop work (e.g. YAML parse) exits cleanly with 130. + + Regression for the observed traceback where a KeyboardInterrupt raised + inside ``load_configs`` escaped all the way through ``cli.sync`` and + out to the top-level ``sys.exit`` as an unhandled exception, dumping + the entire yaml parser stack to the terminal. 
The outer ``sync()`` + entry point must catch the interrupt, emit a short notice, and exit + via ``_exit_on_sigint()``. The ``_fake_sigint_escalation`` fixture + swaps the real ``_exit_on_sigint`` (which re-raises SIGINT under + ``SIG_DFL`` and would kill the test runner) for a plain + ``SystemExit(130)``. + """ + from vcspull.cli.sync import sync as sync_fn + + def _raising_load(*_args: t.Any, **_kwargs: t.Any) -> t.Any: + raise KeyboardInterrupt + + monkeypatch.setattr(sync_module, "load_configs", _raising_load) + + with pytest.raises(SystemExit) as excinfo: + sync_fn( + repo_patterns=[], + config=None, + workspace_root=None, + dry_run=False, + output_json=False, + output_ndjson=False, + color="never", + exit_on_error=False, + show_unchanged=False, + summary_only=False, + long_view=False, + relative_paths=False, + fetch=False, + offline=False, + verbosity=0, + sync_all=True, + ) + + assert excinfo.value.code == 130 + err = capsys.readouterr().err + assert "Interrupted by user." in err + + +def test_exit_on_sigint_posix_installs_sig_dfl_and_raises( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``_exit_on_sigint`` installs SIG_IGN, then SIG_DFL, then self-SIGINTs. + + Locks in the order from git's ``sigchain.h:20-34`` pattern and proves + the helper would make the kernel deliver SIGINT to ourselves -- which + is what the parent shell's ``WIFSIGNALED`` check keys off to abort a + ``;`` chain. We can't let ``raise_signal`` actually fire (it would + take pytest down with us), so we intercept both stdlib calls and + assert on the recorded sequence. + """ + monkeypatch.setattr(sys, "platform", "linux") + + calls: list[tuple[str, t.Any, t.Any]] = [] + + def _fake_signal(sig: int, handler: t.Any) -> t.Any: + calls.append(("signal", sig, handler)) + return None + + def _fake_raise(sig: int) -> None: + calls.append(("raise_signal", sig, None)) + # Simulate ``SIG_DFL`` termination by exiting -- the real call + # never returns on POSIX; here we just prove the ordering. 
+ raise SystemExit(130) + + monkeypatch.setattr(signal, "signal", _fake_signal) + monkeypatch.setattr(signal, "raise_signal", _fake_raise) + + with pytest.raises(SystemExit) as excinfo: + sync_module._exit_on_sigint() + + assert excinfo.value.code == 130 + assert calls == [ + ("signal", signal.SIGINT, signal.SIG_IGN), + ("signal", signal.SIGINT, signal.SIG_DFL), + ("raise_signal", signal.SIGINT, None), + ] + + +def test_exit_on_sigint_windows_fallback( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Windows falls back to ``SystemExit(130)`` without touching signals. + + ``signal.raise_signal(SIGINT)`` under ``SIG_DFL`` on Windows raises + ``KeyboardInterrupt`` back at the caller instead of terminating, so + the POSIX re-raise would get us nowhere. The helper must short-circuit + on ``sys.platform == "win32"`` and leave the signal handlers + completely untouched. + """ + monkeypatch.setattr(sys, "platform", "win32") + + def _fail_if_called(sig: int) -> None: + msg = "raise_signal must not be called on the Windows fallback path" + raise AssertionError(msg) + + def _fail_signal(sig: int, handler: t.Any) -> t.Any: + msg = "signal.signal must not be called on the Windows fallback path" + raise AssertionError(msg) + + monkeypatch.setattr(signal, "raise_signal", _fail_if_called) + monkeypatch.setattr(signal, "signal", _fail_signal) + + with pytest.raises(SystemExit) as excinfo: + sync_module._exit_on_sigint() + + assert excinfo.value.code == 130 + + +def test_resolve_panel_lines_prefers_cli_flag( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``--panel-lines N`` wins over the env var and the default.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.setenv("VCSPULL_PROGRESS_LINES", "9") + assert _resolve_panel_lines(5) == 5 + + +def test_resolve_panel_lines_falls_back_to_env( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Without a flag, ``VCSPULL_PROGRESS_LINES`` is honoured.""" + from vcspull.cli.sync import _resolve_panel_lines + + 
monkeypatch.setenv("VCSPULL_PROGRESS_LINES", "5") + assert _resolve_panel_lines(None) == 5 + + +def test_resolve_panel_lines_accepts_zero_and_negative( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """``0`` (hide panel) and ``-1`` (unbounded) must round-trip cleanly.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.delenv("VCSPULL_PROGRESS_LINES", raising=False) + assert _resolve_panel_lines(0) == 0 + assert _resolve_panel_lines(-1) == -1 + + +def test_resolve_panel_lines_default_is_three( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Default panel height matches tmuxp's ``DEFAULT_OUTPUT_LINES``.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.delenv("VCSPULL_PROGRESS_LINES", raising=False) + assert _resolve_panel_lines(None) == 3 + + +def test_resolve_panel_lines_ignores_bogus_env_value( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """A non-integer env value is logged and ignored; default applies.""" + from vcspull.cli.sync import _resolve_panel_lines + + monkeypatch.setenv("VCSPULL_PROGRESS_LINES", "many") + assert _resolve_panel_lines(None) == 3 diff --git a/tests/test_cli.py b/tests/test_cli.py index 3c932e45c..90a558bcd 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -34,7 +34,7 @@ if t.TYPE_CHECKING: from typing import TypeAlias - from libvcs.pytest_plugin import CreateRepoPytestFixtureFn + from libvcs.pytest_plugin import CreateRepoFn from libvcs.sync.git import GitSync ExpectedOutput: TypeAlias = str | list[str] | None @@ -1051,7 +1051,7 @@ def test_sync_errored_svn_repo( monkeypatch: pytest.MonkeyPatch, user_path: pathlib.Path, config_path: pathlib.Path, - create_svn_remote_repo: CreateRepoPytestFixtureFn, + create_svn_remote_repo: CreateRepoFn, test_id: str, sync_args: list[str], expected_in_out: ExpectedOutput, @@ -1168,7 +1168,7 @@ def test_sync_errored_hg_repo( monkeypatch: pytest.MonkeyPatch, user_path: pathlib.Path, config_path: pathlib.Path, - create_hg_remote_repo: 
CreateRepoPytestFixtureFn, + create_hg_remote_repo: CreateRepoFn, test_id: str, sync_args: list[str], expected_in_out: ExpectedOutput, diff --git a/tests/test_log.py b/tests/test_log.py index 8fe6972b7..8fb7acd63 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -3,6 +3,7 @@ from __future__ import annotations import logging +import pathlib import typing as t import pytest @@ -428,6 +429,7 @@ def test_get_cli_logger_names_includes_base() -> None: "vcspull.cli", "vcspull.cli._colors", "vcspull.cli._output", + "vcspull.cli._progress", "vcspull.cli._workspaces", "vcspull.cli.add", "vcspull.cli.discover", @@ -610,3 +612,195 @@ def test_repo_filter_integration(caplog: LogCaptureFixture) -> None: exc_info=None, ) assert repo_filter.filter(regular_record) is False + + +def test_setup_logger_default_libvcs_stream_level_is_warning() -> None: + """At ``-v`` count 0, the libvcs StreamHandler drops INFO and DEBUG. + + Regression for the user-reported flood of ``|git| (repo) ...`` lines + that should not appear unless the user opts into ``-v``. 
+ """ + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers, "libvcs logger should have a StreamHandler" + assert all(h.level == logging.WARNING for h in stream_handlers) + + +def test_setup_logger_v_lifts_libvcs_stream_to_info() -> None: + """``-v`` (verbosity=1) opens libvcs INFO on the terminal.""" + setup_logger(level="INFO", verbosity=1) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.INFO for h in stream_handlers) + + +def test_setup_logger_vv_lifts_libvcs_stream_to_debug() -> None: + """``-vv`` (verbosity>=2) opens libvcs DEBUG on the terminal.""" + setup_logger(level="INFO", verbosity=2) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.DEBUG for h in stream_handlers) + + +def test_setup_logger_pins_vcspull_stream_to_resolved_level() -> None: + """The vcspull StreamHandler gets a per-handler level (not just logger).""" + setup_logger(level="INFO", verbosity=0) + vcspull_logger = logging.getLogger("vcspull") + stream_handlers = [ + h + for h in vcspull_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert stream_handlers + assert all(h.level == logging.INFO for h in stream_handlers) + + +def test_setup_file_logger_does_not_open_stream_floodgate( + tmp_path: pathlib.Path, +) -> None: + """``setup_file_logger`` must not open the terminal StreamHandler floodgate. 
+ + Regression for the floodgate bug: prior versions of ``setup_logger`` + left the StreamHandler with no per-handler level, so when the file + logger bumped the parent logger to DEBUG (so the FileHandler could + capture full traces), the terminal handler also accepted DEBUG and + spammed ``|git| (repo) head_sha: ...`` style lines into the user's + terminal. + """ + import vcspull.log as log_mod + + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + stream_handlers_before = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + levels_before = [h.level for h in stream_handlers_before] + + handler = log_mod.setup_file_logger(tmp_path / "vcspull-debug.log") + try: + # The file handler must accept DEBUG so the npm-style log captures + # the full trace. + assert handler.level == logging.DEBUG + # The libvcs logger may now sit at DEBUG (so DEBUG records reach the + # FileHandler) -- but the StreamHandler levels MUST be unchanged. + stream_handlers_after = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + levels_after = [h.level for h in stream_handlers_after] + assert levels_after == levels_before, ( + f"setup_file_logger must not change StreamHandler levels: " + f"before={levels_before} after={levels_after}" + ) + finally: + log_mod.teardown_file_logger(handler) + + +def test_indicator_log_diverter_silences_libvcs_at_default_verbosity() -> None: + """``_install_indicator_log_diverter`` blocks libvcs from the terminal. + + Reporter saw libvcs's ``|git| (rye) Failed to determine current + branch`` warning leak in alongside vcspull's own ``✗ Failed + syncing rye: Command failed with code 128: git symbolic-ref HEAD + --short`` line -- two messages saying the same thing, breaking + the ``✓ Synced X / ✗ Failed X / - Timed out X`` rhythm. 
The + diverter raises the libvcs StreamHandler level above + ``CRITICAL`` so libvcs records can't reach the terminal during + indicator-active sync. The file handler (debug log) keeps + DEBUG so post-mortems still have context. + """ + from vcspull.cli._progress import build_indicator + from vcspull.cli.sync import _install_indicator_log_diverter + + setup_logger(level="INFO", verbosity=0) + libvcs_logger = logging.getLogger("libvcs") + handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert handlers, "libvcs logger should have a StreamHandler after setup" + levels_before = [h.level for h in handlers] + assert all(level == logging.WARNING for level in levels_before) + + indicator = build_indicator(human=True, color="always", tty=True) + try: + restore = _install_indicator_log_diverter(indicator) + try: + levels_during = [h.level for h in handlers] + assert all(level == logging.CRITICAL + 1 for level in levels_during), ( + f"libvcs StreamHandler must be silenced during sync; " + f"got {levels_during}" + ) + finally: + restore() + levels_after = [h.level for h in handlers] + assert levels_after == levels_before, ( + f"diverter must restore the original level; " + f"before={levels_before} after={levels_after}" + ) + finally: + indicator.close() + + +def test_indicator_log_diverter_respects_verbose_user() -> None: + """At ``-v``/``-vv``, the diverter leaves libvcs StreamHandler alone. + + Users who explicitly asked for libvcs INFO (``-v``) or DEBUG + (``-vv``) want to see what libvcs is doing; silencing them + defeats the purpose of the verbosity flag. Only the default + (WARNING) gets bumped. 
+ """ + from vcspull.cli._progress import build_indicator + from vcspull.cli.sync import _install_indicator_log_diverter + + setup_logger(level="INFO", verbosity=1) + libvcs_logger = logging.getLogger("libvcs") + handlers = [ + h + for h in libvcs_logger.handlers + if isinstance(h, logging.StreamHandler) + and not isinstance(h, logging.FileHandler) + ] + assert handlers + levels_before = [h.level for h in handlers] + assert all(level == logging.INFO for level in levels_before) + + indicator = build_indicator(human=True, color="always", tty=True) + try: + restore = _install_indicator_log_diverter(indicator) + try: + levels_during = [h.level for h in handlers] + assert levels_during == levels_before, ( + f"diverter must not change level when user is at -v/-vv; " + f"got {levels_during}" + ) + finally: + restore() + finally: + indicator.close() diff --git a/tests/test_sync.py b/tests/test_sync.py index b61567d7c..4172e7bbb 100644 --- a/tests/test_sync.py +++ b/tests/test_sync.py @@ -29,7 +29,7 @@ if t.TYPE_CHECKING: import pathlib - from libvcs.pytest_plugin import CreateRepoPytestFixtureFn + from libvcs.pytest_plugin import CreateRepoFn from vcspull.types import ConfigDict @@ -154,7 +154,7 @@ class ConfigVariationTest(t.NamedTuple): def test_config_variations( tmp_path: pathlib.Path, capsys: pytest.CaptureFixture[str], - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: CreateRepoFn, test_id: str, config_tpl: str, remote_list: list[str], @@ -262,7 +262,7 @@ class UpdatingRemoteFixture(t.NamedTuple): ) def test_updating_remote( tmp_path: pathlib.Path, - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: CreateRepoFn, test_id: str, config_tpl: str, has_extra_remotes: bool, @@ -340,7 +340,7 @@ def test_updating_remote( def test_sync_deduplicates_repos_matched_by_multiple_patterns( tmp_path: pathlib.Path, capsys: pytest.CaptureFixture[str], - create_git_remote_repo: CreateRepoPytestFixtureFn, + create_git_remote_repo: 
CreateRepoFn, ) -> None: """Repos matched by multiple patterns should only sync once.""" dummy_repo = create_git_remote_repo( @@ -392,7 +392,7 @@ def test_sync_deduplicates_repos_matched_by_multiple_patterns( @skip_if_svn_missing def test_update_repo_svn( tmp_path: pathlib.Path, - create_svn_remote_repo: CreateRepoPytestFixtureFn, + create_svn_remote_repo: CreateRepoFn, ) -> None: """update_repo should handle SVN repositories and return SvnSync.""" svn_remote = create_svn_remote_repo( @@ -415,7 +415,7 @@ def test_update_repo_svn( @skip_if_hg_missing def test_update_repo_hg( tmp_path: pathlib.Path, - create_hg_remote_repo: CreateRepoPytestFixtureFn, + create_hg_remote_repo: CreateRepoFn, ) -> None: """update_repo should handle Mercurial repositories and return HgSync.""" hg_remote = create_hg_remote_repo( diff --git a/uv.lock b/uv.lock index 3d736f607..88f9c9fde 100644 --- a/uv.lock +++ b/uv.lock @@ -6,6 +6,13 @@ resolution-markers = [ "python_full_version < '3.11'", ] +[options] +exclude-newer = "2026-04-22T19:30:48.995337295Z" +exclude-newer-span = "P3D" + +[options.exclude-newer-package] +libvcs = "2026-04-26T00:00:00Z" + [[package]] name = "accessible-pygments" version = "0.0.5" @@ -552,14 +559,14 @@ wheels = [ [[package]] name = "libvcs" -version = "0.39.0" +version = "0.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/eb/a4/66ccfdb47f225c60e254476e4dfec2cb248ff1ee018bccce05a01fd5ae28/libvcs-0.39.0.tar.gz", hash = "sha256:2c175c98cef75f3228e8c859f4af6d22b6cfe5843a2a29317e35c3745fd86a45", size = 597772, upload-time = "2026-02-07T19:31:35.307Z" } +sdist = { url = "https://files.pythonhosted.org/packages/61/a1/080912d310e86f8f597ab6033e5fd4d3e798aac3f5ec5cd992cfbffd0ecd/libvcs-0.40.0.tar.gz", hash = "sha256:a6dcfce94d3a7ed7212a5830ebe90f3383e7038606fbf9cd39db35d87fb02ea8", size = 617387, upload-time = 
"2026-04-25T18:59:13.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6f/60/960cc0fa9b7df6dd9e6f89340629a902f068d25406f8828369d2bcac20f4/libvcs-0.39.0-py3-none-any.whl", hash = "sha256:5b20514e57b53c02ce07d57c708b4b7828a632479b9ea393ebeba31e0377a5f0", size = 96997, upload-time = "2026-02-07T19:31:34.021Z" }, + { url = "https://files.pythonhosted.org/packages/cb/eb/ece616f90f807de6fd77e1680daa4fd11913b5ec7c6a77c09fe372675eda/libvcs-0.40.0-py3-none-any.whl", hash = "sha256:5f3340b64211a971d033b7358a669a9a68bb7e800f3413dd8cef668ec1226c5a", size = 101669, upload-time = "2026-04-25T18:59:11.847Z" }, ] [[package]] @@ -1671,7 +1678,7 @@ typings = [ [package.metadata] requires-dist = [ { name = "colorama", specifier = ">=0.3.9" }, - { name = "libvcs", specifier = "~=0.39.0" }, + { name = "libvcs", specifier = ">=0.40.0,<0.41.0" }, { name = "pyyaml", specifier = ">=6.0" }, ]