From 2b42ec075b6b8fc55b418b3ad663967cd2fd4b2d Mon Sep 17 00:00:00 2001 From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 19:39:23 +0000 Subject: [PATCH 1/4] fix(e2e): run binary service tests with sudo -E pv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous E2E phases (start-curl, verify-final) use `sudo -E pv start`, which causes `cleanSitesDirs()` to recreate `~/.pv/config/sites/` as root-owned (0755). When s3-binary.sh and mail-binary.sh then ran `pv start` without sudo, the non-root daemon called `os.RemoveAll` on the root-owned sites dir — which failed (no write permission inside it) — causing `GenerateAllConfigs` to abort, `Start()` to exit, and the PID file to be cleaned up. `server.IsRunning()` then returned false, `service:add s3/mail` printed "daemon not running", and the stale daemon-status.json from Phase 19 (empty supervised) was what the test inspected. Fix: use `sudo -E pv` for all daemon and service commands in both binary-service scripts, consistent with every other E2E phase that needs a live daemon. Also increase the post-start sleep from 3 s to 8 s to match the pattern used in start-curl.sh / verify-final.sh (FrankenPHP can take up to 5 s to pass its health check). Co-authored-by: Clovis --- scripts/e2e/mail-binary.sh | 22 +++++++++++----------- scripts/e2e/s3-binary.sh | 20 ++++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/scripts/e2e/mail-binary.sh b/scripts/e2e/mail-binary.sh index 6589f95..34479ae 100755 --- a/scripts/e2e/mail-binary.sh +++ b/scripts/e2e/mail-binary.sh @@ -4,15 +4,15 @@ source "$(dirname "$0")/helpers.sh" echo "==> Phase: Mail binary service (Mailpit) lifecycle" -# Start pv in the background so we have a live daemon for service: commands. 
-pv start >/tmp/pv-mail-e2e.log 2>&1 & +# e2e tests use foreground mode with sudo (previous phases leave root-owned +# config dirs; only root can clean and regenerate them). +sudo -E pv start >/tmp/pv-mail-e2e.log 2>&1 & START_PID=$! -sleep 3 +sleep 8 cleanup() { - pv unlink e2e-mail-env >/dev/null 2>&1 || true - kill "$START_PID" 2>/dev/null || true - pv stop >/dev/null 2>&1 || true + sudo -E pv unlink e2e-mail-env >/dev/null 2>&1 || true + sudo -E pv stop >/dev/null 2>&1 || true rm -rf "${ENVTEST_DIR:-}" 2>/dev/null || true } trap cleanup EXIT @@ -26,7 +26,7 @@ echo "MAIL_MAILER=log" > "$ENVTEST_DIR/.env" pv link "$ENVTEST_DIR" --name e2e-mail-env >/dev/null 2>&1 || { echo "FAIL: pv link for env test"; exit 1; } echo "==> service:add mail" -pv service:add mail || { echo "FAIL: pv service:add mail failed"; exit 1; } +sudo -E pv service:add mail || { echo "FAIL: pv service:add mail failed"; exit 1; } echo "==> Verify mailpit binary exists" test -x "$HOME/.pv/internal/bin/mailpit" || { echo "FAIL: mailpit binary not installed"; exit 1; } @@ -63,7 +63,7 @@ grep -q "MAIL_MAILER=smtp" "$ENVTEST_DIR/.env" || { echo "OK: linked project .env has MAIL_MAILER=smtp" echo "==> service:stop mail" -pv service:stop mail +sudo -E pv service:stop mail sleep 2 if curl -fsS http://127.0.0.1:8025/livez 2>/dev/null; then echo "FAIL: /livez still answering after service:stop" @@ -72,7 +72,7 @@ fi echo "OK: /livez silent after service:stop" echo "==> service:start mail" -pv service:start mail +sudo -E pv service:start mail for i in $(seq 1 20); do if curl -fsS http://127.0.0.1:8025/livez 2>/dev/null; then break; fi sleep 1 @@ -81,13 +81,13 @@ curl -fsS http://127.0.0.1:8025/livez || { echo "FAIL: /livez not reachable afte echo "OK: /livez reachable after service:start" echo "==> service:destroy mail" -pv service:destroy mail +sudo -E pv service:destroy mail test ! -f "$HOME/.pv/internal/bin/mailpit" || { echo "FAIL: mailpit binary not deleted after destroy"; exit 1; } test ! 
-d "$HOME/.pv/services/mail/latest/data" || { echo "FAIL: data dir not deleted after destroy"; exit 1; } echo "OK: binary and data removed" echo "==> pv stop" -pv stop || true +sudo -E pv stop || true trap - EXIT echo "OK: Mail binary service lifecycle passed" diff --git a/scripts/e2e/s3-binary.sh b/scripts/e2e/s3-binary.sh index 91751ce..4e9733b 100755 --- a/scripts/e2e/s3-binary.sh +++ b/scripts/e2e/s3-binary.sh @@ -4,19 +4,19 @@ source "$(dirname "$0")/helpers.sh" echo "==> Phase: S3 binary service (RustFS) lifecycle" -# Start pv in the background so we have a live daemon for service: commands. -pv start >/tmp/pv-s3-e2e.log 2>&1 & +# e2e tests use foreground mode with sudo (previous phases leave root-owned +# config dirs; only root can clean and regenerate them). +sudo -E pv start >/tmp/pv-s3-e2e.log 2>&1 & START_PID=$! -sleep 3 +sleep 8 cleanup() { - kill "$START_PID" 2>/dev/null || true - pv stop >/dev/null 2>&1 || true + sudo -E pv stop >/dev/null 2>&1 || true } trap cleanup EXIT echo "==> service:add s3" -pv service:add s3 || { echo "FAIL: pv service:add s3 failed"; exit 1; } +sudo -E pv service:add s3 || { echo "FAIL: pv service:add s3 failed"; exit 1; } echo "==> Verify rustfs binary exists" test -x "$HOME/.pv/internal/bin/rustfs" || { echo "FAIL: rustfs binary not installed"; exit 1; } @@ -40,7 +40,7 @@ nc -z 127.0.0.1 9000 || { echo "FAIL: port 9000 not reachable after service:add" echo "OK: port 9000 reachable" echo "==> service:stop s3" -pv service:stop s3 +sudo -E pv service:stop s3 sleep 2 if nc -z 127.0.0.1 9000 2>/dev/null; then echo "FAIL: port 9000 still answering after service:stop" @@ -49,7 +49,7 @@ fi echo "OK: port 9000 silent after service:stop" echo "==> service:start s3" -pv service:start s3 +sudo -E pv service:start s3 for i in $(seq 1 20); do if nc -z 127.0.0.1 9000 2>/dev/null; then break; fi sleep 1 @@ -58,13 +58,13 @@ nc -z 127.0.0.1 9000 || { echo "FAIL: port 9000 not reachable after service:star echo "OK: port 9000 reachable after 
service:start" echo "==> service:destroy s3" -pv service:destroy s3 +sudo -E pv service:destroy s3 test ! -f "$HOME/.pv/internal/bin/rustfs" || { echo "FAIL: rustfs binary not deleted after destroy"; exit 1; } test ! -d "$HOME/.pv/services/s3/latest/data" || { echo "FAIL: data dir not deleted after destroy"; exit 1; } echo "OK: binary and data removed" echo "==> pv stop" -pv stop || true +sudo -E pv stop || true trap - EXIT echo "OK: S3 binary service lifecycle passed" From 66cc0967f29ccddf8b289e5fafee345386d047a9 Mon Sep 17 00:00:00 2001 From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:04:16 +0000 Subject: [PATCH 2/4] fix(e2e): retry daemon-status.json check after service:add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SignalDaemon() sends SIGHUP asynchronously — the daemon's reconcile loop (which writes daemon-status.json with supervised entries) runs concurrently with service:add returning. Checking the file immediately races against the reconcile. Add a 20-iteration/1s retry loop matching the pattern used for port-reachability checks in both binary-service scripts. 
Co-authored-by: Clovis --- scripts/e2e/mail-binary.sh | 9 ++++++--- scripts/e2e/s3-binary.sh | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/scripts/e2e/mail-binary.sh b/scripts/e2e/mail-binary.sh index 34479ae..a7a24da 100755 --- a/scripts/e2e/mail-binary.sh +++ b/scripts/e2e/mail-binary.sh @@ -33,10 +33,13 @@ test -x "$HOME/.pv/internal/bin/mailpit" || { echo "FAIL: mailpit binary not ins echo "OK: mailpit binary at ~/.pv/internal/bin/mailpit" echo "==> Verify daemon-status.json lists mailpit" -test -f "$HOME/.pv/daemon-status.json" || { echo "FAIL: daemon-status.json missing"; exit 1; } -grep -q '"mailpit"' "$HOME/.pv/daemon-status.json" || { +for i in $(seq 1 20); do + if grep -q '"mailpit"' "$HOME/.pv/daemon-status.json" 2>/dev/null; then break; fi + sleep 1 +done +grep -q '"mailpit"' "$HOME/.pv/daemon-status.json" 2>/dev/null || { echo "FAIL: daemon-status.json does not contain mailpit entry"; - cat "$HOME/.pv/daemon-status.json"; + cat "$HOME/.pv/daemon-status.json" 2>/dev/null || echo "(file missing)"; exit 1; } echo "OK: daemon-status.json advertises mailpit" diff --git a/scripts/e2e/s3-binary.sh b/scripts/e2e/s3-binary.sh index 4e9733b..1f79166 100755 --- a/scripts/e2e/s3-binary.sh +++ b/scripts/e2e/s3-binary.sh @@ -23,10 +23,13 @@ test -x "$HOME/.pv/internal/bin/rustfs" || { echo "FAIL: rustfs binary not insta echo "OK: rustfs binary at ~/.pv/internal/bin/rustfs" echo "==> Verify daemon-status.json lists rustfs" -test -f "$HOME/.pv/daemon-status.json" || { echo "FAIL: daemon-status.json missing"; exit 1; } -grep -q '"rustfs"' "$HOME/.pv/daemon-status.json" || { +for i in $(seq 1 20); do + if grep -q '"rustfs"' "$HOME/.pv/daemon-status.json" 2>/dev/null; then break; fi + sleep 1 +done +grep -q '"rustfs"' "$HOME/.pv/daemon-status.json" 2>/dev/null || { echo "FAIL: daemon-status.json does not contain rustfs entry"; - cat "$HOME/.pv/daemon-status.json"; + cat "$HOME/.pv/daemon-status.json" 2>/dev/null || echo "(file missing)"; 
exit 1; } echo "OK: daemon-status.json advertises rustfs" From 348275e7d8805fac11e6dbf6007f4596b2f67ca1 Mon Sep 17 00:00:00 2001 From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:38:59 +0000 Subject: [PATCH 3/4] fix(e2e): stop running daemon before foreground start in start-curl.sh pv install auto-starts the daemon; daemon:disable unregisters the launchd service but the process stays alive until FrankenPHP and DNS finish shutting down. The subsequent sudo pv start saw a live PID and failed. Add an explicit pv stop (waits up to 5 s with SIGTERM/SIGKILL) between daemon:disable and the foreground start. Co-authored-by: Clovis --- scripts/e2e/start-curl.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/e2e/start-curl.sh b/scripts/e2e/start-curl.sh index 974944a..97561d8 100755 --- a/scripts/e2e/start-curl.sh +++ b/scripts/e2e/start-curl.sh @@ -4,6 +4,8 @@ source "$(dirname "$0")/helpers.sh" # Disable daemon if auto-enabled during install — e2e tests use foreground mode with sudo. pv daemon:disable 2>/dev/null || true +# pv install starts the daemon immediately; stop it so the PID file is gone before we start fresh. +sudo -E pv stop >/dev/null 2>&1 || true sleep 1 sudo -E pv start & From d6d0b756536fc6e28af079bb3a3b6fb95dab1d1f Mon Sep 17 00:00:00 2001 From: "claude[bot]" <41898282+claude[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 20:55:07 +0000 Subject: [PATCH 4/4] fix(e2e): use sudo -E for pv link in mail-binary.sh Root-owned config dirs (created by sudo -E pv start) block the non-root pv link call. Consistent with the sudo -E pv unlink already in cleanup. 
Co-authored-by: Clovis --- scripts/e2e/mail-binary.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/e2e/mail-binary.sh b/scripts/e2e/mail-binary.sh index a7a24da..f7d4a76 100755 --- a/scripts/e2e/mail-binary.sh +++ b/scripts/e2e/mail-binary.sh @@ -23,7 +23,7 @@ echo '{"require":{"php":"^8.2","laravel/framework":"^11.0"}}' > "$ENVTEST_DIR/co mkdir -p "$ENVTEST_DIR/public" echo '<?php phpinfo();' > "$ENVTEST_DIR/public/index.php" echo "MAIL_MAILER=log" > "$ENVTEST_DIR/.env" -pv link "$ENVTEST_DIR" --name e2e-mail-env >/dev/null 2>&1 || { echo "FAIL: pv link for env test"; exit 1; } +sudo -E pv link "$ENVTEST_DIR" --name e2e-mail-env >/dev/null 2>&1 || { echo "FAIL: pv link for env test"; exit 1; } echo "==> service:add mail" sudo -E pv service:add mail || { echo "FAIL: pv service:add mail failed"; exit 1; }