From 6877b955ef91144e036b61aa8c3236cbd8ed4100 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 17:42:44 +0900 Subject: [PATCH 1/6] feat: automate rc draft release flow --- .github/workflows/bump-version.yml | 200 ++++++++++++++++++++++++---- .github/workflows/release.yml | 184 ++++++++++++++++++++++++- docs/release-prerelease-workflow.md | 78 +++++++---- 3 files changed, 409 insertions(+), 53 deletions(-) diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index 05efcfd5..12a6e9a4 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -1,6 +1,7 @@ name: Bump Version on: + create: workflow_dispatch: inputs: bump_type: @@ -16,6 +17,13 @@ on: description: 'Custom version (only used when bump_type is "custom", e.g. 1.0.0-beta.1)' required: false type: string + push: + branches: + - 'rc/v*' + paths-ignore: + - 'package.json' + - 'src-tauri/Cargo.toml' + - 'src-tauri/Cargo.lock' permissions: contents: write @@ -23,15 +31,17 @@ permissions: concurrency: group: bump-version-${{ github.ref_name }} - cancel-in-progress: false + cancel-in-progress: true jobs: prepare: + if: ${{ github.event_name != 'create' || (github.event.ref_type == 'branch' && startsWith(github.event.ref, 'rc/v')) }} runs-on: ubuntu-latest outputs: current_version: ${{ steps.version.outputs.current }} new_version: ${{ steps.version.outputs.new }} is_prerelease: ${{ steps.version.outputs.is_prerelease }} + ref_name: ${{ steps.version.outputs.ref_name }} steps: - name: Checkout uses: actions/checkout@v4 @@ -47,28 +57,65 @@ jobs: id: version run: | CURRENT=$(node -p "require('./package.json').version") + CURRENT_CARGO=$(node - <<'NODE' + const fs = require("fs"); + const cargo = fs.readFileSync("src-tauri/Cargo.toml", "utf8"); + const match = cargo.match(/^version\s*=\s*"([^"]+)"/m); + if (!match) { + console.error("Failed to locate version field in src-tauri/Cargo.toml"); + process.exit(1); + } + process.stdout.write(match[1]); 
+ NODE + ) + + if [ "$CURRENT" != "$CURRENT_CARGO" ]; then + echo "::error::Version mismatch: package.json=$CURRENT, src-tauri/Cargo.toml=$CURRENT_CARGO" + exit 1 + fi + echo "current=$CURRENT" >> "$GITHUB_OUTPUT" + EVENT_NAME="${{ github.event_name }}" + if [ "$EVENT_NAME" = "create" ]; then + REF_NAME="${{ github.event.ref }}" + else + REF_NAME="${{ github.ref_name }}" + fi + echo "ref_name=$REF_NAME" >> "$GITHUB_OUTPUT" + BUMP_TYPE="${{ inputs.bump_type }}" SEMVER_REGEX='^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9A-Za-z-][0-9A-Za-z-]*)(\.(0|[1-9][0-9]*|[0-9A-Za-z-][0-9A-Za-z-]*))*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' - if [ "$BUMP_TYPE" = "custom" ]; then - NEW="${{ inputs.custom_version }}" - if [ -z "$NEW" ]; then - echo "::error::custom_version is required when bump_type is 'custom'" - exit 1 - fi - if ! echo "$NEW" | grep -qE "$SEMVER_REGEX"; then - echo "::error::Invalid custom version: $NEW (must follow semver)" - exit 1 + if [ "$EVENT_NAME" = "workflow_dispatch" ]; then + if [ "$BUMP_TYPE" = "custom" ]; then + NEW="${{ inputs.custom_version }}" + if [ -z "$NEW" ]; then + echo "::error::custom_version is required when bump_type is 'custom'" + exit 1 + fi + if ! echo "$NEW" | grep -qE "$SEMVER_REGEX"; then + echo "::error::Invalid custom version: $NEW (must follow semver)" + exit 1 + fi + else + BASE="${CURRENT%%[-+]*}" + IFS='.' read -r MAJOR MINOR PATCH <<< "$BASE" + case "$BUMP_TYPE" in + major) NEW="$((MAJOR + 1)).0.0" ;; + minor) NEW="${MAJOR}.$((MINOR + 1)).0" ;; + patch) NEW="${MAJOR}.${MINOR}.$((PATCH + 1))" ;; + esac fi else - BASE="${CURRENT%%[-+]*}" - IFS='.' 
read -r MAJOR MINOR PATCH <<< "$BASE" - case "$BUMP_TYPE" in - major) NEW="$((MAJOR + 1)).0.0" ;; - minor) NEW="${MAJOR}.$((MINOR + 1)).0" ;; - patch) NEW="${MAJOR}.${MINOR}.$((PATCH + 1))" ;; + case "$REF_NAME" in + rc/v*) + NEW="${REF_NAME#rc/v}" + ;; + *) + echo "::error::Branch $REF_NAME does not match rc/v" + exit 1 + ;; esac fi @@ -77,7 +124,65 @@ jobs: exit 1 fi - if [ "$NEW" = "$CURRENT" ]; then + COMPARISON=$( + CURRENT_VERSION="$CURRENT" TARGET_VERSION="$NEW" node - <<'NODE' + function parse(version) { + const match = version.match(/^(\d+)\.(\d+)\.(\d+)(?:-([0-9A-Za-z.-]+))?(?:\+([0-9A-Za-z.-]+))?$/); + if (!match) { + throw new Error(`Invalid semver: ${version}`); + } + return { + major: Number(match[1]), + minor: Number(match[2]), + patch: Number(match[3]), + prerelease: match[4] ? match[4].split(".") : [] + }; + } + + function compareIdentifier(left, right) { + const leftNumeric = /^\d+$/.test(left); + const rightNumeric = /^\d+$/.test(right); + if (leftNumeric && rightNumeric) return Number(left) - Number(right); + if (leftNumeric) return -1; + if (rightNumeric) return 1; + return left.localeCompare(right); + } + + function compare(left, right) { + for (const key of ["major", "minor", "patch"]) { + if (left[key] !== right[key]) { + return left[key] - right[key]; + } + } + + if (!left.prerelease.length && !right.prerelease.length) return 0; + if (!left.prerelease.length) return 1; + if (!right.prerelease.length) return -1; + + const len = Math.max(left.prerelease.length, right.prerelease.length); + for (let i = 0; i < len; i += 1) { + if (left.prerelease[i] === undefined) return -1; + if (right.prerelease[i] === undefined) return 1; + const diff = compareIdentifier(left.prerelease[i], right.prerelease[i]); + if (diff !== 0) return diff; + } + + return 0; + } + + const current = parse(process.env.CURRENT_VERSION); + const next = parse(process.env.TARGET_VERSION); + const diff = compare(next, current); + process.stdout.write(diff < 0 ? "lt" : diff > 0 ? 
"gt" : "eq"); + NODE + ) + + if [ "$COMPARISON" = "lt" ]; then + echo "::error::New version $NEW would roll back current version $CURRENT" + exit 1 + fi + + if [ "$EVENT_NAME" = "workflow_dispatch" ] && [ "$COMPARISON" = "eq" ]; then echo "::error::New version equals current version ($CURRENT)" exit 1 fi @@ -94,12 +199,45 @@ jobs: echo "new=$NEW" >> "$GITHUB_OUTPUT" - if [[ "$NEW" == *-* ]]; then - echo "is_prerelease=true" >> "$GITHUB_OUTPUT" - echo "### Tag type: prerelease" >> "$GITHUB_STEP_SUMMARY" + if [ "$EVENT_NAME" = "workflow_dispatch" ]; then + if [[ "$NEW" == *-* ]]; then + echo "is_prerelease=true" >> "$GITHUB_OUTPUT" + echo "### Tag type: prerelease" >> "$GITHUB_STEP_SUMMARY" + else + echo "is_prerelease=false" >> "$GITHUB_OUTPUT" + echo "### Tag type: release" >> "$GITHUB_STEP_SUMMARY" + fi else - echo "is_prerelease=false" >> "$GITHUB_OUTPUT" - echo "### Tag type: release" >> "$GITHUB_STEP_SUMMARY" + git fetch --no-tags origin main develop + + MAIN_BASE=$(git merge-base HEAD origin/main || true) + DEVELOP_BASE=$(git merge-base HEAD origin/develop || true) + + if [ -z "$MAIN_BASE" ] || [ -z "$DEVELOP_BASE" ]; then + echo "::error::Failed to resolve ancestry against origin/main and origin/develop" + exit 1 + fi + + if [ "$MAIN_BASE" = "$DEVELOP_BASE" ]; then + echo "::error::Could not distinguish whether $REF_NAME was created from main or develop" + exit 1 + fi + + if git merge-base --is-ancestor "$MAIN_BASE" "$DEVELOP_BASE"; then + SOURCE_BRANCH="develop" + echo "is_prerelease=true" >> "$GITHUB_OUTPUT" + echo "### Tag type: prerelease" >> "$GITHUB_STEP_SUMMARY" + elif git merge-base --is-ancestor "$DEVELOP_BASE" "$MAIN_BASE"; then + SOURCE_BRANCH="main" + echo "is_prerelease=false" >> "$GITHUB_OUTPUT" + echo "### Tag type: release" >> "$GITHUB_STEP_SUMMARY" + else + echo "::error::Ambiguous ancestry for $REF_NAME; unable to determine whether it came from main or develop" + exit 1 + fi + + echo "### Source branch: $SOURCE_BRANCH" >> 
"$GITHUB_STEP_SUMMARY" + echo "### Trigger mode: $EVENT_NAME" >> "$GITHUB_STEP_SUMMARY" fi echo "### Version bump: $CURRENT → $NEW" >> "$GITHUB_STEP_SUMMARY" @@ -416,7 +554,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{ github.ref_name }} + ref: ${{ needs.prepare.outputs.ref_name }} - name: Setup Node.js uses: actions/setup-node@v4 @@ -459,14 +597,21 @@ jobs: git config user.email "github-actions[bot]@users.noreply.github.com" git add package.json src-tauri/Cargo.toml src-tauri/Cargo.lock - git diff --cached --quiet && { echo "No changes to commit"; exit 1; } + if git diff --cached --quiet; then + COMMIT_SHA=$(git rev-parse HEAD) + echo "No version changes to commit" + echo "commit_sha=$COMMIT_SHA" >> "$GITHUB_OUTPUT" + echo "committed=false" >> "$GITHUB_OUTPUT" + exit 0 + fi git commit -m "chore: bump version to ${VERSION}" echo "Bumping to prerelease: $IS_PRERELEASE" - git push origin "HEAD:${GITHUB_REF_NAME}" + git push origin "HEAD:${{ needs.prepare.outputs.ref_name }}" COMMIT_SHA=$(git rev-parse HEAD) echo "commit_sha=$COMMIT_SHA" >> "$GITHUB_OUTPUT" + echo "committed=true" >> "$GITHUB_OUTPUT" - name: Trigger Release workflow (draft) env: @@ -474,7 +619,7 @@ jobs: COMMIT_SHA: ${{ steps.commit_push.outputs.commit_sha }} run: | gh workflow run release.yml \ - --ref "${GITHUB_REF_NAME}" \ + --ref "${{ needs.prepare.outputs.ref_name }}" \ -f version="${VERSION}" \ -f target_commitish="${COMMIT_SHA}" \ -f is_prerelease="${IS_PRERELEASE}" @@ -482,7 +627,8 @@ jobs: - name: Release trigger summary run: | echo "### Release draft trigger" >> "$GITHUB_STEP_SUMMARY" - echo "- Pushed branch commit: ${GITHUB_REF_NAME}@${{ steps.commit_push.outputs.commit_sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "- Pushed branch commit: ${{ needs.prepare.outputs.ref_name }}@${{ steps.commit_push.outputs.commit_sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "- Version commit created: ${{ steps.commit_push.outputs.committed }}" >> "$GITHUB_STEP_SUMMARY" echo "- Tag to be 
created on publish: v${VERSION}" >> "$GITHUB_STEP_SUMMARY" echo "- Prerelease: ${IS_PRERELEASE}" >> "$GITHUB_STEP_SUMMARY" echo "- Next: \`Release\` workflow is dispatched to create/update draft release (without pushing tag)." >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9227a31d..3cfa5f22 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,6 +19,10 @@ on: permissions: contents: write +concurrency: + group: release-${{ inputs.version }} + cancel-in-progress: true + jobs: changelog: runs-on: ubuntu-latest @@ -71,8 +75,53 @@ jobs: echo "CHANGELOG_EOF" } >> "$GITHUB_OUTPUT" - build: + validate_release_state: needs: changelog + runs-on: ubuntu-latest + steps: + - name: Fail if the tag already has a published release + uses: actions/github-script@v7 + env: + VERSION: ${{ inputs.version }} + TARGET_COMMITISH: ${{ inputs.target_commitish }} + IS_PRERELEASE: ${{ inputs.is_prerelease }} + with: + script: | + const tag = `v${process.env.VERSION}`; + const releases = await github.paginate(github.rest.repos.listReleases, { + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100, + }); + + const existing = releases.find((release) => release.tag_name === tag); + const prerelease = process.env.IS_PRERELEASE === "true" || process.env.VERSION.includes("-"); + + if (existing && !existing.draft) { + core.setFailed(`Release ${tag} has already been published and cannot be overwritten.`); + return; + } + + if (existing) { + core.summary + .addHeading("Release state") + .addRaw(`Draft release \`${tag}\` already exists and will be refreshed.\n\n`) + .addRaw(`- URL: ${existing.html_url}\n`) + .addRaw(`- Target commit: \`${process.env.TARGET_COMMITISH}\`\n`) + .addRaw(`- Prerelease: \`${prerelease}\`\n`) + .write(); + return; + } + + core.summary + .addHeading("Release state") + .addRaw(`Draft release \`${tag}\` does not exist yet and will be created by this run.\n\n`) + 
.addRaw(`- Target commit: \`${process.env.TARGET_COMMITISH}\`\n`) + .addRaw(`- Prerelease: \`${prerelease}\`\n`) + .write(); + + build: + needs: [changelog, validate_release_state] strategy: fail-fast: false matrix: @@ -1028,6 +1077,72 @@ jobs: gh release upload "${{ steps.release_meta.outputs.tag }}" $dest --clobber } + - name: Collect final release artifacts + shell: bash + env: + SHOULD_SIGN: ${{ steps.signing.outputs.should_sign }} + TARGET: ${{ matrix.target }} + PLATFORM: ${{ matrix.platform }} + REPACKED_DMG: ${{ steps.repack_dmg.outputs.dmg_path }} + ASSET_SUFFIX: ${{ steps.signing.outputs.asset_suffix }} + run: | + set -euo pipefail + + mkdir -p final-artifacts + + if [ "$SHOULD_SIGN" = "true" ] && [[ "$TARGET" == *apple-darwin* ]] && [ -n "$REPACKED_DMG" ]; then + cp "$REPACKED_DMG" final-artifacts/ + if [ -f "${REPACKED_DMG}.sig" ]; then + cp "${REPACKED_DMG}.sig" final-artifacts/ + fi + elif [ "$SHOULD_SIGN" != "true" ] && [ -d unsigned-release-assets ]; then + find unsigned-release-assets -maxdepth 1 -type f -exec cp {} final-artifacts/ \; + else + CANDIDATES=( + "target/${TARGET}/release/bundle" + "src-tauri/target/${TARGET}/release/bundle" + ) + + BUNDLE_DIR="" + for candidate in "${CANDIDATES[@]}"; do + if [ -d "$candidate" ]; then + BUNDLE_DIR="$candidate" + break + fi + done + + if [ -n "$BUNDLE_DIR" ]; then + find "$BUNDLE_DIR" -type f \( \ + -name "*.dmg" -o \ + -name "*.deb" -o \ + -name "*.AppImage" -o \ + -name "*.msi" -o \ + -name "*.exe" -o \ + -name "*.rpm" \ + \) -exec cp {} final-artifacts/ \; + fi + fi + + PORTABLE="ClawPal_portable_x64${ASSET_SUFFIX}.exe" + if [ -f "$PORTABLE" ]; then + cp "$PORTABLE" final-artifacts/ + fi + + echo "### Final release artifacts (${{ matrix.label }})" >> "$GITHUB_STEP_SUMMARY" + if [ -n "$(find final-artifacts -mindepth 1 -maxdepth 1 -type f -print -quit)" ]; then + ls -lh final-artifacts >> "$GITHUB_STEP_SUMMARY" + else + echo "- No final artifacts collected" >> "$GITHUB_STEP_SUMMARY" + fi + + - name: 
Upload release artifacts + uses: actions/upload-artifact@v4 + with: + name: release-${{ matrix.label }} + path: final-artifacts/* + retention-days: 7 + if-no-files-found: warn + - name: Cleanup Apple signing (macOS only) if: ${{ contains(matrix.target, 'apple-darwin') && always() }} run: | @@ -1067,3 +1182,70 @@ jobs: echo "Deleting asset: $name" gh release delete-asset "$TAG" "$name" --repo "$GITHUB_REPOSITORY" -y done + + release_summary: + name: Release Summary + needs: [build, cleanup_release_assets] + runs-on: ubuntu-latest + if: ${{ always() && needs.build.result == 'success' }} + steps: + - name: Write summary with workflow artifacts and draft release assets + uses: actions/github-script@v7 + env: + VERSION: ${{ inputs.version }} + TARGET_COMMITISH: ${{ inputs.target_commitish }} + IS_PRERELEASE: ${{ inputs.is_prerelease }} + with: + script: | + const tag = `v${process.env.VERSION}`; + const prerelease = process.env.IS_PRERELEASE === "true" || process.env.VERSION.includes("-"); + const runUrl = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + + const { data: { artifacts } } = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.runId, + }); + + const workflowArtifacts = artifacts.filter((artifact) => artifact.name.startsWith("release-")); + const releases = await github.paginate(github.rest.repos.listReleases, { + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100, + }); + const release = releases.find((item) => item.tag_name === tag); + + let body = `## Release Summary\n\n`; + body += `- Tag: \`${tag}\`\n`; + body += `- Target commit: \`${process.env.TARGET_COMMITISH}\`\n`; + body += `- Prerelease: \`${prerelease}\`\n`; + body += `- Workflow run: ${runUrl}\n\n`; + + body += `### Workflow Artifacts\n\n`; + if (workflowArtifacts.length === 0) { + body += `- No workflow artifacts found.\n`; + } else { + body += `| 
Artifact | Download | Size |\n|---|---|---|\n`; + for (const artifact of workflowArtifacts) { + const sizeMb = (artifact.size_in_bytes / (1024 * 1024)).toFixed(1); + body += `| ${artifact.name} | [Download](${runUrl}/artifacts/${artifact.id}) | ${sizeMb} MB |\n`; + } + } + + body += `\n### Draft Release Assets\n\n`; + if (!release) { + body += `- Draft release \`${tag}\` was not found.\n`; + } else { + body += `- [Open draft release](${release.html_url})\n`; + if (release.assets.length === 0) { + body += `- No draft release assets found.\n`; + } else { + body += `\n| Asset | Download | Size |\n|---|---|---|\n`; + for (const asset of release.assets) { + const sizeMb = (asset.size / (1024 * 1024)).toFixed(1); + body += `| ${asset.name} | [Download](${asset.browser_download_url}) | ${sizeMb} MB |\n`; + } + } + } + + core.summary.addRaw(body).write(); diff --git a/docs/release-prerelease-workflow.md b/docs/release-prerelease-workflow.md index 72153d85..87953bc9 100644 --- a/docs/release-prerelease-workflow.md +++ b/docs/release-prerelease-workflow.md @@ -1,30 +1,51 @@ # ClawPal Release / Prerelease 流程说明 -本文基于当前仓库 `.github/workflows/bump-version.yml` 与 `.github/workflows/release.yml`(2026-03-05)整理,说明 `release` 与 `prerelease` 的实际执行流程,以及 Apple Developer 签名/公证行为。 +本文基于当前仓库 `.github/workflows/bump-version.yml` 与 `.github/workflows/release.yml`(2026-03-06)整理,说明 `release` 与 `prerelease` 的实际执行流程,以及 Apple Developer 签名/公证行为。 ## 1. 触发入口(推荐) -推荐通过 `Bump Version` workflow(手动触发)作为统一入口: - -1. 校验目标版本(严格 semver + tag 冲突检查) -2. 更新代码版本(`package.json` / `src-tauri/Cargo.toml` / `src-tauri/Cargo.lock`) -3. 运行测试 CI(前端 typecheck/build + Rust fmt/clippy/test) -4. 运行打包 CI(4 平台矩阵,验证可打包) -5. 全部通过后才执行 commit + push(不打 tag) -6. `Bump Version` 直接 dispatch `Release` workflow 创建/更新 Draft Release -7. 人工审核后点击 Publish,GitHub 才会创建 `vX.Y.Z` tag - -## 2. Release Workflow 触发条件 - -- Workflow: `Release` -- 触发事件: `workflow_dispatch`(由 `Bump Version` 触发) -- 输入: +推荐通过 **远端创建 `rc/v` 分支** 作为统一入口: + +1. 
从 `main` 或 `develop` 创建远端分支: + - 正式版:`rc/vX.Y.Z` + - 预发布:`rc/vX.Y.Z-rc.N`(也允许 `develop -> rc/vX.Y.Z`,仍按 prerelease 处理) +2. `Bump Version` 在分支创建事件(`create`)上自动启动,无需先手动 push 第二次提交 +3. workflow 从分支名解析版本,并校验: + - 严格 semver + - `package.json` 与 `src-tauri/Cargo.toml` 当前版本一致 + - 目标版本不低于当前分支版本(防止回退) + - `v` tag 尚未存在 +4. workflow 根据分支祖先关系自动判定发布类型: + - 来自 `main`:`release` + - 来自 `develop`:`prerelease` +5. 首次运行如发现源码版本尚未同步,会自动更新 `package.json` / `src-tauri/Cargo.toml` / `src-tauri/Cargo.lock` 并 push 回当前 rc 分支 +6. 随后运行测试 CI(前端 typecheck/build + Rust fmt/clippy/test) +7. 运行打包 CI(4 平台矩阵,验证可打包) +8. `Bump Version` dispatch `Release` workflow 创建/更新 Draft Release +9. 后续对同一 rc 分支的 push 不再重复 bump 版本,但会刷新同一个 Draft Release 及其 artifact +10. 人工审核后点击 Publish,GitHub 才会创建 `vX.Y.Z` tag + +## 2. Workflow 触发条件 + +### `Bump Version` + +- 触发事件: + - `create`:远端创建 `rc/v*` 分支时自动触发 + - `push`:已有 `rc/v*` 分支后续更新时自动触发 + - `workflow_dispatch`:人工兜底入口 +- `create` 只处理 `branch` 类型,tag 创建不会进入发布流程 +- `push` 对纯版本同步提交(`package.json` / `Cargo.toml` / `Cargo.lock`)做了忽略,避免自触发循环 + +### `Release` + +- 触发事件:`workflow_dispatch`(由 `Bump Version` 触发) +- 输入: - `version` - `target_commitish` - `is_prerelease` -- 示例: - - 正式版: `v0.1.1` - - 预发布: `v0.1.1-beta.1` / `v0.1.1-rc.1` +- 示例: + - 正式版:`v0.1.1` + - 预发布:`v0.1.1-beta.1` / `v0.1.1-rc.1` ## 3. 总体结构 @@ -89,6 +110,11 @@ - 正式版一般为 `vX.Y.Z` - 预发布一般为 `vX.Y.Z-alpha.N / beta.N / rc.N` +5. rc 分支来源规则 + - `main -> rc/v...`:正式版 + - `develop -> rc/v...`:预发布 + - 对于自动化入口,发布类型以分支来源为准,而不是只看版本号是否带 `-rc/-beta/-alpha` + ## 6. 签名决策逻辑(关键) 签名由 secrets 是否齐全决定,而不是仅看 release/prerelease: @@ -133,15 +159,17 @@ ## 9. 典型发布操作建议 -1. 先确认版本号与 tag 语义 - - 正式版: `vX.Y.Z` - - 预发布: `vX.Y.Z-beta.N` -2. 手动触发 `Bump Version`,选择 `patch/minor/major/custom` +1. 先确认版本号与发布类型 + - 正式版:从 `main` 创建 `rc/vX.Y.Z` + - 预发布:从 `develop` 创建 `rc/vX.Y.Z-rc.N` 或 `rc/vX.Y.Z` +2. 在远端创建 rc 分支后,等待 `Bump Version` 自动启动 3. 等待 `Bump Version` 的 `Test CI` 与 `Package CI` 全部通过 4. 确认 `Commit and Trigger Draft Release` 成功(此时尚未创建 git tag) 5. 
在 `Release` workflow 中核对 4 平台矩阵构建 -6. 在 draft release 中验证产物、签名和说明 -7. 点击 Publish(此时 GitHub 创建 `vX.Y.Z` tag 并正式发布) +6. 在 `Release` workflow summary 中直接下载本次 run artifact,或跳转到 draft release 下载最终资产 +7. 若 rc 分支有后续更新,直接继续 push 到同一 rc 分支;workflow 会复用同一版本并刷新 draft release +8. 在 draft release 中验证产物、签名和说明 +9. 点击 Publish(此时 GitHub 创建 `vX.Y.Z` tag 并正式发布) ## 10. macOS DMG 安装体验增强(2026-03-05) From d5b5e6591f353ae35d2b4c91c5304190c77f4a75 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 18:29:44 +0900 Subject: [PATCH 2/6] test: replace token-like literals with synthetic constants in e2e fixture --- src-tauri/tests/docker_profile_sync_e2e.rs | 27 ++++++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/src-tauri/tests/docker_profile_sync_e2e.rs b/src-tauri/tests/docker_profile_sync_e2e.rs index e864d30c..4e68140d 100644 --- a/src-tauri/tests/docker_profile_sync_e2e.rs +++ b/src-tauri/tests/docker_profile_sync_e2e.rs @@ -24,6 +24,8 @@ use std::process::Command; const CONTAINER_NAME: &str = "clawpal-e2e-docker-sync"; const SSH_PORT: u16 = 2299; const ROOT_PASSWORD: &str = "clawpal-e2e-pass"; +const TEST_ANTHROPIC_KEY: &str = "test-anthropic-profile-key"; +const TEST_OPENAI_KEY: &str = "test-openai-profile-key"; /// Dockerfile: Ubuntu + openssh-server + Node.js + real openclaw CLI (latest from npm) + seeded OpenClaw config. 
const DOCKERFILE: &str = r#" @@ -81,12 +83,12 @@ RUN cat > /root/.openclaw/agents/main/agent/auth-profiles.json <<'AUTHEOF' "anthropic:default": { "type": "token", "provider": "anthropic", - "token": "e2e-anthropic-fake-key-00000000" + "token": "ANTHROPIC_KEY" }, "openai:default": { "type": "token", "provider": "openai", - "token": "e2e-openai-fake-key-11111111" + "token": "OPENAI_KEY" } } } @@ -107,8 +109,8 @@ RUN apt-get update && \ npm install -g "openclaw@${OPENCLAW_VERSION}" # Set env vars that ClawPal profile sync checks -RUN echo "export ANTHROPIC_API_KEY=e2e-anthropic-fake-key-00000000" >> /root/.bashrc && \ - echo "export OPENAI_API_KEY=e2e-openai-fake-key-11111111" >> /root/.bashrc +RUN echo "export ANTHROPIC_API_KEY=ANTHROPIC_KEY" >> /root/.bashrc && \ + echo "export OPENAI_API_KEY=OPENAI_KEY" >> /root/.bashrc EXPOSE 22 CMD ["/usr/sbin/sshd", "-D"] @@ -149,7 +151,10 @@ fn cleanup_image() { } fn build_image() -> Result<(), String> { - let dockerfile = DOCKERFILE.replace("ROOTPASS", ROOT_PASSWORD); + let dockerfile = DOCKERFILE + .replace("ROOTPASS", ROOT_PASSWORD) + .replace("ANTHROPIC_KEY", TEST_ANTHROPIC_KEY) + .replace("OPENAI_KEY", TEST_OPENAI_KEY); let output = Command::new("docker") .args([ "build", @@ -315,13 +320,21 @@ async fn e2e_docker_profile_sync_and_doctor() { .pointer("/profiles/anthropic:default/token") .and_then(|v| v.as_str()) .expect("anthropic:default token should exist"); +<<<<<<< HEAD assert_eq!(anthropic_token, "e2e-anthropic-fake-key-00000000"); +======= + assert_eq!(anthropic_token, TEST_ANTHROPIC_KEY); +>>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants in e2e fixture) let openai_token = auth .pointer("/profiles/openai:default/token") .and_then(|v| v.as_str()) .expect("openai:default token should exist"); +<<<<<<< HEAD assert_eq!(openai_token, "e2e-openai-fake-key-11111111"); +======= + assert_eq!(openai_token, TEST_OPENAI_KEY); +>>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants 
in e2e fixture) eprintln!("[e2e] Auth store verified: 2 provider credentials found"); // --- Step 4: Extract model profiles from config --- @@ -404,7 +417,11 @@ async fn e2e_docker_profile_sync_and_doctor() { .expect("should read env var"); assert_eq!( env_result.stdout.trim(), +<<<<<<< HEAD "e2e-anthropic-fake-key-00000000", +======= + TEST_ANTHROPIC_KEY, +>>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants in e2e fixture) "ANTHROPIC_API_KEY should be set in remote env" ); eprintln!("[e2e] Remote env vars verified"); From 311da28ca66d08f819c2423997872a6c282432e2 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 18:39:20 +0900 Subject: [PATCH 3/6] test(e2e): resolve docker fixture conflicts with develop --- src-tauri/tests/docker_profile_sync_e2e.rs | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/src-tauri/tests/docker_profile_sync_e2e.rs b/src-tauri/tests/docker_profile_sync_e2e.rs index 4e68140d..d95fad63 100644 --- a/src-tauri/tests/docker_profile_sync_e2e.rs +++ b/src-tauri/tests/docker_profile_sync_e2e.rs @@ -1,8 +1,9 @@ //! E2E test: Docker Ubuntu container with OpenClaw config → ClawPal SSH connect //! → profile sync → doctor check. //! -//! This test spins up a Docker container running Ubuntu with SSH and the latest -//! real `openclaw` CLI (installed via npm), seeds OpenClaw configuration files, then: +//! This test spins up a Docker container running Ubuntu with SSH and a pinned, +//! real `openclaw` CLI (installed from npm), seeds OpenClaw configuration files, then: +//! //! 1. Connects via `SshConnectionPool` (password auth) //! 2. Reads the OpenClaw config from the container //! 3. 
Extracts model profiles from the config @@ -27,7 +28,7 @@ const ROOT_PASSWORD: &str = "clawpal-e2e-pass"; const TEST_ANTHROPIC_KEY: &str = "test-anthropic-profile-key"; const TEST_OPENAI_KEY: &str = "test-openai-profile-key"; -/// Dockerfile: Ubuntu + openssh-server + Node.js + real openclaw CLI (latest from npm) + seeded OpenClaw config. +/// Dockerfile: Ubuntu + openssh-server + Node.js + pinned real openclaw CLI + seeded OpenClaw config. const DOCKERFILE: &str = r#" FROM ubuntu:22.04 @@ -320,21 +321,13 @@ async fn e2e_docker_profile_sync_and_doctor() { .pointer("/profiles/anthropic:default/token") .and_then(|v| v.as_str()) .expect("anthropic:default token should exist"); -<<<<<<< HEAD - assert_eq!(anthropic_token, "e2e-anthropic-fake-key-00000000"); -======= assert_eq!(anthropic_token, TEST_ANTHROPIC_KEY); ->>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants in e2e fixture) let openai_token = auth .pointer("/profiles/openai:default/token") .and_then(|v| v.as_str()) .expect("openai:default token should exist"); -<<<<<<< HEAD - assert_eq!(openai_token, "e2e-openai-fake-key-11111111"); -======= assert_eq!(openai_token, TEST_OPENAI_KEY); ->>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants in e2e fixture) eprintln!("[e2e] Auth store verified: 2 provider credentials found"); // --- Step 4: Extract model profiles from config --- @@ -417,11 +410,7 @@ async fn e2e_docker_profile_sync_and_doctor() { .expect("should read env var"); assert_eq!( env_result.stdout.trim(), -<<<<<<< HEAD - "e2e-anthropic-fake-key-00000000", -======= TEST_ANTHROPIC_KEY, ->>>>>>> 0a991c5 (test: replace token-like literals with synthetic constants in e2e fixture) "ANTHROPIC_API_KEY should be set in remote env" ); eprintln!("[e2e] Remote env vars verified"); From a1127e508420a171d7b28d3cf9e4ccb87cffae87 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 18:48:02 +0900 Subject: [PATCH 4/6] fix: avoid double trigger on rc branch creation --- 
.github/workflows/bump-version.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/bump-version.yml b/.github/workflows/bump-version.yml index 12a6e9a4..4bce18db 100644 --- a/.github/workflows/bump-version.yml +++ b/.github/workflows/bump-version.yml @@ -1,7 +1,6 @@ name: Bump Version on: - create: workflow_dispatch: inputs: bump_type: @@ -35,7 +34,6 @@ concurrency: jobs: prepare: - if: ${{ github.event_name != 'create' || (github.event.ref_type == 'branch' && startsWith(github.event.ref, 'rc/v')) }} runs-on: ubuntu-latest outputs: current_version: ${{ steps.version.outputs.current }} From 9e4926ac1909894c5bfb7ce05e152795e432aed7 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 18:56:24 +0900 Subject: [PATCH 5/6] fix: expose ssh connection profile for diagnostics --- src-tauri/src/commands/doctor.rs | 203 +++++++++++++++++++++++++++++++ src-tauri/src/commands/mod.rs | 21 ++++ src-tauri/src/lib.rs | 4 +- src/lib/api.ts | 4 +- src/lib/types.ts | 18 +++ 5 files changed, 248 insertions(+), 2 deletions(-) diff --git a/src-tauri/src/commands/doctor.rs b/src-tauri/src/commands/doctor.rs index ae6dafec..7076dd7c 100644 --- a/src-tauri/src/commands/doctor.rs +++ b/src-tauri/src/commands/doctor.rs @@ -1,5 +1,104 @@ use super::*; use clawpal_core::ssh::diagnostic::{from_any_error, SshIntent, SshStage}; +use std::time::Instant; + +const SSH_QUALITY_EXCELLENT_MAX_MS: u64 = 250; +const SSH_QUALITY_GOOD_MAX_MS: u64 = 550; +const SSH_QUALITY_FAIR_MAX_MS: u64 = 1100; +const SSH_QUALITY_POOR_MAX_MS: u64 = 1900; + +async fn timed_exec_login( + pool: &SshConnectionPool, + host_id: &str, + command: &str, +) -> Result<(crate::cli_runner::CliOutput, u64), String> { + let start = Instant::now(); + let output = pool.exec_login(host_id, command).await?; + Ok((output, start.elapsed().as_millis() as u64)) +} + +async fn timed_openclaw_remote_with_autofix( + pool: &SshConnectionPool, + host_id: &str, + args: &[&str], +) -> Result<(crate::cli_runner::CliOutput, 
u64), String> { + let start = Instant::now(); + let output = run_openclaw_remote_with_autofix(pool, host_id, args).await?; + Ok((output, start.elapsed().as_millis() as u64)) +} + +fn classify_connection_quality(total_ms: u64) -> (&'static str, u8) { + match total_ms { + 0..=SSH_QUALITY_EXCELLENT_MAX_MS => ("excellent", 100), + (SSH_QUALITY_EXCELLENT_MAX_MS + 1)..=SSH_QUALITY_GOOD_MAX_MS => ("good", 84), + (SSH_QUALITY_GOOD_MAX_MS + 1)..=SSH_QUALITY_FAIR_MAX_MS => ("fair", 66), + (SSH_QUALITY_FAIR_MAX_MS + 1)..=SSH_QUALITY_POOR_MAX_MS => ("poor", 42), + _ => ("poor", 18), + } +} + +fn pick_bottleneck_stage( + connect_ms: u64, + gateway_ms: u64, + config_ms: u64, + version_ms: u64, +) -> (&'static str, u64) { + let samples = [ + ("connect", connect_ms), + ("gateway", gateway_ms), + ("config", config_ms), + ("version", version_ms), + ]; + let mut bottleneck = ("other", 0_u64); + for (stage, latency_ms) in samples { + if latency_ms > bottleneck.1 { + bottleneck = (stage, latency_ms); + } + } + bottleneck +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn classify_connection_quality_respects_tuned_thresholds() { + assert_eq!(classify_connection_quality(0), ("excellent", 100)); + assert_eq!(classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS), ("excellent", 100)); + assert_eq!( + classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS + 1), + ("good", 84) + ); + assert_eq!(classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS), ("good", 84)); + assert_eq!( + classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS + 1), + ("fair", 66) + ); + assert_eq!(classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS), ("fair", 66)); + assert_eq!( + classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS + 1), + ("poor", 42) + ); + assert_eq!(classify_connection_quality(SSH_QUALITY_POOR_MAX_MS), ("poor", 42)); + assert_eq!( + classify_connection_quality(SSH_QUALITY_POOR_MAX_MS + 1), + ("poor", 18) + ); + } + + #[test] + fn pick_bottleneck_stage_prefers_largest_latency() { + 
let (stage, latency) = pick_bottleneck_stage(120, 90, 400, 250); + assert_eq!((stage, latency), ("config", 400)); + } + + #[test] + fn pick_bottleneck_stage_keeps_other_on_empty_measurements() { + let (stage, latency) = pick_bottleneck_stage(0, 0, 0, 0); + assert_eq!((stage, latency), ("other", 0)); + } +} #[tauri::command] pub async fn remote_run_doctor( @@ -124,6 +223,110 @@ pub async fn remote_get_system_status( }) } +#[tauri::command] +pub async fn remote_get_ssh_connection_profile( + pool: State<'_, SshConnectionPool>, + host_id: String, +) -> Result { + let total_start = Instant::now(); + let (connect_result, gateway_result, config_result, version_result) = tokio::join!( + timed_exec_login(&pool, &host_id, "true"), + timed_exec_login( + &pool, + &host_id, + "pgrep -f '[o]penclaw-gateway' >/dev/null 2>&1", + ), + timed_openclaw_remote_with_autofix(&pool, &host_id, &["config", "get", "agents", "--json"]), + timed_exec_login(&pool, &host_id, "openclaw --version"), + ); + + let (connect_res, connect_latency_ms) = connect_result?; + let (gateway_res, gateway_latency_ms) = match gateway_result { Ok((out, ms)) => (Ok(out), ms), Err(err) => (Err(err), 0) }; + let (config_res, config_latency_ms) = match config_result { Ok((out, ms)) => (Ok(out), ms), Err(err) => (Err(err), 0) }; + let (version_res, version_latency_ms) = match version_result { Ok((out, ms)) => (Ok(out), ms), Err(err) => (Err(err), 0) }; + + let config_ok = matches!(&config_res, Ok(output) if output.exit_code == 0); + let (active_agents, global_default_model, fallback_models) = match config_res { + Ok(ref output) if output.exit_code == 0 => { + let cfg: Value = crate::cli_runner::parse_json_output(output).unwrap_or(Value::Null); + let explicit = cfg + .pointer("/list") + .and_then(Value::as_array) + .map(|a| a.len() as u32) + .unwrap_or(0); + let agents = if explicit == 0 { 1 } else { explicit }; + let model = cfg + .pointer("/defaults/model") + .and_then(|v| read_model_value(v)) + .or_else(|| { + cfg.pointer("/default/model") + .and_then(read_model_value) + }); + let fallbacks = cfg + .pointer("/defaults/model/fallbacks") + .and_then(Value::as_array) + .map(|arr| { + arr.iter() + 
.filter_map(Value::as_str) + .map(String::from) + .collect() + }) + .unwrap_or_default(); + (agents, model, fallbacks) + } + _ => (0, None, Vec::new()), + }; + + let _openclaw_version = match version_res { + Ok(r) if r.exit_code == 0 => Some(r.stdout.trim().to_string()), + Ok(r) => { + let trimmed = r.stdout.trim().to_string(); + if trimmed.is_empty() { + None + } else { + Some(trimmed) + } + } + Err(_) => None, + }; + + let healthy = match gateway_res { + Ok(r) => r.exit_code == 0, + Err(_) if config_ok => true, + Err(_) => false, + }; + + let total_latency_ms = total_start.elapsed().as_millis() as u64; + let (quality, quality_score) = classify_connection_quality(total_latency_ms); + let (bottleneck_stage, bottleneck_latency_ms) = pick_bottleneck_stage( + connect_latency_ms, + gateway_latency_ms, + config_latency_ms, + version_latency_ms, + ); + + Ok(SshConnectionProfile { + status: StatusLight { + healthy, + active_agents, + global_default_model, + fallback_models, + ssh_diagnostic: None, + }, + connect_latency_ms, + gateway_latency_ms, + config_latency_ms, + version_latency_ms, + total_latency_ms, + quality: quality.to_string(), + quality_score, + bottleneck: SshBottleneck { + stage: bottleneck_stage.to_string(), + latency_ms: bottleneck_latency_ms, + }, + }) +} + #[tauri::command] pub async fn remote_get_status_extra( pool: State<'_, SshConnectionPool>, diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs index 0a9d7a2f..ed7bfee7 100644 --- a/src-tauri/src/commands/mod.rs +++ b/src-tauri/src/commands/mod.rs @@ -450,6 +450,27 @@ pub struct StatusExtra { pub duplicate_installs: Vec, } +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SshBottleneck { + pub stage: String, + pub latency_ms: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SshConnectionProfile { + pub status: StatusLight, + pub connect_latency_ms: u64, + pub gateway_latency_ms: u64, + pub 
config_latency_ms: u64, + pub version_latency_ms: u64, + pub total_latency_ms: u64, + pub quality: String, + pub quality_score: u8, + pub bottleneck: SshBottleneck, +} + /// Clear cached openclaw version — call after upgrade so status shows new version. pub fn clear_openclaw_version_cache() { *OPENCLAW_VERSION_CACHE.lock().unwrap() = None; diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 9228103b..37e74552 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -32,7 +32,8 @@ use crate::commands::{ remote_delete_backup, remote_delete_cron_job, remote_delete_model_profile, remote_delete_sessions_by_ids, remote_deploy_watchdog, remote_diagnose_primary_via_rescue, remote_extract_model_profiles_from_config, remote_fix_issues, remote_get_cron_runs, - remote_get_status_extra, remote_get_system_status, remote_get_watchdog_status, + remote_get_ssh_connection_profile, remote_get_status_extra, remote_get_system_status, + remote_get_watchdog_status, remote_list_agents_overview, remote_list_backups, remote_list_bindings, remote_list_channels_minimal, remote_list_cron_jobs, remote_list_discord_guild_channels, remote_list_history, remote_list_model_profiles, remote_list_session_files, @@ -205,6 +206,7 @@ pub fn run() { sftp_remove_file, remote_read_raw_config, remote_get_system_status, + remote_get_ssh_connection_profile, remote_get_status_extra, remote_list_agents_overview, remote_list_channels_minimal, diff --git a/src/lib/api.ts b/src/lib/api.ts index e4babb4e..425f43ff 100644 --- a/src/lib/api.ts +++ b/src/lib/api.ts @@ -1,5 +1,5 @@ import { invoke } from "@tauri-apps/api/core"; -import type { AgentOverview, AgentSessionAnalysis, AppPreferences, ApplyQueueResult, ApplyResult, BackupInfo, Binding, BugReportSettings, BugReportStats, ChannelNode, CronJob, CronRun, DiscordGuildChannel, DiscoveredInstance, DockerInstance, EnsureAccessResult, GuidanceAction, HistoryItem, InstallMethodCapability, InstallOrchestratorDecision, InstallSession, InstallStepResult, 
InstallTargetDecision, InstanceStatus, StatusExtra, ModelCatalogProvider, ModelProfile, PendingCommand, PrecheckIssue, PreviewQueueResult, PreviewResult, ProviderAuthSuggestion, Recipe, RecordInstallExperienceResult, RegisteredInstance, RelatedSecretPushResult, RemoteAuthSyncResult, RescueBotAction, RescueBotManageResult, RescuePrimaryDiagnosisResult, RescuePrimaryRepairResult, ResolvedApiKey, SshConfigHostSuggestion, SshDiagnosticReport, SshHost, SshIntent, SshTransferStats, SystemStatus, DoctorReport, SessionFile, WatchdogStatus, ZeroclawOauthCompleteResult, ZeroclawOauthLoginStartResult, ZeroclawRuntimeTarget, ZeroclawUsageStats } from "./types"; +import type { AgentOverview, AgentSessionAnalysis, AppPreferences, ApplyQueueResult, ApplyResult, BackupInfo, Binding, BugReportSettings, BugReportStats, ChannelNode, CronJob, CronRun, DiscordGuildChannel, DiscoveredInstance, DockerInstance, EnsureAccessResult, GuidanceAction, HistoryItem, InstallMethodCapability, InstallOrchestratorDecision, InstallSession, InstallStepResult, InstallTargetDecision, InstanceStatus, StatusExtra, ModelCatalogProvider, ModelProfile, PendingCommand, PrecheckIssue, PreviewQueueResult, PreviewResult, ProviderAuthSuggestion, Recipe, RecordInstallExperienceResult, RegisteredInstance, RelatedSecretPushResult, RemoteAuthSyncResult, RescueBotAction, RescueBotManageResult, RescuePrimaryDiagnosisResult, RescuePrimaryRepairResult, ResolvedApiKey, SshConfigHostSuggestion, SshConnectionProfile, SshDiagnosticReport, SshHost, SshIntent, SshTransferStats, SystemStatus, DoctorReport, SessionFile, WatchdogStatus, ZeroclawOauthCompleteResult, ZeroclawOauthLoginStartResult, ZeroclawRuntimeTarget, ZeroclawUsageStats } from "./types"; export const api = { setActiveOpenclawHome: (path: string | null): Promise => @@ -241,6 +241,8 @@ export const api = { invoke("remote_read_raw_config", { hostId }), remoteGetInstanceStatus: (hostId: string): Promise => invoke("remote_get_system_status", { hostId }), + 
remoteGetSshConnectionProfile: (hostId: string): Promise => + invoke("remote_get_ssh_connection_profile", { hostId }), remoteGetStatusExtra: (hostId: string): Promise => invoke("remote_get_status_extra", { hostId }), remoteListAgentsOverview: (hostId: string): Promise => diff --git a/src/lib/types.ts b/src/lib/types.ts index d5a87643..511d14c3 100644 --- a/src/lib/types.ts +++ b/src/lib/types.ts @@ -334,6 +334,24 @@ export interface InstanceStatus { sshDiagnostic?: SshDiagnosticReport | null; } +export type SshConnectionQuality = "excellent" | "good" | "fair" | "poor" | "unknown"; +export type SshConnectionBottleneckStage = "connect" | "gateway" | "config" | "version" | "other"; + +export interface SshConnectionProfile { + status: InstanceStatus; + connectLatencyMs: number; + gatewayLatencyMs: number; + configLatencyMs: number; + versionLatencyMs: number; + totalLatencyMs: number; + quality: SshConnectionQuality; + qualityScore: number; + bottleneck: { + stage: SshConnectionBottleneckStage; + latencyMs: number; + }; +} + export interface StatusExtra { openclawVersion?: string; duplicateInstalls?: string[]; From d29b42c729531b51694d4af57c33093efd563d42 Mon Sep 17 00:00:00 2001 From: Keith Date: Fri, 6 Mar 2026 19:26:43 +0900 Subject: [PATCH 6/6] fix: format rust sources for CI --- src-tauri/src/commands/doctor.rs | 53 +++++++++++++++++++++++--------- src-tauri/src/lib.rs | 34 ++++++++++---------- 2 files changed, 56 insertions(+), 31 deletions(-) diff --git a/src-tauri/src/commands/doctor.rs b/src-tauri/src/commands/doctor.rs index c93d7d78..37324414 100644 --- a/src-tauri/src/commands/doctor.rs +++ b/src-tauri/src/commands/doctor.rs @@ -65,14 +65,38 @@ mod tests { #[test] fn classify_connection_quality_respects_tuned_thresholds() { assert_eq!(classify_connection_quality(0), ("excellent", 100)); - assert_eq!(classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS), ("excellent", 100)); - assert_eq!(classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS + 1), 
("good", 84)); - assert_eq!(classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS), ("good", 84)); - assert_eq!(classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS + 1), ("fair", 66)); - assert_eq!(classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS), ("fair", 66)); - assert_eq!(classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS + 1), ("poor", 42)); - assert_eq!(classify_connection_quality(SSH_QUALITY_POOR_MAX_MS), ("poor", 42)); - assert_eq!(classify_connection_quality(SSH_QUALITY_POOR_MAX_MS + 1), ("poor", 18)); + assert_eq!( + classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS), + ("excellent", 100) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_EXCELLENT_MAX_MS + 1), + ("good", 84) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS), + ("good", 84) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_GOOD_MAX_MS + 1), + ("fair", 66) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS), + ("fair", 66) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_FAIR_MAX_MS + 1), + ("poor", 42) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_POOR_MAX_MS), + ("poor", 42) + ); + assert_eq!( + classify_connection_quality(SSH_QUALITY_POOR_MAX_MS + 1), + ("poor", 18) + ); } #[test] @@ -252,10 +276,7 @@ pub async fn remote_get_ssh_connection_profile( let model = cfg .pointer("/defaults/model") .and_then(|v| read_model_value(v)) - .or_else(|| { - cfg.pointer("/default/model") - .and_then(read_model_value) - }); + .or_else(|| cfg.pointer("/default/model").and_then(read_model_value)); let fallbacks = cfg .pointer("/defaults/model/fallbacks") .and_then(Value::as_array) @@ -284,8 +305,12 @@ pub async fn remote_get_ssh_connection_profile( let total_latency_ms = total_start.elapsed().as_millis() as u64; let (quality, quality_score) = classify_connection_quality(total_latency_ms); - let (bottleneck_stage, bottleneck_latency_ms) = - pick_bottleneck_stage(connect_latency_ms, gateway_latency_ms, 
config_latency_ms, version_latency_ms); + let (bottleneck_stage, bottleneck_latency_ms) = pick_bottleneck_stage( + connect_latency_ms, + gateway_latency_ms, + config_latency_ms, + version_latency_ms, + ); Ok(SshConnectionProfile { status: StatusLight { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 6d6c01be..f34f5e95 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -32,23 +32,23 @@ use crate::commands::{ remote_delete_backup, remote_delete_cron_job, remote_delete_model_profile, remote_delete_sessions_by_ids, remote_deploy_watchdog, remote_diagnose_primary_via_rescue, remote_extract_model_profiles_from_config, remote_fix_issues, remote_get_cron_runs, - remote_get_status_extra, remote_get_system_status, remote_get_ssh_connection_profile, - remote_get_watchdog_status, - remote_list_agents_overview, remote_list_backups, remote_list_bindings, - remote_list_channels_minimal, remote_list_cron_jobs, remote_list_discord_guild_channels, - remote_list_history, remote_list_model_profiles, remote_list_session_files, - remote_manage_rescue_bot, remote_preview_rollback, remote_preview_session, remote_read_app_log, - remote_read_error_log, remote_read_gateway_error_log, remote_read_gateway_log, - remote_read_raw_config, remote_refresh_model_catalog, remote_repair_primary_via_rescue, - remote_resolve_api_keys, remote_restart_gateway, remote_restore_from_backup, remote_rollback, - remote_run_doctor, remote_run_openclaw_upgrade, remote_setup_agent_identity, - remote_start_watchdog, remote_stop_watchdog, remote_sync_profiles_to_local_auth, - remote_test_model_profile, remote_trigger_cron_job, remote_uninstall_watchdog, - remote_upsert_model_profile, remote_write_raw_config, repair_primary_via_rescue, - resolve_api_keys, resolve_provider_auth, restart_gateway, restore_from_backup, rollback, - run_doctor_command, run_openclaw_upgrade, set_active_clawpal_data_dir, - set_active_openclaw_home, set_agent_model, set_bug_report_settings, set_global_model, - 
set_rescue_bot_ui_preference, set_session_model_override, set_ssh_transfer_speed_ui_preference, + remote_get_ssh_connection_profile, remote_get_status_extra, remote_get_system_status, + remote_get_watchdog_status, remote_list_agents_overview, remote_list_backups, + remote_list_bindings, remote_list_channels_minimal, remote_list_cron_jobs, + remote_list_discord_guild_channels, remote_list_history, remote_list_model_profiles, + remote_list_session_files, remote_manage_rescue_bot, remote_preview_rollback, + remote_preview_session, remote_read_app_log, remote_read_error_log, + remote_read_gateway_error_log, remote_read_gateway_log, remote_read_raw_config, + remote_refresh_model_catalog, remote_repair_primary_via_rescue, remote_resolve_api_keys, + remote_restart_gateway, remote_restore_from_backup, remote_rollback, remote_run_doctor, + remote_run_openclaw_upgrade, remote_setup_agent_identity, remote_start_watchdog, + remote_stop_watchdog, remote_sync_profiles_to_local_auth, remote_test_model_profile, + remote_trigger_cron_job, remote_uninstall_watchdog, remote_upsert_model_profile, + remote_write_raw_config, repair_primary_via_rescue, resolve_api_keys, resolve_provider_auth, + restart_gateway, restore_from_backup, rollback, run_doctor_command, run_openclaw_upgrade, + set_active_clawpal_data_dir, set_active_openclaw_home, set_agent_model, + set_bug_report_settings, set_global_model, set_rescue_bot_ui_preference, + set_session_model_override, set_ssh_transfer_speed_ui_preference, set_zeroclaw_doctor_ui_preference, set_zeroclaw_model_preference, setup_agent_identity, sftp_list_dir, sftp_read_file, sftp_remove_file, sftp_write_file, ssh_connect, ssh_connect_with_passphrase, ssh_disconnect, ssh_exec, ssh_status, start_watchdog,