diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0b1053..3501d5a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -168,11 +168,11 @@ jobs: # ── MSRV check ────────────────────────────────────────────────────── msrv: - name: MSRV (1.85) + name: MSRV (1.89) runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@1.85.0 + - uses: dtolnay/rust-toolchain@1.89.0 - uses: Swatinem/rust-cache@v2 - run: cargo check --all diff --git a/.gitignore b/.gitignore index dc94bde..a9aa515 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,7 @@ *.swo .DS_Store .claude/worktrees/ + +# WASM binary assets (built or downloaded, not committed) +rivet-cli/assets/wasm/*.wasm +rivet-cli/assets/wasm/js/ diff --git a/.rivet/agent-context.md b/.rivet/agent-context.md new file mode 100644 index 0000000..3dea467 --- /dev/null +++ b/.rivet/agent-context.md @@ -0,0 +1,95 @@ +# Rivet Agent Context + +Auto-generated by `rivet context` — do not edit. 
+ +## Project + +- **Name:** rivet +- **Version:** 0.1.0 +- **Schemas:** common, dev, aadl +- **Sources:** artifacts (generic-yaml) +- **Docs:** docs, arch +- **Results:** results + +## Artifacts + +| Type | Count | Example IDs | +|------|-------|-------------| +| aadl-component | 21 | ARCH-SYS-001, ARCH-SYS-002, ARCH-CORE-001 | +| design-decision | 10 | DD-001, DD-002, DD-003 | +| feature | 30 | FEAT-001, FEAT-002, FEAT-003 | +| requirement | 16 | REQ-001, REQ-002, REQ-003 | +| **Total** | **77** | | + +## Schema + +- **`aadl-analysis-result`** — Output of a spar analysis pass + Required fields: analysis-name, severity +- **`aadl-component`** — AADL component type or implementation imported from spar + Required fields: category, aadl-package +- **`aadl-flow`** — End-to-end flow with latency bounds + Required fields: flow-kind +- **`design-decision`** — An architectural or design decision with rationale + Required fields: rationale +- **`feature`** — A user-visible capability or feature + Required fields: (none) +- **`requirement`** — A functional or non-functional requirement + Required fields: (none) + +### Link Types + +- `allocated-to` (inverse: `allocated-from`) +- `constrained-by` (inverse: `constrains`) +- `depends-on` (inverse: `depended-on-by`) +- `derives-from` (inverse: `derived-into`) +- `implements` (inverse: `implemented-by`) +- `mitigates` (inverse: `mitigated-by`) +- `modeled-by` (inverse: `models`) +- `refines` (inverse: `refined-by`) +- `satisfies` (inverse: `satisfied-by`) +- `traces-to` (inverse: `traced-from`) +- `verifies` (inverse: `verified-by`) + +## Traceability Rules + +| Rule | Source Type | Severity | Description | +|------|------------|----------|-------------| +| requirement-coverage | requirement | warning | Every requirement should be satisfied by at least one design decision or feature | +| decision-justification | design-decision | error | Every design decision must link to at least one requirement | +| 
aadl-component-has-allocation | aadl-component | info | AADL component should trace to a requirement or architecture element | + +## Coverage + +**Overall: 100.0%** + +| Rule | Source Type | Covered | Total | % | +|------|------------|---------|-------|---| +| requirement-coverage | requirement | 16 | 16 | 100.0% | +| decision-justification | design-decision | 10 | 10 | 100.0% | +| aadl-component-has-allocation | aadl-component | 21 | 21 | 100.0% | + +## Validation + +0 errors, 0 warnings + +## Documents + +4 documents loaded + +## Commands + +```bash +rivet validate # validate all artifacts +rivet list # list all artifacts +rivet list -t # filter by type +rivet stats # artifact counts + orphans +rivet coverage # traceability coverage report +rivet matrix --from X --to Y # traceability matrix +rivet diff --base A --head B # compare artifact sets +rivet schema list # list schema types +rivet schema show # show type details +rivet schema rules # list traceability rules +rivet export -f generic-yaml # export as YAML +rivet serve # start dashboard on :3000 +rivet context # regenerate this file +``` diff --git a/Cargo.lock b/Cargo.lock index 53b07c3..b815d5b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,6 +239,21 @@ dependencies = [ "generic-array", ] +[[package]] +name = "borsh" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +dependencies = [ + "cfg_aliases", +] + +[[package]] +name = "boxcar" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f64beae40a84da1b4b26ff2761a5b895c12adc41dc25aaee1c4f2bbfe97a6e" + [[package]] name = "bumpalo" version = "3.20.2" @@ -356,6 +371,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "ciborium" version = "0.2.2" @@ -464,6 +485,12 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "countme" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7704b5fdd17b18ae31c4c1da5a2e0305a2bf17b5249300a9ee9ed7b72114c636" + [[package]] name = "cpp_demangle" version = "0.4.5" @@ -541,7 +568,7 @@ dependencies = [ "log", "pulley-interpreter", "regalloc2", - "rustc-hash", + "rustc-hash 2.1.1", "serde", "smallvec", "target-lexicon", @@ -687,6 +714,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -1023,7 +1059,7 @@ checksum = "25234f20a3ec0a962a61770cfe39ecf03cb529a6e474ad8cff025ed497eda557" dependencies = [ "bitflags", "debugid", - "rustc-hash", + "rustc-hash 2.1.1", "serde", "serde_derive", "serde_json", @@ -1117,12 +1153,20 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + [[package]] name = "hashbrown" version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ + "allocator-api2", + "equivalent", "foldhash", "serde", ] @@ -1133,6 +1177,15 @@ version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "heck" version = "0.5.0" @@ -1434,6 +1487,24 @@ dependencies = [ "serde_core", ] +[[package]] +name = "intrusive-collections" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189d0897e4cbe8c75efedf3502c18c887b05046e59d28404d4d8e46cbc4d1e86" +dependencies = [ + "memoffset", +] + +[[package]] +name = "inventory" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "009ae045c87e7082cb72dab0ccd01ae075dd00141ddc108f43a0ea150a9e7227" +dependencies = [ + "rustversion", +] + [[package]] name = "io-extras" version = "0.18.4" @@ -1571,6 +1642,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "la-arena" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3752f229dcc5a481d60f385fa479ff46818033d881d2d801aa27dffcfb5e8306" + [[package]] name = "lazy_static" version = "1.5.0" @@ -1679,6 +1756,15 @@ dependencies = [ "rustix 1.1.4", ] +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.17" @@ -2171,7 +2257,7 @@ dependencies = [ "bumpalo", "hashbrown 0.15.5", "log", - "rustc-hash", + "rustc-hash 2.1.1", "smallvec", ] @@ -2275,12 +2361,14 @@ dependencies = [ "serde_yaml", "tokio", "tower-http", + "urlencoding", ] [[package]] name = "rivet-core" version = "0.1.0" dependencies = [ + "anyhow", "criterion", "log", "petgraph", @@ -2290,6 +2378,8 @@ dependencies = [ "serde", "serde_json", "serde_yaml", 
+ "spar-analysis", + "spar-hir", "thiserror 2.0.18", "tokio", "urlencoding", @@ -2298,12 +2388,30 @@ dependencies = [ "wiremock", ] +[[package]] +name = "rowan" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "417a3a9f582e349834051b8a10c8d71ca88da4211e4093528e36b9845f6b5f21" +dependencies = [ + "countme", + "hashbrown 0.14.5", + "rustc-hash 1.1.0", + "text-size", +] + [[package]] name = "rustc-demangle" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc-hash" version = "2.1.1" @@ -2403,6 +2511,49 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" +[[package]] +name = "salsa" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f77debccd43ba198e9cee23efd7f10330ff445e46a98a2b107fed9094a1ee676" +dependencies = [ + "boxcar", + "crossbeam-queue", + "crossbeam-utils", + "hashbrown 0.15.5", + "hashlink", + "indexmap", + "intrusive-collections", + "inventory", + "parking_lot", + "portable-atomic", + "rayon", + "rustc-hash 2.1.1", + "salsa-macro-rules", + "salsa-macros", + "smallvec", + "thin-vec", + "tracing", +] + +[[package]] +name = "salsa-macro-rules" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea07adbf42d91cc076b7daf3b38bc8168c19eb362c665964118a89bc55ef19a5" + +[[package]] +name = "salsa-macros" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16d4d8b66451b9c75ddf740b7fc8399bc7b8ba33e854a5d7526d18708f67b05" +dependencies = [ + 
"proc-macro2", + "quote", + "syn", + "synstructure", +] + [[package]] name = "same-file" version = "1.0.6" @@ -2600,6 +2751,16 @@ dependencies = [ "serde", ] +[[package]] +name = "smol_str" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aaa7368fcf4852a4c2dd92df0cace6a71f2091ca0a23391ce7f3a31833f1523" +dependencies = [ + "borsh", + "serde_core", +] + [[package]] name = "socket2" version = "0.6.3" @@ -2610,6 +2771,79 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "spar-analysis" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "la-arena", + "rustc-hash 2.1.1", + "spar-hir-def", +] + +[[package]] +name = "spar-annex" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "rowan", + "spar-syntax", +] + +[[package]] +name = "spar-base-db" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "rowan", + "salsa", + "spar-annex", + "spar-syntax", +] + +[[package]] +name = "spar-hir" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "salsa", + "smol_str", + "spar-base-db", + "spar-hir-def", + "spar-syntax", +] + +[[package]] +name = "spar-hir-def" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "la-arena", + "rowan", + "rustc-hash 2.1.1", + "salsa", + "smol_str", + "spar-base-db", + "spar-syntax", +] + +[[package]] +name = "spar-parser" +version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "rowan", +] + +[[package]] +name = "spar-syntax" 
+version = "0.1.0" +source = "git+https://github.com/pulseengine/spar.git?rev=21a5411#21a541180ba5efb9f37f1b9975468b2f475c3955" +dependencies = [ + "rowan", + "spar-parser", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -2724,6 +2958,18 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "text-size" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f18aa187839b2bdb1ad2fa35ead8c4c2976b64e4363c386d45ac0f7ee85c9233" + +[[package]] +name = "thin-vec" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d" + [[package]] name = "thiserror" version = "1.0.69" diff --git a/Cargo.toml b/Cargo.toml index 95cc9cb..55b4c07 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ version = "0.1.0" authors = ["PulseEngine "] edition = "2024" license = "Apache-2.0" -rust-version = "1.85" +rust-version = "1.89" [workspace.dependencies] # Serialization @@ -49,3 +49,7 @@ wasmtime-wasi = "42" # Benchmarking criterion = { version = "0.5", features = ["html_reports"] } + +# AADL (spar) — parser, HIR, analysis +spar-hir = { git = "https://github.com/pulseengine/spar.git", rev = "21a5411" } +spar-analysis = { git = "https://github.com/pulseengine/spar.git", rev = "21a5411" } diff --git a/arch/rivet_adapters.aadl b/arch/rivet_adapters.aadl new file mode 100644 index 0000000..3285b80 --- /dev/null +++ b/arch/rivet_adapters.aadl @@ -0,0 +1,92 @@ +-- rivet_adapters.aadl +-- +-- Adapter subsystem — models the extensible import/export pipeline. +-- Each adapter is a component that implements the Adapter trait/WIT interface. +-- Native adapters are compiled-in; WASM adapters are loaded at runtime. 
+ +package RivetAdapters +public + with RivetSystem; + + -- ── Adapter interface (mirrors Adapter trait / WIT) ───────── + + abstract Adapter + features + source_in : in data port; + config_in : in data port; + artifacts : out data port RivetSystem::ArtifactSet; + export_data : out data port; + end Adapter; + + -- ── Native adapters (compiled into rivet-core) ────────────── + + system GenericYamlAdapter extends Adapter + -- Parses canonical YAML artifact files (artifacts/*.yaml) + end GenericYamlAdapter; + + system StpaYamlAdapter extends Adapter + -- Imports meld STPA safety analysis YAML + end StpaYamlAdapter; + + system AadlAdapter extends Adapter + -- Invokes spar CLI, parses JSON output into aadl-component artifacts + features + spar_invoke : requires data access; -- spar CLI on PATH + end AadlAdapter; + + system ReqIfAdapter extends Adapter + -- ReqIF 1.2 XML import/export (RIF namespace) + end ReqIfAdapter; + + -- ── WASM adapter runtime ──────────────────────────────────── + + system WasmRuntime + features + component_in : in data port; -- .wasm component bytes + adapter_out : out data port; -- instantiated Adapter + properties + -- Resource limits: fuel metering, memory cap + end WasmRuntime; + + system implementation WasmRuntime.Impl + subcomponents + engine : process WasmEngine; + linker : process WasmLinker; + sandbox : process WasmSandbox; + connections + c_load : port component_in -> engine.load; + c_link : port engine.module -> linker.module; + c_sand : port linker.linked -> sandbox.run; + c_out : port sandbox.adapter -> adapter_out; + end WasmRuntime.Impl; + + process WasmEngine + -- wasmtime::Engine with shared compilation cache + features + load : in data port; + module : out data port; + end WasmEngine; + + process WasmLinker + -- Links WASI imports, adapter WIT interface + features + module : in data port; + linked : out data port; + end WasmLinker; + + process WasmSandbox + -- Isolated execution with fuel metering and memory limits + features + 
run : in data port; + adapter : out data port; + end WasmSandbox; + + -- ── Future: OSLC sync adapter ─────────────────────────────── + + system OslcAdapter extends Adapter + -- Bidirectional sync with Polarion, DOORS, codebeamer via OSLC RM/QM + features + oslc_endpoint : requires data access; -- remote OSLC service provider + end OslcAdapter; + +end RivetAdapters; diff --git a/arch/rivet_dashboard.aadl b/arch/rivet_dashboard.aadl new file mode 100644 index 0000000..7e49b4d --- /dev/null +++ b/arch/rivet_dashboard.aadl @@ -0,0 +1,101 @@ +-- rivet_dashboard.aadl +-- +-- Dashboard subsystem — the axum + HTMX serve handler. +-- Models the HTTP routing, view rendering, and live-reload state. + +package RivetDashboard +public + with RivetSystem; + + -- ── Dashboard system boundary ─────────────────────────────── + + system Dashboard + features + http_in : in data port RivetSystem::HttpRequest; + http_out : out data port RivetSystem::HtmlFragment; + state : requires data access; -- Arc> + end Dashboard; + + system implementation Dashboard.Impl + subcomponents + router : process HttpRouter; + views : process ViewRenderers; + graph_viz : process GraphVisualizer; + file_watch : process FileWatcher; + connections + c_route : port http_in -> router.request; + c_view : port router.view -> views.render; + c_graph : port router.graph -> graph_viz.render; + c_html : port views.html -> http_out; + c_graph_out: port graph_viz.svg -> views.graph_embed; + c_reload : port file_watch.changed -> router.reload; + end Dashboard.Impl; + + -- ── HTTP router ───────────────────────────────────────────── + + process HttpRouter + -- axum Router with middleware (redirect_non_htmx, logging) + features + request : in data port RivetSystem::HttpRequest; + view : out data port; + graph : out data port; + reload : in data port; + end HttpRouter; + + -- ── View renderers ────────────────────────────────────────── + + process ViewRenderers + -- Server-rendered HTML fragments for HTMX swap + features 
+ render : in data port; + graph_embed : in data port; + html : out data port RivetSystem::HtmlFragment; + end ViewRenderers; + + process implementation ViewRenderers.Impl + subcomponents + stats_view : thread StatsView; + artifacts_view : thread ArtifactsView; + artifact_detail : thread ArtifactDetailView; + validation_view : thread ValidationView; + matrix_view : thread MatrixView; + graph_view : thread GraphView; + source_view : thread SourceView; + doc_view : thread DocumentView; + doc_linkage_view : thread DocLinkageView; + search_handler : thread SearchHandler; + preview_handler : thread PreviewHandler; + end ViewRenderers.Impl; + + -- ── Graph visualizer (etch layout engine) ─────────────────── + + process GraphVisualizer + -- petgraph → etch layout → SVG with pan/zoom/drag + features + render : in data port; + svg : out data port; + end GraphVisualizer; + + -- ── File watcher for live reload ──────────────────────────── + + process FileWatcher + -- Watches project directory, triggers state rebuild + features + changed : out data port; + end FileWatcher; + + -- ── Individual view threads ───────────────────────────────── + + thread StatsView end StatsView; + thread ArtifactsView end ArtifactsView; + thread ArtifactDetailView end ArtifactDetailView; + thread ValidationView end ValidationView; + thread MatrixView end MatrixView; + thread GraphView end GraphView; + thread SourceView end SourceView; + thread DocumentView end DocumentView; + thread DocLinkageView end DocLinkageView; + thread SearchHandler end SearchHandler; + thread PreviewHandler end PreviewHandler; + +end RivetDashboard; diff --git a/arch/rivet_system.aadl b/arch/rivet_system.aadl new file mode 100644 index 0000000..f76f888 --- /dev/null +++ b/arch/rivet_system.aadl @@ -0,0 +1,224 @@ +-- rivet_system.aadl +-- +-- Top-level system architecture for Rivet. +-- Models the system as seen by users: CLI commands and HTTP dashboard. 
+ +package RivetSystem +public + + -- ── Data types ────────────────────────────────────────────── + + data ArtifactSet + -- Collection of parsed artifacts (YAML, STPA, AADL, ReqIF) + end ArtifactSet; + + data ValidationReport + -- Schema violations, orphans, coverage gaps + end ValidationReport; + + data ProjectConfig + -- Parsed rivet.yaml: schemas, sources, project metadata + end ProjectConfig; + + data SchemaSet + -- Merged schema (common + domain overlays) + end SchemaSet; + + data LinkGraph + -- petgraph-backed directed graph of artifact links + end LinkGraph; + + data TraceMatrix + -- Coverage matrix: source type → target type with percentages + end TraceMatrix; + + data HtmlFragment + -- Server-rendered HTML (HTMX partial or full page) + end HtmlFragment; + + data HttpRequest + -- Inbound HTTP request (axum) + end HttpRequest; + + -- ── System boundary ───────────────────────────────────────── + + system Rivet + features + cli_input : in data port; + cli_output : out data port; + http_request : in data port HttpRequest; + http_response : out data port HtmlFragment; + filesystem : requires data access; + end Rivet; + + system implementation Rivet.Impl + subcomponents + core : process RivetCore.Impl; + cli : process RivetCli.Impl; + connections + c_cli_in : port cli_input -> cli.commands; + c_cli_out : port cli.output -> cli_output; + c_http_in : port http_request -> cli.http_in; + c_http_out: port cli.http_out -> http_response; + c_core : port cli.core_req -> core.request; + c_core_r : port core.response -> cli.core_resp; + end Rivet.Impl; + + -- ── RivetCore process ─────────────────────────────────────── + + process RivetCore + features + request : in data port; + response : out data port; + end RivetCore; + + process implementation RivetCore.Impl + subcomponents + config_loader : thread ConfigLoader; + schema_engine : thread SchemaEngine; + store : thread ArtifactStore; + adapters : thread AdapterDispatch; + graph : thread GraphEngine; + validator : 
thread ValidationEngine; + matrix : thread MatrixEngine; + diff_engine : thread DiffEngine; + doc_engine : thread DocumentEngine; + query_engine : thread QueryEngine; + results : thread ResultsEngine; + connections + -- Config loads schemas and source list + cfg_schema : port config_loader.schemas -> schema_engine.load; + cfg_src : port config_loader.sources -> adapters.source_list; + -- Adapters produce artifacts into the store + adapt_arts : port adapters.artifacts -> store.ingest; + -- Store feeds the graph builder + store_graph: port store.artifact_set -> graph.build; + -- Validator reads schema + store + graph + val_schema : port schema_engine.merged -> validator.schema; + val_store : port store.artifact_set -> validator.artifacts; + val_graph : port graph.link_graph -> validator.graph; + -- Matrix reads graph + store + mat_graph : port graph.link_graph -> matrix.graph; + mat_store : port store.artifact_set -> matrix.artifacts; + -- Document engine scans source files + doc_store : port store.artifact_set -> doc_engine.artifacts; + -- Query engine for search/filter + qry_store : port store.artifact_set -> query_engine.artifacts; + end RivetCore.Impl; + + -- ── RivetCli process ──────────────────────────────────────── + + process RivetCli + features + commands : in data port; + output : out data port; + http_in : in data port HttpRequest; + http_out : out data port HtmlFragment; + core_req : out data port; + core_resp : in data port; + end RivetCli; + + process implementation RivetCli.Impl + subcomponents + cmd_dispatch : thread CommandDispatch; + serve_handler : thread ServeHandler; + connections + cmd_in : port commands -> cmd_dispatch.input; + cmd_out : port cmd_dispatch.output -> output; + cmd_core : port cmd_dispatch.core_call -> core_req; + cmd_resp : port core_resp -> cmd_dispatch.core_result; + srv_in : port http_in -> serve_handler.request; + srv_out : port serve_handler.response -> http_out; + srv_core : port core_resp -> serve_handler.data; + end 
RivetCli.Impl; + + -- ── Core threads (one per module) ─────────────────────────── + + thread ConfigLoader + features + schemas : out data port SchemaSet; + sources : out data port; + end ConfigLoader; + + thread SchemaEngine + features + load : in data port; + merged : out data port SchemaSet; + end SchemaEngine; + + thread ArtifactStore + features + ingest : in data port ArtifactSet; + artifact_set : out data port ArtifactSet; + end ArtifactStore; + + thread AdapterDispatch + features + source_list : in data port; + artifacts : out data port ArtifactSet; + end AdapterDispatch; + + thread GraphEngine + features + build : in data port ArtifactSet; + link_graph : out data port LinkGraph; + end GraphEngine; + + thread ValidationEngine + features + schema : in data port SchemaSet; + artifacts : in data port ArtifactSet; + graph : in data port LinkGraph; + report : out data port ValidationReport; + end ValidationEngine; + + thread MatrixEngine + features + graph : in data port LinkGraph; + artifacts : in data port ArtifactSet; + matrix : out data port TraceMatrix; + end MatrixEngine; + + thread DiffEngine + features + baseline : in data port ArtifactSet; + current : in data port ArtifactSet; + delta : out data port; + end DiffEngine; + + thread DocumentEngine + features + artifacts : in data port ArtifactSet; + documents : out data port; + end DocumentEngine; + + thread QueryEngine + features + artifacts : in data port ArtifactSet; + results : out data port; + end QueryEngine; + + thread ResultsEngine + features + test_data : in data port; + summary : out data port; + end ResultsEngine; + + -- ── CLI threads ───────────────────────────────────────────── + + thread CommandDispatch + features + input : in data port; + output : out data port; + core_call : out data port; + core_result : in data port; + end CommandDispatch; + + thread ServeHandler + features + request : in data port HttpRequest; + response : out data port HtmlFragment; + data : in data port; + end 
ServeHandler; + +end RivetSystem; diff --git a/artifacts/architecture.yaml b/artifacts/architecture.yaml new file mode 100644 index 0000000..7be91de --- /dev/null +++ b/artifacts/architecture.yaml @@ -0,0 +1,381 @@ +artifacts: + # ── System-level components ────────────────────────────────── + + - id: ARCH-SYS-001 + type: aadl-component + title: Rivet System (top-level) + status: implemented + tags: [aadl, architecture, system] + links: + - type: allocated-from + target: REQ-001 + fields: + category: system + aadl-package: RivetSystem + classifier-kind: type + aadl-file: arch/rivet_system.aadl:49 + source-ref: arch/rivet_system.aadl:49-54 + + - id: ARCH-SYS-002 + type: aadl-component + title: Rivet System Implementation + status: implemented + tags: [aadl, architecture, system] + links: + - type: allocated-from + target: REQ-001 + fields: + category: system + aadl-package: RivetSystem + classifier-kind: implementation + aadl-file: arch/rivet_system.aadl:56-67 + + # ── Core process ───────────────────────────────────────────── + + - id: ARCH-CORE-001 + type: aadl-component + title: RivetCore process + description: > + Core library process containing all domain logic: config loading, + schema merging, artifact storage, adapter dispatch, graph building, + validation, matrix computation, diff, documents, and query. + status: implemented + tags: [aadl, architecture, core] + links: + - type: allocated-from + target: REQ-001 + - type: allocated-from + target: REQ-002 + fields: + category: process + aadl-package: RivetSystem + classifier-kind: implementation + aadl-file: arch/rivet_system.aadl:75-108 + source-ref: rivet-core/src/lib.rs:1 + + # ── CLI process ────────────────────────────────────────────── + + - id: ARCH-CLI-001 + type: aadl-component + title: RivetCli process + description: > + CLI binary process: dispatches subcommands (validate, list, stats, + matrix, stpa, serve) and hosts the HTTP dashboard. 
+ status: implemented + tags: [aadl, architecture, cli] + links: + - type: allocated-from + target: REQ-007 + fields: + category: process + aadl-package: RivetSystem + classifier-kind: implementation + aadl-file: arch/rivet_system.aadl:112-125 + source-ref: rivet-cli/src/main.rs:1 + + # ── Core threads (one per module) ──────────────────────────── + + - id: ARCH-CORE-SCHEMA + type: aadl-component + title: Schema Engine + description: > + Loads and merges YAML schema files (common + domain overlays). + Produces the merged SchemaSet used by validation and matrix. + status: implemented + tags: [aadl, architecture, core, schema] + links: + - type: allocated-from + target: REQ-002 + - type: allocated-from + target: REQ-003 + - type: allocated-from + target: REQ-010 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/schema.rs:1 + + - id: ARCH-CORE-STORE + type: aadl-component + title: Artifact Store + description: > + In-memory store holding all loaded artifacts. Provides lookup by ID, + type filtering, and iteration. + status: implemented + tags: [aadl, architecture, core, store] + links: + - type: allocated-from + target: REQ-001 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/store.rs:1 + + - id: ARCH-CORE-ADAPTERS + type: aadl-component + title: Adapter Dispatch + description: > + Dispatches source loading to the appropriate adapter based on format + string (generic-yaml, stpa-yaml, aadl, reqif). + status: implemented + tags: [aadl, architecture, core, adapters] + links: + - type: allocated-from + target: REQ-001 + - type: allocated-from + target: REQ-005 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/adapter.rs:1 + + - id: ARCH-CORE-GRAPH + type: aadl-component + title: Graph Engine + description: > + Builds petgraph directed graph from artifact links. 
Provides cycle + detection, orphan detection, reachability queries, and topological sort. + status: implemented + tags: [aadl, architecture, core, graph] + links: + - type: allocated-from + target: REQ-004 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/links.rs:1 + + - id: ARCH-CORE-VALIDATE + type: aadl-component + title: Validation Engine + description: > + Validates artifacts against schema (types, required fields, link + constraints, traceability rules). Produces ValidationReport with + errors and warnings. + status: implemented + tags: [aadl, architecture, core, validation] + links: + - type: allocated-from + target: REQ-003 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/validate.rs:1 + + - id: ARCH-CORE-MATRIX + type: aadl-component + title: Matrix Engine + description: > + Computes traceability matrix with coverage percentages. + Source type → target type mapping with linked/total counts. + status: implemented + tags: [aadl, architecture, core, matrix] + links: + - type: allocated-from + target: REQ-004 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/matrix.rs:1 + + - id: ARCH-CORE-DIFF + type: aadl-component + title: Diff Engine + description: > + Computes artifact differences between two snapshots (added, removed, + modified artifacts with field-level change details). + status: implemented + tags: [aadl, architecture, core, diff] + links: + - type: allocated-from + target: REQ-001 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/diff.rs:1 + + - id: ARCH-CORE-DOC + type: aadl-component + title: Document Engine + description: > + Scans source files for artifact references ([[ID]] patterns), + builds document model with sections and cross-references. 
+ status: implemented + tags: [aadl, architecture, core, documents] + links: + - type: allocated-from + target: REQ-001 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/document.rs:1 + + - id: ARCH-CORE-QUERY + type: aadl-component + title: Query Engine + description: > + Filters and searches artifacts by type, status, tags, and + free-text queries. + status: implemented + tags: [aadl, architecture, core, query] + links: + - type: allocated-from + target: REQ-001 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/query.rs:1 + + - id: ARCH-CORE-RESULTS + type: aadl-component + title: Results Engine + description: > + Parses test execution results (JUnit XML) and coverage data (LCOV) + for evidence tracking and dashboard display. + status: implemented + tags: [aadl, architecture, core, results] + links: + - type: allocated-from + target: REQ-009 + fields: + category: thread + aadl-package: RivetSystem + classifier-kind: type + source-ref: rivet-core/src/results.rs:1 + + # ── Adapter components ─────────────────────────────────────── + + - id: ARCH-ADAPT-GENERIC + type: aadl-component + title: Generic YAML Adapter + description: > + Imports canonical YAML artifact files. Primary format for + hand-authored artifacts. + status: implemented + tags: [aadl, architecture, adapter] + links: + - type: allocated-from + target: FEAT-002 + fields: + category: system + aadl-package: RivetAdapters + classifier-kind: type + source-ref: rivet-core/src/formats/generic.rs:1 + + - id: ARCH-ADAPT-STPA + type: aadl-component + title: STPA YAML Adapter + description: > + Imports meld's STPA safety analysis YAML format. Maps losses, + hazards, constraints, UCAs, and scenarios to rivet artifacts. 
+ status: implemented + tags: [aadl, architecture, adapter, stpa] + links: + - type: allocated-from + target: FEAT-001 + fields: + category: system + aadl-package: RivetAdapters + classifier-kind: type + source-ref: rivet-core/src/formats/stpa.rs:1 + + - id: ARCH-ADAPT-AADL + type: aadl-component + title: AADL Adapter (spar integration) + description: > + Layer 1 integration: invokes spar CLI with --format json, parses + output into aadl-component and aadl-analysis-result artifacts. + status: implemented + tags: [aadl, architecture, adapter] + links: + - type: allocated-from + target: FEAT-018 + fields: + category: system + aadl-package: RivetAdapters + classifier-kind: type + source-ref: rivet-core/src/formats/aadl.rs:1 + + - id: ARCH-ADAPT-REQIF + type: aadl-component + title: ReqIF 1.2 Adapter + description: > + Import/export of ReqIF 1.2 XML. Handles spec-objects, spec-types, + and spec-relations mapping to rivet artifacts and links. + status: implemented + tags: [aadl, architecture, adapter, reqif] + links: + - type: allocated-from + target: FEAT-010 + fields: + category: system + aadl-package: RivetAdapters + classifier-kind: type + source-ref: rivet-core/src/reqif.rs:1 + + - id: ARCH-ADAPT-WASM + type: aadl-component + title: WASM Adapter Runtime + description: > + Loads WASM component adapters at runtime via WIT interface. + Provides sandboxed execution with fuel metering and memory limits. + status: partial + tags: [aadl, architecture, adapter, wasm] + links: + - type: allocated-from + target: FEAT-012 + - type: allocated-from + target: REQ-008 + fields: + category: system + aadl-package: RivetAdapters + classifier-kind: implementation + source-ref: rivet-core/src/wasm_runtime.rs:1 + + # ── Dashboard components ───────────────────────────────────── + + - id: ARCH-DASH-001 + type: aadl-component + title: Dashboard System + description: > + axum HTTP server with HTMX-driven dashboard. Server-rendered HTML + fragments, no frontend framework. 
Includes pan/zoom/drag graph + visualization, artifact hover previews, and Cmd+K search. + status: implemented + tags: [aadl, architecture, dashboard] + links: + - type: allocated-from + target: FEAT-009 + - type: allocated-from + target: REQ-007 + fields: + category: system + aadl-package: RivetDashboard + classifier-kind: implementation + aadl-file: arch/rivet_dashboard.aadl:18-35 + source-ref: rivet-cli/src/serve.rs:1 + + - id: ARCH-DASH-GRAPH + type: aadl-component + title: Graph Visualizer (etch) + description: > + Renders petgraph link graphs as SVG using the etch layout engine. + Supports interactive pan, zoom, and node dragging. + status: implemented + tags: [aadl, architecture, dashboard, graph] + links: + - type: allocated-from + target: FEAT-009 + fields: + category: process + aadl-package: RivetDashboard + classifier-kind: type diff --git a/artifacts/decisions.yaml b/artifacts/decisions.yaml index 229c078..45eea1a 100644 --- a/artifacts/decisions.yaml +++ b/artifacts/decisions.yaml @@ -39,6 +39,7 @@ artifacts: alternatives: > Custom adjacency list implementation. Rejected because graph algorithms are subtle and petgraph is well-proven. + source-ref: rivet-core/src/graph.rs:1 - id: DD-003 type: design-decision @@ -56,6 +57,7 @@ artifacts: - type: satisfies target: REQ-003 fields: + source-ref: rivet-core/src/schema.rs:1 rationale: > Merging allows a base common schema to be extended by domain-specific schemas without duplication. Projects pick which schemas they need. diff --git a/artifacts/features.yaml b/artifacts/features.yaml index 4d1866e..54994fd 100644 --- a/artifacts/features.yaml +++ b/artifacts/features.yaml @@ -262,3 +262,53 @@ artifacts: target: DD-010 fields: phase: phase-1 + + - id: FEAT-018 + type: feature + title: AADL adapter (spar Layer 1) + status: approved + description: > + Import AADL architecture models via spar CLI JSON output. 
Converts + spar component types, implementations, and analysis diagnostics + into rivet aadl-component and aadl-analysis-result artifacts. + tags: [adapter, aadl, phase-2] + links: + - type: satisfies + target: REQ-001 + - type: satisfies + target: REQ-005 + fields: + phase: phase-2 + + - id: FEAT-019 + type: feature + title: AADL architecture dogfood (rivet self-model) + status: approved + description: > + Model rivet's own system and software architecture as AADL + components in arch/. Three packages: RivetSystem (top-level), + RivetAdapters (extensibility), RivetDashboard (serve/UI). + Architecture artifacts trace to requirements via allocated-from. + tags: [aadl, architecture, dogfood, phase-2] + links: + - type: satisfies + target: REQ-001 + fields: + phase: phase-2 + + - id: FEAT-020 + type: feature + title: AADL browser rendering (spar WASM) + status: draft + description: > + Render AADL component diagrams in the dashboard using a spar WASM + module compiled for the browser. Provides interactive visualization + of system/software architecture with drill-down into subcomponents. + tags: [aadl, wasm, ui, phase-3] + links: + - type: satisfies + target: REQ-008 + - type: satisfies + target: REQ-007 + fields: + phase: phase-3 diff --git a/artifacts/verification.yaml b/artifacts/verification.yaml new file mode 100644 index 0000000..61b0ac3 --- /dev/null +++ b/artifacts/verification.yaml @@ -0,0 +1,193 @@ +artifacts: + - id: TEST-001 + type: feature + title: Store and model unit tests + status: approved + description: > + Unit tests for the diff, document, and store modules. Verifies artifact + storage, retrieval, upsert, by-type indexing, YAML frontmatter parsing, + document reference extraction, HTML rendering, and structural diff + computation between store snapshots. 
+ tags: [testing, swe-4] + links: + - type: verifies + target: REQ-001 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-002 + type: feature + title: STPA adapter and schema tests + status: approved + description: > + Tests in stpa_roundtrip.rs that verify STPA schema loading, artifact + type and link type presence, store insert/lookup, duplicate ID rejection, + and broken link detection within the STPA domain. + tags: [testing, stpa, swe-5] + links: + - type: verifies + target: REQ-002 + - type: verifies + target: REQ-004 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-003 + type: feature + title: Schema validation and merge tests + status: approved + description: > + Integration tests for schema loading and merging. Verifies that common + + stpa + aspice merge preserves all artifact types, link types, and inverse + mappings. Includes cybersecurity schema merge verification and ASPICE + traceability rule enforcement. + tags: [testing, schema, swe-5] + links: + - type: verifies + target: REQ-010 + - type: verifies + target: REQ-004 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-004 + type: feature + title: Link graph and coverage tests + status: approved + description: > + Tests for link graph construction, backlink computation, orphan detection, + reachability queries, and traceability matrix computation. Includes + coverage module tests for full, partial, and vacuous coverage scenarios. + tags: [testing, validation, swe-5] + links: + - type: verifies + target: REQ-004 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-005 + type: feature + title: ReqIF roundtrip tests + status: approved + description: > + Unit tests in reqif.rs and integration tests in integration.rs that + verify ReqIF 1.2 XML export produces valid structure, minimal ReqIF + parsing, full roundtrip preservation of artifacts/links/fields, and + ReqIF-to-store integration. 
+ tags: [testing, reqif, swe-5] + links: + - type: verifies + target: REQ-005 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-006 + type: feature + title: Property-based tests (proptest) + status: approved + description: > + Six proptest properties verifying store insert/lookup consistency, + duplicate rejection, schema merge idempotence, link graph backlink + symmetry, validation determinism, and type iterator correctness. + Runs 30-50 randomized cases per property. + tags: [testing, proptest, swe-4] + links: + - type: verifies + target: REQ-001 + - type: verifies + target: REQ-004 + - type: verifies + target: REQ-010 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-007 + type: feature + title: Integration test suite + status: approved + description: > + Eighteen cross-module integration tests exercising the full pipeline: + dogfood validation, generic YAML roundtrip, schema merge, traceability + matrix, query filters, link graph, ASPICE rules, store upsert, ReqIF + roundtrip, diff computation, and diagnostic diffing. + tags: [testing, integration, swe-5] + links: + - type: verifies + target: REQ-003 + - type: verifies + target: REQ-007 + - type: verifies + target: REQ-001 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-008 + type: feature + title: Diff module tests + status: approved + description: > + Five unit tests for the diff module verifying empty diff, identical + stores, added artifacts, removed artifacts, and modified artifact + detection including title, status, tags, links, and fields changes. 
+ tags: [testing, diff, swe-4] + links: + - type: verifies + target: REQ-001 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-009 + type: feature + title: Document system tests + status: approved + description: > + Nine unit tests for the document module verifying YAML frontmatter + parsing, error handling for missing frontmatter, document store + operations, HTML heading rendering, wiki-link reference resolution, + default document type inference, multiple references per line, + reference extraction, and section hierarchy extraction. + tags: [testing, document, swe-4] + links: + - type: verifies + target: REQ-001 + - type: verifies + target: REQ-007 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 + + - id: TEST-010 + type: feature + title: Results model tests + status: approved + description: > + Nine unit tests for the results module verifying TestStatus display + and predicate methods, ResultStore insert ordering, latest_for and + history_for queries, aggregate summary statistics, YAML roundtrip + serialization, and edge case handling for empty and nonexistent + result directories. + tags: [testing, results, swe-4] + links: + - type: verifies + target: REQ-009 + - type: satisfies + target: REQ-014 + fields: + phase: phase-1 diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 0000000..a054675 --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,323 @@ +--- +id: ARCH-001 +type: architecture +title: Rivet System Architecture +status: approved +glossary: + STPA: Systems-Theoretic Process Analysis + ASPICE: Automotive SPICE + OSLC: Open Services for Lifecycle Collaboration + ReqIF: Requirements Interchange Format + WASM: WebAssembly + WIT: WASM Interface Types + HTMX: Hypermedia-driven AJAX + CLI: Command-Line Interface + YAML: YAML Ain't Markup Language +--- + +# Rivet System Architecture + +## 1. System Overview + +Rivet is a Rust-based SDLC traceability tool for safety-critical systems. 
It +manages lifecycle artifacts (requirements, designs, tests, STPA analyses) as +version-controlled YAML files and validates their traceability links against +composable schemas. + +The system is structured as two crates following [[DD-006]]: + +- **rivet-core** -- Library crate containing all domain logic: artifact model, + adapters, schema loading, link graph, validation, coverage, matrix + computation, diff, document system, query engine, and format-specific + adapters. + +- **rivet-cli** -- Binary crate providing the `rivet` command-line tool and + the axum + HTMX dashboard server. Depends on rivet-core for all domain + operations. + +This flat crate structure keeps module boundaries clear without deep nesting. +The library/binary split ensures that rivet-core can be consumed as a Rust +dependency by other tools or tested independently. + +### System Architecture Diagram + +The top-level system with its core and CLI subsystems: + +```aadl +root: RivetSystem::Rivet.Impl +``` + +### Core Process Internals + +The core library process showing all domain logic modules and their data flow: + +```aadl +root: RivetSystem::RivetCore.Impl +``` + +### CLI Process + +The CLI binary process with command dispatch and HTTP serve handler: + +```aadl +root: RivetSystem::RivetCli.Impl +``` + +## 2. 
Module Structure + +### 2.1 rivet-core Modules + +| Module | Purpose | +|--------------|------------------------------------------------------------------| +| `model` | Core data types: `Artifact`, `Link`, `ProjectConfig`, `SourceConfig` | +| `store` | In-memory artifact store with by-ID and by-type indexing | +| `schema` | Schema loading, merging, artifact type and link type definitions | +| `links` | `LinkGraph` construction via petgraph, backlinks, orphan detection | +| `validate` | Validation engine: types, fields, cardinality, traceability rules | +| `coverage` | Traceability coverage computation per rule | +| `matrix` | Traceability matrix computation (forward and backward) | +| `query` | Query engine: filter artifacts by type, status, tag, link presence | +| `diff` | Artifact diff and diagnostic diff between two store snapshots | +| `document` | Markdown documents with YAML frontmatter and wiki-link references | +| `results` | Test run results model, YAML loading, and `ResultStore` | +| `adapter` | Adapter trait and configuration for import/export | +| `reqif` | ReqIF 1.2 XML import/export adapter | +| `oslc` | OSLC client for discovery, query, CRUD, and sync (feature-gated) | +| `wasm_runtime` | WASM component adapter runtime (feature-gated) | +| `error` | Unified error type for the library | +| `formats/` | Format-specific adapters: `generic` (YAML), `stpa` (STPA YAML) | + +### 2.2 rivet-cli Modules + +| Module | Purpose | +|---------|----------------------------------------------------------------------| +| `main` | CLI entry point, clap argument parsing, subcommand dispatch | +| `serve` | axum HTTP server with HTMX-rendered dashboard pages | + +## 3. 
Data Flow + +The core data pipeline follows a consistent flow from YAML files through to +validation results: + +``` + rivet.yaml + | + v + ProjectConfig + | + +---> Schema loading (schemas/*.yaml) + | | + | v + | Schema::merge() --> merged Schema + | + +---> Artifact loading (sources/*.yaml) + | + v + Adapter::import() --> Vec + | + v + Store (in-memory, indexed by ID and type) + | + +---> LinkGraph::build(&store, &schema) + | | + | v + | petgraph DiGraph (nodes = artifacts, edges = links) + | | + | +---> validate::validate() --> Vec + | +---> coverage::compute() --> CoverageReport + | +---> matrix::compute() --> TraceabilityMatrix + | +---> graph.orphans() --> orphan detection + | +---> graph.broken --> broken links + | + +---> query::execute(&store, &query) --> filtered artifacts + +---> diff::ArtifactDiff::compute() --> change analysis +``` + +### 3.1 Schema Loading + +Schemas are loaded from YAML files and merged using `Schema::merge()`. Each +schema file declares artifact types with field definitions, link-field +constraints (cardinality, target types), and traceability rules. The merge +operation combines types and link types from multiple schemas, enabling +composition: a project can load `common + dev`, `common + stpa`, +`common + aspice + cybersecurity`, or any combination. + +This design is specified by [[REQ-010]] and [[DD-003]]. + +### 3.2 Adapter Pipeline + +Adapters implement the `Adapter` trait, which defines `import()` and +`export()` methods. Three native adapters exist: + +1. **GenericYamlAdapter** -- Canonical YAML format with explicit type, links + array, and fields map. Used for Rivet's own artifacts. +2. **StpaYamlAdapter** -- Imports STPA analysis artifacts from the meld + project's YAML format (losses, hazards, UCAs, etc.). +3. **ReqIfAdapter** -- Import/export for OMG ReqIF 1.2 XML, enabling + interchange with DOORS, Polarion, and codebeamer ([[REQ-005]]). 
+ +The WASM adapter runtime ([[DD-004]]) and OSLC sync adapter ([[DD-001]]) +extend this pipeline for plugin formats and remote tool synchronization. + +```aadl +root: RivetAdapters::WasmRuntime.Impl +``` + +### 3.3 Link Graph + +The `LinkGraph` module uses petgraph ([[DD-002]]) to build a directed graph +where nodes are artifacts and edges are links. The graph provides: + +- **Forward links** -- `links_from(id)` returns outgoing links +- **Backlinks** -- `backlinks_to(id)` returns incoming links with inverse type +- **Broken links** -- Links where the target artifact doesn't exist +- **Orphans** -- Artifacts with no incoming or outgoing links +- **Reachability** -- `reachable(id, link_type)` for transitive closure + +### 3.4 Validation Engine + +The validator ([[REQ-004]]) checks artifacts against the merged schema: + +1. **Known type** -- Every artifact's type must exist in the schema +2. **Required fields** -- Type-specific required fields must be present +3. **Allowed values** -- Field values must match the schema's allowed set +4. **Link cardinality** -- Link counts must satisfy exactly-one, one-or-many, + zero-or-one, or zero-or-many constraints +5. **Link target types** -- Link targets must have the correct artifact type +6. **Broken links** -- All link targets must exist in the store +7. **Traceability rules** -- Forward and backward link coverage rules + +Diagnostics are returned with severity levels (error, warning, info) and the +caller decides whether to fail on errors. + +## 4. Dashboard Architecture + +```aadl +root: RivetDashboard::Dashboard.Impl +``` + +The HTTP dashboard follows [[DD-005]], using axum as the server framework and +HTMX for dynamic page updates without a JavaScript build toolchain. 
+ +### 4.1 Server Structure + +The `serve` module in rivet-cli sets up an axum `Router` with routes for: + +- `/` -- Project overview with artifact counts, validation status, and context +- `/artifacts` -- Browsable artifact list with type/status filters +- `/artifact/:id` -- Single artifact detail with links and backlinks +- `/matrix` -- Traceability matrix view +- `/coverage` -- Coverage report +- `/docs` -- Document browser +- `/doc/:id` -- Single document rendered as HTML +- `/results` -- Test result runs and history +- `/graph` -- Interactive link graph visualization (SVG via etch) + +### 4.2 Application State + +The server holds shared state behind `Arc>`: + +- `Store` -- All loaded artifacts +- `Schema` -- Merged schema +- `LinkGraph` -- Precomputed link graph +- `DocumentStore` -- Loaded markdown documents +- `ResultStore` -- Test result runs +- `RepoContext` -- Git branch, commit, dirty state, sibling projects + +### 4.3 Page Layout + +Every page shares a common layout with: + +- **Context bar** -- Project name, git branch/commit, dirty indicator, + loaded-at timestamp, and sibling project links +- **Navigation** -- Horizontal nav bar linking to all major views +- **Content area** -- Route-specific content rendered as HTML fragments + +HTMX provides partial page updates: clicking a navigation link fetches only +the content fragment and swaps it into the page, avoiding full reloads. + +## 5. Schema System + +### 5.1 Schema Files + +Schema files are YAML documents defining: + +```yaml +schema: + name: dev + version: "0.1.0" + extends: [common] + +artifact-types: + - name: requirement + fields: [...] + link-fields: [...] 
+ +link-types: + - name: satisfies + inverse: satisfied-by + +traceability-rules: + - name: requirement-coverage + source-type: requirement + required-backlink: satisfies + severity: warning +``` + +### 5.2 Available Schemas + +| Schema | Types | Link Types | Rules | Domain | +|-----------------|-------|------------|-------|--------------------------------| +| `common` | 0 | 9 | 0 | Base fields and link types | +| `dev` | 3 | 1 | 2 | Development tracking | +| `stpa` | 10 | 5 | 7 | STPA safety analysis | +| `aspice` | 14 | 2 | 10 | ASPICE v4.0 V-model | +| `cybersecurity` | 10 | 2 | 10 | SEC.1-4, ISO/SAE 21434 | + +### 5.3 Merge Semantics + +When schemas are merged, artifact types, link types, and traceability rules +are combined by name. If two schemas define the same type, the later +definition wins. Inverse mappings are rebuilt after merge. This enables +domain-specific schemas to extend common definitions without duplication. + +## 6. Test Results as Evidence + +[[REQ-009]] specifies that test execution results are tied to releases as +evidence. The `results` module ([[DD-007]]) implements this: + +- **TestRunFile** -- YAML format with run metadata and per-artifact results +- **ResultStore** -- In-memory collection sorted by timestamp +- **TestStatus** -- Pass, fail, skip, error, blocked +- **ResultSummary** -- Aggregate statistics with pass rate + +Results files are loaded from a configured directory and displayed in the +dashboard alongside artifacts they verify. + +## 7. 
Design Decisions + +This architecture reflects the following key decisions: + +- [[DD-001]] -- OSLC over per-tool REST adapters for external tool sync +- [[DD-002]] -- petgraph for link graph operations +- [[DD-003]] -- Mergeable YAML schemas for domain composition +- [[DD-004]] -- WIT-based WASM adapter interface for plugins +- [[DD-005]] -- axum + HTMX serve pattern for the dashboard +- [[DD-006]] -- Flat crate structure (rivet-core + rivet-cli) +- [[DD-007]] -- Test results tied to GitHub releases +- [[DD-008]] -- Rust edition 2024 with comprehensive CI +- [[DD-009]] -- Criterion benchmarks as KPI baselines +- [[DD-010]] -- ASPICE 4.0 terminology and composable cybersecurity schema + +## 8. Requirements Coverage + +This document addresses the following requirements: + +- [[REQ-001]] -- Text-file-first artifact management (section 2, 3) +- [[REQ-004]] -- Validation engine (section 3.4) +- [[REQ-005]] -- ReqIF 1.2 import/export (section 3.2) +- [[REQ-006]] -- OSLC-based tool synchronization (section 3.2) +- [[REQ-007]] -- CLI and serve pattern (section 4) +- [[REQ-008]] -- WASM component adapters (section 3.2) +- [[REQ-009]] -- Test results as release evidence (section 6) +- [[REQ-010]] -- Schema-driven validation (section 5) diff --git a/docs/plans/2026-03-09-spar-wasm-browser-rendering.md b/docs/plans/2026-03-09-spar-wasm-browser-rendering.md new file mode 100644 index 0000000..145d915 --- /dev/null +++ b/docs/plans/2026-03-09-spar-wasm-browser-rendering.md @@ -0,0 +1,723 @@ +# spar-wasm Browser AADL Rendering Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Compile spar+etch to a WASI component (wasm32-wasip2) that takes AADL source and returns interactive SVG architecture diagrams, loaded in rivet's document viewer via jco transpilation, with traceability-based highlighting. 
+
+**Architecture:** A `spar-wasm` crate in the spar workspace exports a WIT `renderer` interface. It uses the full spar-hir pipeline (including salsa) since it compiles cleanly to wasm32-wasip2. The WASM component reads .aadl files via WASI filesystem, builds the instance model, converts it to a petgraph, and renders SVG via etch. On the rivet side, the document renderer detects ` ```aadl ` code blocks and emits placeholder divs. Browser JS (using jco-transpiled bindings) calls the WASM renderer and inserts the SVG. Interactive highlighting uses etch's `data-id` attributes and CSS, with link graph data provided by a rivet API endpoint.
+
+**Tech Stack:** Rust, wasm32-wasip2, wit-bindgen, jco, wasmtime, etch, petgraph, spar-hir, spar-analysis
+
+---
+
+## Two repos
+
+- **spar** (`/Volumes/Home/git/pulseengine/spar`, branch: `feat/serde-json-integration`)
+- **rivet** (`/Volumes/Home/git/sdlc`, branch: `feat/aadl-integration`)
+
+---
+
+### Task 1: Extend WIT with renderer interface (rivet)
+
+**Files:**
+- Modify: `wit/adapter.wit`
+
+**Step 1: Add the renderer interface to the WIT file**
+
+After the existing `adapter` interface, add:
+
+```wit
+/// Renderer interface for producing SVG visualizations.
+///
+/// Unlike the adapter interface (which imports/exports artifacts),
+/// the renderer takes a root classifier and produces SVG output.
+/// It reads source files via WASI filesystem.
+interface renderer {
+    /// Render an AADL architecture diagram as SVG.
+    ///
+    /// `root` — classifier to instantiate (e.g., "FlightControl::Controller.Basic")
+    /// `highlight` — artifact IDs to visually emphasize in the diagram
+    /// Returns SVG string on success.
+    render: func(root: string, highlight: list<string>) -> result<string, render-error>;
+
+    /// Errors specific to rendering.
+    variant render-error {
+        parse-error(string),
+        no-root(string),
+        layout-error(string),
+    }
+}
+
+/// Extended world that includes both adapter and renderer capabilities.
+world spar-component { + export adapter; + export renderer; +} +``` + +**Step 2: Verify the WIT is syntactically valid** + +Run: `wasm-tools parse wit/adapter.wit` (or just `cargo check` later — wasmtime will validate) + +**Step 3: Commit** + +```bash +git add wit/adapter.wit +git commit -m "feat(wit): add renderer interface for SVG visualization" +``` + +--- + +### Task 2: Create spar-wasm crate scaffolding (spar) + +**Files:** +- Create: `crates/spar-wasm/Cargo.toml` +- Create: `crates/spar-wasm/src/lib.rs` +- Modify: `Cargo.toml` (workspace members) + +**Step 1: Create Cargo.toml** + +```toml +[package] +name = "spar-wasm" +version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "WASM component for AADL parsing, analysis, and SVG rendering" + +[dependencies] +spar-parser.workspace = true +spar-syntax.workspace = true +spar-base-db.workspace = true +spar-hir-def.workspace = true +spar-hir.workspace = true +spar-analysis.workspace = true +serde.workspace = true +serde_json = "1" +petgraph = "0.6" +``` + +Note: etch is in the rivet workspace, not spar. We'll vendor the graph-building + SVG rendering logic directly in spar-wasm since it's a small amount of code and avoids cross-workspace dependency. The SVG output uses etch's format so it's compatible with rivet's existing interactive JS. + +**Step 2: Create minimal lib.rs** + +```rust +//! WASM component for AADL architecture visualization. +//! +//! Provides two capabilities as a WASI component: +//! 1. `adapter` — import/export AADL artifacts (same as CLI JSON output) +//! 2. `renderer` — parse AADL, instantiate, and render SVG via graph layout +//! +//! The component reads `.aadl` files via WASI filesystem and uses the full +//! spar-hir pipeline (including salsa) for semantic analysis. + +mod graph; +mod render; +``` + +**Step 3: Add to workspace** + +Add `"crates/spar-wasm"` to the `members` list in the root `Cargo.toml`. 
+ +**Step 4: Verify it compiles** + +Run: `cargo check -p spar-wasm` + +**Step 5: Commit** + +```bash +git add crates/spar-wasm/ Cargo.toml +git commit -m "feat(spar-wasm): scaffold WASM component crate" +``` + +--- + +### Task 3: Instance model to petgraph conversion (spar) + +**Files:** +- Create: `crates/spar-wasm/src/graph.rs` +- Modify: `crates/spar-wasm/src/lib.rs` (add test) + +This module converts a `SystemInstance` (arena-based) into a `petgraph::Graph` suitable for layout. + +**Step 1: Write the failing test** + +In `crates/spar-wasm/src/graph.rs`: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn instance_to_graph_basic() { + // Build a minimal SystemInstance with 2 components and 1 connection + use spar_hir_def::instance::*; + use spar_hir_def::item_tree::{ComponentCategory, ConnectionKind, Direction, FeatureKind}; + use spar_hir_def::name::Name; + use la_arena::Arena; + + let mut components = Arena::new(); + let mut features = Arena::new(); + let mut connections = Arena::new(); + + let root = components.alloc(ComponentInstance { + name: Name::new("Root"), + category: ComponentCategory::System, + package: Name::new("Pkg"), + type_name: Name::new("Root"), + impl_name: Some(Name::new("Root.Impl")), + parent: None, + children: Vec::new(), + features: Vec::new(), + connections: Vec::new(), + diagnostics: Vec::new(), + }); + + let child = components.alloc(ComponentInstance { + name: Name::new("sub1"), + category: ComponentCategory::Process, + package: Name::new("Pkg"), + type_name: Name::new("Sub"), + impl_name: None, + parent: Some(root), + children: Vec::new(), + features: Vec::new(), + connections: Vec::new(), + diagnostics: Vec::new(), + }); + + // Update root's children + components[root].children.push(child); + + let instance = SystemInstance { + root, + components, + features, + connections, + }; + + let (graph, node_map) = build_graph(&instance); + assert_eq!(graph.node_count(), 2); + assert!(node_map.contains_key(&root)); + 
assert!(node_map.contains_key(&child));
+    }
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cargo test -p spar-wasm -- instance_to_graph_basic`
+Expected: FAIL (build_graph not defined)
+
+**Step 3: Implement graph building**
+
+```rust
+//! Convert a SystemInstance into a petgraph for layout.
+
+use std::collections::HashMap;
+
+use petgraph::Graph;
+use petgraph::graph::NodeIndex;
+use spar_hir_def::instance::{ComponentInstanceIdx, SystemInstance};
+use spar_hir_def::item_tree::ComponentCategory;
+
+/// Node data for the architecture graph.
+#[derive(Debug, Clone)]
+pub struct ArchNode {
+    pub id: String,
+    pub label: String,
+    pub category: ComponentCategory,
+    pub sublabel: Option<String>,
+}
+
+/// Edge data for the architecture graph.
+#[derive(Debug, Clone)]
+pub struct ArchEdge {
+    pub label: String,
+}
+
+/// Build a petgraph from a SystemInstance.
+///
+/// Returns the graph and a map from ComponentInstanceIdx to NodeIndex.
+pub fn build_graph(
+    instance: &SystemInstance,
+) -> (Graph<ArchNode, ArchEdge>, HashMap<ComponentInstanceIdx, NodeIndex>) {
+    let mut graph = Graph::new();
+    let mut node_map = HashMap::new();
+
+    // Add all components as nodes (recursive)
+    add_component_nodes(instance, instance.root, &mut graph, &mut node_map);
+
+    // Add connections as edges
+    for (_ci_idx, ci) in instance.components.iter() {
+        for &conn_idx in &ci.connections {
+            let conn = instance.connection(conn_idx);
+            if let (Some(src), Some(dst)) = (conn.source_component, conn.dest_component) {
+                if let (Some(&src_node), Some(&dst_node)) = (node_map.get(&src), node_map.get(&dst)) {
+                    graph.add_edge(src_node, dst_node, ArchEdge {
+                        label: conn.name.as_str().to_string(),
+                    });
+                }
+            }
+        }
+    }
+
+    (graph, node_map)
+}
+
+fn add_component_nodes(
+    instance: &SystemInstance,
+    idx: ComponentInstanceIdx,
+    graph: &mut Graph<ArchNode, ArchEdge>,
+    node_map: &mut HashMap<ComponentInstanceIdx, NodeIndex>,
+) {
+    let comp = instance.component(idx);
+    let id = format!("AADL-{}-{}", comp.package.as_str(), comp.name.as_str());
+
+    let node = ArchNode {
+        id,
+        label: comp.name.as_str().to_string(),
+        category: comp.category,
+        sublabel: Some(format!("{:?}", comp.category)),
+    };
+
+    let ni = graph.add_node(node);
+    node_map.insert(idx, ni);
+
+    for &child_idx in &comp.children {
+        add_component_nodes(instance, child_idx, graph, node_map);
+        if let Some(&child_ni) = node_map.get(&child_idx) {
+            graph.add_edge(ni, child_ni, ArchEdge {
+                label: "contains".into(),
+            });
+        }
+    }
+}
+```
+
+**Step 4: Run test to verify it passes**
+
+Run: `cargo test -p spar-wasm -- instance_to_graph_basic`
+Expected: PASS
+
+**Step 5: Commit**
+
+```bash
+git add crates/spar-wasm/src/graph.rs
+git commit -m "feat(spar-wasm): add instance model to petgraph conversion"
+```
+
+---
+
+### Task 4: SVG render function (spar)
+
+**Files:**
+- Create: `crates/spar-wasm/src/render.rs`
+
+This is the main entry point: AADL source to SVG string. It uses spar-hir's Database for full semantic analysis, then builds the graph and renders SVG.
+
+Since etch is in a different workspace, we inline a minimal Sugiyama layout + SVG renderer here. The SVG format matches etch's output (same CSS classes, data-id attributes) so rivet's existing interactive JS works.
+ +**Alternative (preferred if feasible):** Add etch as a git dependency in spar-wasm's Cargo.toml: +```toml +etch = { git = "https://github.com/pulseengine/sdlc.git", path = "etch" } +``` + +**Step 1: Write the failing test** + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn render_basic_aadl() { + let source = r#" +package FlightControl +public + system Controller + features + sensorIn: in data port; + end Controller; + + system implementation Controller.Basic + subcomponents + nav: process NavProcess; + end Controller.Basic; + + process NavProcess + end NavProcess; +end FlightControl; +"#; + let svg = render_aadl(source, "FlightControl::Controller.Basic", &[]).unwrap(); + assert!(svg.contains("")); + assert!(svg.contains("data-id")); // interactive + } + + #[test] + fn render_with_highlight() { + let source = r#" +package Pkg +public + system S end S; + system implementation S.I + subcomponents + sub1: process P; + end S.I; + process P end P; +end Pkg; +"#; + let svg = render_aadl(source, "Pkg::S.I", &["AADL-Pkg-sub1".into()]).unwrap(); + assert!(svg.contains("stroke-width=\"3")); // highlighted node + } +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test -p spar-wasm -- render_basic_aadl` +Expected: FAIL + +**Step 3: Implement render_aadl** + +```rust +//! Top-level render function: AADL source to SVG. + +use spar_hir::Database; +use crate::graph::{build_graph}; + +/// Render AADL source into an SVG architecture diagram. +/// +/// - `source` -- AADL source text (one or more packages) +/// - `root` -- classifier to instantiate (e.g., "Pkg::Type.Impl") +/// - `highlight` -- artifact IDs to visually highlight +pub fn render_aadl( + source: &str, + root: &str, + highlight: &[String], +) -> Result { + // 1. Parse and build semantic model + let db = Database::from_aadl(&[("input.aadl".into(), source.into())]); + + // 2. 
Instantiate + let instance = db.instantiate(root) + .map_err(|e| RenderError::NoRoot(format!("{}: {}", root, e)))?; + + // 3. Build graph + let (graph, _node_map) = build_graph(&instance); + + // 4. Layout + render SVG (inline minimal Sugiyama) + render_graph_to_svg(&graph, highlight) +} + +#[derive(Debug)] +pub enum RenderError { + ParseError(String), + NoRoot(String), + LayoutError(String), +} +``` + +The `render_graph_to_svg` function builds SVG matching etch's format: +- CSS classes: `.node`, `.edge`, `.type-{category}` +- `data-id` on every node (matching rivet artifact IDs like `AADL-Pkg-Name`) +- Highlighted nodes get `stroke-width: 3` and `stroke: #ff6600` + +**Step 4: Run tests** + +Run: `cargo test -p spar-wasm` +Expected: PASS + +**Step 5: Commit** + +```bash +git add crates/spar-wasm/src/render.rs +git commit -m "feat(spar-wasm): add AADL source to SVG render pipeline" +``` + +--- + +### Task 5: WASM build + verify (spar) + +**Step 1: Verify wasm32-wasip2 compilation** + +Run: `cargo build --target wasm32-wasip2 -p spar-wasm` + +**Step 2: Check binary size** + +Run: `ls -lh target/wasm32-wasip2/debug/spar_wasm.wasm` + +**Step 3: Commit any build fixes** + +--- + +### Task 6: rivet document renderer -- detect aadl code blocks (rivet) + +**Files:** +- Modify: `rivet-core/src/document.rs` + +**Step 1: Write the failing test** + +```rust +#[test] +fn render_aadl_code_block_placeholder() { + let content = "---\nid: DOC-001\ntitle: Architecture\n---\n\n## Overview\n\n```aadl\nroot: FlightControl::Controller.Basic\n```\n\nSome text after.\n"; + let doc = parse_document(content, None).unwrap(); + let html = render_to_html(&doc, |_| true); + assert!(html.contains("aadl-diagram")); + assert!(html.contains("data-root=\"FlightControl::Controller.Basic\"")); + // Should NOT contain pre/code for aadl blocks + assert!(!html.contains("
root: FlightControl"));
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cargo test -p rivet-core -- render_aadl_code_block_placeholder`
+Expected: FAIL (currently renders as `<pre><code>root: …</code></pre>
`)
+
+**Step 3: Modify render_to_html to detect aadl blocks**
+
+In the opening fence handler (line ~308), capture the language tag:
+
+```rust
+// When opening a fenced code block, capture the language tag
+if trimmed.starts_with("```") {
+    if in_code_block {
+        if code_block_lang.as_deref() == Some("aadl") {
+            // Emit AADL diagram placeholder instead of code block
+            let root = code_block_lines.iter()
+                .find_map(|l| l.strip_prefix("root:").or_else(|| l.strip_prefix("root :")))
+                .map(|s| s.trim())
+                .unwrap_or("");
+            html.push_str(&format!(
+                "
\ +

Loading AADL diagram...

\n", + html_escape(root) + )); + } else { + html.push_str("
");
+            html.push_str(&code_block_lines.join("\n"));
+            html.push_str("
\n"); + } + code_block_lines.clear(); + code_block_lang = None; + in_code_block = false; + } else { + // ... close open blocks ... + code_block_lang = trimmed.strip_prefix("```").map(|s| s.trim().to_string()); + in_code_block = true; + } + continue; +} +``` + +Add `let mut code_block_lang: Option = None;` at the top of the function. + +**Step 4: Run test to verify it passes** + +Run: `cargo test -p rivet-core -- render_aadl_code_block_placeholder` +Expected: PASS + +**Step 5: Commit** + +```bash +git add rivet-core/src/document.rs +git commit -m "feat(document): detect aadl code blocks and emit diagram placeholders" +``` + +--- + +### Task 7: rivet API endpoint for artifact links (rivet) + +**Files:** +- Modify: `rivet-cli/src/serve.rs` + +This endpoint returns the linked AADL component IDs for a given artifact, so the browser JS knows what to highlight. + +**Step 1: Add the endpoint** + +```rust +// Route: +.route("/api/links/{id}", get(api_artifact_links)) + +// Handler: +async fn api_artifact_links( + State(state): State, + Path(id): Path, +) -> axum::Json> { + let state = state.read().await; + let graph = &state.graph; + + let mut linked_ids = Vec::new(); + + // Forward links from this artifact + for link in graph.links_from(&id) { + if link.target.starts_with("AADL-") { + linked_ids.push(link.target.clone()); + } + } + + // Backlinks to this artifact + for bl in graph.backlinks_to(&id) { + if bl.source.starts_with("AADL-") { + linked_ids.push(bl.source.clone()); + } + } + + // If this IS an AADL artifact, include self + if id.starts_with("AADL-") { + linked_ids.push(id); + } + + axum::Json(linked_ids) +} +``` + +**Step 2: Commit** + +```bash +git add rivet-cli/src/serve.rs +git commit -m "feat(serve): add /api/links/{id} endpoint for diagram highlighting" +``` + +--- + +### Task 8: rivet serve -- JS for WASM loading + rendering + highlighting (rivet) + +**Files:** +- Modify: `rivet-cli/src/serve.rs` (inline JS in the dashboard) + +**Step 1: Add AADL diagram 
initialization JS** + +In the main `", + json + )); +} +``` + +**Step 2: Commit** + +```bash +git add rivet-cli/src/serve.rs +git commit -m "feat(serve): embed AADL link data in artifact detail for diagram highlighting" +``` diff --git a/docs/roadmap.md b/docs/roadmap.md new file mode 100644 index 0000000..13f4db6 --- /dev/null +++ b/docs/roadmap.md @@ -0,0 +1,105 @@ +--- +id: ROAD-001 +type: specification +title: Product Roadmap and Feature Plan +status: approved +glossary: + STPA: Systems-Theoretic Process Analysis + ASPICE: Automotive SPICE + OSLC: Open Services for Lifecycle Collaboration + ReqIF: Requirements Interchange Format + WASM: WebAssembly + HTMX: Hypermedia-driven AJAX +--- + +# Product Roadmap + +## Phase 1 — Core Engine (Complete) + +Phase 1 established the foundation: artifact model, adapters, schema system, +link graph, validation, and CLI tooling. + +### Adapters + +- [[FEAT-001]] — STPA YAML adapter for importing meld's safety analysis artifacts +- [[FEAT-002]] — Generic YAML adapter for canonical artifact format + +### Schema & Validation + +- [[FEAT-003]] — Schema loading and merging (common + domain overlays) +- [[FEAT-005]] — Validation engine (types, fields, links, traceability rules) +- [[FEAT-016]] — ASPICE 4.0 schema alignment (v4.0 verification types) +- [[FEAT-017]] — Cybersecurity schema (SEC.1-4, TARA, 10 artifact types) + +### Graph & Traceability + +- [[FEAT-004]] — Link graph with petgraph (cycles, orphans, reachability) +- [[FEAT-006]] — Traceability matrix computation with coverage percentages + +### CLI + +- [[FEAT-007]] — `rivet validate` command +- [[FEAT-008]] — `rivet stpa` command for direct STPA validation + +### Testing & Quality + +- [[FEAT-013]] — Property-based tests (proptest) for randomized verification +- [[FEAT-014]] — Integration test suite (dogfood, roundtrip, schema merge) +- [[FEAT-015]] — Criterion benchmarks at 100/1000/10000 scales + +## Phase 2 — Dashboard & Interchange (In Progress) + +Phase 2 adds the 
web dashboard and interchange formats for external tool +integration. + +### Dashboard + +- [[FEAT-009]] — HTTP serve with HTMX dashboard (axum, no frontend framework) + +The dashboard provides: artifact browsing, validation results, traceability +graph (via etch layout engine), coverage matrix, document viewer with markdown +rendering, source code viewer with line-level anchors, document linkage view, +verification tracking, test results, git diff view, and project switcher. + +### Interchange + +- [[FEAT-010]] — ReqIF 1.2 import/export adapter + +### AADL Architecture Integration + +- [[FEAT-018]] — AADL adapter via spar CLI JSON (Layer 1 import) +- [[FEAT-019]] — AADL architecture dogfood (rivet models itself in arch/) + +The `arch/` directory contains AADL models for rivet's own architecture: +`RivetSystem` (top-level system + core/cli processes), `RivetAdapters` +(extensible adapter subsystem + WASM runtime), and `RivetDashboard` +(axum/HTMX serve handler with view renderers and graph visualizer). + +## Phase 3 — Sync & Extensibility (Planned) + +Phase 3 enables bidirectional synchronization with external ALM tools and +runtime extensibility through WASM components. 
+ +### OSLC Integration + +- [[FEAT-011]] — OSLC RM/QM client for Polarion, DOORS, codebeamer sync + +### WASM Runtime + +- [[FEAT-012]] — WASM component adapters loaded at runtime via WIT interface +- [[FEAT-020]] — AADL browser rendering via spar WASM module + +## Test Coverage + +The following test artifacts verify feature implementations: + +- [[TEST-001]] — Store and model unit tests +- [[TEST-002]] — STPA adapter and schema tests +- [[TEST-003]] — Schema validation and merge tests +- [[TEST-004]] — Link graph and coverage tests +- [[TEST-005]] — ReqIF roundtrip tests +- [[TEST-006]] — Property-based tests (proptest) +- [[TEST-007]] — Integration test suite +- [[TEST-008]] — Diff module tests +- [[TEST-009]] — Document system tests +- [[TEST-010]] — Results model tests diff --git a/docs/srs.md b/docs/srs.md new file mode 100644 index 0000000..3b12129 --- /dev/null +++ b/docs/srs.md @@ -0,0 +1,87 @@ +--- +id: SRS-001 +type: specification +title: System Requirements Specification +status: draft +glossary: + STPA: Systems-Theoretic Process Analysis + UCA: Unsafe Control Action + ASPICE: Automotive SPICE + OSLC: Open Services for Lifecycle Collaboration + ReqIF: Requirements Interchange Format + WASM: WebAssembly +--- + +# System Requirements Specification + +## 1. Purpose + +This document specifies the system-level requirements for **Rivet**, an SDLC +traceability tool for safety-critical systems. Rivet manages lifecycle +artifacts (requirements, designs, tests, STPA analyses) as version-controlled +YAML files and validates their traceability links against composable schemas. + +## 2. Scope + +Rivet targets Automotive SPICE, ISO 26262, and ISO/SAE 21434 workflows. It +replaces heavyweight ALM tools with a text-file-first, git-friendly approach. + +## 3. Functional Requirements + +### 3.1 Artifact Management + +[[REQ-001]] defines the core principle: artifacts live as human-readable YAML +files under version control. 
+ +[[REQ-002]] extends this to STPA artifacts — losses, hazards, unsafe control +actions, causal factors, and loss scenarios. + +### 3.2 Traceability + +[[REQ-003]] requires full Automotive SPICE V-model traceability, from +stakeholder requirements down to unit verification and back. + +[[REQ-004]] mandates a validation engine that checks link integrity, +cardinality constraints, required fields, and traceability coverage. + +### 3.3 Schema System + +[[REQ-010]] requires schema-driven validation where artifact types, fields, +link types, and traceability rules are defined declaratively. + +[[REQ-015]] aligns schemas with ASPICE 4.0 terminology (verification replaces +test). + +[[REQ-016]] adds cybersecurity schema support for ISO/SAE 21434 and ASPICE +SEC.1-4. + +### 3.4 Interoperability + +[[REQ-005]] covers ReqIF 1.2 import/export for requirements interchange with +tools like DOORS, Polarion, and codebeamer. + +[[REQ-006]] specifies OSLC-based bidirectional synchronization rather than +per-tool REST adapters. + +[[REQ-008]] enables WASM component adapters for custom format plugins. + +### 3.5 User Interface + +[[REQ-007]] requires both a CLI and an HTTP serve pattern for the dashboard. + +### 3.6 Quality + +[[REQ-012]] mandates comprehensive CI quality gates (fmt, clippy, test, miri, +audit, deny, vet, coverage). + +[[REQ-013]] requires performance benchmarks with regression detection. + +[[REQ-014]] structures test artifacts to mirror the ASPICE SWE.4/5/6 levels. + +[[REQ-009]] ties test results to GitHub releases as evidence artifacts. + +[[REQ-011]] pins Rust edition 2024 with MSRV 1.85. + +## 4. Glossary + +See the glossary panel below (defined in document frontmatter). 
diff --git a/docs/verification.md b/docs/verification.md new file mode 100644 index 0000000..380f20f --- /dev/null +++ b/docs/verification.md @@ -0,0 +1,283 @@ +--- +id: VER-001 +type: specification +title: Verification Strategy and Test Mapping +status: approved +glossary: + SWE.4: ASPICE Software Unit Verification (proptest, Miri) + SWE.5: ASPICE Software Integration Verification (cross-module tests) + SWE.6: ASPICE Software Qualification Verification (full pipeline, benchmarks) + STPA: Systems-Theoretic Process Analysis + ASPICE: Automotive SPICE +--- + +# Verification Strategy and Test Mapping + +## 1. Purpose + +This document defines the verification strategy for the Rivet project and maps +test suites to the requirements they verify. Rivet dogfoods itself: the same +tool that validates ASPICE traceability for its users is used to track its own +requirements, design decisions, and test coverage. + +The test suite is organized to mirror ASPICE SWE.4/5/6 verification levels +as specified by [[REQ-014]]. + +## 2. Test Suite Overview + +Rivet's test suite consists of 59 tests across four categories: + +| Level | Category | Test Count | File | +|-------|---------------------|------------|-------------------------------| +| SWE.4 | Unit tests | 30 | `rivet-core/src/*.rs` | +| SWE.4 | Property tests | 6 | `rivet-core/tests/proptest_core.rs` | +| SWE.5 | Integration tests | 18 | `rivet-core/tests/integration.rs` | +| SWE.5 | STPA roundtrip | 5 | `rivet-core/tests/stpa_roundtrip.rs` | +| SWE.6 | Benchmarks | 7 groups | `rivet-core/benches/` | +| SWE.6 | CI quality gates | 10 stages | `.github/workflows/` | + +All 59 tests pass. Zero failures, zero ignored. + +## 3. Unit Tests (SWE.4) + +Unit tests live inside `#[cfg(test)]` modules within rivet-core source files. +They verify individual module behavior in isolation. 
+ +### 3.1 Diff Module (5 tests) + +File: `rivet-core/src/diff.rs` + +| Test | Verifies | +|-------------------------------|---------------| +| `empty_diff` | [[REQ-001]] | +| `identical_stores` | [[REQ-001]] | +| `added_artifact` | [[REQ-001]] | +| `removed_artifact` | [[REQ-001]] | +| `modified_title` | [[REQ-001]] | + +The diff module computes structural differences between two store snapshots. +These tests verify that added, removed, modified, and unchanged artifacts are +correctly classified. + +### 3.2 Document Module (9 tests) + +File: `rivet-core/src/document.rs` + +| Test | Verifies | +|-----------------------------------|---------------| +| `parse_frontmatter` | [[REQ-001]] | +| `missing_frontmatter_is_error` | [[REQ-001]] | +| `document_store` | [[REQ-001]] | +| `render_html_headings` | [[REQ-007]] | +| `render_html_resolves_refs` | [[REQ-007]] | +| `default_doc_type_when_omitted` | [[REQ-001]] | +| `multiple_refs_on_one_line` | [[REQ-001]] | +| `extract_references_from_body` | [[REQ-004]] | +| `extract_sections_hierarchy` | [[REQ-007]] | + +Document tests verify YAML frontmatter parsing, wiki-link reference extraction, +HTML rendering, and the document store. + +### 3.3 Results Module (9 tests) + +File: `rivet-core/src/results.rs` + +| Test | Verifies | +|-----------------------------------|---------------| +| `test_status_display` | [[REQ-009]] | +| `test_status_is_pass_fail` | [[REQ-009]] | +| `test_result_store_insert_and_sort` | [[REQ-009]] | +| `test_latest_for` | [[REQ-009]] | +| `test_history_for` | [[REQ-009]] | +| `test_summary` | [[REQ-009]] | +| `test_load_results_empty_dir` | [[REQ-009]] | +| `test_load_results_nonexistent_dir` | [[REQ-009]] | +| `test_roundtrip_yaml` | [[REQ-009]] | + +These tests verify the test results model: status enum behavior, result store +ordering, latest/history queries, aggregate statistics, YAML roundtrip +serialization, and edge cases (empty/nonexistent directories). 
+ +### 3.4 ReqIF Module (3 tests) + +File: `rivet-core/src/reqif.rs` + +| Test | Verifies | +|-----------------------------------|---------------| +| `test_export_produces_valid_xml` | [[REQ-005]] | +| `test_parse_minimal_reqif` | [[REQ-005]] | +| `test_roundtrip` | [[REQ-005]] | + +These tests verify that ReqIF 1.2 XML export produces valid structure, that +minimal ReqIF documents can be parsed, and that full roundtrip +(export then import) preserves all artifact data. + +### 3.5 Coverage Module (4 tests) + +File: `rivet-core/src/coverage.rs` + +| Test | Verifies | +|-----------------------------------|---------------| +| `full_coverage` | [[REQ-004]] | +| `partial_coverage` | [[REQ-004]] | +| `zero_artifacts_gives_100_percent` | [[REQ-004]] | +| `to_json_roundtrip` | [[REQ-004]] | + +Coverage tests verify the traceability coverage computation engine: full +coverage detection, partial coverage percentage calculation, vacuous truth +for empty sets, and JSON serialization roundtrip. + +## 4. Property-Based Tests (SWE.4) + +File: `rivet-core/tests/proptest_core.rs` + +Property tests use proptest to verify invariants with randomized inputs. +Each test runs 30-50 cases with generated data. + +| Test | Verifies | +|-----------------------------------|----------------------| +| `prop_store_insert_all_retrievable` | [[REQ-001]] | +| `prop_store_rejects_duplicates` | [[REQ-001]] | +| `prop_schema_merge_idempotent` | [[REQ-010]] | +| `prop_link_graph_backlink_symmetry` | [[REQ-004]] | +| `prop_validation_determinism` | [[REQ-004]] | +| `prop_store_types_match_inserted` | [[REQ-001]] | + +These properties verify: + +- **Store consistency** -- Inserting N unique artifacts yields a store of + size N where every artifact is retrievable by ID and by-type counts match. +- **Duplicate rejection** -- Inserting the same ID twice is rejected. +- **Schema merge idempotence** -- Merging a schema with itself produces the + same artifact types, link types, and inverse maps. 
+- **Backlink symmetry** -- Every forward link in the graph has a corresponding + backlink at the target node. +- **Validation determinism** -- Running `validate()` twice on identical inputs + produces identical diagnostic output. +- **Type iterator correctness** -- The `types()` iterator returns exactly the + set of types that have artifacts in the store. + +## 5. Integration Tests (SWE.5) + +File: `rivet-core/tests/integration.rs` + +Integration tests exercise cross-module pipelines: loading real schemas, +building stores, computing link graphs, running validation, and computing +traceability matrices. + +| Test | Verifies | +|-----------------------------------|-----------------------------| +| `test_dogfood_validate` | [[REQ-001]], [[REQ-010]] | +| `test_generic_yaml_roundtrip` | [[REQ-001]] | +| `test_schema_merge_preserves_types` | [[REQ-010]], [[REQ-003]] | +| `test_cybersecurity_schema_merge` | [[REQ-016]] | +| `test_traceability_matrix` | [[REQ-004]] | +| `test_traceability_matrix_empty` | [[REQ-004]] | +| `test_query_filters` | [[REQ-007]] | +| `test_link_graph_integration` | [[REQ-004]] | +| `test_aspice_traceability_rules` | [[REQ-003]], [[REQ-015]] | +| `test_store_upsert_overwrites` | [[REQ-001]] | +| `test_store_upsert_type_change` | [[REQ-001]] | +| `test_reqif_roundtrip` | [[REQ-005]] | +| `test_reqif_store_integration` | [[REQ-005]] | +| `test_diff_identical_stores` | [[REQ-001]] | +| `test_diff_added_artifact` | [[REQ-001]] | +| `test_diff_removed_artifact` | [[REQ-001]] | +| `test_diff_modified_artifact` | [[REQ-001]] | +| `test_diff_diagnostic_changes` | [[REQ-004]] | + +### 5.1 Dogfood Validation + +The `test_dogfood_validate` test loads Rivet's own `rivet.yaml`, schemas, and +artifacts, then runs the full validation pipeline. This test must pass with +zero errors. It verifies that Rivet can validate itself -- the most direct +form of dogfooding. 
+ +### 5.2 STPA Roundtrip Tests + +File: `rivet-core/tests/stpa_roundtrip.rs` + +| Test | Verifies | +|-----------------------------------|---------------| +| `test_stpa_schema_loads` | [[REQ-002]] | +| `test_store_insert_and_lookup` | [[REQ-001]] | +| `test_duplicate_id_rejected` | [[REQ-001]] | +| `test_broken_link_detected` | [[REQ-004]] | +| `test_validation_catches_unknown_type` | [[REQ-004]], [[REQ-010]] | + +These tests verify STPA-specific schema loading and validation: that all +STPA artifact types and link types are present after schema load, that basic +store operations work, and that broken links and unknown types are detected. + +## 6. OSLC Integration Tests + +File: `rivet-core/tests/oslc_integration.rs` + +These tests are feature-gated behind `#[cfg(feature = "oslc")]` and use +wiremock to simulate an OSLC-compliant ALM tool. They verify [[REQ-006]]: + +- Service Provider Catalog discovery +- OSLC RM query with filtering (oslc.where, oslc.select) +- Single resource GET +- Resource creation (POST to creation factory) +- Resource update (PUT) +- Pull via SyncAdapter (OSLC resources converted to Rivet artifacts) +- Mixed resource type handling (RM, QM, CM) +- Error handling (404, 500, malformed JSON) +- Authentication (basic auth, bearer token) +- Pagination (next_page link) + +## 7. Benchmarks (SWE.6) + +[[REQ-013]] and [[DD-009]] specify criterion benchmarks at multiple scales. +Seven benchmark groups measure core operations at 100, 1000, and 10000 +artifact scales: + +| Benchmark Group | Measures | +|------------------------|-----------------------------------------| +| `store_insert` | Artifact insertion throughput | +| `store_lookup` | By-ID and by-type lookup latency | +| `schema_load` | Schema file loading and merge time | +| `link_graph_build` | petgraph construction from store | +| `validate` | Full validation pass duration | +| `matrix_compute` | Traceability matrix computation | +| `coverage_compute` | Coverage report generation | + +## 8. 
CI Quality Gates (SWE.6) + +[[REQ-012]] and [[DD-008]] mandate the following CI stages, each acting as +a qualification gate: + +| Gate | Tool | What it catches | +|----------------|---------------------|----------------------------------------| +| `fmt` | `cargo fmt` | Code style violations | +| `clippy` | `clippy -D warnings`| Lint warnings, unsafe patterns | +| `test` | `cargo test` | Functional regressions | +| `miri` | `cargo +nightly miri` | Undefined behavior, memory safety | +| `proptest` | proptest | Invariant violations with random input | +| `audit` | `cargo audit` | Known CVEs in dependencies | +| `deny` | `cargo deny` | License violations, duplicate deps | +| `vet` | `cargo vet` | Supply chain verification | +| `coverage` | `cargo llvm-cov` | Code coverage metrics | +| `msrv` | MSRV 1.85 check | Backward compatibility ([[REQ-011]]) | + +## 9. Requirement-to-Test Mapping Summary + +| Requirement | Unit | Integration | Property | Total | +|---------------|------|-------------|----------|-------| +| [[REQ-001]] | 14 | 7 | 3 | 24 | +| [[REQ-002]] | 0 | 1 | 0 | 1 | +| [[REQ-003]] | 0 | 2 | 0 | 2 | +| [[REQ-004]] | 5 | 5 | 2 | 12 | +| [[REQ-005]] | 3 | 2 | 0 | 5 | +| [[REQ-006]] | 0 | 0 (gated) | 0 | 0+ | +| [[REQ-007]] | 3 | 1 | 0 | 4 | +| [[REQ-009]] | 9 | 0 | 0 | 9 | +| [[REQ-010]] | 0 | 2 | 1 | 3 | +| [[REQ-015]] | 0 | 1 | 0 | 1 | +| [[REQ-016]] | 0 | 1 | 0 | 1 | + +Requirements without direct test coverage ([[REQ-006]], [[REQ-008]], +[[REQ-011]], [[REQ-012]], [[REQ-013]], [[REQ-014]]) are verified through CI +quality gates, feature-gated integration tests, or benchmark KPIs rather than +unit tests. 
diff --git a/etch/src/svg.rs b/etch/src/svg.rs index ab9d30f..7700e26 100644 --- a/etch/src/svg.rs +++ b/etch/src/svg.rs @@ -143,10 +143,13 @@ fn write_style(svg: &mut String, options: &SvgOptions) { \x20 .node text {{ font-family: {font}; font-size: {fs}px; \ fill: #222; text-anchor: middle; dominant-baseline: central; }}\n\ \x20 .node .sublabel {{ font-size: {}px; fill: #666; }}\n\ - \x20 .edge path {{ fill: none; stroke: {ec}; stroke-width: 1.2; \ + \x20 .edge path {{ fill: none; stroke: {ec}; stroke-width: 1.4; \ marker-end: url(#arrowhead); }}\n\ + \x20 .edge .label-bg {{ fill: #fff; opacity: 0.85; rx: 3; }}\n\ \x20 .edge text {{ font-family: {font}; font-size: {}px; \ - fill: {ec}; text-anchor: middle; }}\n\ + fill: #555; text-anchor: middle; dominant-baseline: central; \ + font-weight: 500; }}\n\ + \x20 .node:hover rect {{ filter: brightness(0.92); }}\n\ \x20 \n", fs - 2.0, fs - 2.0, @@ -175,15 +178,25 @@ fn write_edges(svg: &mut String, layout: &GraphLayout) { writeln!(svg, " ").unwrap(); - // Edge label at midpoint. + // Edge label at midpoint with background pill. if !edge.label.is_empty() { let mid = edge.points.len() / 2; let (mx, my) = edge.points[mid]; + let label = xml_escape(&edge.label); + let text_y = my - 4.0; + // Approximate label width: ~6.5px per char at default font size. 
+ let approx_w = edge.label.len() as f64 * 6.5 + 8.0; + let approx_h = 14.0; writeln!( svg, - " {}", - my - 4.0, - xml_escape(&edge.label), + " ", + mx - approx_w / 2.0, + text_y - approx_h / 2.0, + ) + .unwrap(); + writeln!( + svg, + " {label}", ) .unwrap(); } diff --git a/examples/aadl/aadl/flight-control.aadl b/examples/aadl/aadl/flight-control.aadl new file mode 100644 index 0000000..777dc6c --- /dev/null +++ b/examples/aadl/aadl/flight-control.aadl @@ -0,0 +1,22 @@ +package FlightControl +public + system Controller + features + sensor_in: in data port; + actuator_out: out data port; + end Controller; + + process NavProcess + features + inp: in data port; + outp: out data port; + end NavProcess; + + system implementation Controller.Basic + subcomponents + nav: process NavProcess; + connections + c1: port sensor_in -> nav.inp; + c2: port nav.outp -> actuator_out; + end Controller.Basic; +end FlightControl; diff --git a/examples/aadl/artifacts/requirements.yaml b/examples/aadl/artifacts/requirements.yaml new file mode 100644 index 0000000..cb813f2 --- /dev/null +++ b/examples/aadl/artifacts/requirements.yaml @@ -0,0 +1,42 @@ +artifacts: + - id: STAKE-001 + type: stakeholder-req + title: System shall respond to pilot inputs in real-time + status: approved + + - id: SYSREQ-001 + type: system-req + title: Flight controller shall process sensor data within 50ms + status: approved + fields: + req-type: performance + priority: must + links: + - type: derives-from + target: STAKE-001 + + - id: AADL-FlightControl-Controller + type: aadl-component + title: system FlightControl Controller + status: imported + tags: [aadl] + fields: + category: system + aadl-package: FlightControl + classifier-kind: type + links: + - type: allocated-from + target: SYSREQ-001 + + - id: AADL-FlightControl-Controller.Basic + type: aadl-component + title: system implementation Controller.Basic (FlightControl) + status: imported + tags: [aadl] + fields: + category: system + aadl-package: 
FlightControl + classifier-kind: implementation + links: + - type: allocated-from + target: SYSREQ-001 diff --git a/examples/aadl/rivet.yaml b/examples/aadl/rivet.yaml new file mode 100644 index 0000000..db86a19 --- /dev/null +++ b/examples/aadl/rivet.yaml @@ -0,0 +1,11 @@ +project: + name: aadl-integration-example + version: "0.1.0" + schemas: + - common + - aspice + - aadl + +sources: + - path: artifacts + format: generic-yaml diff --git a/examples/aspice/artifacts/architecture.yaml b/examples/aspice/artifacts/architecture.yaml new file mode 100644 index 0000000..110cc90 --- /dev/null +++ b/examples/aspice/artifacts/architecture.yaml @@ -0,0 +1,176 @@ +artifacts: + # ── System Architecture (SYS.3) ────────────────────────────────────── + + - id: SYSARCH-1 + type: system-arch-component + title: Hydraulic Control Unit + status: approved + description: > + The HCU receives brake pressure commands from the ECU and drives + proportional solenoid valves to modulate brake line pressure + independently on each axle. Contains the valve block, pump motor, + and pressure sensors. + tags: [braking, hcu, hardware] + fields: + component-type: mixed + interfaces: + provided: + - name: pressure-command + protocol: CAN FD + description: Accepts 12-bit pressure demand per axle at 100 Hz + required: + - name: power-supply + description: 12 V nominal, 60 A peak during pump operation + links: + - type: allocated-from + target: SYSREQ-1 + - type: allocated-from + target: SYSREQ-2 + + - id: SYSARCH-2 + type: system-arch-component + title: ABS Electronic Control Unit + status: approved + description: > + The ABS ECU hosts the slip control software, reads wheel speed sensors + via the sensor interface, and commands pressure modulation through the + HCU. Includes the microcontroller, CAN FD transceiver, and power + management. 
+ tags: [braking, abs, ecu] + fields: + component-type: mixed + interfaces: + provided: + - name: abs-status + protocol: CAN FD + description: ABS active flag, wheel speeds, slip ratios at 100 Hz + required: + - name: wheel-speed-input + protocol: analog + description: 4x wheel speed sensor signals (inductive, 48 teeth) + - name: hcu-command + protocol: CAN FD + description: Pressure build/hold/release commands to HCU + links: + - type: allocated-from + target: SYSREQ-3 + + # ── Software Architecture (SWE.2) ──────────────────────────────────── + + - id: SWARCH-1 + type: sw-arch-component + title: Brake Pressure Manager + status: approved + description: > + Software component responsible for computing brake pressure demands + for each axle. Reads pedal position and axle load estimates, applies + the load-dependent ratio, and outputs DAC commands to the HCU valve + driver. Runs in the 10 ms periodic task. + tags: [braking, ebd, software] + fields: + interfaces: + provided: + - name: pressure_demand_output + type: function + description: "fn pressure_demand(pedal: u16, speed: u16, ratio: f32) -> [u16; 2]" + required: + - name: axle_load_input + type: function + description: "fn get_axle_loads() -> (f32, f32)" + concurrency: single-threaded (10 ms cyclic task) + resource-budgets: + stack: 2 KiB + wcet: 200 us + links: + - type: allocated-from + target: SWREQ-1 + - type: allocated-from + target: SWREQ-2 + + - id: SWARCH-2 + type: sw-arch-component + title: ABS Slip Controller + status: approved + description: > + Software component implementing the wheel slip regulation algorithm. + Reads wheel speed sensor inputs at 500 Hz via the sensor abstraction + layer, computes individual wheel slip ratios, determines the pressure + modulation phase (build/hold/release), and issues commands to the HCU + driver. Runs in the 2 ms high-priority task. 
+ tags: [braking, abs, software] + fields: + interfaces: + provided: + - name: slip_status + type: struct + description: "struct SlipStatus { slip_ratio: [f32; 4], phase: [Phase; 4], abs_active: bool }" + required: + - name: wheel_speed_input + type: function + description: "fn read_wheel_speeds() -> [u16; 4]" + - name: hcu_command + type: function + description: "fn set_pressure_phase(wheel: u8, phase: Phase)" + concurrency: single-threaded (2 ms cyclic task) + resource-budgets: + stack: 4 KiB + wcet: 400 us + links: + - type: allocated-from + target: SWREQ-3 + + # ── Software Detailed Design / Unit Construction (SWE.3) ───────────── + + - id: SWDD-1 + type: sw-detail-design + title: Pressure demand calculation function + status: approved + description: > + Implements the brake pressure demand calculation. Reads the 12-bit + ADC pedal position value, multiplies by the load-dependent front/rear + ratio from the axle load estimator, clamps the result to the + [0, 4095] DAC range, and writes to the HCU valve driver output buffer. + Includes a rate limiter (max 500 LSB/cycle) to prevent pressure + spikes. + tags: [braking, ebd, implementation] + fields: + unit: src/braking/pressure_demand.rs + function: calculate_pressure_demand + algorithm: > + 1. Read pedal ADC (12-bit, 0-4095). + 2. Read axle load ratio (front_ratio, rear_ratio) from estimator. + 3. front_demand = clamp(pedal * front_ratio, 0, 4095). + 4. rear_demand = clamp(pedal * rear_ratio, 0, 4095). + 5. Apply rate limiter: abs(demand - prev_demand) <= 500. + 6. Write to HCU output buffer. + links: + - type: refines + target: SWARCH-1 + + - id: SWDD-2 + type: sw-detail-design + title: Wheel slip ratio computation and phase selector + status: approved + description: > + Computes individual wheel slip ratios from raw wheel speed sensor + ticks and vehicle reference speed. Implements the ABS phase state + machine: NORMAL -> BUILD -> HOLD -> RELEASE -> NORMAL based on slip + threshold crossings with hysteresis. 
Transition thresholds are + calibratable parameters stored in NVM. + tags: [braking, abs, implementation] + fields: + unit: src/braking/slip_control.rs + function: compute_slip_and_select_phase + algorithm: > + 1. Convert wheel speed ticks to m/s using calibration factor. + 2. Estimate vehicle reference speed as max(wheel_speeds). + 3. slip[i] = (v_ref - v_wheel[i]) / v_ref (guard div-by-zero). + 4. Phase state machine per wheel: + - NORMAL: if slip > threshold_high -> BUILD + - BUILD: if slip > threshold_release -> HOLD + - HOLD: if slip < threshold_low -> RELEASE + - RELEASE: if slip < threshold_normal -> NORMAL + 5. Output phase commands to HCU driver. + links: + - type: refines + target: SWARCH-2 diff --git a/examples/aspice/artifacts/requirements.yaml b/examples/aspice/artifacts/requirements.yaml new file mode 100644 index 0000000..b498bda --- /dev/null +++ b/examples/aspice/artifacts/requirements.yaml @@ -0,0 +1,156 @@ +artifacts: + # ── Stakeholder Requirements (SYS.1) ────────────────────────────────── + + - id: STKH-1 + type: stakeholder-req + title: Electronic Brake Force Distribution + status: approved + description: > + The vehicle shall distribute braking force between front and rear axles + electronically, adapting to load conditions, to ensure stable and + predictable deceleration across all operating conditions. + tags: [braking, ebd] + fields: + priority: must + source: customer + + - id: STKH-2 + type: stakeholder-req + title: Anti-lock Braking System + status: approved + description: > + The vehicle shall prevent wheel lock-up during emergency braking on all + surface types to maintain steering control and reduce stopping distance, + compliant with ECE R13-H and FMVSS 135. 
+ tags: [braking, abs] + fields: + priority: must + source: regulation + + # ── System Requirements (SYS.2) ─────────────────────────────────────── + + - id: SYSREQ-1 + type: system-req + title: Brake pressure modulation per axle + status: approved + description: > + The braking system shall independently modulate brake pressure on front + and rear axles within 10 ms control cycle time, using proportional + solenoid valves driven by the hydraulic control unit. + tags: [braking, ebd, hydraulics] + fields: + req-type: functional + priority: must + verification-criteria: > + Measure brake pressure response on a dynamometer at each axle during + step and ramp demand profiles; confirm independent modulation within + 10 ms cycle time. + links: + - type: derives-from + target: STKH-1 + + - id: SYSREQ-2 + type: system-req + title: Dynamic load-dependent brake force ratio + status: approved + description: > + The system shall compute the front-to-rear brake force ratio as a + function of estimated vehicle deceleration, axle load transfer, and + surface friction coefficient, updating the ratio every control cycle. + tags: [braking, ebd, control] + fields: + req-type: functional + priority: must + verification-criteria: > + Verify computed brake force ratio against reference model output for + a set of deceleration, load, and friction scenarios on a + hardware-in-the-loop bench. + links: + - type: derives-from + target: STKH-1 + + - id: SYSREQ-3 + type: system-req + title: Wheel slip regulation + status: approved + description: > + The ABS controller shall regulate individual wheel slip to the target + slip ratio (10-20 % depending on surface) by modulating brake pressure + through build, hold, and release phases, achieving regulation within + 3 pressure cycles after lock-up onset detection. 
+ tags: [braking, abs, control] + fields: + req-type: functional + priority: must + verification-criteria: > + Execute full-vehicle ABS stops on low-mu, split-mu, and high-mu + surfaces; confirm wheel slip stays within target band and regulation + onset occurs within 3 pressure cycles. + links: + - type: derives-from + target: STKH-2 + + # ── Software Requirements (SWE.1) ───────────────────────────────────── + + - id: SWREQ-1 + type: sw-req + title: Brake pressure demand calculation + status: approved + description: > + The software shall calculate the target brake pressure for each axle + based on driver brake pedal input, vehicle speed, and the load-dependent + ratio, outputting a 12-bit DAC command to the hydraulic valve driver + every 10 ms. + tags: [braking, ebd, software] + fields: + req-type: functional + priority: must + verification-criteria: > + Unit test the pressure demand function with boundary and nominal pedal + input, speed, and ratio combinations; verify DAC output within +/- 1 LSB + of the reference model. + links: + - type: derives-from + target: SYSREQ-1 + + - id: SWREQ-2 + type: sw-req + title: Axle load estimator + status: approved + description: > + The software shall estimate front and rear axle loads using longitudinal + acceleration from the inertial measurement unit and static weight + distribution parameters, updating the estimate every 10 ms with a + first-order low-pass filter (time constant 50 ms). + tags: [braking, ebd, estimation] + fields: + req-type: functional + priority: must + verification-criteria: > + Inject known acceleration profiles and verify estimated axle loads + against a Simulink reference model; maximum steady-state error + shall not exceed 2 % of nominal axle load. 
+ links: + - type: derives-from + target: SYSREQ-2 + + - id: SWREQ-3 + type: sw-req + title: ABS slip control algorithm + status: approved + description: > + The software shall implement a threshold-based ABS slip control + algorithm that reads wheel speed sensor inputs at 500 Hz, computes + individual wheel slip ratios, and commands pressure build, hold, or + release actions to maintain each wheel within the target slip window. + tags: [braking, abs, software] + fields: + req-type: functional + priority: must + verification-criteria: > + Execute model-in-the-loop tests with recorded wheel speed data from + ice, wet, and dry surfaces; verify that slip regulation commands + match the validated reference controller output. + links: + - type: derives-from + target: SYSREQ-3 diff --git a/examples/aspice/artifacts/verification.yaml b/examples/aspice/artifacts/verification.yaml new file mode 100644 index 0000000..146dc31 --- /dev/null +++ b/examples/aspice/artifacts/verification.yaml @@ -0,0 +1,316 @@ +artifacts: + # ── Unit Verification (SWE.4) ──────────────────────────────────────── + + - id: UVER-1 + type: unit-verification + title: Pressure demand calculation unit tests + status: approved + description: > + Automated unit tests for the pressure demand calculation function. + Covers nominal pedal inputs, boundary conditions (0 and 4095), + rate limiter activation, and axle load ratio extremes. 
+ tags: [braking, ebd, unit-test] + fields: + method: automated-test + preconditions: + - Rust test harness with mock HCU output buffer + - Calibration constants loaded from test fixture + steps: + - step: 1 + action: Call calculate_pressure_demand with pedal=0, ratio=(0.6, 0.4) + expected: front_demand=0, rear_demand=0 + - step: 2 + action: Call with pedal=4095, ratio=(0.6, 0.4) + expected: front_demand=2457, rear_demand=1638 + - step: 3 + action: Call with pedal=4095 after previous pedal=0 (rate limiter test) + expected: Demand increases by at most 500 per cycle + - step: 4 + action: Call with pedal=2048, ratio=(1.0, 0.0) — full front bias + expected: front_demand=2048, rear_demand=0 + links: + - type: verifies + target: SWDD-1 + + - id: UVER-2 + type: unit-verification + title: Slip ratio and phase state machine unit tests + status: approved + description: > + Automated unit tests for the wheel slip computation and ABS phase + state machine. Tests nominal slip calculation, divide-by-zero guard, + and all state transitions with calibratable thresholds. 
+ tags: [braking, abs, unit-test] + fields: + method: automated-test + preconditions: + - Rust test harness with mock wheel speed sensor inputs + - NVM calibration parameters loaded from test fixture + steps: + - step: 1 + action: Set all wheel speeds equal to reference speed + expected: Slip ratio = 0.0 for all wheels, phase = NORMAL + - step: 2 + action: Set one wheel speed to 80 % of reference (20 % slip) + expected: Slip ratio = 0.2, phase transitions to BUILD + - step: 3 + action: Set reference speed to 0 (vehicle stationary) + expected: Slip ratio clamped to 0.0, no divide-by-zero + - step: 4 + action: Simulate full ABS cycle (NORMAL -> BUILD -> HOLD -> RELEASE -> NORMAL) + expected: Each phase transition occurs at correct threshold crossings + links: + - type: verifies + target: SWDD-2 + + # ── Software Integration Verification (SWE.5) ──────────────────────── + + - id: SWINTVER-1 + type: sw-integration-verification + title: Brake Pressure Manager integration verification + status: approved + description: > + Integration test verifying the Brake Pressure Manager component + interfaces. Validates that the pressure demand output is correctly + consumed by the HCU valve driver and that the axle load estimator + input interface provides consistent data across task boundaries. 
+ tags: [braking, ebd, integration] + fields: + method: automated-test + preconditions: + - Software-in-the-loop environment with HCU driver stub + - Axle load estimator component running in parallel task + steps: + - step: 1 + action: Run 10 ms cyclic task for 100 cycles with ramp pedal input + expected: Pressure demand output follows pedal ramp with correct ratio + - step: 2 + action: Inject a step change in axle load estimate mid-cycle + expected: Pressure ratio adapts within one control cycle (10 ms) + - step: 3 + action: Verify inter-component data consistency under task preemption + expected: No data tearing in shared axle load structure + links: + - type: verifies + target: SWARCH-1 + + - id: SWINTVER-2 + type: sw-integration-verification + title: ABS Slip Controller integration verification + status: approved + description: > + Integration test verifying the ABS Slip Controller component + interfaces with the wheel speed sensor abstraction layer and the + HCU command interface. Validates end-to-end data flow from sensor + read to pressure phase command output. 
+ tags: [braking, abs, integration] + fields: + method: automated-test + preconditions: + - Software-in-the-loop environment with sensor and HCU driver stubs + - Simulated wheel speed profiles for ABS activation scenario + steps: + - step: 1 + action: Run 2 ms cyclic task with all wheels at constant speed + expected: No ABS intervention, all phases remain NORMAL + - step: 2 + action: Inject sudden wheel deceleration on one wheel (simulated lock-up) + expected: ABS activates within 3 control cycles, phase transitions to BUILD + - step: 3 + action: Verify HCU command output matches expected phase sequence + expected: Build, hold, release commands issued in correct order + links: + - type: verifies + target: SWARCH-2 + + # ── Software Verification (SWE.6) ──────────────────────────────────── + + - id: SWVER-1 + type: sw-verification + title: Brake pressure demand and axle load estimation verification + status: approved + description: > + Software-level verification of the brake pressure demand calculation + and axle load estimator against their software requirements. + Conducted on the target microcontroller using hardware-in-the-loop + simulation with calibrated brake pedal and IMU sensor inputs. 
+ tags: [braking, ebd, hil] + fields: + method: automated-test + preconditions: + - Hardware-in-the-loop bench with calibrated pedal sensor simulator + - IMU signal generator for acceleration profiles + - CAN FD bus analyzer monitoring HCU commands + steps: + - step: 1 + action: Apply 50 % pedal input at 60 km/h on level road + expected: DAC output matches expected pressure demand within +/- 1 LSB + - step: 2 + action: Apply full braking during 0.8 g deceleration + expected: Axle load estimate shifts ratio towards front axle within 2 % + - step: 3 + action: Release brake pedal rapidly + expected: Pressure demand ramps down respecting rate limiter + links: + - type: verifies + target: SWREQ-1 + - type: verifies + target: SWREQ-2 + + - id: SWVER-2 + type: sw-verification + title: ABS slip control algorithm verification + status: approved + description: > + Software-level verification of the ABS slip control algorithm against + its software requirement. Uses a vehicle dynamics model in the + hardware-in-the-loop environment to simulate lock-up scenarios on + various road surfaces. 
+ tags: [braking, abs, hil] + fields: + method: automated-test + preconditions: + - Hardware-in-the-loop bench with vehicle dynamics model (CarMaker) + - Wheel speed sensor emulation (4 channels, 48 teeth) + - Road surface friction profiles (ice, wet, dry, split-mu) + steps: + - step: 1 + action: Emergency braking at 100 km/h on dry asphalt (mu = 0.9) + expected: No wheel lock-up, slip stays within 10-15 % target band + - step: 2 + action: Emergency braking at 80 km/h on ice (mu = 0.15) + expected: ABS activates, slip regulated within 10-20 % band + - step: 3 + action: Emergency braking at 60 km/h on split-mu (left ice, right dry) + expected: Independent wheel regulation, vehicle maintains directional stability + links: + - type: verifies + target: SWREQ-3 + + # ── System Integration Verification (SYS.4) ────────────────────────── + + - id: SYSINTVER-1 + type: sys-integration-verification + title: HCU integration verification + status: approved + description: > + System integration verification of the Hydraulic Control Unit with + the ABS ECU. Validates the CAN FD command interface, solenoid valve + response timing, and pressure sensor feedback loop on the physical + brake system test bench. 
+ tags: [braking, hcu, system-integration] + fields: + method: automated-test + preconditions: + - Physical brake system test bench with HCU and ABS ECU + - CAN FD bus connected and operational + - Brake fluid system primed and bled + steps: + - step: 1 + action: Send pressure build command for front axle via CAN FD + expected: Front solenoid valve opens within 5 ms, pressure rises + - step: 2 + action: Send hold command followed by release command + expected: Pressure holds stable, then decreases within 10 ms + - step: 3 + action: Verify pressure sensor feedback matches commanded pressure + expected: Feedback within 3 % of commanded value at steady state + links: + - type: verifies + target: SYSARCH-1 + + - id: SYSINTVER-2 + type: sys-integration-verification + title: ABS ECU integration verification + status: approved + description: > + System integration verification of the ABS ECU with wheel speed + sensors and the HCU. Validates the complete sensor-to-actuator + signal chain on the vehicle integration test bench. 
+      tags: [braking, abs, system-integration]
+      fields:
+        method: automated-test
+        preconditions:
+          - Vehicle integration test bench with all four wheel speed sensors
+          - ABS ECU connected to HCU via CAN FD
+          - Wheel speed simulation via motor-driven tone wheels
+        steps:
+          - step: 1
+            action: Spin all tone wheels at constant speed (60 km/h equivalent)
+            expected: ECU reads four valid wheel speeds, ABS inactive
+          - step: 2
+            action: Decelerate one tone wheel rapidly (simulate lock-up)
+            expected: ECU detects slip, sends pressure modulation commands to HCU
+          - step: 3
+            action: Verify end-to-end latency from sensor event to valve actuation
+            expected: Total latency at most 6 ms (2 ms computation + 4 ms CAN and valve actuation)
+      links:
+        - type: verifies
+          target: SYSARCH-2
+
+  # ── System Verification (SYS.5) ──────────────────────────────────────
+
+  - id: SYSVER-1
+    type: sys-verification
+    title: Brake pressure modulation and load-dependent ratio system test
+    status: approved
+    description: >
+      Full system verification of brake pressure modulation and dynamic
+      load-dependent ratio on the vehicle dynamometer. Validates against
+      system requirements for axle-independent modulation and load-based
+      ratio adaptation.
+ tags: [braking, ebd, dynamometer] + fields: + method: automated-test + preconditions: + - Vehicle on chassis dynamometer with brake pressure transducers + - Vehicle loaded to GVW (Gross Vehicle Weight) + - Data acquisition system recording at 1 kHz + steps: + - step: 1 + action: Apply 50 % brake pedal at 100 km/h, measure front and rear pressure + expected: Independent pressure modulation with front/rear ratio matching load + - step: 2 + action: Repeat with vehicle at curb weight (reduced rear load) + expected: Ratio shifts towards front axle compared to GVW test + - step: 3 + action: Apply step pedal input, measure pressure response time + expected: Pressure responds within 10 ms control cycle at each axle + links: + - type: verifies + target: SYSREQ-1 + - type: verifies + target: SYSREQ-2 + + - id: SYSVER-2 + type: sys-verification + title: ABS wheel slip regulation system test + status: approved + description: > + Full system verification of ABS wheel slip regulation on the proving + ground. Validates against the system requirement for slip regulation + within the target band on multiple surface types. 
+ tags: [braking, abs, proving-ground] + fields: + method: manual-test + preconditions: + - Instrumented test vehicle on proving ground + - Low-mu (basalt tile), split-mu, and high-mu (dry asphalt) surfaces + - Optical wheel speed reference measurement system + - On-board data logger recording slip ratios and pressure commands + steps: + - step: 1 + action: Emergency stop from 80 km/h on dry asphalt + expected: No wheel lock-up, stopping distance within ECE R13-H limit + - step: 2 + action: Emergency stop from 60 km/h on wet basalt tiles (mu ~ 0.3) + expected: ABS activates, slip regulated within 10-20 % band + - step: 3 + action: Emergency stop from 60 km/h on split-mu surface + expected: ABS regulates each side independently, vehicle tracks straight + - step: 4 + action: Verify regulation onset timing + expected: Slip regulation achieved within 3 pressure cycles of lock-up onset + links: + - type: verifies + target: SYSREQ-3 diff --git a/examples/aspice/docs/sdd.md b/examples/aspice/docs/sdd.md new file mode 100644 index 0000000..fe6f669 --- /dev/null +++ b/examples/aspice/docs/sdd.md @@ -0,0 +1,112 @@ +--- +id: SDD-001 +type: design +title: Software Design Document — Electronic Braking System +status: approved +glossary: + EBD: Electronic Brake Force Distribution + ABS: Anti-lock Braking System + HCU: Hydraulic Control Unit + ECU: Electronic Control Unit + NVM: Non-Volatile Memory + WCET: Worst-Case Execution Time + DAC: Digital-to-Analog Converter + ADC: Analog-to-Digital Converter + IMU: Inertial Measurement Unit +--- + +# Software Design Document — Electronic Braking System + +## 1. Introduction + +This document describes the software design for the Electronic Braking +System (EBS), covering both the Electronic Brake Force Distribution (EBD) +and Anti-lock Braking System (ABS) functions. The design is structured +into two major software architecture components, each decomposed into +detailed design units. + +## 2. 
Software Architecture Overview + +The braking software runs on a dual-core automotive microcontroller. +The architecture is divided into two components aligned with the V-model: + +- **[[SWARCH-1]]** — Brake Pressure Manager: responsible for computing + axle-level brake pressure demands based on driver input and load + distribution. Executes in the 10 ms periodic task on Core 0. + +- **[[SWARCH-2]]** — ABS Slip Controller: responsible for detecting + incipient wheel lock-up and modulating brake pressure to maintain + wheel slip within the target band. Executes in the 2 ms high-priority + task on Core 1. + +## 3. Detailed Design + +### 3.1 Pressure Demand Calculation + +The pressure demand function (**[[SWDD-1]]**) is the core of the EBD +subsystem. It converts driver pedal input into calibrated brake pressure +commands for the front and rear axles. + +**Algorithm outline:** + +1. Read the 12-bit ADC pedal position (0–4095). +2. Retrieve the current front/rear axle load ratio from the axle load + estimator. +3. Compute `front_demand = clamp(pedal * front_ratio, 0, 4095)`. +4. Compute `rear_demand = clamp(pedal * rear_ratio, 0, 4095)`. +5. Apply a rate limiter (maximum 500 LSB per 10 ms cycle) to prevent + hydraulic pressure spikes. +6. Write the results to the HCU valve driver output buffer. + +The rate limiter is critical for driver comfort and valve protection. +Calibration constants (ratio bounds, rate limit) are stored in NVM and +can be updated via the UDS WriteDataByIdentifier service. + +### 3.2 Wheel Slip Ratio and Phase Selection + +The slip controller (**[[SWDD-2]]**) implements the ABS regulation +algorithm. It runs at 500 Hz (2 ms cycle) to achieve the required +response time. + +**Slip ratio computation:** + +``` +slip[i] = (v_ref - v_wheel[i]) / v_ref +``` + +where `v_ref` is the estimated vehicle reference speed (maximum of all +wheel speeds) and `v_wheel[i]` is the speed of wheel `i`. 
A +divide-by-zero guard clamps the ratio to 0.0 when the vehicle is +stationary. + +**Phase state machine (per wheel):** + +| Current State | Condition | Next State | +|---------------|------------------------------|------------| +| NORMAL | slip > threshold_high | BUILD | +| BUILD | slip > threshold_release | HOLD | +| HOLD | slip < threshold_low | RELEASE | +| RELEASE | slip < threshold_normal | NORMAL | + +Threshold values are calibratable NVM parameters with hysteresis to +prevent oscillation at state boundaries. + +## 4. Interface Summary + +The two architecture components interact through shared data structures +protected by the AUTOSAR RTE mechanism: + +| Interface | Producer | Consumer | Rate | +|------------------------|---------------|----------------|--------| +| Axle load estimate | [[SWARCH-1]] | [[SWARCH-1]] | 10 ms | +| Pressure demand output | [[SWARCH-1]] | HCU driver | 10 ms | +| Wheel speed input | Sensor HAL | [[SWARCH-2]] | 2 ms | +| Slip status output | [[SWARCH-2]] | Vehicle bus | 10 ms | +| HCU phase commands | [[SWARCH-2]] | HCU driver | 2 ms | + +## 5. 
Resource Budgets + +| Component | Stack | WCET | Priority | +|----------------|--------|---------|----------| +| [[SWARCH-1]] | 2 KiB | 200 us | Medium | +| [[SWARCH-2]] | 4 KiB | 400 us | High | diff --git a/examples/aspice/results/run-001.yaml b/examples/aspice/results/run-001.yaml new file mode 100644 index 0000000..8537b8b --- /dev/null +++ b/examples/aspice/results/run-001.yaml @@ -0,0 +1,26 @@ +run: + id: run-2026-03-01 + timestamp: "2026-03-01T09:15:00Z" + source: "CI pipeline #18" + environment: "HIL bench A" + commit: "a1b2c3d" +results: + - artifact: UVER-1 + status: pass + duration: "1.2s" + - artifact: UVER-2 + status: pass + duration: "2.8s" + - artifact: SWINTVER-1 + status: pass + duration: "4.5s" + - artifact: SWINTVER-2 + status: fail + message: "ABS activation latency exceeded 3 cycle threshold on ice surface" + duration: "6.1s" + - artifact: SWVER-1 + status: pass + duration: "12.3s" + - artifact: SWVER-2 + status: pass + duration: "15.7s" diff --git a/examples/aspice/results/run-002.yaml b/examples/aspice/results/run-002.yaml new file mode 100644 index 0000000..764a22b --- /dev/null +++ b/examples/aspice/results/run-002.yaml @@ -0,0 +1,38 @@ +run: + id: run-2026-03-05 + timestamp: "2026-03-05T14:30:00Z" + source: "CI pipeline #24" + environment: "HIL bench A" + commit: "e4f5g6h" +results: + - artifact: UVER-1 + status: pass + duration: "1.1s" + - artifact: UVER-2 + status: pass + duration: "2.6s" + - artifact: SWINTVER-1 + status: pass + duration: "4.2s" + - artifact: SWINTVER-2 + status: pass + duration: "5.8s" + message: "Fixed: ABS cycle threshold tuning resolved" + - artifact: SWVER-1 + status: pass + duration: "11.9s" + - artifact: SWVER-2 + status: pass + duration: "14.3s" + - artifact: SYSINTVER-1 + status: pass + duration: "22.1s" + - artifact: SYSINTVER-2 + status: skip + message: "Vehicle integration bench unavailable" + - artifact: SYSVER-1 + status: pass + duration: "45.0s" + - artifact: SYSVER-2 + status: blocked + message: "Proving 
ground access pending weather clearance" diff --git a/examples/aspice/rivet.yaml b/examples/aspice/rivet.yaml new file mode 100644 index 0000000..726188e --- /dev/null +++ b/examples/aspice/rivet.yaml @@ -0,0 +1,16 @@ +# Run: rivet --schemas ../../schemas validate +project: + name: aspice-braking-system + version: "1.0.0" + schemas: + - common + - aspice + +sources: + - path: artifacts + format: generic-yaml + +docs: + - docs + +results: results diff --git a/results/run-001.yaml b/results/run-001.yaml new file mode 100644 index 0000000..3875ed0 --- /dev/null +++ b/results/run-001.yaml @@ -0,0 +1,67 @@ +run: + id: run-001 + timestamp: "2026-03-08T00:00:00Z" + source: cargo test -p rivet-core + environment: macOS darwin aarch64 + commit: 9622f67 + +results: + # TEST-001: Store and model unit tests (diff, document) + - artifact: TEST-001 + status: pass + duration: "0.00s" + message: "14 unit tests passed (diff: 5, document: 9)" + + # TEST-002: STPA adapter and schema tests + - artifact: TEST-002 + status: pass + duration: "0.00s" + message: "5 tests passed (stpa_roundtrip.rs)" + + # TEST-003: Schema validation and merge tests + - artifact: TEST-003 + status: pass + duration: "0.01s" + message: "4 integration tests passed (schema merge, cybersecurity, ASPICE rules)" + + # TEST-004: Link graph and coverage tests + - artifact: TEST-004 + status: pass + duration: "0.01s" + message: "8 tests passed (coverage: 4, link graph: 2, matrix: 2)" + + # TEST-005: ReqIF roundtrip tests + - artifact: TEST-005 + status: pass + duration: "0.01s" + message: "5 tests passed (unit: 3, integration: 2)" + + # TEST-006: Property-based tests (proptest) + - artifact: TEST-006 + status: pass + duration: "0.04s" + message: "6 proptest properties verified (30-50 cases each)" + + # TEST-007: Integration test suite + - artifact: TEST-007 + status: pass + duration: "0.01s" + message: "18 integration tests passed" + + # TEST-008: Diff module tests + - artifact: TEST-008 + status: pass + duration: 
"0.00s"
+    message: "5 diff unit tests passed"
+
+  # TEST-009: Document system tests
+  - artifact: TEST-009
+    status: pass
+    duration: "0.00s"
+    message: "9 document unit tests passed"
+
+  # TEST-010: Results model tests
+  - artifact: TEST-010
+    status: pass
+    duration: "0.00s"
+    message: "9 results unit tests passed"
diff --git a/rivet-cli/Cargo.toml b/rivet-cli/Cargo.toml
index 2b744d1..357f919 100644
--- a/rivet-cli/Cargo.toml
+++ b/rivet-cli/Cargo.toml
@@ -29,3 +29,4 @@ tokio = { workspace = true }
 tower-http = { workspace = true }
 etch = { path = "../etch" }
 petgraph = { workspace = true }
+urlencoding = { workspace = true }
diff --git a/rivet-cli/assets/wasm/README.md b/rivet-cli/assets/wasm/README.md
new file mode 100644
index 0000000..3ba2891
--- /dev/null
+++ b/rivet-cli/assets/wasm/README.md
@@ -0,0 +1,28 @@
+# WASM Components
+
+This directory holds pre-built WASM components for rivet adapters.
+
+## spar-wasm
+
+The `spar_wasm.wasm` component provides AADL parsing, analysis, and SVG rendering.
+
+### Building from source
+
+```bash
+cd /path/to/spar
+cargo build --target wasm32-wasip2 -p spar-wasm --release
+cp target/wasm32-wasip2/release/spar_wasm.wasm /path/to/sdlc/rivet-cli/assets/wasm/
+```
+
+### Downloading from GitHub releases
+
+```bash
+./scripts/fetch-wasm.sh
+```
+
+### jco transpilation (for browser use)
+
+```bash
+npx @bytecodealliance/jco transpile rivet-cli/assets/wasm/spar_wasm.wasm \
+  -o rivet-cli/assets/wasm/js/
+```
diff --git a/rivet-cli/src/docs.rs b/rivet-cli/src/docs.rs
new file mode 100644
index 0000000..abcf71b
--- /dev/null
+++ b/rivet-cli/src/docs.rs
@@ -0,0 +1,493 @@
+//! `rivet docs` — built-in searchable documentation.
+//!
+//! All documentation is embedded in the binary. Topics are searchable
+//! via `rivet docs --grep <pattern>` (like a built-in rg).
+ +use rivet_core::embedded; + +// ── Topic registry ────────────────────────────────────────────────────── + +struct DocTopic { + slug: &'static str, + title: &'static str, + category: &'static str, + content: &'static str, +} + +const TOPICS: &[DocTopic] = &[ + DocTopic { + slug: "artifact-format", + title: "YAML artifact file format", + category: "Reference", + content: ARTIFACT_FORMAT_DOC, + }, + DocTopic { + slug: "rivet-yaml", + title: "rivet.yaml configuration reference", + category: "Reference", + content: RIVET_YAML_DOC, + }, + DocTopic { + slug: "cli", + title: "CLI command reference", + category: "Reference", + content: CLI_DOC, + }, + DocTopic { + slug: "json-output", + title: "JSON output format and jq examples", + category: "Reference", + content: JSON_DOC, + }, + DocTopic { + slug: "schema/common", + title: "Common base fields and link types", + category: "Schemas", + content: embedded::SCHEMA_COMMON, + }, + DocTopic { + slug: "schema/dev", + title: "Development tracking schema (requirement, design-decision, feature)", + category: "Schemas", + content: embedded::SCHEMA_DEV, + }, + DocTopic { + slug: "schema/stpa", + title: "STPA safety analysis schema (10 types)", + category: "Schemas", + content: embedded::SCHEMA_STPA, + }, + DocTopic { + slug: "schema/aspice", + title: "Automotive SPICE schema (14 types, ASPICE 4.0)", + category: "Schemas", + content: embedded::SCHEMA_ASPICE, + }, + DocTopic { + slug: "schema/cybersecurity", + title: "Cybersecurity schema (SEC.1-4, 10 types)", + category: "Schemas", + content: embedded::SCHEMA_CYBERSECURITY, + }, + DocTopic { + slug: "schema/aadl", + title: "AADL architecture schema (spar integration)", + category: "Schemas", + content: embedded::SCHEMA_AADL, + }, +]; + +// ── Embedded documentation ────────────────────────────────────────────── + +const ARTIFACT_FORMAT_DOC: &str = r#"# Artifact YAML Format + +Artifacts are stored in YAML files under the `artifacts/` directory. 
+Each file contains an `artifacts:` key with a list of artifact objects. + +## Structure + +```yaml +artifacts: + - id: REQ-001 # Unique identifier (required) + type: requirement # Artifact type from schema (required) + title: Short title # Human-readable title (required) + status: draft # Lifecycle status (optional) + description: > # Detailed description (optional, supports markdown) + Multi-line description here. + tags: [safety, core] # Categorization tags (optional) + links: # Traceability links (optional) + - type: satisfies # Link type from schema + target: FEAT-001 # Target artifact ID + fields: # Type-specific fields (defined by schema) + priority: must + category: functional +``` + +## ID Conventions + +- Use uppercase prefix + number: `REQ-001`, `DD-002`, `FEAT-003` +- Prefix typically matches the artifact type abbreviation +- IDs must be unique across all artifact files in the project + +## Multiple Files + +Split artifacts across files by domain or lifecycle phase: +- `artifacts/requirements.yaml` +- `artifacts/architecture.yaml` +- `artifacts/verification.yaml` + +All files under configured source paths are loaded and merged. 
+ +## Field Types + +| Type | Description | Example | +|------------|---------------------------------|----------------------| +| string | Single-line text | `priority: must` | +| text | Multi-line text (use `>`) | `description: >` | +| number | Numeric value | `latency-ms: 50` | +| boolean | True/false | `safety-relevant: true` | +| structured | Nested YAML object | `properties: {}` | +| enum | One of allowed values | `status: approved` | +| list | YAML list | `tags: [a, b]` | + +## Link Types + +Links express traceability relationships between artifacts: + +| Link Type | Inverse | Use Case | +|----------------|----------------|------------------------------------| +| satisfies | satisfied-by | Feature satisfies a requirement | +| derives-from | derived-into | SW req derives from system req | +| verifies | verified-by | Test verifies a requirement | +| implements | implemented-by | Decision implements a requirement | +| allocated-to | allocated-from | Req allocated to arch component | +| traces-to | traced-from | General traceability | +| mitigates | mitigated-by | Control mitigates a hazard | +| constrained-by | constrains | Action constrained by constraint | +"#; + +const RIVET_YAML_DOC: &str = r#"# rivet.yaml Configuration + +The `rivet.yaml` file defines the project configuration. 
+ +## Structure + +```yaml +project: + name: my-project # Project name + version: "0.1.0" # Version string + schemas: # Schemas to load (merged in order) + - common # Always include — base fields, link types + - dev # Development tracking types + # - aspice # ASPICE V-model types + # - stpa # STPA safety analysis types + # - cybersecurity # ISO 21434 types + # - aadl # AADL architecture types + +sources: # Artifact sources + - path: artifacts # Directory or file path + format: generic-yaml # Adapter: generic-yaml, stpa-yaml, aadl, reqif + # config: # Adapter-specific config (optional) + # key: value + +docs: # Documentation directories (for [[ID]] scanning) + - docs + +results: results # Test results directory (JUnit XML, LCOV) +``` + +## Available Schemas + +| Name | Types | Description | +|----------------|-------|------------------------------------| +| common | 0 | Base fields, 8 link types | +| dev | 3 | requirement, design-decision, feature | +| stpa | 10 | STPA losses through scenarios | +| aspice | 14 | ASPICE 4.0 SYS.1-5, SWE.1-6 | +| cybersecurity | 10 | SEC.1-4, TARA, ISO 21434 | +| aadl | 3 | AADL components, analysis, flows | + +## Available Adapters + +| Format | Description | +|--------------|-------------------------------------| +| generic-yaml | Canonical YAML artifact files | +| stpa-yaml | Meld STPA safety analysis YAML | +| aadl | AADL files via spar (library) | +| reqif | ReqIF 1.2 XML import/export | +"#; + +const CLI_DOC: &str = r#"# CLI Command Reference + +## Project Commands + +``` +rivet validate Validate all artifacts against schemas +rivet list [-t TYPE] List artifacts (filter by type/status) +rivet stats Summary statistics and orphan detection +rivet coverage Traceability coverage report +rivet matrix --from X --to Y Traceability matrix between types +rivet diff Compare artifact versions +rivet export -f FORMAT Export to reqif or generic-yaml +rivet serve [-P PORT] Start HTMX dashboard (default: 3000) +``` + +## Schema Commands + +``` 
+rivet schema list List all artifact types +rivet schema show TYPE Show type details with example YAML +rivet schema links List all link types with inverses +rivet schema rules List all traceability rules +``` + +## Documentation Commands + +``` +rivet docs List available documentation topics +rivet docs TOPIC Show a specific topic +rivet docs --grep PATTERN Search across all documentation +``` + +## Scaffolding + +``` +rivet init Initialize a new project (dev preset) +rivet init --preset aspice Initialize with ASPICE schema + examples +rivet context Generate .rivet/agent-context.md +``` + +## Global Flags + +``` +-p, --project PATH Project directory (default: .) + --schemas PATH Schemas directory override +-v, --verbose Increase verbosity (-v info, -vv debug) +``` + +## JSON Output + +Most commands support `--format json` for machine-readable output: + +``` +rivet schema list --format json +rivet schema show sw-req --format json +rivet validate --format json +rivet list --format json +rivet stats --format json +rivet coverage --format json +rivet docs --grep PATTERN --format json +``` +"#; + +const JSON_DOC: &str = r#"# JSON Output Format & jq Examples + +All `--format json` output follows a consistent envelope: + +```json +{ + "command": "command-name", + "data": { ... 
} +} +``` + +## jq Recipes + +### List all artifact type names +```bash +rivet schema list --format json | jq -r '.artifact_types[].name' +``` + +### Show fields for a specific type +```bash +rivet schema show sw-req --format json | jq '.artifact_type.fields[]' +``` + +### Get required fields only +```bash +rivet schema show sw-req --format json | jq '[.artifact_type.fields[] | select(.required)]' +``` + +### List all link types and inverses +```bash +rivet schema links --format json | jq -r '.link_types[] | "\(.name) <-> \(.inverse // "none")"' +``` + +### Get validation errors only +```bash +rivet validate --format json | jq '[.diagnostics[] | select(.severity == "error")]' +``` + +### Count artifacts by type +```bash +rivet stats --format json | jq '.types' +``` + +### List artifacts of a specific type +```bash +rivet list -t requirement --format json | jq -r '.artifacts[].id' +``` + +### Get uncovered artifacts from coverage +```bash +rivet coverage --format json | jq '[.entries[] | select(.uncovered_ids | length > 0)]' +``` + +### Search docs and get matching lines +```bash +rivet docs --grep "verification" --format json | jq -r '.matches[] | "\(.topic):\(.line): \(.text)"' +``` + +### Generate a type reference table +```bash +rivet schema list --format json | jq -r '.artifact_types[] | [.name, .description] | @tsv' +``` + +### Check if validation passes +```bash +rivet validate --format json | jq -e '.errors == 0' > /dev/null && echo "PASS" || echo "FAIL" +``` +"#; + +// ── Public API ────────────────────────────────────────────────────────── + +/// List all available documentation topics. 
+pub fn list_topics(format: &str) -> String {
+    if format == "json" {
+        let items: Vec<serde_json::Value> = TOPICS
+            .iter()
+            .map(|t| {
+                serde_json::json!({
+                    "slug": t.slug,
+                    "title": t.title,
+                    "category": t.category,
+                })
+            })
+            .collect();
+        return serde_json::to_string_pretty(&serde_json::json!({
+            "command": "docs-list",
+            "topics": items,
+        }))
+        .unwrap_or_default();
+    }
+
+    let mut out = String::new();
+    out.push_str("Available documentation topics:\n\n");
+
+    let mut current_cat = "";
+    for t in TOPICS {
+        if t.category != current_cat {
+            if !current_cat.is_empty() {
+                out.push('\n');
+            }
+            out.push_str(&format!(" {}\n", t.category));
+            current_cat = t.category;
+        }
+        out.push_str(&format!(" {:<24} {}\n", t.slug, t.title));
+    }
+
+    out.push_str("\nUsage:\n");
+    out.push_str(" rivet docs <topic> Show a topic\n");
+    out.push_str(" rivet docs --grep <pattern> Search across all docs\n");
+    out.push_str(" rivet docs -f json Machine-readable output\n");
+    out
+}
+
+/// Show a specific topic.
+pub fn show_topic(slug: &str, format: &str) -> String {
+    let Some(topic) = TOPICS.iter().find(|t| t.slug == slug) else {
+        let mut out = format!("Unknown topic: {slug}\n\nAvailable topics:\n");
+        for t in TOPICS {
+            out.push_str(&format!(" {:<24} {}\n", t.slug, t.title));
+        }
+        return out;
+    };
+
+    if format == "json" {
+        return serde_json::to_string_pretty(&serde_json::json!({
+            "command": "docs-show",
+            "topic": topic.slug,
+            "title": topic.title,
+            "category": topic.category,
+            "content": topic.content,
+        }))
+        .unwrap_or_default();
+    }
+
+    let mut out = String::new();
+    out.push_str(&format!("# {} — {}\n\n", topic.slug, topic.title));
+    out.push_str(topic.content);
+    out
+}
+
+/// Search across all documentation for a pattern (like rg).
+pub fn grep_docs(pattern: &str, format: &str, context: usize) -> String {
+    let pattern_lower = pattern.to_lowercase();
+
+    let mut all_matches: Vec<GrepMatch> = Vec::new();
+
+    for topic in TOPICS {
+        for (i, line) in topic.content.lines().enumerate() {
+            if line.to_lowercase().contains(&pattern_lower) {
+                let lines: Vec<&str> = topic.content.lines().collect();
+                let start = i.saturating_sub(context);
+                let end = (i + context + 1).min(lines.len());
+                let context_before: Vec<String> =
+                    lines[start..i].iter().map(|l| l.to_string()).collect();
+                let context_after: Vec<String> =
+                    lines[(i + 1)..end].iter().map(|l| l.to_string()).collect();
+
+                all_matches.push(GrepMatch {
+                    topic: topic.slug,
+                    line_num: i + 1,
+                    text: line.to_string(),
+                    context_before,
+                    context_after,
+                });
+            }
+        }
+    }
+
+    if format == "json" {
+        let items: Vec<serde_json::Value> = all_matches
+            .iter()
+            .map(|m| {
+                serde_json::json!({
+                    "topic": m.topic,
+                    "line": m.line_num,
+                    "text": m.text,
+                    "context_before": m.context_before,
+                    "context_after": m.context_after,
+                })
+            })
+            .collect();
+        return serde_json::to_string_pretty(&serde_json::json!({
+            "command": "docs-grep",
+            "pattern": pattern,
+            "match_count": items.len(),
+            "matches": items,
+        }))
+        .unwrap_or_default();
+    }
+
+    if all_matches.is_empty() {
+        return format!("No matches for: {pattern}\n");
+    }
+
+    let mut out = String::new();
+    let mut prev_topic = "";
+    for m in &all_matches {
+        if m.topic != prev_topic {
+            if !prev_topic.is_empty() {
+                out.push_str("--\n");
+            }
+            prev_topic = m.topic;
+        }
+        for (j, cl) in m.context_before.iter().enumerate() {
+            let ln = m.line_num - m.context_before.len() + j;
+            out.push_str(&format!("{}:{}: {}\n", m.topic, ln, cl));
+        }
+        out.push_str(&format!("{}:{}> {}\n", m.topic, m.line_num, m.text));
+        for (j, cl) in m.context_after.iter().enumerate() {
+            out.push_str(&format!("{}:{}: {}\n", m.topic, m.line_num + 1 + j, cl));
+        }
+    }
+    out.push_str(&format!(
+        "\n{} matches across {} topics\n",
+        all_matches.len(),
+        {
+            let mut topics: Vec<&str>
+                = all_matches.iter().map(|m| m.topic).collect();
+            topics.dedup();
+            topics.len()
+        }
+    ));
+    out
+}
+
+struct GrepMatch {
+    topic: &'static str,
+    line_num: usize,
+    text: String,
+    context_before: Vec<String>,
+    context_after: Vec<String>,
+}
diff --git a/rivet-cli/src/main.rs b/rivet-cli/src/main.rs
index 33dcae0..1c6a06d 100644
--- a/rivet-cli/src/main.rs
+++ b/rivet-cli/src/main.rs
@@ -4,13 +4,18 @@ use std::process::ExitCode;
 
 use anyhow::{Context, Result};
 use clap::{Parser, Subcommand};
+use rivet_core::coverage;
 use rivet_core::diff::{ArtifactDiff, DiagnosticDiff};
+use rivet_core::document::{self, DocumentStore};
 use rivet_core::links::LinkGraph;
 use rivet_core::matrix::{self, Direction};
+use rivet_core::results::{self, ResultStore};
 use rivet_core::schema::Severity;
 use rivet_core::store::Store;
 use rivet_core::validate;
 
+mod docs;
+mod schema_cmd;
 mod serve;
 
 #[derive(Parser)]
@@ -34,8 +39,31 @@ struct Cli {
 
 #[derive(Subcommand)]
 enum Command {
+    /// Initialize a new rivet project
+    Init {
+        /// Project name (defaults to directory name)
+        #[arg(long)]
+        name: Option<String>,
+
+        /// Preset: dev (default), aspice, stpa, cybersecurity, aadl
+        #[arg(long, default_value = "dev")]
+        preset: String,
+
+        /// Schemas to include (overrides preset if given)
+        #[arg(long, value_delimiter = ',')]
+        schema: Vec<String>,
+
+        /// Directory to initialize (defaults to current directory)
+        #[arg(long, default_value = ".")]
+        dir: PathBuf,
+    },
+
     /// Validate artifacts against schemas
-    Validate,
+    Validate {
+        /// Output format: "text" (default) or "json"
+        #[arg(short, long, default_value = "text")]
+        format: String,
+    },
 
     /// List artifacts, optionally filtered by type
     List {
@@ -46,10 +74,29 @@ enum Command {
         /// Filter by status
         #[arg(short, long)]
         status: Option<String>,
+
+        /// Output format: "text" (default) or "json"
+        #[arg(short, long, default_value = "text")]
+        format: String,
     },
 
     /// Show artifact summary statistics
-    Stats,
+    Stats {
+        /// Output format: "text" (default) or "json"
+        #[arg(short,
long, default_value = "text")] + format: String, + }, + + /// Show traceability coverage report + Coverage { + /// Output format: "table" (default) or "json" + #[arg(short, long, default_value = "table")] + format: String, + + /// Exit with failure if overall coverage is below this percentage + #[arg(long)] + fail_under: Option, + }, /// Generate a traceability matrix Matrix { @@ -68,6 +115,10 @@ enum Command { /// Direction: "forward" or "backward" #[arg(long, default_value = "backward")] direction: String, + + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, }, /// Load and validate STPA files directly (without rivet.yaml) @@ -89,6 +140,10 @@ enum Command { /// Path to the head artifact directory (newer version) #[arg(long)] head: Option, + + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, }, /// Export artifacts to a specified format @@ -102,6 +157,33 @@ enum Command { output: Option, }, + /// Introspect loaded schemas (types, links, rules) + Schema { + #[command(subcommand)] + action: SchemaAction, + }, + + /// Built-in documentation (topics, search) + Docs { + /// Topic slug to display (omit for topic list) + topic: Option, + + /// Search across all docs (like grep) + #[arg(long)] + grep: Option, + + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, + + /// Context lines around grep matches + #[arg(short = 'C', long, default_value = "2")] + context: usize, + }, + + /// Generate .rivet/agent-context.md from current project state + Context, + /// Start the HTMX-powered dashboard server Serve { /// Port to listen on @@ -126,6 +208,36 @@ enum Command { }, } +#[derive(Subcommand)] +enum SchemaAction { + /// List all artifact types + List { + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, + }, + /// Show detailed info for an 
artifact type + Show { + /// Artifact type name + name: String, + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, + }, + /// List all link types with inverses + Links { + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, + }, + /// List all traceability rules + Rules { + /// Output format: "text" (default) or "json" + #[arg(short, long, default_value = "text")] + format: String, + }, +} + fn main() -> ExitCode { let cli = Cli::parse(); @@ -154,24 +266,76 @@ fn main() -> ExitCode { } fn run(cli: Cli) -> Result { + // Commands that don't need a loaded project. + if let Command::Init { + name, + preset, + schema, + dir, + } = &cli.command + { + return cmd_init(name.as_deref(), preset, schema, dir); + } + if let Command::Docs { + topic, + grep, + format, + context, + } = &cli.command + { + return cmd_docs(topic.as_deref(), grep.as_deref(), format, *context); + } + if let Command::Context = &cli.command { + return cmd_context(&cli); + } + match &cli.command { + Command::Init { .. } | Command::Docs { .. 
} | Command::Context => unreachable!(), Command::Stpa { path, schema } => cmd_stpa(path, schema.as_deref(), &cli), - Command::Validate => cmd_validate(&cli), - Command::List { r#type, status } => cmd_list(&cli, r#type.as_deref(), status.as_deref()), - Command::Stats => cmd_stats(&cli), + Command::Validate { format } => cmd_validate(&cli, format), + Command::List { + r#type, + status, + format, + } => cmd_list(&cli, r#type.as_deref(), status.as_deref(), format), + Command::Stats { format } => cmd_stats(&cli, format), + Command::Coverage { format, fail_under } => cmd_coverage(&cli, format, fail_under.as_ref()), Command::Matrix { from, to, link, direction, - } => cmd_matrix(&cli, from, to, link.as_deref(), direction), - Command::Diff { base, head } => cmd_diff(&cli, base.as_deref(), head.as_deref()), + format, + } => cmd_matrix(&cli, from, to, link.as_deref(), direction, format), + Command::Diff { base, head, format } => { + cmd_diff(&cli, base.as_deref(), head.as_deref(), format) + } Command::Export { format, output } => cmd_export(&cli, format, output.as_deref()), + Command::Schema { action } => cmd_schema(&cli, action), Command::Serve { port } => { let port = *port; - let (store, schema, graph) = load_project(&cli)?; + let ( + store, + schema, + graph, + doc_store, + result_store, + project_name, + project_path, + schemas_dir, + ) = load_project_full(&cli)?; let rt = tokio::runtime::Runtime::new().context("failed to create tokio runtime")?; - rt.block_on(serve::run(store, schema, graph, port))?; + rt.block_on(serve::run( + store, + schema, + graph, + doc_store, + result_store, + project_name, + project_path, + schemas_dir, + port, + ))?; Ok(true) } #[cfg(feature = "wasm")] @@ -183,6 +347,349 @@ fn run(cli: Cli) -> Result { } } +/// Preset configuration for `rivet init`. 
+struct InitPreset { + schemas: Vec<&'static str>, + /// Each entry: (filename, yaml_content) + sample_files: Vec<(&'static str, &'static str)>, +} + +fn resolve_preset(preset: &str) -> Result { + match preset { + "dev" => Ok(InitPreset { + schemas: vec!["common", "dev"], + sample_files: vec![("requirements.yaml", DEV_SAMPLE)], + }), + "aspice" => Ok(InitPreset { + schemas: vec!["common", "aspice"], + sample_files: vec![("requirements.yaml", ASPICE_SAMPLE)], + }), + "stpa" => Ok(InitPreset { + schemas: vec!["common", "stpa"], + sample_files: vec![("safety.yaml", STPA_SAMPLE)], + }), + "cybersecurity" => Ok(InitPreset { + schemas: vec!["common", "cybersecurity"], + sample_files: vec![("security.yaml", CYBERSECURITY_SAMPLE)], + }), + "aadl" => Ok(InitPreset { + schemas: vec!["common", "dev", "aadl"], + sample_files: vec![("architecture.yaml", AADL_SAMPLE)], + }), + other => anyhow::bail!( + "unknown preset: '{other}' (valid: dev, aspice, stpa, cybersecurity, aadl)" + ), + } +} + +const DEV_SAMPLE: &str = "\ +artifacts: + - id: REQ-001 + type: requirement + title: First requirement + status: draft + description: > + Describe what the system shall do. + tags: [core] + fields: + priority: must + category: functional + + - id: FEAT-001 + type: feature + title: Initial feature + status: draft + description: > + A user-visible capability delivered by the project. + fields: + phase: phase-1 + links: + - type: satisfies + target: REQ-001 +"; + +const ASPICE_SAMPLE: &str = "\ +artifacts: + - id: SYSREQ-001 + type: system-req + title: System shall provide data logging + status: draft + description: > + The system shall log all sensor data at 100Hz to non-volatile storage. + fields: + req-type: functional + priority: must + verification-criteria: > + Verify that sensor data is recorded at 100Hz under nominal load. 
+ + - id: SWREQ-001 + type: sw-req + title: Logging service shall buffer sensor frames + status: draft + description: > + The logging service shall maintain a ring buffer of at least 1000 + sensor frames to absorb transient write latency. + fields: + req-type: functional + priority: must + links: + - type: derives-from + target: SYSREQ-001 + + - id: SWARCH-001 + type: sw-arch-component + title: SensorLogger component + status: draft + description: > + Software component responsible for buffering and persisting sensor + data frames. + links: + - type: allocated-from + target: SWREQ-001 +"; + +const STPA_SAMPLE: &str = "\ +artifacts: + - id: L-001 + type: loss + title: Loss of vehicle control + status: draft + description: > + Driver loses ability to control vehicle trajectory, potentially + resulting in collision or road departure. + fields: + stakeholders: [driver, passengers, other-road-users] + + - id: H-001 + type: hazard + title: Unintended acceleration while stationary + status: draft + description: > + Vehicle accelerates without driver command while the vehicle is + stationary, together with traffic conditions, leading to L-001. + fields: + severity: catastrophic + links: + - type: leads-to-loss + target: L-001 + + - id: UCA-001 + type: uca + title: Throttle controller provides torque request when vehicle is stationary and driver has not pressed accelerator + status: draft + description: > + Providing a torque request while stationary and no pedal input + causes unintended acceleration (H-001). + fields: + uca-type: providing + context: > + Vehicle is stationary, brake applied, accelerator pedal not pressed. + links: + - type: issued-by + target: CTRL-001 + - type: leads-to-hazard + target: H-001 + + - id: CTRL-001 + type: controller + title: Throttle controller + status: draft + description: > + ECU responsible for computing torque requests from pedal position + and engine state. 
+ fields: + controller-type: automated +"; + +const CYBERSECURITY_SAMPLE: &str = "\ +artifacts: + - id: TS-001 + type: threat-scenario + title: Spoofed CAN messages inject false sensor readings + status: draft + description: > + An attacker with physical access to the OBD-II port sends + crafted CAN frames that spoof wheel-speed sensor values. + fields: + attack-vector: physical + attack-feasibility: medium + impact: severe + links: + - type: threatens + target: ASSET-001 + + - id: ASSET-001 + type: asset + title: Wheel-speed sensor data + status: draft + description: > + CAN bus messages carrying wheel-speed sensor readings used + by ABS and ESC controllers. + fields: + asset-type: data + cybersecurity-properties: [integrity, availability] + + - id: SECGOAL-001 + type: cybersecurity-goal + title: Ensure integrity of wheel-speed data on CAN bus + status: draft + description: > + Wheel-speed sensor messages shall be authenticated to prevent + injection of spoofed values. + fields: + cal: \"3\" + links: + - type: mitigates + target: TS-001 +"; + +const AADL_SAMPLE: &str = "\ +artifacts: + - id: REQ-001 + type: requirement + title: Sensor data acquisition + status: draft + description: > + The system shall acquire sensor data at a minimum rate of 100Hz. + fields: + priority: must + category: functional + + - id: AADL-001 + type: aadl-component + title: sensor_acquisition.impl + status: draft + description: > + AADL process implementation for sensor data acquisition, + containing periodic threads for each sensor channel. + fields: + category: process + aadl-package: sensor_subsystem + classifier-kind: implementation + links: + - type: allocated-from + target: REQ-001 +"; + +/// Initialize a new rivet project. +fn cmd_init( + name: Option<&str>, + preset: &str, + schema_override: &[String], + dir: &std::path::Path, +) -> Result { + let dir = if dir == std::path::Path::new(".") { + std::env::current_dir().context("resolving current directory")? 
+    } else {
+        dir.to_path_buf()
+    };
+
+    let project_name = name.map(|s| s.to_string()).unwrap_or_else(|| {
+        dir.file_name()
+            .map(|n| n.to_string_lossy().into_owned())
+            .unwrap_or_else(|| "my-project".to_string())
+    });
+
+    // Check for existing rivet.yaml
+    let config_path = dir.join("rivet.yaml");
+    if config_path.exists() {
+        eprintln!(
+            "warning: {} already exists, skipping init",
+            config_path.display()
+        );
+        return Ok(false);
+    }
+
+    // Resolve preset (before I/O so invalid preset fails early)
+    let init_preset = resolve_preset(preset)?;
+
+    // Ensure the target directory exists
+    std::fs::create_dir_all(&dir)
+        .with_context(|| format!("creating directory {}", dir.display()))?;
+
+    // Use --schema override if provided, otherwise use preset defaults
+    let schemas: Vec<String> = if schema_override.is_empty() {
+        init_preset.schemas.iter().map(|s| s.to_string()).collect()
+    } else {
+        schema_override.to_vec()
+    };
+
+    // Build schema list for the config
+    let schema_entries: String = schemas
+        .iter()
+        .map(|s| format!("    - {s}"))
+        .collect::<Vec<_>>()
+        .join("\n");
+
+    // Write rivet.yaml
+    let config_content = format!(
+        "\
+project:
+  name: {project_name}
+  version: \"0.1.0\"
+  schemas:
+{schema_entries}
+
+sources:
+  - path: artifacts
+    format: generic-yaml
+"
+    );
+    std::fs::write(&config_path, &config_content)
+        .with_context(|| format!("writing {}", config_path.display()))?;
+    println!(" created {}", config_path.display());
+
+    // Create artifacts/ directory with preset-specific sample files
+    let artifacts_dir = dir.join("artifacts");
+    std::fs::create_dir_all(&artifacts_dir)
+        .with_context(|| format!("creating {}", artifacts_dir.display()))?;
+
+    for (filename, content) in &init_preset.sample_files {
+        let path = artifacts_dir.join(filename);
+        std::fs::write(&path, content).with_context(|| format!("writing {}", path.display()))?;
+        println!(" created {}", path.display());
+    }
+
+    // Create docs/ directory with a sample document
+    let docs_dir =
dir.join("docs"); + std::fs::create_dir_all(&docs_dir) + .with_context(|| format!("creating {}", docs_dir.display()))?; + + let sample_doc_path = docs_dir.join("getting-started.md"); + let sample_doc = format!( + "\ +# {project_name} + +Getting started with your rivet project. + +## Overview + +This project uses [rivet](https://github.com/pulseengine/rivet) for SDLC artifact +traceability and validation. Artifacts are stored as YAML files in `artifacts/` and +validated against schemas listed in `rivet.yaml`. + +## Quick start + +```bash +rivet validate # Validate all artifacts +rivet list # List all artifacts +rivet stats # Show summary statistics +``` +" + ); + std::fs::write(&sample_doc_path, &sample_doc) + .with_context(|| format!("writing {}", sample_doc_path.display()))?; + println!(" created {}", sample_doc_path.display()); + + println!( + "\nInitialized rivet project '{}' in {} (preset: {preset})", + project_name, + dir.display() + ); + + Ok(true) +} + /// Load STPA files directly and validate them. fn cmd_stpa( stpa_dir: &std::path::Path, @@ -256,11 +763,10 @@ fn cmd_stpa( } /// Validate a full project (with rivet.yaml). 
-fn cmd_validate(cli: &Cli) -> Result<bool> {
-    let (store, schema, graph) = load_project(cli)?;
-    let diagnostics = validate::validate(&store, &schema, &graph);
-
-    print_diagnostics(&diagnostics);
+fn cmd_validate(cli: &Cli, format: &str) -> Result<bool> {
+    let (store, schema, graph, doc_store) = load_project_with_docs(cli)?;
+    let mut diagnostics = validate::validate(&store, &schema, &graph);
+    diagnostics.extend(validate::validate_documents(&doc_store, &store));
 
     let errors = diagnostics
         .iter()
@@ -270,19 +776,59 @@ fn cmd_validate(cli: &Cli) -> Result<bool> {
         .iter()
         .filter(|d| d.severity == Severity::Warning)
         .count();
+    let infos = diagnostics
+        .iter()
+        .filter(|d| d.severity == Severity::Info)
+        .count();
 
-    println!();
-    if errors > 0 {
-        println!("Result: FAIL ({} errors, {} warnings)", errors, warnings);
-        Ok(false)
+    if format == "json" {
+        let diag_json: Vec<serde_json::Value> = diagnostics
+            .iter()
+            .map(|d| {
+                serde_json::json!({
+                    "severity": format!("{:?}", d.severity).to_lowercase(),
+                    "artifact_id": d.artifact_id,
+                    "message": d.message,
+                })
+            })
+            .collect();
+        let output = serde_json::json!({
+            "command": "validate",
+            "errors": errors,
+            "warnings": warnings,
+            "infos": infos,
+            "diagnostics": diag_json,
+        });
+        println!("{}", serde_json::to_string_pretty(&output).unwrap());
     } else {
-        println!("Result: PASS ({} warnings)", warnings);
-        Ok(true)
+        if !doc_store.is_empty() {
+            println!(
+                "Loaded {} documents with {} artifact references",
+                doc_store.len(),
+                doc_store.all_references().len()
+            );
+        }
+
+        print_diagnostics(&diagnostics);
+
+        println!();
+        if errors > 0 {
+            println!("Result: FAIL ({} errors, {} warnings)", errors, warnings);
+        } else {
+            println!("Result: PASS ({} warnings)", warnings);
+        }
     }
+
+    Ok(errors == 0)
 }
 
 /// List artifacts.
-fn cmd_list(cli: &Cli, type_filter: Option<&str>, status_filter: Option<&str>) -> Result { +fn cmd_list( + cli: &Cli, + type_filter: Option<&str>, + status_filter: Option<&str>, + format: &str, +) -> Result { let (store, _, _) = load_project(cli)?; let query = rivet_core::query::Query { @@ -293,34 +839,136 @@ fn cmd_list(cli: &Cli, type_filter: Option<&str>, status_filter: Option<&str>) - let results = rivet_core::query::execute(&store, &query); - for artifact in &results { - let status = artifact.status.as_deref().unwrap_or("-"); - let links = artifact.links.len(); - println!( - " {:20} {:25} {:12} {:3} links {}", - artifact.id, artifact.artifact_type, status, links, artifact.title - ); + if format == "json" { + let artifacts_json: Vec = results + .iter() + .map(|a| { + serde_json::json!({ + "id": a.id, + "type": a.artifact_type, + "title": a.title, + "status": a.status.as_deref().unwrap_or("-"), + "links": a.links.len(), + }) + }) + .collect(); + let output = serde_json::json!({ + "command": "list", + "count": results.len(), + "artifacts": artifacts_json, + }); + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } else { + for artifact in &results { + let status = artifact.status.as_deref().unwrap_or("-"); + let links = artifact.links.len(); + println!( + " {:20} {:25} {:12} {:3} links {}", + artifact.id, artifact.artifact_type, status, links, artifact.title + ); + } + println!("\n{} artifacts", results.len()); } - println!("\n{} artifacts", results.len()); Ok(true) } /// Print summary statistics. 
-fn cmd_stats(cli: &Cli) -> Result { +fn cmd_stats(cli: &Cli, format: &str) -> Result { let (store, _, graph) = load_project(cli)?; - print_stats(&store); let orphans = graph.orphans(&store); - if !orphans.is_empty() { - println!("\nOrphan artifacts (no links): {}", orphans.len()); - for id in &orphans { - println!(" {}", id); + + if format == "json" { + let mut types = serde_json::Map::new(); + let mut type_names: Vec<&str> = store.types().collect(); + type_names.sort(); + for t in &type_names { + types.insert(t.to_string(), serde_json::json!(store.count_by_type(t))); + } + let output = serde_json::json!({ + "command": "stats", + "total": store.len(), + "types": types, + "orphans": orphans, + "broken_links": graph.broken.len(), + }); + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } else { + print_stats(&store); + + if !orphans.is_empty() { + println!("\nOrphan artifacts (no links): {}", orphans.len()); + for id in &orphans { + println!(" {}", id); + } + } + + if !graph.broken.is_empty() { + println!("\nBroken links: {}", graph.broken.len()); + } + } + + Ok(true) +} + +/// Show traceability coverage report. 
+fn cmd_coverage(cli: &Cli, format: &str, fail_under: Option<&f64>) -> Result { + let (store, schema, graph) = load_project(cli)?; + let report = coverage::compute_coverage(&store, &schema, &graph); + + if format == "json" { + let json = report + .to_json() + .map_err(|e| anyhow::anyhow!("json serialization: {e}"))?; + println!("{json}"); + } else { + println!("Traceability Coverage Report\n"); + println!( + " {:<30} {:<20} {:>8} {:>8} {:>8}", + "Rule", "Source Type", "Covered", "Total", "%" + ); + println!(" {}", "-".repeat(80)); + + for entry in &report.entries { + println!( + " {:<30} {:<20} {:>8} {:>8} {:>7.1}%", + entry.rule_name, + entry.source_type, + entry.covered, + entry.total, + entry.percentage() + ); + } + + let overall = report.overall_coverage(); + println!(" {}", "-".repeat(80)); + println!(" {:<52} {:>7.1}%", "Overall (weighted)", overall); + + // Show uncovered artifacts + let has_uncovered = report.entries.iter().any(|e| !e.uncovered_ids.is_empty()); + if has_uncovered { + println!("\nUncovered artifacts:"); + for entry in &report.entries { + if !entry.uncovered_ids.is_empty() { + println!(" {} ({}):", entry.rule_name, entry.source_type); + for id in &entry.uncovered_ids { + println!(" {}", id); + } + } + } } } - if !graph.broken.is_empty() { - println!("\nBroken links: {}", graph.broken.len()); + if let Some(&threshold) = fail_under { + let overall = report.overall_coverage(); + if overall < threshold { + eprintln!( + "\nerror: overall coverage {:.1}% is below threshold {:.1}%", + overall, threshold + ); + return Ok(false); + } } Ok(true) @@ -333,6 +981,7 @@ fn cmd_matrix( to: &str, link_type: Option<&str>, direction: &str, + format: &str, ) -> Result { let (store, _schema, graph) = load_project(cli)?; @@ -350,26 +999,50 @@ fn cmd_matrix( let result = matrix::compute_matrix(&store, &graph, from, to, link, dir); - println!( - "Traceability: {} -> {} (via '{}')\n", - result.source_type, result.target_type, result.link_type - ); + if format == 
"json" { + let rows_json: Vec = result + .rows + .iter() + .map(|row| { + let targets: Vec<&str> = row.targets.iter().map(|t| t.id.as_str()).collect(); + serde_json::json!({ + "source_id": row.source_id, + "targets": targets, + }) + }) + .collect(); + let output = serde_json::json!({ + "command": "matrix", + "source_type": result.source_type, + "target_type": result.target_type, + "link_type": result.link_type, + "covered": result.covered, + "total": result.total, + "rows": rows_json, + }); + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } else { + println!( + "Traceability: {} -> {} (via '{}')\n", + result.source_type, result.target_type, result.link_type + ); - for row in &result.rows { - if row.targets.is_empty() { - println!(" {:20} -> (none)", row.source_id); - } else { - let targets: Vec<&str> = row.targets.iter().map(|t| t.id.as_str()).collect(); - println!(" {:20} -> {}", row.source_id, targets.join(", ")); + for row in &result.rows { + if row.targets.is_empty() { + println!(" {:20} -> (none)", row.source_id); + } else { + let targets: Vec<&str> = row.targets.iter().map(|t| t.id.as_str()).collect(); + println!(" {:20} -> {}", row.source_id, targets.join(", ")); + } } - } - println!( - "\nCoverage: {}/{} ({:.1}%)", - result.covered, - result.total, - result.coverage_pct() - ); + println!( + "\nCoverage: {}/{} ({:.1}%)", + result.covered, + result.total, + result.coverage_pct() + ); + } Ok(true) } @@ -422,6 +1095,7 @@ fn cmd_diff( cli: &Cli, base_path: Option<&std::path::Path>, head_path: Option<&std::path::Path>, + format: &str, ) -> Result { let (base_store, base_schema, base_graph, head_store, head_schema, head_graph) = match (base_path, head_path) { @@ -432,13 +1106,17 @@ fn cmd_diff( project: bp.to_path_buf(), schemas: cli.schemas.clone(), verbose: cli.verbose, - command: Command::Validate, + command: Command::Validate { + format: "text".to_string(), + }, }; let head_cli = Cli { project: hp.to_path_buf(), schemas: cli.schemas.clone(), 
verbose: cli.verbose, - command: Command::Validate, + command: Command::Validate { + format: "text".to_string(), + }, }; let (bs, bsc, bg) = load_project(&base_cli)?; let (hs, hsc, hg) = load_project(&head_cli)?; @@ -462,107 +1140,382 @@ fn cmd_diff( let head_diags = validate::validate(&head_store, &head_schema, &head_graph); let diag_diff = DiagnosticDiff::compute(&base_diags, &head_diags); - // ── Display ────────────────────────────────────────────────────── + if format == "json" { + let modified_json: Vec = diff + .modified + .iter() + .map(|change| { + let mut changes = Vec::new(); + if let Some((old, new)) = &change.title_changed { + changes.push(format!("title: {} -> {}", old, new)); + } + if change.description_changed { + changes.push("description: changed".to_string()); + } + if let Some((old, new)) = &change.status_changed { + let old_s = old.as_deref().unwrap_or("(none)"); + let new_s = new.as_deref().unwrap_or("(none)"); + changes.push(format!("status: {} -> {}", old_s, new_s)); + } + if let Some((old, new)) = &change.type_changed { + changes.push(format!("type: {} -> {}", old, new)); + } + for tag in &change.tags_added { + changes.push(format!("tag added: {}", tag)); + } + for tag in &change.tags_removed { + changes.push(format!("tag removed: {}", tag)); + } + for link in &change.links_added { + changes.push(format!("link added: {} -> {}", link.link_type, link.target)); + } + for link in &change.links_removed { + changes.push(format!( + "link removed: {} -> {}", + link.link_type, link.target + )); + } + for field in &change.fields_changed { + changes.push(format!("field changed: {}", field)); + } + serde_json::json!({ + "id": change.id, + "changes": changes, + }) + }) + .collect(); + + let output = serde_json::json!({ + "command": "diff", + "added": diff.added, + "removed": diff.removed, + "modified": modified_json, + "summary": diff.summary(), + }); + println!("{}", serde_json::to_string_pretty(&output).unwrap()); + } else { + // ── Display 
────────────────────────────────────────────────────── - let use_color = std::io::IsTerminal::is_terminal(&std::io::stdout()); + let use_color = std::io::IsTerminal::is_terminal(&std::io::stdout()); - let green = |s: &str| { - if use_color { - format!("\x1b[32m{s}\x1b[0m") - } else { - format!("+ {s}") + let green = |s: &str| { + if use_color { + format!("\x1b[32m{s}\x1b[0m") + } else { + format!("+ {s}") + } + }; + let red = |s: &str| { + if use_color { + format!("\x1b[31m{s}\x1b[0m") + } else { + format!("- {s}") + } + }; + let yellow = |s: &str| { + if use_color { + format!("\x1b[33m{s}\x1b[0m") + } else { + format!("~ {s}") + } + }; + + // Added + for id in &diff.added { + let title = head_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); + println!("{}", green(&format!("{id} {title}"))); } - }; - let red = |s: &str| { - if use_color { - format!("\x1b[31m{s}\x1b[0m") - } else { - format!("- {s}") + + // Removed + for id in &diff.removed { + let title = base_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); + println!("{}", red(&format!("{id} {title}"))); } - }; - let yellow = |s: &str| { - if use_color { - format!("\x1b[33m{s}\x1b[0m") - } else { - format!("~ {s}") + + // Modified + for change in &diff.modified { + println!("{}", yellow(&change.id)); + + if let Some((old, new)) = &change.title_changed { + println!(" title: {} -> {}", red(old), green(new)); + } + if change.description_changed { + println!(" description: changed"); + } + if let Some((old, new)) = &change.status_changed { + let old_s = old.as_deref().unwrap_or("(none)"); + let new_s = new.as_deref().unwrap_or("(none)"); + println!(" status: {} -> {}", red(old_s), green(new_s)); + } + if let Some((old, new)) = &change.type_changed { + println!(" type: {} -> {}", red(old), green(new)); + } + for tag in &change.tags_added { + println!(" tag: {}", green(tag)); + } + for tag in &change.tags_removed { + println!(" tag: {}", red(tag)); + } + for link in &change.links_added { + println!( + " 
link: {}", + green(&format!("{} -> {}", link.link_type, link.target)) + ); + } + for link in &change.links_removed { + println!( + " link: {}", + red(&format!("{} -> {}", link.link_type, link.target)) + ); + } + for field in &change.fields_changed { + println!(" field changed: {field}"); + } } - }; - // Added - for id in &diff.added { - let title = head_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); - println!("{}", green(&format!("{id} {title}"))); + // Summary + println!(); + println!("{}", diff.summary()); + + // Diagnostic diff + if !diag_diff.is_empty() { + println!(); + for d in &diag_diff.new_errors { + println!("{}", red(&format!("NEW {d}"))); + } + for d in &diag_diff.resolved_errors { + println!("{}", green(&format!("RESOLVED {d}"))); + } + for d in &diag_diff.new_warnings { + println!("{}", yellow(&format!("NEW {d}"))); + } + for d in &diag_diff.resolved_warnings { + println!("{}", green(&format!("RESOLVED {d}"))); + } + println!("{}", diag_diff.summary()); + } } - // Removed - for id in &diff.removed { - let title = base_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); - println!("{}", red(&format!("{id} {title}"))); + Ok(true) +} + +/// Show built-in docs (no project load needed). +fn cmd_docs(topic: Option<&str>, grep: Option<&str>, format: &str, context: usize) -> Result { + if let Some(pattern) = grep { + print!("{}", docs::grep_docs(pattern, format, context)); + } else if let Some(slug) = topic { + print!("{}", docs::show_topic(slug, format)); + } else { + print!("{}", docs::list_topics(format)); } + Ok(true) +} - // Modified - for change in &diff.modified { - println!("{}", yellow(&change.id)); +/// Introspect loaded schemas. 
+fn cmd_schema(cli: &Cli, action: &SchemaAction) -> Result { + let schemas_dir = resolve_schemas_dir(cli); + let config_path = cli.project.join("rivet.yaml"); + let schema_names = if config_path.exists() { + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + config.project.schemas + } else { + vec!["common".to_string(), "dev".to_string()] + }; + let schema = + rivet_core::load_schemas(&schema_names, &schemas_dir).context("loading schemas")?; + + let output = match action { + SchemaAction::List { format } => schema_cmd::cmd_list(&schema, format), + SchemaAction::Show { name, format } => schema_cmd::cmd_show(&schema, name, format), + SchemaAction::Links { format } => schema_cmd::cmd_links(&schema, format), + SchemaAction::Rules { format } => schema_cmd::cmd_rules(&schema, format), + }; + print!("{output}"); + Ok(true) +} - if let Some((old, new)) = &change.title_changed { - println!(" title: {} -> {}", red(old), green(new)); - } - if change.description_changed { - println!(" description: changed"); - } - if let Some((old, new)) = &change.status_changed { - let old_s = old.as_deref().unwrap_or("(none)"); - let new_s = new.as_deref().unwrap_or("(none)"); - println!(" status: {} -> {}", red(old_s), green(new_s)); - } - if let Some((old, new)) = &change.type_changed { - println!(" type: {} -> {}", red(old), green(new)); - } - for tag in &change.tags_added { - println!(" tag: {}", green(tag)); - } - for tag in &change.tags_removed { - println!(" tag: {}", red(tag)); - } - for link in &change.links_added { - println!( - " link: {}", - green(&format!("{} -> {}", link.link_type, link.target)) - ); - } - for link in &change.links_removed { - println!( - " link: {}", - red(&format!("{} -> {}", link.link_type, link.target)) - ); - } - for field in &change.fields_changed { - println!(" field changed: {field}"); - } +/// Generate .rivet/agent-context.md from project state. 
+fn cmd_context(cli: &Cli) -> Result { + let config_path = cli.project.join("rivet.yaml"); + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + + let (store, schema, graph, doc_store) = load_project_with_docs(cli)?; + let diagnostics = validate::validate(&store, &schema, &graph); + let coverage_report = coverage::compute_coverage(&store, &schema, &graph); + + let rivet_dir = cli.project.join(".rivet"); + std::fs::create_dir_all(&rivet_dir) + .with_context(|| format!("creating {}", rivet_dir.display()))?; + + let mut out = String::new(); + out.push_str("# Rivet Agent Context\n\n"); + out.push_str("Auto-generated by `rivet context` — do not edit.\n\n"); + + // ── 1. Project configuration ──────────────────────────────────────── + out.push_str("## Project\n\n"); + out.push_str(&format!("- **Name:** {}\n", config.project.name)); + if let Some(ref v) = config.project.version { + out.push_str(&format!("- **Version:** {v}\n")); + } + out.push_str(&format!( + "- **Schemas:** {}\n", + config.project.schemas.join(", ") + )); + out.push_str(&format!( + "- **Sources:** {}\n", + config + .sources + .iter() + .map(|s| format!("{} ({})", s.path, s.format)) + .collect::>() + .join(", ") + )); + if !config.docs.is_empty() { + out.push_str(&format!("- **Docs:** {}\n", config.docs.join(", "))); + } + if let Some(ref r) = config.results { + out.push_str(&format!("- **Results:** {r}\n")); } + out.push('\n'); - // Summary - println!(); - println!("{}", diff.summary()); + // ── 2. 
Artifact summary with example IDs ──────────────────────────── + out.push_str("## Artifacts\n\n"); + let mut types: Vec<&str> = store.types().collect(); + types.sort(); + out.push_str("| Type | Count | Example IDs |\n|------|-------|-------------|\n"); + for t in &types { + let ids = store.by_type(t); + let examples: Vec<&str> = ids.iter().take(3).map(|id| id.as_str()).collect(); + out.push_str(&format!( + "| {} | {} | {} |\n", + t, + store.count_by_type(t), + examples.join(", ") + )); + } + out.push_str(&format!("| **Total** | **{}** | |\n\n", store.len())); + + // ── 3. Schema summary (types + required fields) ───────────────────── + out.push_str("## Schema\n\n"); + let mut stypes: Vec<_> = schema.artifact_types.values().collect(); + stypes.sort_by_key(|t| &t.name); + for t in &stypes { + let required: Vec<&str> = t + .fields + .iter() + .filter(|f| f.required) + .map(|f| f.name.as_str()) + .collect(); + let req_str = if required.is_empty() { + String::from("(none)") + } else { + required.join(", ") + }; + out.push_str(&format!( + "- **`{}`** — {} \n Required fields: {}\n", + t.name, t.description, req_str + )); + } - // Diagnostic diff - if !diag_diff.is_empty() { - println!(); - for d in &diag_diff.new_errors { - println!("{}", red(&format!("NEW {d}"))); - } - for d in &diag_diff.resolved_errors { - println!("{}", green(&format!("RESOLVED {d}"))); - } - for d in &diag_diff.new_warnings { - println!("{}", yellow(&format!("NEW {d}"))); + // Link types + out.push_str("\n### Link Types\n\n"); + let mut links: Vec<_> = schema.link_types.values().collect(); + links.sort_by_key(|l| &l.name); + for l in &links { + let inv = l.inverse.as_deref().unwrap_or("-"); + out.push_str(&format!("- `{}` (inverse: `{}`)\n", l.name, inv)); + } + out.push('\n'); + + // ── 4. 
Traceability rules ─────────────────────────────────────────── + out.push_str("## Traceability Rules\n\n"); + if schema.traceability_rules.is_empty() { + out.push_str("No traceability rules defined.\n\n"); + } else { + out.push_str("| Rule | Source Type | Severity | Description |\n"); + out.push_str("|------|------------|----------|-------------|\n"); + for rule in &schema.traceability_rules { + let sev = match rule.severity { + Severity::Error => "error", + Severity::Warning => "warning", + Severity::Info => "info", + }; + out.push_str(&format!( + "| {} | {} | {} | {} |\n", + rule.name, rule.source_type, sev, rule.description + )); } - for d in &diag_diff.resolved_warnings { - println!("{}", green(&format!("RESOLVED {d}"))); + out.push('\n'); + } + + // ── 5. Coverage summary ───────────────────────────────────────────── + out.push_str("## Coverage\n\n"); + out.push_str(&format!( + "**Overall: {:.1}%**\n\n", + coverage_report.overall_coverage() + )); + if !coverage_report.entries.is_empty() { + out.push_str("| Rule | Source Type | Covered | Total | % |\n"); + out.push_str("|------|------------|---------|-------|---|\n"); + for entry in &coverage_report.entries { + out.push_str(&format!( + "| {} | {} | {} | {} | {:.1}% |\n", + entry.rule_name, + entry.source_type, + entry.covered, + entry.total, + entry.percentage() + )); } - println!("{}", diag_diff.summary()); + out.push('\n'); + } + + // ── 6. Validation summary ─────────────────────────────────────────── + let errors = diagnostics + .iter() + .filter(|d| d.severity == Severity::Error) + .count(); + let warnings = diagnostics + .iter() + .filter(|d| d.severity == Severity::Warning) + .count(); + out.push_str(&format!( + "## Validation\n\n{} errors, {} warnings\n\n", + errors, warnings + )); + + // Documents + if !doc_store.is_empty() { + out.push_str(&format!( + "## Documents\n\n{} documents loaded\n\n", + doc_store.len() + )); } + // ── 7. 
Quick command reference ────────────────────────────────────── + out.push_str("## Commands\n\n"); + out.push_str("```bash\n"); + out.push_str("rivet validate # validate all artifacts\n"); + out.push_str("rivet list # list all artifacts\n"); + out.push_str("rivet list -t # filter by type\n"); + out.push_str("rivet stats # artifact counts + orphans\n"); + out.push_str("rivet coverage # traceability coverage report\n"); + out.push_str("rivet matrix --from X --to Y # traceability matrix\n"); + out.push_str("rivet diff --base A --head B # compare artifact sets\n"); + out.push_str("rivet schema list # list schema types\n"); + out.push_str("rivet schema show # show type details\n"); + out.push_str("rivet schema rules # list traceability rules\n"); + out.push_str("rivet export -f generic-yaml # export as YAML\n"); + out.push_str("rivet serve # start dashboard on :3000\n"); + out.push_str("rivet context # regenerate this file\n"); + out.push_str("```\n"); + + let context_path = rivet_dir.join("agent-context.md"); + std::fs::write(&context_path, &out) + .with_context(|| format!("writing {}", context_path.display()))?; + println!("Generated {}", context_path.display()); Ok(true) } @@ -620,6 +1573,111 @@ fn load_project(cli: &Cli) -> Result<(Store, rivet_core::schema::Schema, LinkGra Ok((store, schema, graph)) } +fn load_project_with_docs( + cli: &Cli, +) -> Result<(Store, rivet_core::schema::Schema, LinkGraph, DocumentStore)> { + let config_path = cli.project.join("rivet.yaml"); + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + + let schemas_dir = resolve_schemas_dir(cli); + let schema = rivet_core::load_schemas(&config.project.schemas, &schemas_dir) + .context("loading schemas")?; + + let mut store = Store::new(); + for source in &config.sources { + let artifacts = rivet_core::load_artifacts(source, &cli.project) + .with_context(|| format!("loading source '{}'", source.path))?; + for artifact in 
artifacts { + store.upsert(artifact); + } + } + + let graph = LinkGraph::build(&store, &schema); + + // Load documents from configured directories. + let mut doc_store = DocumentStore::new(); + for docs_path in &config.docs { + let dir = cli.project.join(docs_path); + let docs = document::load_documents(&dir) + .with_context(|| format!("loading docs from '{docs_path}'"))?; + for doc in docs { + doc_store.insert(doc); + } + } + + Ok((store, schema, graph, doc_store)) +} + +#[allow(clippy::type_complexity)] +fn load_project_full( + cli: &Cli, +) -> Result<( + Store, + rivet_core::schema::Schema, + LinkGraph, + DocumentStore, + ResultStore, + String, + PathBuf, + PathBuf, +)> { + let config_path = cli.project.join("rivet.yaml"); + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + + let schemas_dir = resolve_schemas_dir(cli); + let schema = rivet_core::load_schemas(&config.project.schemas, &schemas_dir) + .context("loading schemas")?; + + let mut store = Store::new(); + for source in &config.sources { + let artifacts = rivet_core::load_artifacts(source, &cli.project) + .with_context(|| format!("loading source '{}'", source.path))?; + for artifact in artifacts { + store.upsert(artifact); + } + } + + let graph = LinkGraph::build(&store, &schema); + + // Load documents + let mut doc_store = DocumentStore::new(); + for docs_path in &config.docs { + let dir = cli.project.join(docs_path); + let docs = document::load_documents(&dir) + .with_context(|| format!("loading docs from '{docs_path}'"))?; + for doc in docs { + doc_store.insert(doc); + } + } + + // Load test results + let mut result_store = ResultStore::new(); + if let Some(ref results_path) = config.results { + let dir = cli.project.join(results_path); + let runs = results::load_results(&dir) + .with_context(|| format!("loading results from '{results_path}'"))?; + for run in runs { + result_store.insert(run); + } + } + + let project_name = 
config.project.name.clone(); + let project_path = std::fs::canonicalize(&cli.project).unwrap_or_else(|_| cli.project.clone()); + + Ok(( + store, + schema, + graph, + doc_store, + result_store, + project_name, + project_path, + schemas_dir, + )) +} + fn print_stats(store: &Store) { println!("Artifact summary:"); let mut types: Vec<&str> = store.types().collect(); diff --git a/rivet-cli/src/schema_cmd.rs b/rivet-cli/src/schema_cmd.rs new file mode 100644 index 0000000..32958eb --- /dev/null +++ b/rivet-cli/src/schema_cmd.rs @@ -0,0 +1,362 @@ +//! `rivet schema` subcommand — introspect loaded schemas. +//! +//! Provides `list`, `show`, `links`, `rules` for both humans and AI agents. + +use rivet_core::schema::{Cardinality, Schema, Severity}; + +/// List all artifact types. +pub fn cmd_list(schema: &Schema, format: &str) -> String { + let mut types: Vec<_> = schema.artifact_types.values().collect(); + types.sort_by_key(|t| &t.name); + + if format == "json" { + let items: Vec = types + .iter() + .map(|t| { + serde_json::json!({ + "name": t.name, + "description": t.description, + "fields": t.fields.len(), + "link_fields": t.link_fields.len(), + "aspice_process": t.aspice_process, + }) + }) + .collect(); + serde_json::to_string_pretty(&serde_json::json!({ + "command": "schema-list", + "count": items.len(), + "artifact_types": items, + })) + .unwrap_or_default() + } else { + let mut out = String::new(); + out.push_str(&format!("Artifact types ({}):\n\n", types.len())); + for t in &types { + let proc = t + .aspice_process + .as_deref() + .map(|p| format!(" ({p})")) + .unwrap_or_default(); + out.push_str(&format!(" {:<30} {}{}\n", t.name, t.description, proc)); + } + out.push_str("\nUse: rivet schema show \n"); + out + } +} + +/// Show detailed info for a single artifact type, including an example YAML snippet. 
+pub fn cmd_show(schema: &Schema, name: &str, format: &str) -> String { + let Some(t) = schema.artifact_type(name) else { + return format!( + "Unknown artifact type: {name}\n\nAvailable: {}\n", + schema + .artifact_types + .keys() + .cloned() + .collect::>() + .join(", ") + ); + }; + + if format == "json" { + let fields: Vec = t + .fields + .iter() + .map(|f| { + serde_json::json!({ + "name": f.name, + "type": f.field_type, + "required": f.required, + "description": f.description, + "allowed_values": f.allowed_values, + }) + }) + .collect(); + let link_fields: Vec = t + .link_fields + .iter() + .map(|lf| { + serde_json::json!({ + "name": lf.name, + "link_type": lf.link_type, + "target_types": lf.target_types, + "required": lf.required, + "cardinality": format!("{:?}", lf.cardinality), + }) + }) + .collect(); + let rules: Vec = schema + .traceability_rules + .iter() + .filter(|r| r.source_type == t.name) + .map(|r| { + serde_json::json!({ + "name": r.name, + "description": r.description, + "severity": format!("{:?}", r.severity), + "required_link": r.required_link, + "required_backlink": r.required_backlink, + "target_types": r.target_types, + "from_types": r.from_types, + }) + }) + .collect(); + let example = generate_example_yaml(t, schema); + return serde_json::to_string_pretty(&serde_json::json!({ + "command": "schema-show", + "artifact_type": { + "name": t.name, + "description": t.description, + "aspice_process": t.aspice_process, + "fields": fields, + "link_fields": link_fields, + "traceability_rules": rules, + "example_yaml": example, + } + })) + .unwrap_or_default(); + } + + let mut out = String::new(); + out.push_str(&format!("Type: {}\n", t.name)); + out.push_str(&format!("Description: {}\n", t.description)); + if let Some(ref proc) = t.aspice_process { + out.push_str(&format!("ASPICE Process: {proc}\n")); + } + + // Fields + if !t.fields.is_empty() { + out.push_str("\nFields:\n"); + for f in &t.fields { + let req = if f.required { "required" } else { 
"optional" }; + let vals = f + .allowed_values + .as_ref() + .map(|v| format!(" [{}]", v.join(", "))) + .unwrap_or_default(); + out.push_str(&format!( + " {:<24} {:<10} {}{}\n", + f.name, f.field_type, req, vals + )); + } + } + + // Link fields + if !t.link_fields.is_empty() { + out.push_str("\nLink fields:\n"); + for lf in &t.link_fields { + let req = if lf.required { "required" } else { "optional" }; + let card = match lf.cardinality { + Cardinality::ExactlyOne => "exactly-one", + Cardinality::ZeroOrMany => "zero-or-many", + Cardinality::ZeroOrOne => "zero-or-one", + Cardinality::OneOrMany => "one-or-many", + }; + let targets = if lf.target_types.is_empty() { + "any".to_string() + } else { + lf.target_types.join(", ") + }; + out.push_str(&format!( + " {:<24} {} -> [{}] {} {}\n", + lf.name, lf.link_type, targets, req, card + )); + } + } + + // Traceability rules + let rules: Vec<_> = schema + .traceability_rules + .iter() + .filter(|r| r.source_type == t.name) + .collect(); + if !rules.is_empty() { + out.push_str("\nTraceability rules:\n"); + for r in &rules { + let sev = match r.severity { + Severity::Error => "error", + Severity::Warning => "warning", + Severity::Info => "info", + }; + out.push_str(&format!(" {} ({}): {}\n", r.name, sev, r.description)); + if let Some(ref link) = r.required_link { + out.push_str(&format!( + " required link: {} -> [{}]\n", + link, + r.target_types.join(", ") + )); + } + if let Some(ref bl) = r.required_backlink { + out.push_str(&format!( + " required backlink: {} from [{}]\n", + bl, + r.from_types.join(", ") + )); + } + } + } + + // Example + out.push_str("\nExample:\n"); + out.push_str(&generate_example_yaml(t, schema)); + + out +} + +/// List all link types. 
+pub fn cmd_links(schema: &Schema, format: &str) -> String { + let mut links: Vec<_> = schema.link_types.values().collect(); + links.sort_by_key(|l| &l.name); + + if format == "json" { + let items: Vec = links + .iter() + .map(|l| { + serde_json::json!({ + "name": l.name, + "inverse": l.inverse, + "description": l.description, + "source_types": l.source_types, + "target_types": l.target_types, + }) + }) + .collect(); + return serde_json::to_string_pretty(&serde_json::json!({ + "command": "schema-links", + "count": items.len(), + "link_types": items, + })) + .unwrap_or_default(); + } + + let mut out = String::new(); + out.push_str(&format!("Link types ({}):\n\n", links.len())); + out.push_str(&format!( + " {:<24} {:<24} {}\n", + "Name", "Inverse", "Description" + )); + out.push_str(&format!(" {}\n", "-".repeat(72))); + for l in &links { + let inv = l.inverse.as_deref().unwrap_or("-"); + out.push_str(&format!(" {:<24} {:<24} {}\n", l.name, inv, l.description)); + } + out +} + +/// List all traceability rules. 
+pub fn cmd_rules(schema: &Schema, format: &str) -> String { + if format == "json" { + let items: Vec = schema + .traceability_rules + .iter() + .map(|r| { + serde_json::json!({ + "name": r.name, + "description": r.description, + "source_type": r.source_type, + "severity": format!("{:?}", r.severity), + "required_link": r.required_link, + "required_backlink": r.required_backlink, + "target_types": r.target_types, + "from_types": r.from_types, + }) + }) + .collect(); + return serde_json::to_string_pretty(&serde_json::json!({ + "command": "schema-rules", + "count": items.len(), + "rules": items, + })) + .unwrap_or_default(); + } + + let mut out = String::new(); + out.push_str(&format!( + "Traceability rules ({}):\n\n", + schema.traceability_rules.len() + )); + for r in &schema.traceability_rules { + let sev = match r.severity { + Severity::Error => "ERROR ", + Severity::Warning => "WARN ", + Severity::Info => "INFO ", + }; + out.push_str(&format!(" {} {:<36} {}\n", sev, r.name, r.source_type)); + out.push_str(&format!(" {}\n", r.description)); + } + out +} + +// ── Example YAML generation ───────────────────────────────────────────── + +fn generate_example_yaml(t: &rivet_core::schema::ArtifactTypeDef, _schema: &Schema) -> String { + let mut out = String::new(); + let id_prefix = t + .name + .split('-') + .map(|w| { + let mut c = w.chars(); + match c.next() { + Some(ch) => ch.to_uppercase().to_string(), + None => String::new(), + } + }) + .collect::>() + .join(""); + + out.push_str(&format!(" - id: {}-001\n", id_prefix)); + out.push_str(&format!(" type: {}\n", t.name)); + out.push_str(&format!(" title: Example {}\n", t.name)); + out.push_str(" status: draft\n"); + out.push_str(" description: >\n"); + out.push_str(&format!(" Describe this {}.\n", t.name)); + out.push_str(" tags: [example]\n"); + + // Links + if !t.link_fields.is_empty() { + out.push_str(" links:\n"); + for lf in &t.link_fields { + let target_hint = lf + .target_types + .first() + .map(|tt| { + let 
prefix: String = tt + .split('-') + .map(|w| { + let mut c = w.chars(); + match c.next() { + Some(ch) => ch.to_uppercase().to_string(), + None => String::new(), + } + }) + .collect(); + format!("{prefix}-001") + }) + .unwrap_or_else(|| "TARGET-001".to_string()); + out.push_str(&format!(" - type: {}\n", lf.link_type)); + out.push_str(&format!(" target: {}\n", target_hint)); + } + } + + // Fields + if !t.fields.is_empty() { + out.push_str(" fields:\n"); + for f in &t.fields { + let val = if let Some(ref vals) = f.allowed_values { + vals.first().cloned().unwrap_or_else(|| "value".to_string()) + } else { + match f.field_type.as_str() { + "number" => "0".to_string(), + "boolean" => "true".to_string(), + "text" => ">\n Description text.".to_string(), + "structured" => "{}".to_string(), + _ => format!("example-{}", f.name), + } + }; + let comment = if !f.required { " # optional" } else { "" }; + out.push_str(&format!(" {}: {}{}\n", f.name, val, comment)); + } + } + + out +} diff --git a/rivet-cli/src/serve.rs b/rivet-cli/src/serve.rs index e96a33e..d21ebb2 100644 --- a/rivet-cli/src/serve.rs +++ b/rivet-cli/src/serve.rs @@ -1,48 +1,333 @@ -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; +use std::path::PathBuf; use std::sync::Arc; -use anyhow::Result; +use anyhow::{Context as _, Result}; use axum::Router; use axum::extract::{Path, Query, State}; -use axum::response::Html; -use axum::routing::get; -use petgraph::graph::{Graph, NodeIndex}; +use axum::response::{Html, IntoResponse}; +use axum::routing::{get, post}; +use petgraph::graph::{EdgeIndex, Graph, NodeIndex}; use petgraph::visit::EdgeRef; +use tokio::sync::RwLock; +use crate::{docs, schema_cmd}; use etch::filter::ego_subgraph; use etch::layout::{self as pgv_layout, EdgeInfo, LayoutOptions, NodeInfo}; use etch::svg::{SvgOptions, render_svg}; +use rivet_core::adapter::{Adapter, AdapterConfig, AdapterSource}; +use rivet_core::coverage; +use rivet_core::diff::ArtifactDiff; +use 
rivet_core::document::{self, DocumentStore}; +use rivet_core::formats::generic::GenericYamlAdapter; use rivet_core::links::LinkGraph; use rivet_core::matrix::{self, Direction}; +use rivet_core::model::ProjectConfig; +use rivet_core::results::ResultStore; use rivet_core::schema::{Schema, Severity}; use rivet_core::store::Store; use rivet_core::validate; +// ── Repository context ────────────────────────────────────────────────── + +/// Git repository status captured at load time. +struct GitInfo { + branch: String, + commit_short: String, + is_dirty: bool, + dirty_count: usize, +} + +/// A discovered sibling project (example or peer). +struct SiblingProject { + name: String, + rel_path: String, +} + +/// Project context shown in the dashboard header. +struct RepoContext { + project_name: String, + project_path: String, + git: Option, + loaded_at: String, + siblings: Vec, + port: u16, +} + +fn capture_git_info(project_path: &std::path::Path) -> Option { + let branch = std::process::Command::new("git") + .args(["rev-parse", "--abbrev-ref", "HEAD"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())?; + + let commit_short = std::process::Command::new("git") + .args(["rev-parse", "--short", "HEAD"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_default(); + + let porcelain = std::process::Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(project_path) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).to_string()) + .unwrap_or_default(); + + let dirty_count = porcelain.lines().filter(|l| !l.is_empty()).count(); + + Some(GitInfo { + branch, + commit_short, + is_dirty: dirty_count > 0, + dirty_count, + }) +} + +/// Discover other rivet projects (examples/ and peer directories). 
+fn discover_siblings(project_path: &std::path::Path) -> Vec { + let mut siblings = Vec::new(); + + // Check examples/ subdirectory + let examples_dir = project_path.join("examples"); + if examples_dir.is_dir() { + if let Ok(entries) = std::fs::read_dir(&examples_dir) { + for entry in entries.flatten() { + let p = entry.path(); + if p.join("rivet.yaml").exists() { + if let Some(name) = p.file_name().and_then(|n| n.to_str()) { + siblings.push(SiblingProject { + name: name.to_string(), + rel_path: format!("examples/{name}"), + }); + } + } + } + } + } + + // If inside examples/, offer root project and peers + if let Some(parent) = project_path.parent() { + if parent.file_name().and_then(|n| n.to_str()) == Some("examples") { + if let Some(root) = parent.parent() { + if root.join("rivet.yaml").exists() { + if let Ok(cfg) = std::fs::read_to_string(root.join("rivet.yaml")) { + let root_name = cfg + .lines() + .find(|l| l.trim().starts_with("name:")) + .map(|l| l.trim().trim_start_matches("name:").trim().to_string()) + .unwrap_or_else(|| { + root.file_name() + .and_then(|n| n.to_str()) + .unwrap_or("root") + .to_string() + }); + siblings.push(SiblingProject { + name: root_name, + rel_path: root.display().to_string(), + }); + } + } + // Peer examples + if let Ok(entries) = std::fs::read_dir(parent) { + for entry in entries.flatten() { + let p = entry.path(); + if p != project_path && p.join("rivet.yaml").exists() { + if let Some(name) = p.file_name().and_then(|n| n.to_str()) { + siblings.push(SiblingProject { + name: name.to_string(), + rel_path: p.display().to_string(), + }); + } + } + } + } + } + } + } + + siblings.sort_by(|a, b| a.name.cmp(&b.name)); + siblings +} + /// Shared application state loaded once at startup. struct AppState { store: Store, schema: Schema, graph: LinkGraph, + doc_store: DocumentStore, + result_store: ResultStore, + context: RepoContext, + /// Canonical path to the project directory (for reload). 
+ project_path_buf: PathBuf, + /// Path to the schemas directory (for reload). + schemas_dir: PathBuf, +} + +/// Convenience alias so handler signatures stay compact. +type SharedState = Arc>; + +/// Build a fresh `AppState` by loading everything from disk. +fn reload_state( + project_path: &std::path::Path, + schemas_dir: &std::path::Path, + port: u16, +) -> Result { + let config_path = project_path.join("rivet.yaml"); + let config = rivet_core::load_project_config(&config_path) + .with_context(|| format!("loading {}", config_path.display()))?; + + let schema = rivet_core::load_schemas(&config.project.schemas, schemas_dir) + .context("loading schemas")?; + + let mut store = Store::new(); + for source in &config.sources { + let artifacts = rivet_core::load_artifacts(source, project_path) + .with_context(|| format!("loading source '{}'", source.path))?; + for artifact in artifacts { + store.upsert(artifact); + } + } + + let graph = LinkGraph::build(&store, &schema); + + let mut doc_store = DocumentStore::new(); + for docs_path in &config.docs { + let dir = project_path.join(docs_path); + let docs = rivet_core::document::load_documents(&dir) + .with_context(|| format!("loading docs from '{docs_path}'"))?; + for doc in docs { + doc_store.insert(doc); + } + } + + let mut result_store = ResultStore::new(); + if let Some(ref results_path) = config.results { + let dir = project_path.join(results_path); + let runs = rivet_core::results::load_results(&dir) + .with_context(|| format!("loading results from '{results_path}'"))?; + for run in runs { + result_store.insert(run); + } + } + + let git = capture_git_info(project_path); + let loaded_at = std::process::Command::new("date") + .arg("+%H:%M:%S") + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| "unknown".into()); + let siblings = discover_siblings(project_path); + let project_name = config.project.name.clone(); + + let context = 
RepoContext { + project_name, + project_path: project_path.display().to_string(), + git, + loaded_at, + siblings, + port, + }; + + Ok(AppState { + store, + schema, + graph, + doc_store, + result_store, + context, + project_path_buf: project_path.to_path_buf(), + schemas_dir: schemas_dir.to_path_buf(), + }) } /// Start the axum HTTP server on the given port. -pub async fn run(store: Store, schema: Schema, graph: LinkGraph, port: u16) -> Result<()> { - let state = Arc::new(AppState { +#[allow(clippy::too_many_arguments)] +pub async fn run( + store: Store, + schema: Schema, + graph: LinkGraph, + doc_store: DocumentStore, + result_store: ResultStore, + project_name: String, + project_path: PathBuf, + schemas_dir: PathBuf, + port: u16, +) -> Result<()> { + let git = capture_git_info(&project_path); + let loaded_at = std::process::Command::new("date") + .arg("+%H:%M:%S") + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| "unknown".into()); + let siblings = discover_siblings(&project_path); + let context = RepoContext { + project_name, + project_path: project_path.display().to_string(), + git, + loaded_at, + siblings, + port, + }; + + let state: SharedState = Arc::new(RwLock::new(AppState { store, schema, graph, - }); + doc_store, + result_store, + context, + project_path_buf: project_path, + schemas_dir, + })); let app = Router::new() .route("/", get(index)) .route("/artifacts", get(artifacts_list)) .route("/artifacts/{id}", get(artifact_detail)) + .route("/artifacts/{id}/preview", get(artifact_preview)) .route("/artifacts/{id}/graph", get(artifact_graph)) .route("/validate", get(validate_view)) .route("/matrix", get(matrix_view)) .route("/graph", get(graph_view)) .route("/stats", get(stats_view)) - .with_state(state); + .route("/coverage", get(coverage_view)) + .route("/documents", get(documents_list)) + .route("/documents/{id}", get(document_detail)) + .route("/search", 
get(search_view)) + .route("/verification", get(verification_view)) + .route("/stpa", get(stpa_view)) + .route("/results", get(results_view)) + .route("/results/{run_id}", get(result_detail)) + .route("/source", get(source_tree_view)) + .route("/source/{*path}", get(source_file_view)) + .route("/diff", get(diff_view)) + .route("/doc-linkage", get(doc_linkage_view)) + .route("/traceability", get(traceability_view)) + .route("/traceability/history", get(traceability_history)) + .route("/api/links/{id}", get(api_artifact_links)) + .route("/api/render-aadl", get(api_render_aadl)) + .route("/help", get(help_view)) + .route("/help/docs", get(help_docs_list)) + .route("/help/docs/{*slug}", get(help_docs_topic)) + .route("/help/schema", get(help_schema_list)) + .route("/help/schema/{name}", get(help_schema_show)) + .route("/help/links", get(help_links_view)) + .route("/help/rules", get(help_rules_view)) + .route("/reload", post(reload_handler)) + .with_state(state) + .layer(axum::middleware::from_fn(redirect_non_htmx)); let addr = format!("0.0.0.0:{port}"); eprintln!("rivet dashboard listening on http://localhost:{port}"); @@ -52,6 +337,350 @@ pub async fn run(store: Store, schema: Schema, graph: LinkGraph, port: u16) -> R Ok(()) } +/// Middleware: redirect direct browser requests (no HX-Request header) to `/?goto=/path` +/// so the full layout is served and JS loads the content. 
+async fn redirect_non_htmx( + req: axum::extract::Request, + next: axum::middleware::Next, +) -> axum::response::Response { + let path = req.uri().path().to_string(); + let is_htmx = req.headers().contains_key("hx-request"); + let method = req.method().clone(); + + // Only redirect GET requests to known view routes, not / or /reload or /api/* + if method == axum::http::Method::GET + && !is_htmx + && path != "/" + && !path.starts_with("/?") + && !path.starts_with("/api/") + { + let goto = urlencoding::encode(&path); + return axum::response::Redirect::to(&format!("/?goto={goto}")).into_response(); + } + + next.run(req).await +} + +/// GET /api/links/{id} — return JSON array of AADL-prefixed artifact IDs linked +/// to the given artifact (forward links, backlinks, and self if applicable). +async fn api_artifact_links( + State(state): State, + Path(id): Path, +) -> axum::Json> { + let state = state.read().await; + let graph = &state.graph; + + let mut linked_ids = Vec::new(); + + // Forward links from this artifact + for link in graph.links_from(&id) { + if link.target.starts_with("AADL-") { + linked_ids.push(link.target.clone()); + } + } + + // Backlinks to this artifact + for bl in graph.backlinks_to(&id) { + if bl.source.starts_with("AADL-") { + linked_ids.push(bl.source.clone()); + } + } + + // If this IS an AADL artifact, include self + if id.starts_with("AADL-") { + linked_ids.push(id); + } + + axum::Json(linked_ids) +} + +#[derive(Debug, serde::Deserialize)] +struct RenderAadlParams { + root: String, + #[serde(default)] + highlight: Option, +} + +/// Serializable instance node from spar JSON output. +#[derive(Debug, serde::Deserialize)] +struct SparInstanceNode { + name: String, + category: String, + #[allow(dead_code)] + package: String, + #[allow(dead_code)] + type_name: String, + #[allow(dead_code)] + impl_name: Option, + #[serde(default)] + children: Vec, +} + +/// Top-level JSON output from `spar analyze --format json`. 
+#[derive(Debug, serde::Deserialize)] +struct SparAnalyzeOutput { + #[allow(dead_code)] + root: String, + #[serde(default)] + instance: Option, +} + +/// Recursively collect .aadl files from a directory. +fn collect_aadl_files_recursive(dir: &std::path::Path) -> Vec { + let mut files = Vec::new(); + let Ok(entries) = std::fs::read_dir(dir) else { + return files; + }; + for entry in entries.flatten() { + let path = entry.path(); + if path.extension().is_some_and(|ext| ext == "aadl") { + files.push(path); + } else if path.is_dir() { + files.extend(collect_aadl_files_recursive(&path)); + } + } + files +} + +/// GET /api/render-aadl — render an AADL component diagram as SVG. +/// +/// Shells out to `spar analyze --root {root} --format json {files}`, +/// parses the instance tree, builds a petgraph, and renders SVG via etch. +async fn api_render_aadl( + State(state): State, + Query(params): Query, +) -> Result, Html> { + // 1. Find .aadl files: check configured sources first, then scan project dir. + let project_path = { + let guard = state.read().await; + guard.project_path_buf.clone() + }; + + let aadl_files = find_aadl_files(&project_path); + if aadl_files.is_empty() { + return Err(Html( + "
No .aadl files found in the project directory.
" + .into(), + )); + } + + // 2. Call spar CLI. + let mut cmd = std::process::Command::new("spar"); + cmd.arg("analyze") + .arg("--root") + .arg(¶ms.root) + .arg("--format") + .arg("json"); + for f in &aadl_files { + cmd.arg(f); + } + + let output = match cmd.output() { + Ok(o) => o, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + return Err(Html( + "
\ + spar not found. Install the spar CLI and ensure it is on your PATH.
\ + cargo install --path /path/to/spar/crates/spar-cli\ +
" + .into(), + )); + } + Err(e) => { + return Err(Html(format!( + "
Failed to run spar: {}
", + html_escape(&e.to_string()) + ))); + } + }; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(Html(format!( + "
spar exited with error:
{}
", + html_escape(&stderr) + ))); + } + + // 3. Parse the JSON output. + let stdout = String::from_utf8_lossy(&output.stdout); + let parsed: SparAnalyzeOutput = serde_json::from_str(&stdout).map_err(|e| { + Html(format!( + "
Failed to parse spar JSON output: {}
", + html_escape(&e.to_string()) + )) + })?; + + let instance = parsed.instance.ok_or_else(|| { + Html(format!( + "
No instance model produced for root {}. \ + Check that the root classifier exists and has an implementation.
", + html_escape(¶ms.root) + )) + })?; + + // 4. Build a petgraph from the instance tree. + let mut graph: Graph<(String, String), ()> = Graph::new(); + let mut node_indices = Vec::new(); + build_instance_graph(&instance, &mut graph, None, &mut node_indices); + + // Parse highlight set from comma-separated param. + let highlight_set: std::collections::HashSet = params + .highlight + .as_deref() + .unwrap_or("") + .split(',') + .filter(|s| !s.is_empty()) + .map(|s| s.trim().to_string()) + .collect(); + + // 5. Layout and render SVG. + let mut colors = aadl_category_colors(); + // Merge in the general type_color_map for consistent look. + for (k, v) in type_color_map() { + colors.entry(k).or_insert(v); + } + + let svg_opts = SvgOptions { + type_colors: colors, + interactive: true, + base_url: Some("/artifacts".into()), + background: Some("#fafbfc".into()), + font_size: 12.0, + edge_color: "#888".into(), + highlight: if highlight_set.len() == 1 { + highlight_set.into_iter().next() + } else { + None + }, + ..SvgOptions::default() + }; + + let layout_opts = LayoutOptions { + node_width: 200.0, + node_height: 56.0, + rank_separation: 90.0, + node_separation: 30.0, + ..Default::default() + }; + + let gl = pgv_layout::layout( + &graph, + &|_idx: NodeIndex, (name, category): &(String, String)| NodeInfo { + id: name.clone(), + label: name.clone(), + node_type: category.clone(), + sublabel: Some(category.clone()), + }, + &|_idx: EdgeIndex, _e: &()| EdgeInfo { + label: String::new(), + }, + &layout_opts, + ); + + let svg = render_svg(&gl, &svg_opts); + + Ok(Html(svg)) +} + +/// Find .aadl files for the project: check rivet.yaml sources first, then scan. +fn find_aadl_files(project_path: &std::path::Path) -> Vec { + // Try loading the project config to find AADL-format sources. 
+ let config_path = project_path.join("rivet.yaml"); + if let Ok(content) = std::fs::read_to_string(&config_path) { + if let Ok(config) = serde_yaml::from_str::(&content) { + let mut files = Vec::new(); + for source in &config.sources { + if source.format == "aadl" { + let dir = project_path.join(&source.path); + files.extend(collect_aadl_files_recursive(&dir)); + } + } + if !files.is_empty() { + return files; + } + } + } + + // Fallback: scan the entire project directory for .aadl files. + collect_aadl_files_recursive(project_path) +} + +/// Recursively add instance tree nodes and edges to a petgraph. +fn build_instance_graph( + node: &SparInstanceNode, + graph: &mut Graph<(String, String), ()>, + parent: Option, + indices: &mut Vec, +) { + let idx = graph.add_node((node.name.clone(), node.category.clone())); + indices.push(idx); + + if let Some(parent_idx) = parent { + graph.add_edge(parent_idx, idx, ()); + } + + for child in &node.children { + build_instance_graph(child, graph, Some(idx), indices); + } +} + +/// AADL-specific category color palette. +fn aadl_category_colors() -> HashMap { + let pairs: &[(&str, &str)] = &[ + ("system", "#4a90d9"), + ("process", "#50b848"), + ("thread", "#f5a623"), + ("thread-group", "#e8913a"), + ("processor", "#9b59b6"), + ("virtual-processor", "#af7ac5"), + ("memory", "#e74c3c"), + ("bus", "#1abc9c"), + ("virtual-bus", "#48c9b0"), + ("device", "#34495e"), + ("abstract", "#95a5a6"), + ("data", "#3498db"), + ("subprogram", "#e67e22"), + ("subprogram-group", "#d35400"), + ]; + pairs + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect() +} + +/// POST /reload — re-read the project from disk and replace the shared state. 
+async fn reload_handler(State(state): State) -> impl IntoResponse { + let (project_path, schemas_dir, port) = { + let guard = state.read().await; + ( + guard.project_path_buf.clone(), + guard.schemas_dir.clone(), + guard.context.port, + ) + }; + + match reload_state(&project_path, &schemas_dir, port) { + Ok(new_state) => { + let mut guard = state.write().await; + *guard = new_state; + ( + axum::http::StatusCode::OK, + [("HX-Refresh", "true")], + "reloaded", + ) + } + Err(e) => { + eprintln!("reload error: {e:#}"); + ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + [("HX-Refresh", "false")], + "reload failed", + ) + } + } +} + // ── Color palette ──────────────────────────────────────────────────────── fn type_color_map() -> HashMap { @@ -67,6 +696,9 @@ fn type_color_map() -> HashMap { ("causal-factor", "#d63384"), ("safety-constraint", "#20c997"), ("loss-scenario", "#e83e8c"), + ("controller-constraint", "#20c997"), + ("controlled-process", "#6610f2"), + ("sub-hazard", "#fd7e14"), // ASPICE ("stakeholder-req", "#0d6efd"), ("system-req", "#0dcaf0"), @@ -103,58 +735,527 @@ fn type_color_map() -> HashMap { .collect() } +/// Return a colored badge `` for an artifact type. +/// +/// Uses the `type_color_map` hex color as text and computes a 12%-opacity +/// tinted background from it. 
+fn badge_for_type(type_name: &str) -> String { + let colors = type_color_map(); + let hex = colors + .get(type_name) + .map(|s| s.as_str()) + .unwrap_or("#5b2d9e"); + // Parse hex → rgb + let hex_digits = hex.trim_start_matches('#'); + let r = u8::from_str_radix(&hex_digits[0..2], 16).unwrap_or(91); + let g = u8::from_str_radix(&hex_digits[2..4], 16).unwrap_or(45); + let b = u8::from_str_radix(&hex_digits[4..6], 16).unwrap_or(158); + format!( + "{}", + html_escape(type_name) + ) +} + // ── CSS ────────────────────────────────────────────────────────────────── const CSS: &str = r#" -*{box-sizing:border-box;margin:0;padding:0} -body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,Helvetica,Arial,sans-serif; - color:#1a1a2e;background:#f8f9fa;line-height:1.6} -a{color:#3a86ff;text-decoration:none} -a:hover{text-decoration:underline} +/* ── Reset & base ─────────────────────────────────────────────── */ +*,*::before,*::after{box-sizing:border-box;margin:0;padding:0} +:root{ + --bg: #f5f5f7; + --surface:#fff; + --sidebar:#0f0f13; + --sidebar-hover:#1c1c24; + --sidebar-text:#9898a6; + --sidebar-active:#fff; + --text: #1d1d1f; + --text-secondary:#6e6e73; + --border: #e5e5ea; + --accent: #3a86ff; + --accent-hover:#2568d6; + --radius: 10px; + --radius-sm:6px; + --shadow: 0 1px 3px rgba(0,0,0,.06),0 1px 2px rgba(0,0,0,.04); + --shadow-md:0 4px 12px rgba(0,0,0,.06),0 1px 3px rgba(0,0,0,.04); + --mono: 'JetBrains Mono','Fira Code','SF Mono',Menlo,monospace; + --font: 'Atkinson Hyperlegible',system-ui,-apple-system,sans-serif; + --transition:180ms ease; +} +html{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;text-rendering:optimizeLegibility} +body{font-family:var(--font);color:var(--text);background:var(--bg);line-height:1.6;font-size:15px} + +/* ── Links ────────────────────────────────────────────────────── */ +a{color:var(--accent);text-decoration:none;transition:color var(--transition)} +a:hover{color:var(--accent-hover)} 
+a:focus-visible{outline:2px solid var(--accent);outline-offset:2px;border-radius:3px} + +/* ── Shell layout ─────────────────────────────────────────────── */ .shell{display:flex;min-height:100vh} -nav{width:220px;background:#1a1a2e;color:#e0e0e0;padding:1.5rem 1rem;flex-shrink:0} -nav h1{font-size:1.2rem;color:#fff;margin-bottom:1.5rem;letter-spacing:.05em} -nav ul{list-style:none} -nav li{margin-bottom:.25rem} -nav a{display:block;padding:.45rem .75rem;border-radius:6px;color:#c0c0d0;font-size:.9rem} -nav a:hover,nav a.active{background:#2a2a4e;color:#fff;text-decoration:none} -main{flex:1;padding:2rem 2.5rem;max-width:1100px} -h2{font-size:1.35rem;margin-bottom:1rem;color:#1a1a2e} -h3{font-size:1.1rem;margin:1.25rem 0 .5rem;color:#333} -table{width:100%;border-collapse:collapse;margin-bottom:1.5rem} -th,td{text-align:left;padding:.5rem .75rem;border-bottom:1px solid #dee2e6} -th{background:#e9ecef;font-weight:600;font-size:.85rem;text-transform:uppercase;letter-spacing:.03em;color:#495057} -td{font-size:.9rem} -tr:hover td{background:#f1f3f5} -.badge{display:inline-block;padding:.15rem .5rem;border-radius:4px;font-size:.78rem;font-weight:600} -.badge-error{background:#ffe0e0;color:#c62828} -.badge-warn{background:#fff3cd;color:#856404} -.badge-info{background:#d1ecf1;color:#0c5460} -.badge-ok{background:#d4edda;color:#155724} -.badge-type{background:#e8e0f0;color:#4a148c} -.card{background:#fff;border:1px solid #dee2e6;border-radius:8px;padding:1.25rem;margin-bottom:1rem;box-shadow:0 1px 3px rgba(0,0,0,.04)} -.stat-grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(180px,1fr));gap:1rem;margin-bottom:1.5rem} -.stat-box{background:#fff;border:1px solid #dee2e6;border-radius:8px;padding:1rem;text-align:center} -.stat-box .number{font-size:2rem;font-weight:700;color:#3a86ff} -.stat-box .label{font-size:.85rem;color:#6c757d} -.link-pill{display:inline-block;padding:.1rem .4rem;border-radius:3px;font-size:.8rem;background:#e9ecef;margin:.1rem} 
-.form-row{display:flex;gap:.75rem;align-items:end;flex-wrap:wrap;margin-bottom:1rem} -.form-row label{font-size:.85rem;font-weight:600;color:#495057} -.form-row select,.form-row input{padding:.4rem .6rem;border:1px solid #ced4da;border-radius:4px;font-size:.9rem} -.form-row button{padding:.4rem 1rem;background:#3a86ff;color:#fff;border:none;border-radius:4px; - font-size:.9rem;cursor:pointer} -.form-row button:hover{background:#2a6fdf} -dl{margin:.5rem 0} -dt{font-weight:600;font-size:.85rem;color:#495057;margin-top:.5rem} -dd{margin-left:0;margin-bottom:.25rem} -.meta{color:#6c757d;font-size:.85rem} -.nav-icon{display:inline-block;width:1.1rem;text-align:center;margin-right:.3rem;font-size:.85rem} -.graph-container{border:1px solid #dee2e6;border-radius:8px;overflow:hidden;background:#fff;cursor:grab} +.content-area{display:flex;flex-direction:column;flex:1;min-width:0} + +/* ── Sidebar navigation ──────────────────────────────────────── */ +nav{width:232px;background:var(--sidebar);color:var(--sidebar-text); + padding:1.75rem 1rem;flex-shrink:0;display:flex;flex-direction:column; + position:sticky;top:0;height:100vh;overflow-y:auto; + border-right:1px solid rgba(255,255,255,.06)} +nav h1{font-size:1.05rem;font-weight:700;color:var(--sidebar-active); + margin-bottom:2rem;letter-spacing:.04em;padding:0 .75rem; + display:flex;align-items:center;gap:.5rem} +nav h1::before{content:'';display:inline-block;width:8px;height:8px; + border-radius:50%;background:var(--accent);flex-shrink:0} +nav ul{list-style:none;display:flex;flex-direction:column;gap:2px} +nav li{margin:0} +nav a{display:flex;align-items:center;gap:.5rem;padding:.5rem .75rem;border-radius:var(--radius-sm); + color:var(--sidebar-text);font-size:.875rem;font-weight:500; + transition:all var(--transition)} +nav a:hover{background:var(--sidebar-hover);color:var(--sidebar-active);text-decoration:none} +nav a.active{background:rgba(58,134,255,.08);color:var(--sidebar-active);border-left:2px solid 
var(--accent);padding-left:calc(.75rem - 2px)} +nav a:focus-visible{outline:2px solid var(--accent);outline-offset:-2px} + +/* ── Main content ─────────────────────────────────────────────── */ +main{flex:1;padding:2.5rem 3rem;max-width:1400px;min-width:0;overflow-y:auto} +main.htmx-swapping{opacity:.4;transition:opacity 150ms ease-out} +main.htmx-settling{opacity:1;transition:opacity 200ms ease-in} + +/* ── Loading bar ──────────────────────────────────────────────── */ +#loading-bar{position:fixed;top:0;left:0;width:0;height:2px;background:var(--accent); + z-index:9999;transition:none;pointer-events:none} +#loading-bar.active{width:85%;transition:width 8s cubic-bezier(.1,.05,.1,1)} +#loading-bar.done{width:100%;transition:width 100ms ease;opacity:0;transition:width 100ms ease,opacity 300ms ease 100ms} + +/* ── Typography ───────────────────────────────────────────────── */ +h2{font-size:1.4rem;font-weight:700;margin-bottom:1.25rem;color:var(--text);letter-spacing:-.01em;padding-bottom:.75rem;border-bottom:1px solid var(--border)} +h3{font-size:1.05rem;font-weight:600;margin:1.5rem 0 .75rem;color:var(--text)} +code,pre{font-family:var(--mono);font-size:.85em} +pre{background:#f1f1f3;padding:1rem;border-radius:var(--radius-sm);overflow-x:auto} + +/* ── Tables ───────────────────────────────────────────────────── */ +table{width:100%;border-collapse:collapse;margin-bottom:1.5rem;font-size:.9rem} +th,td{text-align:left;padding:.65rem .875rem} +th{font-weight:600;font-size:.75rem;text-transform:uppercase;letter-spacing:.06em; + color:var(--text-secondary);border-bottom:2px solid var(--border);background:transparent} +td{border-bottom:1px solid var(--border)} +tbody tr{transition:background var(--transition)} +tbody tr:nth-child(even){background:rgba(0,0,0,.015)} +tbody tr:hover{background:rgba(58,134,255,.04)} +td a{font-family:var(--mono);font-size:.85rem;font-weight:500} + +/* ── Badges ───────────────────────────────────────────────────── */ 
+.badge{display:inline-flex;align-items:center;padding:.2rem .55rem;border-radius:5px; + font-size:.73rem;font-weight:600;letter-spacing:.02em;line-height:1.4;white-space:nowrap} +.badge-error{background:#fee;color:#c62828} +.badge-warn{background:#fff8e1;color:#8b6914} +.badge-info{background:#e8f4fd;color:#0c5a82} +.badge-ok{background:#e6f9ed;color:#15713a} +.badge-type{background:#f0ecf9;color:#5b2d9e;font-family:var(--mono);font-size:.72rem} + +/* ── Validation bar ──────────────────────────────────────────── */ +.validation-bar{padding:1rem 1.25rem;border-radius:var(--radius);margin-bottom:1.25rem;font-weight:600;font-size:.95rem} +.validation-bar.pass{background:linear-gradient(135deg,#e6f9ed,#d4f5e0);color:#15713a;border:1px solid #b8e8c8} +.validation-bar.fail{background:linear-gradient(135deg,#fee,#fdd);color:#c62828;border:1px solid #f4c7c3} + +/* ── Status progress bars ────────────────────────────────────── */ +.status-bar-row{display:flex;align-items:center;gap:.75rem;margin-bottom:.5rem;font-size:.85rem} +.status-bar-label{width:80px;text-align:right;font-weight:500;color:var(--text-secondary)} +.status-bar-track{flex:1;height:20px;background:#e5e5ea;border-radius:4px;overflow:hidden;position:relative} +.status-bar-fill{height:100%;border-radius:4px;transition:width .3s ease} +.status-bar-count{width:40px;font-variant-numeric:tabular-nums;color:var(--text-secondary)} + +/* ── Cards ────────────────────────────────────────────────────── */ +.card{background:var(--surface);border-radius:var(--radius);padding:1.5rem; + margin-bottom:1.25rem;box-shadow:var(--shadow);border:1px solid var(--border); + transition:box-shadow var(--transition)} + +/* ── Stat grid ────────────────────────────────────────────────── */ +.stat-grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(160px,1fr));gap:1rem;margin-bottom:1.75rem} +.stat-box{background:var(--surface);border-radius:var(--radius);padding:1.25rem 1rem;text-align:center; + 
box-shadow:var(--shadow);border:1px solid var(--border);transition:box-shadow var(--transition),transform var(--transition); + border-top:3px solid var(--border)} +.stat-box:hover{box-shadow:var(--shadow-md);transform:translateY(-1px)} +.stat-box .number{font-size:2rem;font-weight:800;letter-spacing:-.02em; + font-variant-numeric:tabular-nums;line-height:1.2} +.stat-box .label{font-size:.8rem;font-weight:500;color:var(--text-secondary);margin-top:.25rem; + text-transform:uppercase;letter-spacing:.04em} +.stat-blue{border-top-color:#3a86ff}.stat-blue .number{color:#3a86ff} +.stat-green{border-top-color:#15713a}.stat-green .number{color:#15713a} +.stat-orange{border-top-color:#e67e22}.stat-orange .number{color:#e67e22} +.stat-red{border-top-color:#c62828}.stat-red .number{color:#c62828} +.stat-amber{border-top-color:#b8860b}.stat-amber .number{color:#b8860b} +.stat-purple{border-top-color:#6f42c1}.stat-purple .number{color:#6f42c1} + +/* ── Link pills ───────────────────────────────────────────────── */ +.link-pill{display:inline-block;padding:.15rem .45rem;border-radius:4px; + font-size:.76rem;font-family:var(--mono);background:#f0f0f3; + color:var(--text-secondary);margin:.1rem;font-weight:500} + +/* ── Forms ────────────────────────────────────────────────────── */ +.form-row{display:flex;gap:1rem;align-items:end;flex-wrap:wrap;margin-bottom:1rem} +.form-row label{font-size:.8rem;font-weight:600;color:var(--text-secondary); + text-transform:uppercase;letter-spacing:.04em} +.form-row select,.form-row input[type="text"],.form-row input[type="search"], +.form-row input:not([type]),.form-row input[list]{ + padding:.5rem .75rem;border:1px solid var(--border);border-radius:var(--radius-sm); + font-size:.875rem;font-family:var(--font);background:var(--surface);color:var(--text); + transition:border-color var(--transition),box-shadow var(--transition);appearance:none; + -webkit-appearance:none} +.form-row 
select{padding-right:2rem;background-image:url("data:image/svg+xml,%3Csvg width='10' height='6' viewBox='0 0 10 6' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M1 1l4 4 4-4' stroke='%236e6e73' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'/%3E%3C/svg%3E"); + background-repeat:no-repeat;background-position:right .75rem center} +.form-row input:focus,.form-row select:focus{ + outline:none;border-color:var(--accent);box-shadow:0 0 0 3px rgba(58,134,255,.15)} +.form-row input[type="range"]{padding:0;border:none;accent-color:var(--accent);width:100%} +.form-row input[type="range"]:focus{box-shadow:none} +.form-row button{padding:.5rem 1.25rem;background:var(--accent);color:#fff;border:none; + border-radius:var(--radius-sm);font-size:.875rem;font-weight:600; + font-family:var(--font);cursor:pointer;transition:all var(--transition); + box-shadow:0 1px 2px rgba(0,0,0,.08)} +.form-row button:hover{background:var(--accent-hover);box-shadow:0 2px 6px rgba(58,134,255,.25);transform:translateY(-1px)} +.form-row button:active{transform:translateY(0)} +.form-row button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} + +/* ── Definition lists ─────────────────────────────────────────── */ +dl{margin:.75rem 0} +dt{font-weight:600;font-size:.8rem;color:var(--text-secondary);margin-top:.75rem; + text-transform:uppercase;letter-spacing:.04em} +dd{margin-left:0;margin-bottom:.25rem;margin-top:.2rem} + +/* ── Meta text ────────────────────────────────────────────────── */ +.meta{color:var(--text-secondary);font-size:.85rem} + +/* ── Nav icons & badges ───────────────────────────────────────── */ +.nav-icon{display:inline-flex;width:1.25rem;height:1.25rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.5} +nav a:hover .nav-icon,nav a.active .nav-icon{opacity:.9} +.nav-label{display:flex;align-items:center;gap:.5rem;flex:1;min-width:0} +.nav-badge{font-size:.65rem;font-weight:700;padding:.1rem .4rem;border-radius:4px; + 
background:rgba(255,255,255,.08);color:rgba(255,255,255,.4);margin-left:auto;flex-shrink:0} +.nav-badge-error{background:rgba(220,53,69,.2);color:#ff6b7a} +nav .nav-divider{height:1px;background:rgba(255,255,255,.06);margin:.75rem .75rem} + +/* ── Context bar ─────────────────────────────────────────────── */ +.context-bar{display:flex;align-items:center;gap:.75rem;padding:.5rem 1.5rem; + background:var(--surface);border-bottom:1px solid var(--border);font-size:.78rem;color:var(--text-secondary); + flex-wrap:wrap} +.context-bar .ctx-project{font-weight:700;color:var(--text);font-size:.82rem} +.context-bar .ctx-sep{opacity:.25} +.context-bar .ctx-git{font-family:var(--mono);font-size:.72rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(58,134,255,.08);color:var(--accent)} +.context-bar .ctx-dirty{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(220,53,69,.1);color:#c62828} +.context-bar .ctx-clean{font-family:var(--mono);font-size:.68rem;padding:.15rem .4rem;border-radius:4px; + background:rgba(21,113,58,.1);color:#15713a} +.context-bar .ctx-time{margin-left:auto;opacity:.6} +.ctx-switcher{position:relative;display:inline-flex;align-items:center} +.ctx-switcher-details{position:relative} +.ctx-switcher-details summary{cursor:pointer;list-style:none;display:inline-flex;align-items:center; + padding:.15rem .35rem;border-radius:4px;opacity:.5;transition:opacity .15s} +.ctx-switcher-details summary:hover{opacity:1;background:rgba(255,255,255,.06)} +.ctx-switcher-details summary::-webkit-details-marker{display:none} +.ctx-switcher-dropdown{position:absolute;top:100%;left:0;z-index:100;margin-top:.35rem; + background:var(--surface);border:1px solid var(--border);border-radius:var(--radius-sm); + padding:.5rem;min-width:280px;box-shadow:0 8px 24px rgba(0,0,0,.35)} +.ctx-switcher-item{padding:.5rem .65rem;border-radius:4px} +.ctx-switcher-item:hover{background:rgba(255,255,255,.04)} +.ctx-switcher-item 
.ctx-switcher-name{display:block;font-weight:600;font-size:.8rem;color:var(--text);margin-bottom:.2rem} +.ctx-switcher-item .ctx-switcher-cmd{display:block;font-size:.7rem;color:var(--text-secondary); + padding:.2rem .4rem;background:rgba(255,255,255,.04);border-radius:3px; + font-family:var(--mono);user-select:all;cursor:text} + +/* ── Footer ──────────────────────────────────────────────────── */ +.footer{padding:2rem 0 1rem;text-align:center;font-size:.75rem;color:var(--text-secondary); + border-top:1px solid var(--border);margin-top:3rem} + +/* ── Verification ────────────────────────────────────────────── */ +.ver-level{margin-bottom:1.5rem} +.ver-level-header{display:flex;align-items:center;gap:.75rem;margin-bottom:.75rem} +.ver-level-title{font-size:1rem;font-weight:600;color:var(--text)} +.ver-level-arrow{color:var(--text-secondary);font-size:.85rem} +details.ver-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); + display:flex;align-items:center;gap:.75rem;transition:background var(--transition)} +details.ver-row>summary::-webkit-details-marker{display:none} +details.ver-row>summary:hover{background:rgba(58,134,255,.04)} +details.ver-row[open]>summary{background:rgba(58,134,255,.04);border-bottom-color:var(--accent)} +details.ver-row>.ver-detail{padding:1rem 1.5rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border)} +.ver-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4} +details.ver-row[open] .ver-chevron{transform:rotate(90deg)} +.ver-steps{width:100%;border-collapse:collapse;font-size:.85rem;margin-top:.5rem} +.ver-steps th{text-align:left;font-weight:600;font-size:.72rem;text-transform:uppercase; + letter-spacing:.04em;color:var(--text-secondary);padding:.4rem .5rem;border-bottom:1px solid var(--border)} +.ver-steps td{padding:.4rem .5rem;border-bottom:1px solid rgba(0,0,0,.04);vertical-align:top} +.method-badge{display:inline-flex;padding:.15rem 
.5rem;border-radius:4px;font-size:.72rem;font-weight:600; + background:#e8f4fd;color:#0c5a82} + +/* ── Results ─────────────────────────────────────────────────── */ +.result-pass{color:#15713a}.result-fail{color:#c62828}.result-skip{color:#6e6e73} +.result-error{color:#e67e22}.result-blocked{color:#8b6914} +.result-dot{display:inline-block;width:8px;height:8px;border-radius:50%;margin-right:.35rem} +.result-dot-pass{background:#15713a}.result-dot-fail{background:#c62828} +.result-dot-skip{background:#c5c5cd}.result-dot-error{background:#e67e22}.result-dot-blocked{background:#b8860b} + +/* ── Diff ────────────────────────────────────────────────────── */ +.diff-added{background:rgba(21,113,58,.08)} +.diff-removed{background:rgba(198,40,40,.08)} +.diff-modified{background:rgba(184,134,11,.08)} +.diff-icon{display:inline-flex;align-items:center;justify-content:center;width:1.5rem;height:1.5rem; + border-radius:4px;font-size:.85rem;font-weight:700;flex-shrink:0;margin-right:.35rem} +.diff-icon-add{background:rgba(21,113,58,.12);color:#15713a} +.diff-icon-remove{background:rgba(198,40,40,.12);color:#c62828} +.diff-icon-modify{background:rgba(184,134,11,.12);color:#b8860b} +.diff-summary{display:flex;gap:1.25rem;padding:.75rem 1rem;border-radius:var(--radius-sm); + background:var(--surface);border:1px solid var(--border);margin-bottom:1.25rem;font-size:.9rem;font-weight:600} +.diff-summary-item{display:flex;align-items:center;gap:.35rem} +.diff-old{color:#c62828;text-decoration:line-through;font-size:.85rem} +.diff-new{color:#15713a;font-size:.85rem} +.diff-arrow{color:var(--text-secondary);margin:0 .25rem;font-size:.8rem} +details.diff-row>summary{cursor:pointer;list-style:none;padding:.6rem .875rem;border-bottom:1px solid var(--border); + display:flex;align-items:center;gap:.5rem;transition:background var(--transition)} +details.diff-row>summary::-webkit-details-marker{display:none} +details.diff-row>summary:hover{background:rgba(58,134,255,.04)} 
+details.diff-row[open]>summary{background:rgba(184,134,11,.06);border-bottom-color:var(--border)} +details.diff-row>.diff-detail{padding:.75rem 1.25rem;background:rgba(0,0,0,.01);border-bottom:1px solid var(--border);font-size:.88rem} +.diff-field{padding:.3rem 0;display:flex;align-items:baseline;gap:.5rem} +.diff-field-name{font-weight:600;font-size:.8rem;color:var(--text-secondary);min-width:100px; + text-transform:uppercase;letter-spacing:.03em} + +/* ── Detail actions ──────────────────────────────────────────── */ +.detail-actions{display:flex;gap:.75rem;align-items:center;margin-top:1rem} +.btn{display:inline-flex;align-items:center;gap:.4rem;padding:.45rem 1rem;border-radius:var(--radius-sm); + font-size:.85rem;font-weight:600;font-family:var(--font);text-decoration:none; + transition:all var(--transition);cursor:pointer;border:none} +.btn-primary{background:var(--accent);color:#fff;box-shadow:0 1px 2px rgba(0,0,0,.08)} +.btn-primary:hover{background:var(--accent-hover);transform:translateY(-1px);color:#fff;text-decoration:none} +.btn-secondary{background:transparent;color:var(--text-secondary);border:1px solid var(--border)} +.btn-secondary:hover{background:rgba(0,0,0,.03);color:var(--text);text-decoration:none} + +/* ── Graph ────────────────────────────────────────────────────── */ +.graph-container{border-radius:var(--radius);overflow:hidden;background:#fafbfc;cursor:grab; + height:calc(100vh - 280px);min-height:400px;position:relative;border:1px solid var(--border)} .graph-container:active{cursor:grabbing} -.graph-container svg{display:block;width:100%;height:auto} -.filter-grid{display:flex;flex-wrap:wrap;gap:.5rem;margin-bottom:.75rem} -.filter-grid label{font-size:.82rem;display:flex;align-items:center;gap:.25rem} -.filter-grid input[type="checkbox"]{margin:0} +.graph-container svg{display:block;width:100%;height:100%;position:absolute;top:0;left:0} 
+.graph-controls{position:absolute;top:.75rem;right:.75rem;display:flex;flex-direction:column;gap:.35rem;z-index:10} +.graph-controls button{width:34px;height:34px;border:1px solid var(--border);border-radius:var(--radius-sm); + background:var(--surface);font-size:1rem;cursor:pointer;display:flex;align-items:center; + justify-content:center;box-shadow:var(--shadow);color:var(--text); + transition:all var(--transition)} +.graph-controls button:hover{background:#f0f0f3;box-shadow:var(--shadow-md)} +.graph-controls button:focus-visible{outline:2px solid var(--accent);outline-offset:2px} +.graph-legend{display:flex;flex-wrap:wrap;gap:.75rem;padding:.75rem 0 .25rem;font-size:.82rem} +.graph-legend-item{display:flex;align-items:center;gap:.35rem;color:var(--text-secondary)} +.graph-legend-swatch{width:12px;height:12px;border-radius:3px;flex-shrink:0} + +/* ── Filter grid ──────────────────────────────────────────────── */ +.filter-grid{display:flex;flex-wrap:wrap;gap:.6rem;margin-bottom:.75rem} +.filter-grid label{font-size:.8rem;display:flex;align-items:center;gap:.3rem; + color:var(--text-secondary);cursor:pointer;padding:.2rem .45rem; + border-radius:4px;transition:background var(--transition); + text-transform:none;letter-spacing:0;font-weight:500} +.filter-grid label:hover{background:rgba(58,134,255,.06)} +.filter-grid input[type="checkbox"]{margin:0;accent-color:var(--accent);width:14px;height:14px; + cursor:pointer;border-radius:3px} + +/* ── Document styles ──────────────────────────────────────────── */ +.doc-body{line-height:1.8;font-size:.95rem} +.doc-body h1{font-size:1.4rem;font-weight:700;margin:2rem 0 .75rem;color:var(--text); + border-bottom:2px solid var(--border);padding-bottom:.5rem} +.doc-body h2{font-size:1.2rem;font-weight:600;margin:1.5rem 0 .5rem;color:var(--text)} +.doc-body h3{font-size:1.05rem;font-weight:600;margin:1.25rem 0 .4rem;color:var(--text-secondary)} +.doc-body p{margin:.5rem 0} +.doc-body ul{margin:.5rem 0 .5rem 1.5rem} +.doc-body 
li{margin:.2rem 0} +.artifact-ref{display:inline-flex;align-items:center;padding:.15rem .5rem;border-radius:5px; + font-size:.8rem;font-weight:600;font-family:var(--mono);background:#edf2ff; + color:#3a63c7;cursor:pointer;text-decoration:none; + border:1px solid #d4def5;transition:all var(--transition)} +.artifact-ref:hover{background:#d4def5;text-decoration:none;transform:translateY(-1px);box-shadow:0 2px 4px rgba(0,0,0,.06)} +.artifact-ref.broken{background:#fde8e8;color:#c62828;border-color:#f4c7c3;cursor:default} +.artifact-ref.broken:hover{transform:none;box-shadow:none} +/* ── Artifact hover preview ────────────────────────────────── */ +.art-tooltip{position:absolute;z-index:1000;pointer-events:none; + background:var(--surface);border:1px solid var(--border);border-radius:var(--radius); + box-shadow:var(--shadow-lg);padding:0;max-width:340px;min-width:220px; + opacity:0;transition:opacity 120ms ease-in} +.art-tooltip.visible{opacity:1;pointer-events:auto} +.art-preview{padding:.75rem .85rem;font-size:.82rem;line-height:1.45} +.art-preview-header{display:flex;align-items:center;gap:.4rem;margin-bottom:.3rem} +.art-preview-title{font-weight:600;font-size:.85rem;margin-bottom:.3rem;color:var(--text)} +.art-preview-desc{color:var(--text-secondary);font-size:.78rem;line-height:1.4;margin-top:.3rem; + display:-webkit-box;-webkit-line-clamp:3;-webkit-box-orient:vertical;overflow:hidden} +.art-preview-links{font-size:.72rem;color:var(--text-secondary);margin-top:.35rem;font-family:var(--mono)} +.art-preview-tags{margin-top:.35rem;display:flex;flex-wrap:wrap;gap:.25rem} +.art-preview-tag{font-size:.65rem;padding:.1rem .35rem;border-radius:3px; + background:rgba(58,134,255,.08);color:var(--accent);font-family:var(--mono)} +.doc-glossary{font-size:.9rem} +.doc-glossary dt{font-weight:600;color:var(--text)} +.doc-glossary dd{margin:0 0 .5rem 1rem;color:var(--text-secondary)} +.doc-toc{font-size:.88rem;background:var(--surface);border:1px solid var(--border); + 
border-radius:var(--radius);padding:1rem 1.25rem;margin-bottom:1.25rem; + box-shadow:var(--shadow)} +.doc-toc strong{font-size:.75rem;text-transform:uppercase;letter-spacing:.05em;color:var(--text-secondary)} +.doc-toc ul{list-style:none;margin:.5rem 0 0;padding:0} +.doc-toc li{margin:.2rem 0;color:var(--text-secondary)} +.doc-toc .toc-h2{padding-left:0} +.doc-toc .toc-h3{padding-left:1.25rem} +.doc-toc .toc-h4{padding-left:2.5rem} +.doc-meta{display:flex;gap:.75rem;flex-wrap:wrap;align-items:center;margin-bottom:1.25rem} + +/* ── Source viewer ────────────────────────────────────────────── */ +.source-tree{font-family:var(--mono);font-size:.85rem;line-height:1.8} +.source-tree ul{list-style:none;margin:0;padding:0} +.source-tree li{margin:0} +.source-tree .tree-item{display:flex;align-items:center;gap:.4rem;padding:.2rem .5rem;border-radius:var(--radius-sm); + transition:background var(--transition);color:var(--text)} +.source-tree .tree-item:hover{background:rgba(58,134,255,.06);text-decoration:none} +.source-tree .tree-icon{display:inline-flex;width:1rem;height:1rem;align-items:center;justify-content:center;flex-shrink:0;opacity:.55} +.source-tree .indent{display:inline-block;width:1.25rem;flex-shrink:0} +.source-viewer{font-family:var(--mono);font-size:.82rem;line-height:1.7;overflow-x:auto; + background:#fafbfc;border:1px solid var(--border);border-radius:var(--radius);padding:0} +.source-viewer table{width:100%;border-collapse:collapse;margin:0} +.source-viewer table td{padding:0;border:none;vertical-align:top} +.source-viewer table tr:hover{background:rgba(58,134,255,.04)} +.source-line{display:table-row} +.source-line .line-no{display:table-cell;width:3.5rem;min-width:3.5rem;padding:.05rem .75rem .05rem .5rem; + text-align:right;color:#b0b0b8;user-select:none;border-right:1px solid var(--border);background:#f5f5f7} +.source-line .line-content{display:table-cell;padding:.05rem .75rem;white-space:pre;tab-size:4} 
+.source-line-highlight{background:rgba(58,134,255,.08) !important} +.source-line-highlight .line-no{background:rgba(58,134,255,.12);color:var(--accent);font-weight:600} +.source-line:target{background:rgba(255,210,50,.18) !important} +.source-line:target .line-no{background:rgba(255,210,50,.25);color:#9a6700;font-weight:700} +.source-line .line-no a{color:inherit;text-decoration:none} +.source-line .line-no a:hover{color:var(--accent);text-decoration:underline} +/* ── Syntax highlighting tokens ─────────────────────────────── */ +.hl-key{color:#0550ae}.hl-str{color:#0a3069}.hl-num{color:#0550ae} +.hl-bool{color:#cf222e;font-weight:600}.hl-null{color:#cf222e;font-style:italic} +.hl-comment{color:#6e7781;font-style:italic}.hl-tag{color:#6639ba} +.hl-anchor{color:#953800}.hl-type{color:#8250df}.hl-kw{color:#cf222e;font-weight:600} +.hl-fn{color:#8250df}.hl-macro{color:#0550ae;font-weight:600} +.hl-attr{color:#116329}.hl-punct{color:#6e7781} +.hl-sh-cmd{color:#0550ae;font-weight:600}.hl-sh-flag{color:#953800} +.hl-sh-pipe{color:#cf222e;font-weight:700} +.source-ref-link{color:var(--accent);text-decoration:none;font-family:var(--mono);font-size:.85em} +.source-ref-link:hover{text-decoration:underline} +.source-breadcrumb{display:flex;align-items:center;gap:.4rem;font-size:.85rem;color:var(--text-secondary); + margin-bottom:1rem;flex-wrap:wrap} +.source-breadcrumb a{color:var(--accent);font-weight:500} +.source-breadcrumb .sep{opacity:.35;margin:0 .1rem} +.source-meta{display:flex;gap:1.5rem;font-size:.8rem;color:var(--text-secondary);margin-bottom:1rem} +.source-meta .meta-item{display:flex;align-items:center;gap:.35rem} +.source-refs{margin-top:1.25rem} +.source-refs h3{font-size:.95rem;margin-bottom:.5rem} + +/* ── STPA tree ───────────────────────────────────────────────── */ +.stpa-tree{margin-top:1.25rem} +.stpa-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} +.stpa-node{display:flex;align-items:center;gap:.5rem;padding:.35rem 
0;font-size:.9rem} +.stpa-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} +.stpa-link-label{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; + font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; + margin-right:.35rem;white-space:nowrap} +details.stpa-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); + display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.9rem} +details.stpa-details>summary::-webkit-details-marker{display:none} +details.stpa-details>summary:hover{background:rgba(58,134,255,.04)} +details.stpa-details>summary .stpa-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} +details.stpa-details[open]>summary .stpa-chevron{transform:rotate(90deg)} +.stpa-uca-table{width:100%;border-collapse:collapse;font-size:.88rem;margin-top:.75rem} +.stpa-uca-table th{font-weight:600;font-size:.72rem;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);padding:.5rem .75rem;border-bottom:2px solid var(--border)} +.stpa-uca-table td{padding:.55rem .75rem;border-bottom:1px solid var(--border);vertical-align:top} +.stpa-uca-table tbody tr:hover{background:rgba(58,134,255,.04)} +.uca-type-badge{display:inline-flex;padding:.15rem .5rem;border-radius:4px;font-size:.72rem;font-weight:600;white-space:nowrap} +.uca-type-not-providing{background:#fee;color:#c62828} +.uca-type-providing{background:#fff3e0;color:#e65100} +.uca-type-too-early-too-late{background:#e8f4fd;color:#0c5a82} +.uca-type-stopped-too-soon{background:#f3e5f5;color:#6a1b9a} + +/* ── Traceability explorer ──────────────────────────────────────── */ +.trace-matrix{border-collapse:collapse;font-size:.8rem;margin-bottom:1.5rem;width:100%} +.trace-matrix th{font-weight:600;font-size:.7rem;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);padding:.45rem 
.6rem;border-bottom:2px solid var(--border);white-space:nowrap} +.trace-matrix td{padding:.35rem .6rem;border-bottom:1px solid var(--border);text-align:center} +.trace-matrix td:first-child{text-align:left;font-family:var(--mono);font-size:.78rem;font-weight:500} +.trace-matrix tbody tr:hover{background:rgba(58,134,255,.04)} +.trace-cell{display:inline-flex;align-items:center;justify-content:center;width:28px;height:22px; + border-radius:4px;font-size:.75rem;font-weight:700;font-variant-numeric:tabular-nums} +.trace-cell-ok{background:rgba(21,113,58,.1);color:#15713a} +.trace-cell-gap{background:rgba(198,40,40,.1);color:#c62828} +.trace-tree{margin-top:1rem} +.trace-node{display:flex;align-items:center;gap:.5rem;padding:.4rem .6rem;border-radius:var(--radius-sm); + transition:background var(--transition);font-size:.88rem} +.trace-node:hover{background:rgba(58,134,255,.04)} +.trace-node a{font-family:var(--mono);font-size:.82rem;font-weight:500} +.trace-edge{display:inline-block;padding:.1rem .4rem;border-radius:4px;font-size:.68rem; + font-family:var(--mono);background:rgba(58,134,255,.08);color:var(--accent);font-weight:500; + margin-right:.35rem;white-space:nowrap} +.trace-level{padding-left:1.5rem;border-left:2px solid var(--border);margin-left:.5rem} +details.trace-details>summary{cursor:pointer;list-style:none;padding:.4rem .5rem;border-radius:var(--radius-sm); + display:flex;align-items:center;gap:.5rem;transition:background var(--transition);font-size:.88rem} +details.trace-details>summary::-webkit-details-marker{display:none} +details.trace-details>summary:hover{background:rgba(58,134,255,.04)} +details.trace-details>summary .trace-chevron{transition:transform var(--transition);display:inline-flex;opacity:.4;font-size:.7rem} +details.trace-details[open]>summary .trace-chevron{transform:rotate(90deg)} +.trace-history{margin:.35rem 0 .5rem 1.5rem;padding:.5rem .75rem;background:rgba(0,0,0,.015); + border-radius:var(--radius-sm);border:1px solid 
var(--border);font-size:.8rem} +.trace-history-title{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.04em; + color:var(--text-secondary);margin-bottom:.35rem} +.trace-history-item{display:flex;align-items:baseline;gap:.5rem;padding:.15rem 0;color:var(--text-secondary)} +.trace-history-item code{font-size:.75rem;color:var(--accent);font-weight:500} +.trace-history-item .hist-date{font-size:.72rem;color:var(--text-secondary);opacity:.7;min-width:70px} +.trace-history-item .hist-msg{font-size:.78rem;color:var(--text);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.trace-status{display:inline-flex;padding:.12rem .4rem;border-radius:4px;font-size:.68rem;font-weight:600; + margin-left:.25rem} +.trace-status-approved{background:rgba(21,113,58,.1);color:#15713a} +.trace-status-draft{background:rgba(184,134,11,.1);color:#b8860b} + +/* ── Scrollbar (subtle) ───────────────────────────────────────── */ +::-webkit-scrollbar{width:6px;height:6px} +::-webkit-scrollbar-track{background:transparent} +::-webkit-scrollbar-thumb{background:#c5c5cd;border-radius:3px} +::-webkit-scrollbar-thumb:hover{background:#a0a0aa} + +/* ── Selection ────────────────────────────────────────────────── */ +::selection{background:rgba(58,134,255,.18)} + +/* ── Cmd+K search modal ──────────────────────────────────────── */ +.cmd-k-overlay{position:fixed;inset:0;background:rgba(0,0,0,.55);backdrop-filter:blur(4px); + z-index:10000;display:none;align-items:flex-start;justify-content:center;padding-top:min(20vh,160px)} +.cmd-k-overlay.open{display:flex} +.cmd-k-modal{background:var(--sidebar);border-radius:12px;width:100%;max-width:600px; + box-shadow:0 16px 70px rgba(0,0,0,.35);border:1px solid rgba(255,255,255,.08); + overflow:hidden;display:flex;flex-direction:column;max-height:min(70vh,520px)} +.cmd-k-input{width:100%;padding:.875rem 1rem .875rem 2.75rem;font-size:1rem;font-family:var(--font); + background:transparent;border:none;border-bottom:1px solid 
rgba(255,255,255,.08); + color:#fff;outline:none;caret-color:var(--accent)} +.cmd-k-input::placeholder{color:rgba(255,255,255,.35)} +.cmd-k-icon{position:absolute;left:1rem;top:.95rem;color:rgba(255,255,255,.35);pointer-events:none; + font-size:.95rem} +.cmd-k-head{position:relative} +.cmd-k-results{overflow-y:auto;padding:.5rem 0;flex:1} +.cmd-k-empty{padding:1.5rem 1rem;text-align:center;color:rgba(255,255,255,.35);font-size:.9rem} +.cmd-k-group{padding:0 .5rem} +.cmd-k-group-label{font-size:.7rem;font-weight:600;text-transform:uppercase;letter-spacing:.06em; + color:rgba(255,255,255,.3);padding:.5rem .625rem .25rem} +.cmd-k-item{display:flex;align-items:center;gap:.75rem;padding:.5rem .625rem;border-radius:var(--radius-sm); + cursor:pointer;color:var(--sidebar-text);font-size:.88rem;transition:background 80ms ease} +.cmd-k-item:hover,.cmd-k-item.active{background:rgba(255,255,255,.08);color:#fff} +.cmd-k-item-icon{width:1.5rem;height:1.5rem;border-radius:4px;display:flex;align-items:center; + justify-content:center;font-size:.7rem;flex-shrink:0;background:rgba(255,255,255,.06);color:rgba(255,255,255,.5)} +.cmd-k-item-body{flex:1;min-width:0} +.cmd-k-item-title{font-weight:500;white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.cmd-k-item-title mark{background:transparent;color:var(--accent);font-weight:700} +.cmd-k-item-meta{font-size:.75rem;color:rgba(255,255,255,.35);white-space:nowrap;overflow:hidden;text-overflow:ellipsis} +.cmd-k-item-meta mark{background:transparent;color:var(--accent);font-weight:600} +.cmd-k-item-field{font-size:.65rem;padding:.1rem .35rem;border-radius:3px; + background:rgba(255,255,255,.06);color:rgba(255,255,255,.4);white-space:nowrap;flex-shrink:0} +.cmd-k-kbd{display:inline-flex;align-items:center;gap:.2rem;font-size:.7rem;font-family:var(--mono); + padding:.15rem .4rem;border-radius:4px;background:rgba(255,255,255,.08);color:rgba(255,255,255,.4); + border:1px solid rgba(255,255,255,.06)} 
+.nav-search-hint{display:flex;align-items:center;justify-content:space-between;padding:.5rem .75rem; + margin-top:auto;border-top:1px solid rgba(255,255,255,.06);padding-top:1rem; + color:var(--sidebar-text);font-size:.82rem;cursor:pointer;border-radius:var(--radius-sm); + transition:all var(--transition)} +.nav-search-hint:hover{background:var(--sidebar-hover);color:var(--sidebar-active)} +.aadl-diagram{background:var(--card-bg);border:1px solid var(--border);border-radius:8px;padding:1rem;margin:1rem 0} +.aadl-diagram svg{width:100%;height:auto;max-height:600px} +.aadl-loading{color:var(--text-secondary);font-style:italic} +.aadl-error{color:var(--danger);font-style:italic} "#; // ── Pan/zoom JS ────────────────────────────────────────────────────────── @@ -162,8 +1263,72 @@ dd{margin-left:0;margin-bottom:.25rem} const GRAPH_JS: &str = r#" -"#; -// ── Layout ─────────────────────────────────────────────────────────────── + // Touch support + var lastDist=0, lastMid=null; + c.addEventListener('touchstart',function(e){ + if(e.touches.length===1){ + drag=true; sx=e.touches[0].clientX; sy=e.touches[0].clientY; + ox=vb.x; oy=vb.y; + } else if(e.touches.length===2){ + drag=false; + var dx=e.touches[1].clientX-e.touches[0].clientX; + var dy=e.touches[1].clientY-e.touches[0].clientY; + lastDist=Math.sqrt(dx*dx+dy*dy); + lastMid={x:(e.touches[0].clientX+e.touches[1].clientX)/2, + y:(e.touches[0].clientY+e.touches[1].clientY)/2}; + } + },{passive:true}); + c.addEventListener('touchmove',function(e){ + if(e.touches.length===1 && drag){ + e.preventDefault(); + var scale=vb.width/c.clientWidth; + vb.x=ox-(e.touches[0].clientX-sx)*scale; + vb.y=oy-(e.touches[0].clientY-sy)*scale; + } else if(e.touches.length===2){ + e.preventDefault(); + var dx=e.touches[1].clientX-e.touches[0].clientX; + var dy=e.touches[1].clientY-e.touches[0].clientY; + var dist=Math.sqrt(dx*dx+dy*dy); + var f=lastDist/dist; + var r=c.getBoundingClientRect(); + var 
mid={x:(e.touches[0].clientX+e.touches[1].clientX)/2, + y:(e.touches[0].clientY+e.touches[1].clientY)/2}; + var mx=(mid.x-r.left)/r.width; + var my=(mid.y-r.top)/r.height; + var nx=vb.width*f, ny=vb.height*f; + vb.x+=(vb.width-nx)*mx; + vb.y+=(vb.height-ny)*my; + vb.width=nx; vb.height=ny; + lastDist=dist; lastMid=mid; + } + },{passive:false}); + c.addEventListener('touchend',function(){ drag=false; lastDist=0; }); -fn page_layout(content: &str) -> Html { - Html(format!( - r##" - - - - -Rivet Dashboard - - - - -
- -
-{content} -
-
-{GRAPH_JS} - -"## - )) -} + // Zoom buttons + var controls=c.querySelector('.graph-controls'); + if(controls){ + controls.querySelector('.zoom-in').addEventListener('click',function(){ + var cx=vb.x+vb.width/2, cy=vb.y+vb.height/2; + vb.width/=1.3; vb.height/=1.3; + vb.x=cx-vb.width/2; vb.y=cy-vb.height/2; + }); + controls.querySelector('.zoom-out').addEventListener('click',function(){ + var cx=vb.x+vb.width/2, cy=vb.y+vb.height/2; + vb.width*=1.3; vb.height*=1.3; + vb.x=cx-vb.width/2; vb.y=cy-vb.height/2; + }); + controls.querySelector('.zoom-fit').addEventListener('click',function(){ + vb.x=origVB.x; vb.y=origVB.y; vb.width=origVB.w; vb.height=origVB.h; + }); + } -// ── Routes ─────────────────────────────────────────────────────────────── + // ── Node dragging + click ────────────────────────────── + var dragNode=null, dnSX=0, dnSY=0, dnOX=0, dnOY=0, dnMoved=false; + var nodeOffsets={}; // id -> {dx,dy} -async fn index(State(state): State>) -> Html { - let inner = stats_partial(&state); - page_layout(&inner) -} + function getNodeCenter(node){ + var r=node.querySelector('rect'); + if(!r) return {x:0,y:0}; + var x=parseFloat(r.getAttribute('x'))||0; + var y=parseFloat(r.getAttribute('y'))||0; + var w=parseFloat(r.getAttribute('width'))||0; + var h=parseFloat(r.getAttribute('height'))||0; + var id=node.getAttribute('data-id')||''; + var off=nodeOffsets[id]||{dx:0,dy:0}; + return {x:x+w/2+off.dx, y:y+h/2+off.dy}; + } -async fn stats_view(State(state): State>) -> Html { - Html(stats_partial(&state)) -} + function updateEdges(){ + svg.querySelectorAll('.edge').forEach(function(edge){ + var src=edge.getAttribute('data-source'); + var tgt=edge.getAttribute('data-target'); + var srcOff=nodeOffsets[src]||{dx:0,dy:0}; + var tgtOff=nodeOffsets[tgt]||{dx:0,dy:0}; + var path=edge.querySelector('path'); + if(!path) return; + var origD=path.getAttribute('data-orig-d'); + if(!origD){ origD=path.getAttribute('d'); path.setAttribute('data-orig-d',origD); } + // Parse path points 
and offset them + var newD=offsetPath(origD,srcOff,tgtOff); + path.setAttribute('d',newD); + // Move label + var lbg=edge.querySelector('.label-bg'); + var ltxt=edge.querySelector('text'); + if(lbg){ + var ox=lbg.getAttribute('data-orig-x'); + if(!ox){ ox=lbg.getAttribute('x'); lbg.setAttribute('data-orig-x',ox); + var oy=lbg.getAttribute('y'); lbg.setAttribute('data-orig-y',oy); } + var avgDx=(srcOff.dx+tgtOff.dx)/2; + var avgDy=(srcOff.dy+tgtOff.dy)/2; + lbg.setAttribute('x',parseFloat(lbg.getAttribute('data-orig-x'))+avgDx); + lbg.setAttribute('y',parseFloat(lbg.getAttribute('data-orig-y'))+avgDy); + } + if(ltxt){ + var otx=ltxt.getAttribute('data-orig-x'); + if(!otx){ otx=ltxt.getAttribute('x'); ltxt.setAttribute('data-orig-x',otx); + var oty=ltxt.getAttribute('y'); ltxt.setAttribute('data-orig-y',oty); } + var avgDx2=(srcOff.dx+tgtOff.dx)/2; + var avgDy2=(srcOff.dy+tgtOff.dy)/2; + ltxt.setAttribute('x',parseFloat(ltxt.getAttribute('data-orig-x'))+avgDx2); + ltxt.setAttribute('y',parseFloat(ltxt.getAttribute('data-orig-y'))+avgDy2); + } + }); + } -fn stats_partial(state: &AppState) -> String { - let store = &state.store; - let graph = &state.graph; + function offsetPath(d,srcOff,tgtOff){ + // SVG path: M x y, L x y, C x y x y x y, etc. 
+ // Split into commands and offset first point by srcOff, last by tgtOff, middle interpolated + var tokens=d.match(/[MLCQZ]|[-]?[\d.]+/gi); + if(!tokens) return d; + var pts=[]; + var i=0; + while(i1?j/(n-1):0; + pts[j].x+= srcOff.dx*(1-frac)+tgtOff.dx*frac; + pts[j].y+= srcOff.dy*(1-frac)+tgtOff.dy*frac; + } + // Rebuild + var out=''; + for(var j=0;j2||Math.abs(dy)>2) dnMoved=true; + var nid=dragNode.getAttribute('data-id')||''; + nodeOffsets[nid]={dx:dnOX+dx, dy:dnOY+dy}; + dragNode.setAttribute('transform','translate('+nodeOffsets[nid].dx+','+nodeOffsets[nid].dy+')'); + updateEdges(); + return; // don't pan while dragging a node + } + if(!drag) return; + var scale2=vb.width/c.clientWidth; + vb.x=ox-(e.clientX-sx)*scale2; + vb.y=oy-(e.clientY-sy)*scale2; + }); + c.addEventListener('mouseup',function(){ + if(dragNode){ dragNode.style.cursor='grab'; dragNode=null; } + drag=false; + }); + + // Fit to container on first load with some padding + var padding=40; + vb.x=-padding; vb.y=-padding; + vb.width=origVB.w+padding*2; + vb.height=origVB.h+padding*2; + origVB={x:vb.x, y:vb.y, w:vb.width, h:vb.height}; + }); + } + + // ── Artifact hover preview tooltip ─────────────────────── + (function(){ + var tip=document.createElement('div'); + tip.className='art-tooltip'; + document.body.appendChild(tip); + var timer=null, ctrl=null, currentEl=null; + + function show(el){ + var href=el.getAttribute('hx-get')||''; + var m=href.match(/^\/artifacts\/(.+)$/); + if(!m) return; + var id=m[1]; + if(ctrl) ctrl.abort(); + ctrl=new AbortController(); + fetch('/artifacts/'+encodeURIComponent(id)+'/preview',{signal:ctrl.signal,headers:{'HX-Request':'true'}}) + .then(function(r){return r.text()}) + .then(function(html){ + tip.innerHTML=html; + tip.classList.add('visible'); + position(el); + }).catch(function(){}); + } + + function position(el){ + var r=el.getBoundingClientRect(); + var tw=tip.offsetWidth, th=tip.offsetHeight; + var left=r.left+r.width/2-tw/2; + var top=r.top-th-6; + 
if(top<4){ top=r.bottom+6; } + if(left<4) left=4; + if(left+tw>window.innerWidth-4) left=window.innerWidth-tw-4; + tip.style.left=left+'px'; + tip.style.top=top+window.scrollY+'px'; + } + + function hide(){ + clearTimeout(timer); timer=null; + if(ctrl){ ctrl.abort(); ctrl=null; } + tip.classList.remove('visible'); + currentEl=null; + } + + document.body.addEventListener('mouseenter',function(e){ + var el=e.target.closest('[hx-get^="/artifacts/"]'); + if(!el||el.getAttribute('hx-get').indexOf('/preview')!==-1) return; + currentEl=el; + timer=setTimeout(function(){ show(el); },300); + },true); + + document.body.addEventListener('mouseleave',function(e){ + var el=e.target.closest('[hx-get^="/artifacts/"]'); + if(el&&el===currentEl) hide(); + },true); + + // also hide when clicking (navigating away) + document.body.addEventListener('click',function(){ hide(); },true); + })(); +})(); + +"#; + +// ── Cmd+K search JS ────────────────────────────────────────────────────── + +const SEARCH_JS: &str = r#" + +"#; + +// ── AADL diagram JS ───────────────────────────────────────────────────── + +const AADL_JS: &str = r#" + +"#; + +// ── Layout ─────────────────────────────────────────────────────────────── + +fn page_layout(content: &str, state: &AppState) -> Html { + let artifact_count = state.store.len(); + let diagnostics = validate::validate(&state.store, &state.schema, &state.graph); + let error_count = diagnostics + .iter() + .filter(|d| d.severity == Severity::Error) + .count(); + let error_badge = if error_count > 0 { + format!("{error_count}") + } else { + "OK".to_string() + }; + let doc_badge = if !state.doc_store.is_empty() { + format!("{}", state.doc_store.len()) + } else { + String::new() + }; + let result_badge = if !state.result_store.is_empty() { + format!( + "{}", + state.result_store.len() + ) + } else { + String::new() + }; + let stpa_types = [ + "loss", + "hazard", + "sub-hazard", + "system-constraint", + "controller", + "controlled-process", + 
"control-action", + "uca", + "controller-constraint", + "loss-scenario", + ]; + let stpa_count: usize = stpa_types + .iter() + .map(|t| state.store.count_by_type(t)) + .sum(); + let stpa_nav = if stpa_count > 0 { + format!( + "
  • STPA{stpa_count}
  • " + ) + } else { + String::new() + }; + let version = env!("CARGO_PKG_VERSION"); + + // Context bar + let ctx = &state.context; + let git_html = if let Some(ref git) = ctx.git { + let status = if git.is_dirty { + format!( + "{} uncommitted", + git.dirty_count + ) + } else { + "clean".to_string() + }; + format!( + "/\ + {branch}@{commit}\ + {status}", + branch = html_escape(&git.branch), + commit = html_escape(&git.commit_short), + ) + } else { + String::new() + }; + // Project switcher: show siblings as a dropdown if available + let switcher_html = if ctx.siblings.is_empty() { + String::new() + } else { + let mut s = String::from( + "\ +
    \ + \ +
    ", + ); + for sib in &ctx.siblings { + s.push_str(&format!( + "
    \ + {}\ + rivet -p {} serve -P {}\ +
    ", + html_escape(&sib.name), + html_escape(&sib.rel_path), + ctx.port, + )); + } + s.push_str("
    "); + s + }; + let context_bar = format!( + "
    \ + {project}{switcher_html}\ + /\ + {path}\ + {git_html}\ + Loaded {loaded_at}\ + \ +
    ", + project = html_escape(&ctx.project_name), + path = html_escape(&ctx.project_path), + loaded_at = html_escape(&ctx.loaded_at), + ); + Html(format!( + r##" + + + + +Rivet Dashboard + + + + + + + +
    +
    + +
    +{context_bar} +
    +{content} + +
    +
    +
    +
    +
    +
    + 🔍 + +
    +
    +
    Type to search artifacts and documents
    +
    +
    +
    +{GRAPH_JS} +{SEARCH_JS} +{AADL_JS} + +"## + )) +} + +// ── Routes ─────────────────────────────────────────────────────────────── + +#[derive(Debug, serde::Deserialize)] +struct IndexParams { + goto: Option, +} + +async fn index( + State(state): State, + Query(params): Query, +) -> Html { + let state = state.read().await; + // If goto param is set, render layout with empty content and let JS load the page + if let Some(ref goto) = params.goto { + let placeholder = format!( + "
    \ + ", + html_escape(goto), + html_escape(goto) + ); + return page_layout(&placeholder, &state); + } + let inner = stats_partial(&state); + page_layout(&inner, &state) +} + +async fn stats_view(State(state): State) -> Html { + let state = state.read().await; + Html(stats_partial(&state)) +} + +fn stats_partial(state: &AppState) -> String { + let store = &state.store; + let graph = &state.graph; + let doc_store = &state.doc_store; let mut types: Vec<&str> = store.types().collect(); types.sort(); @@ -265,46 +2060,90 @@ fn stats_partial(state: &AppState) -> String { .filter(|d| d.severity == Severity::Warning) .count(); - let mut html = String::from("

    Dashboard

    "); + // Project header + let mut html = format!( + "
    \ +

    Project Overview

    \ +

    {} — {} artifact types, {} traceability rules

    \ +
    ", + html_escape(&state.context.project_name), + types.len(), + state.schema.traceability_rules.len(), + ); - // Summary cards + // Summary cards with colored accents html.push_str("
    "); html.push_str(&format!( - "
    {}
    Artifacts
    ", + "
    {}
    Artifacts
    ", store.len() )); html.push_str(&format!( - "
    {}
    Types
    ", + "
    {}
    Types
    ", types.len() )); html.push_str(&format!( - "
    {}
    Orphans
    ", + "
    {}
    Orphans
    ", orphans.len() )); html.push_str(&format!( - "
    {}
    Errors
    ", + "
    {}
    Errors
    ", errors )); html.push_str(&format!( - "
    {}
    Warnings
    ", + "
    {}
    Warnings
    ", warnings )); html.push_str(&format!( - "
    {}
    Broken Links
    ", + "
    {}
    Broken Links
    ", graph.broken.len() )); + html.push_str(&format!( + "
    {}
    Documents
    ", + doc_store.len() + )); html.push_str("
    "); // By-type table html.push_str("

    Artifacts by Type

    "); for t in &types { html.push_str(&format!( - "", + "", + badge_for_type(t), store.count_by_type(t) )); } html.push_str("
    TypeCount
    {t}{}
    {}{}
    "); + // Status breakdown + let mut status_counts: BTreeMap = BTreeMap::new(); + for a in store.iter() { + let s = a.status.as_deref().unwrap_or("unknown"); + *status_counts.entry(s.to_string()).or_default() += 1; + } + let total_artifacts = store.len().max(1); + html.push_str("

    Status Distribution

    "); + for (status, count) in &status_counts { + let pct = (*count as f64 / total_artifacts as f64) * 100.0; + let bar_color = match status.as_str() { + "approved" => "#15713a", + "draft" => "#b8860b", + "obsolete" => "#c62828", + "unknown" => "#9898a6", + _ => "#3a86ff", + }; + html.push_str(&format!( + "
    \ +
    {}
    \ +
    \ +
    \ +
    \ +
    {count}
    \ +
    ", + html_escape(status), + )); + } + html.push_str("
    "); + // Orphans if !orphans.is_empty() { html.push_str("

    Orphan Artifacts (no links)

    "); @@ -316,20 +2155,157 @@ fn stats_partial(state: &AppState) -> String { html.push_str("
    ID
    "); } + // ── Coverage summary card ──────────────────────────────────────── + let cov_report = coverage::compute_coverage(store, &state.schema, graph); + if !cov_report.entries.is_empty() { + let overall = cov_report.overall_coverage(); + let cov_color = if overall >= 80.0 { + "#15713a" + } else if overall >= 50.0 { + "#b8860b" + } else { + "#c62828" + }; + let total_covered: usize = cov_report.entries.iter().map(|e| e.covered).sum(); + let total_items: usize = cov_report.entries.iter().map(|e| e.total).sum(); + html.push_str(&format!( + "
    \ +

    Traceability Coverage

    \ +
    \ +
    {overall:.0}%
    \ +
    \ +
    \ +
    \ +
    \ +
    \ + {total_covered} / {total_items} artifacts covered across {} rules\ +
    \ +
    \ +
    \ + \ + View full coverage report →\ +
    ", + cov_report.entries.len(), + )); + } + + // ── Test results summary ───────────────────────────────────────── + if !state.result_store.is_empty() { + let summary = state.result_store.summary(); + let rate = summary.pass_rate(); + let rate_color = if rate >= 80.0 { + "#15713a" + } else if rate >= 50.0 { + "#b8860b" + } else { + "#c62828" + }; + html.push_str("

    Test Results

    "); + html.push_str(&format!( + "
    \ +
    {rate:.0}%
    \ +
    \ +
    \ +
    \ +
    \ +
    \ +
    " + )); + html.push_str("
    "); + html.push_str(&format!( + "{} runs\ + {} passed\ + {} failed", + summary.total_runs, summary.pass_count, summary.fail_count, + )); + if summary.skip_count > 0 { + html.push_str(&format!( + "{} skipped", + summary.skip_count, + )); + } + if summary.blocked_count > 0 { + html.push_str(&format!( + "{} blocked", + summary.blocked_count, + )); + } + html.push_str("
    "); + html.push_str( + "\ + View all test runs →", + ); + html.push_str("
    "); + } + + // ── Quick links ────────────────────────────────────────────────── + // Count verifiable types for the verification link badge + let ver_count = { + let mut count = 0usize; + for rule in &state.schema.traceability_rules { + if rule.required_backlink.as_deref() == Some("verifies") { + count += store.by_type(&rule.source_type).len(); + } + } + count + }; + + html.push_str( + "
    \ +

    Quick Links

    \ +
    ", + ); + html.push_str(&format!( + "\ +
    Verification
    \ +
    {ver_count} requirements
    \ +
    ", + )); + html.push_str(&format!( + "\ +
    Documents
    \ +
    {} loaded
    \ +
    ", + doc_store.len(), + )); + html.push_str( + "\ +
    Traceability Graph
    \ +
    Full link graph
    \ +
    ", + ); + html.push_str("
    "); + html } // ── Artifacts ──────────────────────────────────────────────────────────── -async fn artifacts_list(State(state): State>) -> Html { +async fn artifacts_list(State(state): State) -> Html { + let state = state.read().await; let store = &state.store; let mut artifacts: Vec<_> = store.iter().collect(); artifacts.sort_by(|a, b| a.id.cmp(&b.id)); let mut html = String::from("

    Artifacts

    "); + // Client-side filter input + html.push_str("
    \ + \ + \ +
    "); html.push_str( - "", + "
    IDTypeTitleStatusLinks
    ", ); for a in &artifacts { @@ -342,13 +2318,13 @@ async fn artifacts_list(State(state): State>) -> Html { }; html.push_str(&format!( "\ - \ + \ \ \ ", html_escape(&a.id), html_escape(&a.id), - html_escape(&a.artifact_type), + badge_for_type(&a.artifact_type), html_escape(&a.title), status_badge, a.links.len() @@ -360,14 +2336,91 @@ async fn artifacts_list(State(state): State>) -> Html { "

    {} artifacts total

    ", artifacts.len() )); + // Inline filter script + html.push_str( + "", + ); Html(html) } -async fn artifact_detail( - State(state): State>, +/// Compact preview tooltip for an artifact — loaded on hover. +async fn artifact_preview( + State(state): State, Path(id): Path, ) -> Html { + let state = state.read().await; + let store = &state.store; + let graph = &state.graph; + + let Some(artifact) = store.get(&id) else { + return Html(format!( + "
    {}
    Not found
    ", + html_escape(&id) + )); + }; + + let mut html = String::from("
    "); + html.push_str(&format!( + "
    {} {}
    ", + badge_for_type(&artifact.artifact_type), + html_escape(&artifact.id) + )); + html.push_str(&format!( + "
    {}
    ", + html_escape(&artifact.title) + )); + if let Some(status) = &artifact.status { + let cls = match status.as_str() { + "approved" => "badge-ok", + "draft" => "badge-warn", + "obsolete" => "badge-error", + _ => "badge-info", + }; + html.push_str(&format!( + "{} ", + html_escape(status) + )); + } + if let Some(desc) = &artifact.description { + let snippet: String = desc.chars().take(160).collect(); + let ellip = if desc.len() > 160 { "..." } else { "" }; + html.push_str(&format!( + "
    {}{ellip}
    ", + html_escape(&snippet) + )); + } + let fwd = artifact.links.len(); + let back = graph.backlinks_to(&id).len(); + if fwd > 0 || back > 0 { + html.push_str(&format!( + "
    {fwd} outgoing, {back} incoming
    " + )); + } + if !artifact.tags.is_empty() { + let tags: Vec = artifact + .tags + .iter() + .map(|t| format!("{}", html_escape(t))) + .collect(); + html.push_str(&format!( + "
    {}
    ", + tags.join(" ") + )); + } + html.push_str("
    "); + Html(html) +} + +async fn artifact_detail(State(state): State, Path(id): Path) -> Html { + let state = state.read().await; let store = &state.store; let graph = &state.graph; @@ -379,9 +2432,9 @@ async fn artifact_detail( }; let mut html = format!( - "

    {}

    {}

    ", + "

    {}

    {}

    ", html_escape(&artifact.id), - html_escape(&artifact.artifact_type) + badge_for_type(&artifact.artifact_type) ); html.push_str("
    "); @@ -407,10 +2460,10 @@ async fn artifact_detail( html.push_str(&format!("
    Tags
    {}
    ", tags.join(" "))); } - // Extra fields + // Extra fields — detect file:line source references and make them clickable for (key, value) in &artifact.fields { let val = match value { - serde_yaml::Value::String(s) => html_escape(s), + serde_yaml::Value::String(s) => linkify_source_refs(&html_escape(s)), other => html_escape(&format!("{other:?}")), }; html.push_str(&format!("
    {}
    {}
    ", html_escape(key), val)); @@ -459,11 +2512,35 @@ async fn artifact_detail( html.push_str("
    IDTypeTitleStatusLinks
    {}{}{}{}{}{}
    "); } - // Show in graph link + // AADL diagram highlighting data + let mut aadl_links = Vec::new(); + for link in &artifact.links { + if link.target.starts_with("AADL-") { + aadl_links.push(link.target.clone()); + } + } + for bl in graph.backlinks_to(&id) { + if bl.source.starts_with("AADL-") { + aadl_links.push(bl.source.clone()); + } + } + if id.starts_with("AADL-") { + aadl_links.push(id.clone()); + } + if !aadl_links.is_empty() { + let json = serde_json::to_string(&aadl_links).unwrap_or_default(); + html.push_str(&format!( + "", + json + )); + } + + // Action buttons html.push_str(&format!( - r##"

    Show in graph -  |  - ← Back to artifacts

    "##, + r##""##, id_esc = html_escape(&id), )); @@ -487,9 +2564,10 @@ fn default_depth() -> usize { /// Build a filtered subgraph based on query params and return SVG. async fn graph_view( - State(state): State>, + State(state): State, Query(params): Query, ) -> Html { + let state = state.read().await; let store = &state.store; let link_graph = &state.graph; let pg = link_graph.graph(); @@ -528,11 +2606,24 @@ async fn graph_view( let colors = type_color_map(); let svg_opts = SvgOptions { - type_colors: colors, + type_colors: colors.clone(), highlight: params.focus.clone().filter(|s| !s.is_empty()), + interactive: true, + base_url: Some("/artifacts".into()), + background: Some("#fafbfc".into()), + font_size: 12.0, + edge_color: "#888".into(), ..SvgOptions::default() }; + let layout_opts = LayoutOptions { + node_width: 200.0, + node_height: 56.0, + rank_separation: 90.0, + node_separation: 30.0, + ..Default::default() + }; + let gl = pgv_layout::layout( &sub, &|_idx, n| { @@ -544,8 +2635,8 @@ async fn graph_view( .get(n.as_str()) .map(|a| a.title.clone()) .unwrap_or_default(); - let sublabel = if title.len() > 24 { - Some(format!("{}...", &title[..22])) + let sublabel = if title.len() > 28 { + Some(format!("{}...", &title[..26])) } else if title.is_empty() { None } else { @@ -559,18 +2650,28 @@ async fn graph_view( } }, &|_idx, e| EdgeInfo { label: e.clone() }, - &LayoutOptions::default(), + &layout_opts, ); let svg = render_svg(&gl, &svg_opts); + // Collect which types are actually present for the legend + let present_types: std::collections::BTreeSet = sub + .node_indices() + .filter_map(|idx| { + store + .get(sub[idx].as_str()) + .map(|a| a.artifact_type.clone()) + }) + .collect(); + // Build filter controls - let mut html = String::from("

    Graph

    "); + let mut html = String::from("

    Traceability Graph

    "); // Filter form html.push_str("
    "); html.push_str( - "
    ", + "", ); // Type checkboxes @@ -598,35 +2699,67 @@ async fn graph_view( let focus_val = params.focus.as_deref().unwrap_or(""); html.push_str(&format!( "

    \ -
    ", +
    ", html_escape(focus_val) )); + // Datalist for autocomplete + html.push_str(""); + for a in store.iter() { + html.push_str(&format!(""); + // Depth slider let depth_val = if params.depth > 0 { params.depth } else { 3 }; html.push_str(&format!( - "

    \ -
    " + "

    \ +
    " )); // Link types input let lt_val = params.link_types.as_deref().unwrap_or(""); html.push_str(&format!( "

    \ -
    ", + ", html_escape(lt_val) )); html.push_str("

    "); - html.push_str(""); + html.push_str(""); + + // Legend + if !present_types.is_empty() { + html.push_str("
    "); + for t in &present_types { + let color = colors + .get(t.as_str()) + .map(|s| s.as_str()) + .unwrap_or("#e8e8e8"); + html.push_str(&format!( + "
    {t}
    " + )); + } + html.push_str("
    "); + } + html.push_str(""); - // SVG card - html.push_str("
    "); + // SVG card with zoom controls + html.push_str( + "
    \ +
    \ +
    \ + \ + \ + \ +
    ", + ); html.push_str(&svg); html.push_str("
    "); html.push_str(&format!( - "

    {} nodes, {} edges

    ", + "

    {} nodes, {} edges — scroll to zoom, drag to pan, click nodes to navigate

    ", gl.nodes.len(), gl.edges.len() )); @@ -647,10 +2780,11 @@ fn default_ego_hops() -> usize { } async fn artifact_graph( - State(state): State>, + State(state): State, Path(id): Path, Query(params): Query, ) -> Html { + let state = state.read().await; let store = &state.store; let link_graph = &state.graph; let pg = link_graph.graph(); @@ -668,11 +2802,24 @@ async fn artifact_graph( let colors = type_color_map(); let svg_opts = SvgOptions { - type_colors: colors, + type_colors: colors.clone(), highlight: Some(id.clone()), + interactive: true, + base_url: Some("/artifacts".into()), + background: Some("#fafbfc".into()), + font_size: 12.0, + edge_color: "#888".into(), ..SvgOptions::default() }; + let layout_opts = LayoutOptions { + node_width: 200.0, + node_height: 56.0, + rank_separation: 90.0, + node_separation: 30.0, + ..Default::default() + }; + let gl = pgv_layout::layout( &sub, &|_idx, n| { @@ -684,8 +2831,8 @@ async fn artifact_graph( .get(n.as_str()) .map(|a| a.title.clone()) .unwrap_or_default(); - let sublabel = if title.len() > 24 { - Some(format!("{}...", &title[..22])) + let sublabel = if title.len() > 28 { + Some(format!("{}...", &title[..26])) } else if title.is_empty() { None } else { @@ -699,30 +2846,65 @@ async fn artifact_graph( } }, &|_idx, e| EdgeInfo { label: e.clone() }, - &LayoutOptions::default(), + &layout_opts, ); let svg = render_svg(&gl, &svg_opts); + // Collect present types for legend + let present_types: std::collections::BTreeSet = sub + .node_indices() + .filter_map(|idx| { + store + .get(sub[idx].as_str()) + .map(|a| a.artifact_type.clone()) + }) + .collect(); + let mut html = format!("

    Neighborhood of {}

    ", html_escape(&id),); - // Hop control + // Hop control + legend html.push_str("
    "); html.push_str(&format!( - "
    \ -

    \ -
    \ + "\ +

    \ +
    \

    \ -
    ", + ", id_esc = html_escape(&id), )); + // Legend + if !present_types.is_empty() { + html.push_str("
    "); + for t in &present_types { + let color = colors + .get(t.as_str()) + .map(|s| s.as_str()) + .unwrap_or("#e8e8e8"); + html.push_str(&format!( + "
    {t}
    " + )); + } + html.push_str("
    "); + } + html.push_str("
    "); - html.push_str("
    "); + // SVG with zoom controls + html.push_str( + "
    \ +
    \ +
    \ + \ + \ + \ +
    ", + ); html.push_str(&svg); html.push_str("
    "); html.push_str(&format!( - "

    {} nodes, {} edges ({}-hop neighborhood)

    ", + "

    {} nodes, {} edges ({}-hop neighborhood) — scroll to zoom, drag to pan, click nodes to navigate

    ", gl.nodes.len(), gl.edges.len(), hops @@ -829,7 +3011,8 @@ fn apply_filters_to_graph( // ── Validation ─────────────────────────────────────────────────────────── -async fn validate_view(State(state): State>) -> Html { +async fn validate_view(State(state): State) -> Html { + let state = state.read().await; let diagnostics = validate::validate(&state.store, &state.schema, &state.graph); let errors = diagnostics @@ -847,15 +3030,18 @@ async fn validate_view(State(state): State>) -> Html { let mut html = String::from("

    Validation Results

    "); - // Summary - let overall = if errors > 0 { - "FAIL" + // Colored summary bar + let total_issues = errors + warnings + infos; + if total_issues == 0 { + html.push_str("
    All checks passed
    "); } else { - "PASS" - }; - html.push_str(&format!( - "

    Status: {overall} — {errors} errors, {warnings} warnings, {infos} info

    " - )); + html.push_str(&format!( + "
    {total_issues} issue{} found — {errors} error{}, {warnings} warning{}, {infos} info
    ", + if total_issues != 1 { "s" } else { "" }, + if errors != 1 { "s" } else { "" }, + if warnings != 1 { "s" } else { "" }, + )); + } if diagnostics.is_empty() { html.push_str("

    No issues found.

    "); @@ -911,9 +3097,10 @@ struct MatrixParams { } async fn matrix_view( - State(state): State>, + State(state): State, Query(params): Query, ) -> Html { + let state = state.read().await; let store = &state.store; let mut types: Vec<&str> = store.types().collect(); @@ -923,7 +3110,7 @@ async fn matrix_view( let mut html = String::from("

    Traceability Matrix

    "); html.push_str("
    "); html.push_str( - "
    ", + "", ); // From select @@ -1029,11 +3216,3511 @@ async fn matrix_view( Html(html) } -// ── Helpers ────────────────────────────────────────────────────────────── +// ── Coverage ───────────────────────────────────────────────────────────── -fn html_escape(s: &str) -> String { - s.replace('&', "&") - .replace('<', "<") - .replace('>', ">") - .replace('"', """) +async fn coverage_view(State(state): State) -> Html { + let state = state.read().await; + let report = coverage::compute_coverage(&state.store, &state.schema, &state.graph); + let overall = report.overall_coverage(); + + let mut html = String::from("

    Traceability Coverage

    "); + + // Overall stat + let overall_color = if overall >= 80.0 { + "#15713a" + } else if overall >= 50.0 { + "#8b6914" + } else { + "#c62828" + }; + html.push_str("
    "); + html.push_str(&format!( + "
    {:.1}%
    Overall Coverage
    ", + overall + )); + html.push_str(&format!( + "
    {}
    Rules
    ", + report.entries.len() + )); + let fully_covered = report + .entries + .iter() + .filter(|e| e.covered == e.total) + .count(); + html.push_str(&format!( + "
    {}
    Fully Covered
    ", + fully_covered + )); + html.push_str("
    "); + + if report.entries.is_empty() { + html.push_str( + "

    No traceability rules defined in the schema.

    ", + ); + return Html(html); + } + + // Per-rule cards with coverage bars + html.push_str("

    Coverage by Rule

    "); + html.push_str(""); + + for entry in &report.entries { + let pct = entry.percentage(); + let (bar_color, badge_class) = if pct >= 80.0 { + ("#15713a", "badge-ok") + } else if pct >= 50.0 { + ("#b8860b", "badge-warn") + } else { + ("#c62828", "badge-error") + }; + + let dir_label = match entry.direction { + coverage::CoverageDirection::Forward => "forward", + coverage::CoverageDirection::Backward => "backward", + }; + + html.push_str(&format!( + "\ + \ + \ + \ + \ + \ + \ + ", + html_escape(&entry.description), + html_escape(&entry.rule_name), + badge_for_type(&entry.source_type), + html_escape(&entry.link_type), + dir_label, + entry.covered, + entry.total, + pct, + )); + } + + html.push_str("
    RuleSource TypeLinkDirectionCoverageProgress
    {}{}{}{}{}/{} ({:.1}%)\ +
    \ +
    \ +
    \ +
    "); + + // Uncovered artifacts + let has_uncovered = report.entries.iter().any(|e| !e.uncovered_ids.is_empty()); + if has_uncovered { + html.push_str("

    Uncovered Artifacts

    "); + + for entry in &report.entries { + if entry.uncovered_ids.is_empty() { + continue; + } + html.push_str(&format!( + "

    {} ({} uncovered)

    ", + html_escape(&entry.rule_name), + entry.uncovered_ids.len() + )); + html.push_str(""); + for id in &entry.uncovered_ids { + let title = state.store.get(id).map(|a| a.title.as_str()).unwrap_or("-"); + html.push_str(&format!( + "\ + ", + id_esc = html_escape(id), + title_esc = html_escape(title), + )); + } + html.push_str("
    IDTitle
    {id_esc}{title_esc}
    "); + } + + html.push_str("
    "); + } + + Html(html) +} + +// ── Documents ──────────────────────────────────────────────────────────── + +async fn documents_list(State(state): State) -> Html { + let state = state.read().await; + let doc_store = &state.doc_store; + + let mut html = String::from("

    Documents

    "); + + if doc_store.is_empty() { + html.push_str("

    No documents loaded. Add markdown files with YAML frontmatter to a docs/ directory and reference it in rivet.yaml:

    \ +
    docs:\n  - docs
    "); + return Html(html); + } + + html.push_str( + "", + ); + + for doc in doc_store.iter() { + let status = doc.status.as_deref().unwrap_or("-"); + let status_badge = match status { + "approved" => format!("{status}"), + "draft" => format!("{status}"), + _ => format!("{status}"), + }; + html.push_str(&format!( + "\ + \ + \ + \ + ", + html_escape(&doc.id), + html_escape(&doc.id), + badge_for_type(&doc.doc_type), + html_escape(&doc.title), + status_badge, + doc.references.len(), + )); + } + + html.push_str("
    IDTypeTitleStatusRefs
    {}{}{}{}{}
    "); + html.push_str(&format!( + "

    {} documents, {} total artifact references

    ", + doc_store.len(), + doc_store.all_references().len() + )); + + Html(html) +} + +async fn document_detail(State(state): State, Path(id): Path) -> Html { + let state = state.read().await; + let doc_store = &state.doc_store; + let store = &state.store; + + let Some(doc) = doc_store.get(&id) else { + return Html(format!( + "

    Not Found

    Document {} does not exist.

    ", + html_escape(&id) + )); + }; + + let mut html = String::new(); + + // Header with metadata + html.push_str(&format!("

    {}

    ", html_escape(&doc.title))); + + html.push_str("
    "); + html.push_str(&badge_for_type(&doc.doc_type)); + if let Some(status) = &doc.status { + let badge_class = match status.as_str() { + "approved" => "badge-ok", + "draft" => "badge-warn", + _ => "badge-info", + }; + html.push_str(&format!( + "{}", + html_escape(status) + )); + } + html.push_str(&format!( + "{} artifact references", + doc.references.len() + )); + html.push_str("
    "); + + // Table of contents + let toc_sections: Vec<_> = doc.sections.iter().filter(|s| s.level >= 2).collect(); + if toc_sections.len() > 2 { + html.push_str("
    Contents
      "); + for sec in &toc_sections { + let class = match sec.level { + 2 => "toc-h2", + 3 => "toc-h3", + _ => "toc-h4", + }; + let ref_count = if sec.artifact_ids.is_empty() { + String::new() + } else { + format!(" ({})", sec.artifact_ids.len()) + }; + html.push_str(&format!( + "
    • {}{ref_count}
    • ", + html_escape(&sec.title), + )); + } + html.push_str("
    "); + } + + // Rendered body + html.push_str("
    "); + let body_html = document::render_to_html(doc, |aid| store.contains(aid)); + html.push_str(&body_html); + html.push_str("
    "); + + // Glossary + if !doc.glossary.is_empty() { + html.push_str("

    Glossary

    "); + for (term, definition) in &doc.glossary { + html.push_str(&format!( + "
    {}
    {}
    ", + html_escape(term), + html_escape(definition) + )); + } + html.push_str("
    "); + } + + // Referenced artifacts summary + if !doc.references.is_empty() { + html.push_str("

    Referenced Artifacts

    "); + html.push_str(""); + + let mut seen = std::collections::HashSet::new(); + for reference in &doc.references { + if !seen.insert(&reference.artifact_id) { + continue; + } + if let Some(artifact) = store.get(&reference.artifact_id) { + let status = artifact.status.as_deref().unwrap_or("-"); + html.push_str(&format!( + "\ + \ + \ + ", + html_escape(&artifact.id), + html_escape(&artifact.id), + badge_for_type(&artifact.artifact_type), + html_escape(&artifact.title), + html_escape(status), + )); + } else { + html.push_str(&format!( + "\ + ", + html_escape(&reference.artifact_id), + )); + } + } + + html.push_str("
    IDTypeTitleStatus
    {}{}{}{}
    {}not found
    "); + } + + html.push_str( + "

    ← Back to documents

    ", + ); + + Html(html) +} + +// ── Search ─────────────────────────────────────────────────────────────── + +#[derive(Debug, serde::Deserialize)] +struct SearchParams { + q: Option, +} + +/// A single search hit with context about which field matched. +struct SearchHit { + id: String, + title: String, + kind: &'static str, + type_name: String, + matched_field: &'static str, + context: String, + url: String, +} + +async fn search_view( + State(state): State, + Query(params): Query, +) -> Html { + let state = state.read().await; + let query = match params.q.as_deref() { + Some(q) if !q.trim().is_empty() => q.trim(), + _ => { + return Html(String::from( + "
    Type to search artifacts and documents
    ", + )); + } + }; + + let query_lower = query.to_lowercase(); + let mut hits: Vec = Vec::new(); + + // Search artifacts + for artifact in state.store.iter() { + let id_lower = artifact.id.to_lowercase(); + let title_lower = artifact.title.to_lowercase(); + let type_lower = artifact.artifact_type.to_lowercase(); + + if id_lower.contains(&query_lower) { + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "id", + context: artifact.id.clone(), + url: format!("/artifacts/{}", artifact.id), + }); + continue; + } + if title_lower.contains(&query_lower) { + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "title", + context: artifact.title.clone(), + url: format!("/artifacts/{}", artifact.id), + }); + continue; + } + if type_lower.contains(&query_lower) { + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "type", + context: artifact.artifact_type.clone(), + url: format!("/artifacts/{}", artifact.id), + }); + continue; + } + if let Some(desc) = &artifact.description { + if desc.to_lowercase().contains(&query_lower) { + let desc_lower = desc.to_lowercase(); + let pos = desc_lower.find(&query_lower).unwrap_or(0); + let start = pos.saturating_sub(40); + let end = (pos + query.len() + 40).min(desc.len()); + let mut snippet = String::new(); + if start > 0 { + snippet.push_str("..."); + } + snippet.push_str(&desc[start..end]); + if end < desc.len() { + snippet.push_str("..."); + } + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "description", + context: snippet, + url: format!("/artifacts/{}", artifact.id), + }); + 
continue; + } + } + for tag in &artifact.tags { + if tag.to_lowercase().contains(&query_lower) { + hits.push(SearchHit { + id: artifact.id.clone(), + title: artifact.title.clone(), + kind: "artifact", + type_name: artifact.artifact_type.clone(), + matched_field: "tag", + context: tag.clone(), + url: format!("/artifacts/{}", artifact.id), + }); + break; + } + } + } + + // Search documents + for doc in state.doc_store.iter() { + let id_lower = doc.id.to_lowercase(); + let title_lower = doc.title.to_lowercase(); + + if id_lower.contains(&query_lower) { + hits.push(SearchHit { + id: doc.id.clone(), + title: doc.title.clone(), + kind: "document", + type_name: doc.doc_type.clone(), + matched_field: "id", + context: doc.id.clone(), + url: format!("/documents/{}", doc.id), + }); + continue; + } + if title_lower.contains(&query_lower) { + hits.push(SearchHit { + id: doc.id.clone(), + title: doc.title.clone(), + kind: "document", + type_name: doc.doc_type.clone(), + matched_field: "title", + context: doc.title.clone(), + url: format!("/documents/{}", doc.id), + }); + } + } + + // Sort: exact id match first, then by kind, then by id + hits.sort_by(|a, b| { + let a_exact = a.id.to_lowercase() == query_lower; + let b_exact = b.id.to_lowercase() == query_lower; + b_exact + .cmp(&a_exact) + .then_with(|| a.kind.cmp(b.kind)) + .then_with(|| a.id.cmp(&b.id)) + }); + + hits.truncate(50); + + if hits.is_empty() { + return Html(format!( + "
    No results for “{}”
    ", + html_escape(query) + )); + } + + // Group by kind + let mut html = String::new(); + + let artifact_hits: Vec<&SearchHit> = hits.iter().filter(|h| h.kind == "artifact").collect(); + let document_hits: Vec<&SearchHit> = hits.iter().filter(|h| h.kind == "document").collect(); + + if !artifact_hits.is_empty() { + html.push_str("
    "); + html.push_str("
    Artifacts
    "); + for hit in &artifact_hits { + render_search_hit(&mut html, hit, query); + } + html.push_str("
    "); + } + + if !document_hits.is_empty() { + html.push_str("
    "); + html.push_str("
    Documents
    "); + for hit in &document_hits { + render_search_hit(&mut html, hit, query); + } + html.push_str("
    "); + } + + Html(html) +} + +/// Render a single search result item with highlighted match context. +fn render_search_hit(html: &mut String, hit: &SearchHit, query: &str) { + let icon = match hit.kind { + "artifact" => "♦", + "document" => "☰", + _ => "•", + }; + + let highlighted_title = highlight_match(&html_escape(&hit.title), query); + + let field_label = match hit.matched_field { + "id" => "id", + "title" => "title", + "description" => "description", + "type" => "type", + "tag" => "tag", + _ => "", + }; + + let context_display = if hit.matched_field == "title" { + String::new() + } else { + let escaped = html_escape(&hit.context); + format!(" — {}", highlight_match(&escaped, query)) + }; + + html.push_str(&format!( + "
    \ +
    {icon}
    \ +
    \ +
    {highlighted_title}
    \ +
    {}{context_display}
    \ +
    \ +
    {field_label}
    \ +
    ", + html_escape(&hit.url), + html_escape(&hit.type_name), + )); +} + +/// Case-insensitive highlight: wraps matching substrings in ``. +fn highlight_match(text: &str, query: &str) -> String { + let text_lower = text.to_lowercase(); + let query_lower = query.to_lowercase(); + let mut result = String::with_capacity(text.len() + 16); + let mut start = 0; + while let Some(pos) = text_lower[start..].find(&query_lower) { + let abs = start + pos; + result.push_str(&text[start..abs]); + result.push_str(""); + result.push_str(&text[abs..abs + query.len()]); + result.push_str(""); + start = abs + query.len(); + } + result.push_str(&text[start..]); + result +} + +// ── Verification ───────────────────────────────────────────────────────── + +async fn verification_view(State(state): State) -> Html { + let state = state.read().await; + let store = &state.store; + let graph = &state.graph; + let schema = &state.schema; + + // Find types that need verification (have required-backlink: verifies rules) + let mut verifiable_types: Vec<(String, String)> = Vec::new(); // (source_type, rule_name) + for rule in &schema.traceability_rules { + if rule.required_backlink.as_deref() == Some("verifies") { + verifiable_types.push((rule.source_type.clone(), rule.name.clone())); + } + } + + // Also find types that have forward `verifies` links (the verifiers themselves) + // to auto-discover if no rules match + if verifiable_types.is_empty() { + // Fallback: find all artifact types that have backlinks of type "verifies" + let mut seen = std::collections::HashSet::new(); + for artifact in store.iter() { + let backlinks = graph.backlinks_to(&artifact.id); + for bl in backlinks { + if bl.link_type == "verifies" && seen.insert(artifact.artifact_type.clone()) { + verifiable_types.push((artifact.artifact_type.clone(), "verifies".to_string())); + } + } + } + } + + let mut html = String::from("

    Verification

    "); + + if verifiable_types.is_empty() { + html.push_str("

    No verification traceability rules found in the schema. \ + Add required-backlink: verifies rules to your schema to enable the verification dashboard.

    "); + return Html(html); + } + + // Compute stats + let mut total_reqs = 0usize; + let mut verified_reqs = 0usize; + + // Group by verifiable type + for (source_type, _rule_name) in &verifiable_types { + let source_ids = store.by_type(source_type); + if source_ids.is_empty() { + continue; + } + + total_reqs += source_ids.len(); + + // Collect requirement → verifier mapping + struct ReqRow { + id: String, + title: String, + status: String, + verifiers: Vec, + } + struct VerifierInfo { + id: String, + title: String, + artifact_type: String, + method: String, + steps: Vec, + latest_result: Option<(String, rivet_core::results::TestStatus)>, + } + struct StepInfo { + step: String, + action: String, + expected: String, + } + + let mut rows: Vec = Vec::new(); + + for req_id in source_ids { + let req = store.get(req_id).unwrap(); + let backlinks = graph.backlinks_to(req_id); + let ver_links: Vec<_> = backlinks + .iter() + .filter(|bl| bl.link_type == "verifies") + .collect(); + + if !ver_links.is_empty() { + verified_reqs += 1; + } + + let mut verifiers = Vec::new(); + for bl in &ver_links { + if let Some(ver_artifact) = store.get(&bl.source) { + let method = ver_artifact + .fields + .get("method") + .and_then(|v| v.as_str()) + .unwrap_or("unspecified") + .to_string(); + + let steps = ver_artifact + .fields + .get("steps") + .and_then(|v| v.as_sequence()) + .map(|seq| { + seq.iter() + .map(|s| { + let step = s + .get("step") + .map(|v| { + if let Some(n) = v.as_u64() { + n.to_string() + } else if let Some(s) = v.as_str() { + s.to_string() + } else { + format!("{v:?}") + } + }) + .unwrap_or_default(); + let action = s + .get("action") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let expected = s + .get("expected") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + StepInfo { + step, + action, + expected, + } + }) + .collect() + }) + .unwrap_or_default(); + + // Look up latest test result + let latest_result = state + .result_store + 
.latest_for(&bl.source) + .map(|(_run, r)| (r.status.to_string(), r.status.clone())); + + verifiers.push(VerifierInfo { + id: ver_artifact.id.clone(), + title: ver_artifact.title.clone(), + artifact_type: ver_artifact.artifact_type.clone(), + method, + steps, + latest_result, + }); + } + } + + rows.push(ReqRow { + id: req.id.clone(), + title: req.title.clone(), + status: req.status.as_deref().unwrap_or("-").to_string(), + verifiers, + }); + } + + rows.sort_by(|a, b| a.id.cmp(&b.id)); + + // Render this type's section + let type_verified = rows.iter().filter(|r| !r.verifiers.is_empty()).count(); + let type_total = rows.len(); + let pct = if type_total > 0 { + (type_verified as f64 / type_total as f64) * 100.0 + } else { + 100.0 + }; + + html.push_str("
    "); + html.push_str(&format!( + "
    \ + {} \ + verified by \ + {type_verified}/{type_total} ({pct:.0}%)
    ", + badge_for_type(source_type), + )); + + for row in &rows { + let ver_count = row.verifiers.len(); + let has_verifiers = ver_count > 0; + let coverage_badge = if has_verifiers { + format!( + "{ver_count} verifier{}", + if ver_count > 1 { "s" } else { "" } + ) + } else { + "unverified".to_string() + }; + + html.push_str("
    "); + html.push_str(&format!( + "\ + {id}\ + {title}\ + {status}\ + {coverage_badge}", + id = html_escape(&row.id), + title = html_escape(&row.title), + status = html_escape(&row.status), + )); + + // Show latest result dots for verifiers + for v in &row.verifiers { + if let Some((_, ref status)) = v.latest_result { + let dot_class = match status { + rivet_core::results::TestStatus::Pass => "result-dot-pass", + rivet_core::results::TestStatus::Fail => "result-dot-fail", + rivet_core::results::TestStatus::Skip => "result-dot-skip", + rivet_core::results::TestStatus::Error => "result-dot-error", + rivet_core::results::TestStatus::Blocked => "result-dot-blocked", + }; + html.push_str(&format!( + "", + html_escape(&v.id), + status + )); + } + } + + html.push_str(""); + + if has_verifiers { + html.push_str("
    "); + for v in &row.verifiers { + html.push_str(&format!( + "

    \ + {id} \ + {type_badge} \ + {method} \ + — {title}", + id = html_escape(&v.id), + type_badge = badge_for_type(&v.artifact_type), + method = html_escape(&v.method), + title = html_escape(&v.title), + )); + if let Some((ref status_str, _)) = v.latest_result { + html.push_str(&format!( + " {status_str}", + cls = match status_str.as_str() { + "pass" => "ok", + "fail" | "error" => "error", + "skip" | "blocked" => "warn", + _ => "info", + }, + )); + } + html.push_str("

    "); + + if !v.steps.is_empty() { + html.push_str( + "\ + \ + ", + ); + for s in &v.steps { + html.push_str(&format!( + "", + html_escape(&s.step), + html_escape(&s.action), + html_escape(&s.expected), + )); + } + html.push_str("
    #ActionExpected
    {}{}{}
    "); + } + } + html.push_str("
    "); + } + + html.push_str("
    "); + } + + html.push_str("
    "); + } + + // Summary stats + let ver_pct = if total_reqs > 0 { + (verified_reqs as f64 / total_reqs as f64) * 100.0 + } else { + 100.0 + }; + let summary = format!( + "
    \ +
    {total_reqs}
    Requirements
    \ +
    {verified_reqs}
    Verified
    \ +
    {}
    Unverified
    \ +
    {ver_pct:.0}%
    Coverage
    \ +
    ", + total_reqs - verified_reqs, + ); + + // Insert summary before the level cards + html = format!( + "

    Verification

    {summary}{}", + &html["

    Verification

    ".len()..] + ); + + Html(html) +} + +// ── STPA ───────────────────────────────────────────────────────────────── + +async fn stpa_view(State(state): State) -> Html { + let state = state.read().await; + stpa_partial(&state) +} + +fn stpa_partial(state: &AppState) -> Html { + let store = &state.store; + let graph = &state.graph; + + let stpa_types = [ + ("loss", "Losses"), + ("hazard", "Hazards"), + ("sub-hazard", "Sub-Hazards"), + ("system-constraint", "System Constraints"), + ("controller", "Controllers"), + ("controlled-process", "Controlled Processes"), + ("control-action", "Control Actions"), + ("uca", "UCAs"), + ("controller-constraint", "Controller Constraints"), + ("loss-scenario", "Loss Scenarios"), + ]; + + let total: usize = stpa_types.iter().map(|(t, _)| store.count_by_type(t)).sum(); + + let mut html = String::from("

    STPA Analysis

    "); + + if total == 0 { + html.push_str( + "
    \ +

    No STPA artifacts found in this project.

    \ +

    \ + Add artifacts of types loss, hazard, uca, etc. \ + using the stpa schema to enable the STPA analysis dashboard.

    \ +
    ", + ); + return Html(html); + } + + // Summary stat cards + html.push_str("
    "); + let stat_colors = [ + "#dc3545", "#fd7e14", "#fd7e14", "#20c997", "#6f42c1", "#6610f2", "#17a2b8", "#e83e8c", + "#20c997", "#e83e8c", + ]; + for (i, (type_name, label)) in stpa_types.iter().enumerate() { + let count = store.count_by_type(type_name); + if count == 0 { + continue; + } + let color = stat_colors[i]; + html.push_str(&format!( + "
    \ +
    {count}
    \ +
    {label}
    " + )); + } + html.push_str("
    "); + + // Hierarchy tree view + html.push_str("

    STPA Hierarchy

    "); + + let losses = store.by_type("loss"); + if losses.is_empty() { + html.push_str( + "

    No losses defined. The STPA hierarchy starts with losses.

    ", + ); + } + + let mut sorted_losses: Vec<&str> = losses.iter().map(|s| s.as_str()).collect(); + sorted_losses.sort(); + + for loss_id in &sorted_losses { + let Some(loss) = store.get(loss_id) else { + continue; + }; + html.push_str("
    "); + html.push_str(" "); + html.push_str(&badge_for_type("loss")); + html.push_str(&format!( + " {id}\ + {title}", + id = html_escape(loss_id), + title = html_escape(&loss.title), + )); + html.push_str(""); + + let hazard_backlinks = graph.backlinks_of_type(loss_id, "leads-to-loss"); + if !hazard_backlinks.is_empty() { + html.push_str("
    "); + let mut hazard_ids: Vec<&str> = hazard_backlinks + .iter() + .map(|bl| bl.source.as_str()) + .collect(); + hazard_ids.sort(); + hazard_ids.dedup(); + for hazard_id in &hazard_ids { + let Some(hazard) = store.get(hazard_id) else { + continue; + }; + html.push_str("
    "); + html.push_str(" "); + html.push_str("leads-to-loss"); + html.push_str(&badge_for_type(&hazard.artifact_type)); + html.push_str(&format!( + " {id}\ + {title}", + id = html_escape(hazard_id), + title = html_escape(&hazard.title), + )); + html.push_str(""); + + let constraint_bls = graph.backlinks_of_type(hazard_id, "prevents"); + let uca_bls = graph.backlinks_of_type(hazard_id, "leads-to-hazard"); + + if !constraint_bls.is_empty() || !uca_bls.is_empty() { + html.push_str("
    "); + + // System Constraints + let mut sc_ids: Vec<&str> = constraint_bls + .iter() + .filter(|bl| { + store + .get(&bl.source) + .map(|a| a.artifact_type == "system-constraint") + .unwrap_or(false) + }) + .map(|bl| bl.source.as_str()) + .collect(); + sc_ids.sort(); + sc_ids.dedup(); + for sc_id in &sc_ids { + let Some(sc) = store.get(sc_id) else { continue }; + html.push_str(&format!( + "
    \ + prevents{badge}\ + {id}\ + {title}\ +
    ", + badge = badge_for_type("system-constraint"), + id = html_escape(sc_id), + title = html_escape(&sc.title), + )); + } + + // UCAs + let mut uca_ids: Vec<&str> = uca_bls + .iter() + .filter(|bl| { + store + .get(&bl.source) + .map(|a| a.artifact_type == "uca") + .unwrap_or(false) + }) + .map(|bl| bl.source.as_str()) + .collect(); + uca_ids.sort(); + uca_ids.dedup(); + for uca_id in &uca_ids { + let Some(uca) = store.get(uca_id) else { + continue; + }; + // Collapse below level 2 + html.push_str("
    "); + html.push_str(" "); + html.push_str("leads-to-hazard"); + html.push_str(&badge_for_type("uca")); + html.push_str(&format!( + " {id}\ + {title}", + id = html_escape(uca_id), + title = html_escape(&uca.title), + )); + html.push_str(""); + + let cc_bls = graph.backlinks_of_type(uca_id, "inverts-uca"); + let ls_bls = graph.backlinks_of_type(uca_id, "caused-by-uca"); + + if !cc_bls.is_empty() || !ls_bls.is_empty() { + html.push_str("
    "); + // Controller Constraints + let mut cc_ids: Vec<&str> = + cc_bls.iter().map(|bl| bl.source.as_str()).collect(); + cc_ids.sort(); + cc_ids.dedup(); + for cc_id in &cc_ids { + let Some(cc) = store.get(cc_id) else { continue }; + html.push_str(&format!( + "
    \ + inverts-uca{badge}\ + {id}\ + {title}\ +
    ", + badge = badge_for_type("controller-constraint"), + id = html_escape(cc_id), + title = html_escape(&cc.title), + )); + } + // Loss Scenarios + let mut ls_ids: Vec<&str> = + ls_bls.iter().map(|bl| bl.source.as_str()).collect(); + ls_ids.sort(); + ls_ids.dedup(); + for ls_id in &ls_ids { + let Some(ls) = store.get(ls_id) else { continue }; + html.push_str(&format!( + "
    \ + caused-by-uca{badge}\ + {id}\ + {title}\ +
    ", + badge = badge_for_type("loss-scenario"), + id = html_escape(ls_id), + title = html_escape(&ls.title), + )); + } + html.push_str("
    "); // stpa-level (CC/LS) + } + html.push_str("
    "); // UCA + } + html.push_str("
    "); // stpa-level (SC/UCA) + } + html.push_str("
    "); // Hazard + } + html.push_str("
    "); // stpa-level (Hazards) + } + html.push_str("
    "); // Loss + } + + html.push_str("
    "); // stpa-tree, card + + // UCA Table + let uca_ids = store.by_type("uca"); + if !uca_ids.is_empty() { + html.push_str("

    Unsafe Control Actions

    "); + + struct UcaRow { + id: String, + title: String, + uca_type: String, + control_action: String, + linked_hazards: Vec, + } + + let mut rows: Vec = Vec::new(); + for uca_id in uca_ids { + let Some(uca) = store.get(uca_id) else { + continue; + }; + let uca_type = uca + .fields + .get("uca-type") + .and_then(|v| v.as_str()) + .unwrap_or("-") + .to_string(); + let controller_links: Vec<&str> = uca + .links + .iter() + .filter(|l| l.link_type == "issued-by") + .map(|l| l.target.as_str()) + .collect(); + let control_action = if let Some(ctrl_id) = controller_links.first() { + let ca_bls = graph.backlinks_of_type(ctrl_id, "issued-by"); + ca_bls + .iter() + .filter(|bl| { + store + .get(&bl.source) + .map(|a| a.artifact_type == "control-action") + .unwrap_or(false) + }) + .map(|bl| bl.source.clone()) + .next() + .unwrap_or_else(|| ctrl_id.to_string()) + } else { + "-".to_string() + }; + let hazards: Vec = uca + .links + .iter() + .filter(|l| l.link_type == "leads-to-hazard") + .map(|l| l.target.clone()) + .collect(); + rows.push(UcaRow { + id: uca_id.clone(), + title: uca.title.clone(), + uca_type, + control_action, + linked_hazards: hazards, + }); + } + + rows.sort_by(|a, b| { + a.control_action + .cmp(&b.control_action) + .then(a.id.cmp(&b.id)) + }); + + html.push_str( + "\ + \ + \ + ", + ); + + for row in &rows { + let type_class = match row.uca_type.as_str() { + "not-providing" => "uca-type-not-providing", + "providing" => "uca-type-providing", + "too-early-too-late" => "uca-type-too-early-too-late", + "stopped-too-soon" => "uca-type-stopped-too-soon", + _ => "", + }; + let type_badge = if type_class.is_empty() { + html_escape(&row.uca_type) + } else { + format!( + "{}", + html_escape(&row.uca_type), + ) + }; + let hazard_links: Vec = row + .linked_hazards + .iter() + .map(|h| { + format!( + "{id}", + id = html_escape(h), + ) + }) + .collect(); + let ca_display = if row.control_action == "-" { + "-".to_string() + } else { + format!( + "{id}", + id = 
html_escape(&row.control_action), + ) + }; + html.push_str(&format!( + "\ + \ + \ + \ + \ + ", + id = html_escape(&row.id), + ca = ca_display, + title = html_escape(&row.title), + hazards = hazard_links.join(", "), + )); + } + + html.push_str("
    IDControl ActionUCA TypeDescriptionLinked Hazards
    {id}{ca}{type_badge}{title}{hazards}
    "); + } + + html.push_str(&format!( + "

    {total} STPA artifacts total

    " + )); + + Html(html) +} + +// ── Results ────────────────────────────────────────────────────────────── + +async fn results_view(State(state): State) -> Html { + let state = state.read().await; + let result_store = &state.result_store; + + let mut html = String::from("

    Test Results

    "); + + if result_store.is_empty() { + html.push_str("

    No test results loaded. Add result YAML files to a results/ directory and reference it in rivet.yaml:

    \ +
    results: results
    \ +

    Each result file contains a run: metadata block and a results: list with per-artifact pass/fail/skip status.

    "); + return Html(html); + } + + let summary = result_store.summary(); + + // Stats + html.push_str("
    "); + html.push_str(&format!( + "
    {}
    Total Runs
    ", + summary.total_runs + )); + html.push_str(&format!( + "
    {:.0}%
    Pass Rate
    ", + summary.pass_rate() + )); + html.push_str(&format!( + "
    {}
    Passed
    ", + summary.pass_count + )); + html.push_str(&format!( + "
    {}
    Failed
    ", + summary.fail_count + )); + if summary.skip_count > 0 { + html.push_str(&format!( + "
    {}
    Skipped
    ", + summary.skip_count + )); + } + if summary.blocked_count > 0 { + html.push_str(&format!( + "
    {}
    Blocked
    ", + summary.blocked_count + )); + } + html.push_str("
    "); + + // Run history table + html.push_str("

    Run History

    "); + html.push_str( + "\ + ", + ); + + for run in result_store.runs() { + let pass = run.results.iter().filter(|r| r.status.is_pass()).count(); + let fail = run.results.iter().filter(|r| r.status.is_fail()).count(); + let skip = run.results.len() - pass - fail; + let total = run.results.len(); + + let status_badge = if fail > 0 { + "FAIL" + } else { + "PASS" + }; + + html.push_str(&format!( + "\ + \ + \ + \ + \ + \ + \ + \ + \ + ", + id = html_escape(&run.run.id), + ts = html_escape(&run.run.timestamp), + src = run.run.source.as_deref().unwrap_or("-"), + env = run.run.environment.as_deref().unwrap_or("-"), + )); + } + + html.push_str("
    Run IDTimestampSourceEnvironmentPassFailSkipTotal
    {id} {status_badge}{ts}{src}{env}{pass}{fail}{skip}{total}
    "); + + Html(html) +} + +async fn result_detail( + State(state): State, + Path(run_id): Path, +) -> Html { + let state = state.read().await; + let result_store = &state.result_store; + + let Some(run) = result_store.get_run(&run_id) else { + return Html(format!( + "

    Not Found

    Run {} does not exist.

    ", + html_escape(&run_id) + )); + }; + + let mut html = format!("

    Run: {}

    ", html_escape(&run.run.id)); + + // Metadata + html.push_str("
    "); + html.push_str(&format!( + "
    Timestamp
    {}
    ", + html_escape(&run.run.timestamp) + )); + if let Some(ref source) = run.run.source { + html.push_str(&format!("
    Source
    {}
    ", html_escape(source))); + } + if let Some(ref env) = run.run.environment { + html.push_str(&format!( + "
    Environment
    {}
    ", + html_escape(env) + )); + } + if let Some(ref commit) = run.run.commit { + html.push_str(&format!( + "
    Commit
    {}
    ", + html_escape(commit) + )); + } + html.push_str("
    "); + + // Results table + html.push_str("

    Results

    "); + html.push_str( + "", + ); + + for result in &run.results { + let title = state + .store + .get(&result.artifact) + .map(|a| a.title.as_str()) + .unwrap_or("-"); + let (status_badge, status_class) = match result.status { + rivet_core::results::TestStatus::Pass => { + ("PASS", "") + } + rivet_core::results::TestStatus::Fail => ( + "FAIL", + "result-fail", + ), + rivet_core::results::TestStatus::Skip => { + ("SKIP", "") + } + rivet_core::results::TestStatus::Error => ( + "ERROR", + "result-error", + ), + rivet_core::results::TestStatus::Blocked => { + ("BLOCKED", "") + } + }; + + let duration = result.duration.as_deref().unwrap_or("-"); + let message = result.message.as_deref().unwrap_or(""); + + html.push_str(&format!( + "\ + \ + \ + \ + \ + \ + ", + aid = html_escape(&result.artifact), + title = html_escape(title), + msg = html_escape(message), + )); + } + + html.push_str("
    ArtifactTitleStatusDurationMessage
    {aid}{title}{status_badge}{duration}{msg}
    "); + + html.push_str( + "

    ← Back to results

    ", + ); + + Html(html) +} + +// ── Source viewer ────────────────────────────────────────────────────────────── + +const SOURCE_MAX_SIZE: u64 = 100 * 1024; +const SOURCE_MAX_DEPTH: usize = 3; +const SOURCE_SKIP_DIRS: &[&str] = &["target", ".git", "node_modules", ".DS_Store"]; + +struct TreeEntry { + name: String, + rel_path: String, + is_dir: bool, + children: Vec, +} + +fn build_tree(base: &std::path::Path, rel: &str, depth: usize) -> Vec { + if depth > SOURCE_MAX_DEPTH { + return Vec::new(); + } + let Ok(entries) = std::fs::read_dir(base) else { + return Vec::new(); + }; + let mut items: Vec = Vec::new(); + for entry in entries.flatten() { + let ft = match entry.file_type() { + Ok(ft) => ft, + Err(_) => continue, + }; + if ft.is_symlink() { + continue; + } + let name = entry.file_name().to_string_lossy().to_string(); + if SOURCE_SKIP_DIRS.contains(&name.as_str()) || name.starts_with('.') { + continue; + } + let child_rel = if rel.is_empty() { + name.clone() + } else { + format!("{rel}/{name}") + }; + if ft.is_dir() { + let children = build_tree(&entry.path(), &child_rel, depth + 1); + items.push(TreeEntry { + name, + rel_path: child_rel, + is_dir: true, + children, + }); + } else { + items.push(TreeEntry { + name, + rel_path: child_rel, + is_dir: false, + children: Vec::new(), + }); + } + } + items.sort_by(|a, b| { + b.is_dir + .cmp(&a.is_dir) + .then_with(|| a.name.to_lowercase().cmp(&b.name.to_lowercase())) + }); + items +} + +fn render_tree(entries: &[TreeEntry], html: &mut String, depth: usize) { + html.push_str("
      "); + for entry in entries { + html.push_str("
    • "); + let indent: String = (0..depth) + .map(|_| "") + .collect(); + if entry.is_dir { + html.push_str(&format!( + "{indent} {name}", + name = html_escape(&entry.name), + )); + if !entry.children.is_empty() { + render_tree(&entry.children, html, depth + 1); + } + } else { + let encoded = urlencoding::encode(&entry.rel_path); + let icon = if entry.name.ends_with(".yaml") || entry.name.ends_with(".yml") { + "" + } else if entry.name.ends_with(".rs") { + "" + } else if entry.name.ends_with(".md") { + "" + } else if entry.name.ends_with(".toml") { + "" + } else { + "" + }; + html.push_str(&format!( + "{indent}{icon} {name}", + name = html_escape(&entry.name), + )); + } + html.push_str("
    • "); + } + html.push_str("
    "); +} + +async fn source_tree_view(State(state): State) -> Html { + let state = state.read().await; + let project_path = &state.project_path_buf; + let tree = build_tree(project_path, "", 0); + let mut html = String::from("

    Source Files

    "); + html.push_str(&format!( + "

    Project directory: {}

    ", + html_escape(&project_path.display().to_string()) + )); + html.push_str("
    "); + render_tree(&tree, &mut html, 0); + html.push_str("
    "); + Html(html) +} + +fn format_size(bytes: u64) -> String { + if bytes < 1024 { + format!("{bytes} B") + } else if bytes < 1024 * 1024 { + format!("{:.1} KB", bytes as f64 / 1024.0) + } else { + format!("{:.1} MB", bytes as f64 / (1024.0 * 1024.0)) + } +} + +fn format_mtime(time: std::time::SystemTime) -> String { + let secs = time + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + std::process::Command::new("date") + .args(["-r", &secs.to_string(), "+%Y-%m-%d %H:%M:%S"]) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .unwrap_or_else(|| format!("epoch+{secs}s")) +} + +fn collect_artifact_ids(store: &rivet_core::store::Store) -> std::collections::HashSet { + store.iter().map(|a| a.id.clone()).collect() +} + +/// Info about an artifact that references a source file, with optional line info. +struct FileRef { + id: String, + artifact_type: String, + title: String, + line: Option, + end_line: Option, +} + +fn artifacts_referencing_file(store: &rivet_core::store::Store, file_rel: &str) -> Vec { + let rel = std::path::Path::new(file_rel); + let mut refs = Vec::new(); + + for a in store.iter() { + // Check source_file (existing behavior) + if let Some(sf) = &a.source_file { + if sf == rel || sf.ends_with(file_rel) { + refs.push(FileRef { + id: a.id.clone(), + artifact_type: a.artifact_type.clone(), + title: a.title.clone(), + line: None, + end_line: None, + }); + continue; + } + } + // Scan string fields for file:line references matching this file + for value in a.fields.values() { + if let serde_yaml::Value::String(s) = value { + if let Some((_file, line, end_line)) = extract_file_ref(s, file_rel) { + refs.push(FileRef { + id: a.id.clone(), + artifact_type: a.artifact_type.clone(), + title: a.title.clone(), + line, + end_line, + }); + break; // one ref per artifact is enough + } + } + } + } + refs +} + +/// If `val` contains a source ref matching 
`target_file`, return (file, line, end_line). +fn extract_file_ref(val: &str, target_file: &str) -> Option<(String, Option, Option)> { + // Look for target_file possibly followed by :line or :line-line + let idx = val.find(target_file)?; + let after = &val[idx + target_file.len()..]; + if let Some(rest) = after.strip_prefix(':') { + let digits_end = rest + .find(|c: char| !c.is_ascii_digit()) + .unwrap_or(rest.len()); + if digits_end > 0 { + let line: u32 = rest[..digits_end].parse().ok()?; + let rest2 = &rest[digits_end..]; + if let Some(rest3) = rest2.strip_prefix('-') { + let d2_end = rest3 + .find(|c: char| !c.is_ascii_digit()) + .unwrap_or(rest3.len()); + if d2_end > 0 { + let end_line: u32 = rest3[..d2_end].parse().ok()?; + return Some((target_file.to_string(), Some(line), Some(end_line))); + } + } + return Some((target_file.to_string(), Some(line), None)); + } + } + Some((target_file.to_string(), None, None)) +} + +async fn source_file_view( + State(state): State, + Path(raw_path): Path, +) -> Html { + let state = state.read().await; + let project_path = &state.project_path_buf; + let store = &state.store; + let decoded = urlencoding::decode(&raw_path).unwrap_or(std::borrow::Cow::Borrowed(&raw_path)); + let rel_path = decoded.as_ref(); + + let full_path = project_path.join(rel_path); + let canonical = match full_path.canonicalize() { + Ok(p) => p, + Err(_) => { + return Html(format!( + "

    Not Found

    File {} does not exist.

    ", + html_escape(rel_path) + )); + } + }; + let canonical_project = match project_path.canonicalize() { + Ok(p) => p, + Err(_) => { + return Html("

    Error

    Cannot resolve project path.

    ".into()); + } + }; + if !canonical.starts_with(&canonical_project) { + return Html("

    Forbidden

    Path traversal is not allowed.

    ".into()); + } + + let metadata = match std::fs::symlink_metadata(&full_path) { + Ok(m) => m, + Err(_) => { + return Html(format!( + "

    Not Found

    File {} does not exist.

    ", + html_escape(rel_path) + )); + } + }; + if metadata.file_type().is_symlink() { + return Html("

    Forbidden

    Symlinks are not followed.

    ".into()); + } + if metadata.is_dir() { + return Html(format!( + "

    Directory

    {} is a directory. Back to tree

    ", + html_escape(rel_path) + )); + } + + let file_size = metadata.len(); + if file_size > SOURCE_MAX_SIZE { + return Html(format!( + "

    File Too Large

    {} is {} which exceeds the 100 KB limit.

    ← Back to files

    ", + html_escape(rel_path), + format_size(file_size) + )); + } + + let content = match std::fs::read_to_string(&full_path) { + Ok(c) => c, + Err(e) => { + return Html(format!( + "

    Error

    Cannot read {}: {}

    ", + html_escape(rel_path), + html_escape(&e.to_string()) + )); + } + }; + + let mut html = String::new(); + + // Breadcrumb + html.push_str("
    "); + html.push_str( + "Source", + ); + let parts: Vec<&str> = rel_path.split('/').collect(); + for (i, part) in parts.iter().enumerate() { + html.push_str("/"); + if i == parts.len() - 1 { + html.push_str(&format!("{}", html_escape(part))); + } else { + html.push_str(&format!("{}", html_escape(part))); + } + } + html.push_str("
    "); + + // File metadata + let mtime_str = metadata + .modified() + .map(format_mtime) + .unwrap_or_else(|_| "unknown".into()); + html.push_str("
    "); + html.push_str(&format!( + " {}", + format_size(file_size) + )); + html.push_str(&format!( + " {}", + html_escape(&mtime_str) + )); + html.push_str(&format!( + "{} lines", + content.lines().count() + )); + html.push_str("
    "); + + let file_name = full_path.file_name().and_then(|n| n.to_str()).unwrap_or(""); + let is_yaml = file_name.ends_with(".yaml") || file_name.ends_with(".yml"); + let is_markdown = file_name.ends_with(".md"); + let is_rust = file_name.ends_with(".rs"); + let artifact_ids = collect_artifact_ids(store); + + if is_markdown && content.starts_with("---") { + if let Ok(doc) = rivet_core::document::parse_document(&content, Some(&full_path)) { + html.push_str("
    "); + let body_html = document::render_to_html(&doc, |aid| store.contains(aid)); + html.push_str(&body_html); + html.push_str("
    "); + } else { + render_code_block(&content, &artifact_ids, is_yaml, is_rust, &mut html); + } + } else { + render_code_block(&content, &artifact_ids, is_yaml, is_rust, &mut html); + } + + let refs = artifacts_referencing_file(store, rel_path); + if !refs.is_empty() { + html.push_str("
    "); + html.push_str(&format!( + "

    Artifacts Referencing This File ({})

    ", + refs.len() + )); + html.push_str(""); + for fref in &refs { + let line_info = match (fref.line, fref.end_line) { + (Some(l), Some(e)) => format!( + "{l}-{e}" + ), + (Some(l), None) => format!( + "{l}" + ), + _ => "—".into(), + }; + html.push_str(&format!( + "", + badge_for_type(&fref.artifact_type), + html_escape(&fref.title), + id = fref.id, + )); + } + html.push_str("
    IDTypeTitleLines
    {id}{}{}{line_info}
    "); + } + + html.push_str("

    ← Back to files

    "); + Html(html) +} + +/// Syntax-highlight a single line of YAML (returns HTML with `` tokens). +fn highlight_yaml_line(line: &str) -> String { + let escaped = html_escape(line); + // Blank lines + if line.trim().is_empty() { + return escaped; + } + // Full-line comments + let trimmed = line.trim_start(); + if trimmed.starts_with('#') { + let indent = &escaped[..escaped.len() - html_escape(trimmed).len()]; + return format!( + "{indent}{}", + html_escape(trimmed) + ); + } + let mut out = String::with_capacity(escaped.len() + 64); + // Check for key: value pattern + // Find the first unquoted colon + if let Some(colon_pos) = find_yaml_colon(trimmed) { + let raw_indent = escaped.len() - html_escape(trimmed).len(); + let indent_str = &escaped[..raw_indent]; + out.push_str(indent_str); + let key_part = &trimmed[..colon_pos]; + let rest = &trimmed[colon_pos..]; // starts with ':' + // List prefix + if let Some(after_dash) = key_part.strip_prefix("- ") { + out.push_str("- "); + out.push_str(&format!( + "{}", + html_escape(after_dash) + )); + } else { + out.push_str(&format!( + "{}", + html_escape(key_part) + )); + } + out.push_str(":"); + let after_colon = &rest[1..]; + if !after_colon.is_empty() { + out.push_str(&highlight_yaml_value(after_colon)); + } + } else if trimmed.starts_with("- ") { + let raw_indent = escaped.len() - html_escape(trimmed).len(); + out.push_str(&escaped[..raw_indent]); + out.push_str("-"); + out.push_str(&highlight_yaml_value(&trimmed[1..])); + } else { + out.push_str(&escaped); + } + out +} + +fn find_yaml_colon(s: &str) -> Option { + let (search, offset) = if let Some(rest) = s.strip_prefix("- ") { + (rest, 2) + } else { + (s, 0) + }; + let mut in_quote = false; + let mut quote_char = ' '; + for (i, c) in search.char_indices() { + if in_quote { + if c == quote_char { + in_quote = false; + } + continue; + } + if c == '\'' || c == '"' { + in_quote = true; + quote_char = c; + continue; + } + if c == ':' && (i + 1 >= search.len() || 
search.as_bytes()[i + 1] == b' ') { + return Some(i + offset); + } + } + None +} + +fn highlight_yaml_value(val: &str) -> String { + let trimmed = val.trim(); + if trimmed.is_empty() { + return html_escape(val); + } + // Inline comment + let (value_part, comment) = split_inline_comment(trimmed); + let leading_space = &val[..val.len() - val.trim_start().len()]; + let mut out = String::new(); + out.push_str(&html_escape(leading_space)); + let v = value_part.trim(); + if v.is_empty() { + // nothing + } else if v == "true" || v == "false" { + out.push_str(&format!("{v}")); + } else if v == "null" || v == "~" { + out.push_str(&format!("{v}")); + } else if v.starts_with('"') || v.starts_with('\'') { + out.push_str(&format!("{}", html_escape(v))); + } else if v.starts_with('[') || v.starts_with('{') { + // Inline collections — highlight brackets and values + out.push_str(&highlight_yaml_inline_collection(v)); + } else if v.starts_with('*') || v.starts_with('&') { + out.push_str(&format!( + "{}", + html_escape(v) + )); + } else if v == ">" || v == "|" || v == ">-" || v == "|-" { + out.push_str(&format!( + "{}", + html_escape(v) + )); + } else if v.parse::().is_ok() { + out.push_str(&format!("{}", html_escape(v))); + } else { + out.push_str(&format!("{}", html_escape(v))); + } + if !comment.is_empty() { + out.push_str(&format!( + " {}", + html_escape(comment) + )); + } + out +} + +fn split_inline_comment(s: &str) -> (&str, &str) { + let mut in_quote = false; + let mut qc = ' '; + let bytes = s.as_bytes(); + for i in 0..bytes.len() { + let c = bytes[i] as char; + if in_quote { + if c == qc { + in_quote = false; + } + continue; + } + if c == '\'' || c == '"' { + in_quote = true; + qc = c; + continue; + } + if c == '#' && (i == 0 || bytes[i - 1] == b' ') { + return (s[..i].trim_end(), &s[i..]); + } + } + (s, "") +} + +fn highlight_yaml_inline_collection(s: &str) -> String { + let mut out = String::new(); + for c in s.chars() { + match c { + '[' | ']' | '{' | '}' | ',' => { + 
out.push_str(&format!("{c}")); + } + _ => out.push(c), + } + } + out +} + +/// Syntax-highlight a single line of shell/bash. +fn highlight_bash_line(line: &str) -> String { + let escaped = html_escape(line); + let trimmed = line.trim(); + if trimmed.is_empty() || trimmed.starts_with('#') { + if trimmed.starts_with('#') { + return format!("{}", escaped); + } + return escaped; + } + // Simple: highlight the command name and flags + let mut out = String::new(); + let mut first_word = true; + for token in trimmed.split_whitespace() { + if !first_word || !out.is_empty() { + out.push(' '); + } + if token == "|" || token == "&&" || token == "||" { + out.push_str(&format!( + "{}", + html_escape(token) + )); + first_word = true; + continue; + } + if first_word { + out.push_str(&format!( + "{}", + html_escape(token) + )); + first_word = false; + } else if token.starts_with('-') { + out.push_str(&format!( + "{}", + html_escape(token) + )); + } else if token.starts_with('"') || token.starts_with('\'') { + out.push_str(&format!( + "{}", + html_escape(token) + )); + } else { + out.push_str(&html_escape(token)); + } + } + // Preserve leading indent + let indent = &escaped[..escaped.len() - html_escape(trimmed).len()]; + format!("{indent}{out}") +} + +/// Apply syntax highlighting to an already-escaped line, based on file type. +fn syntax_highlight_line(line: &str, lang: &str) -> String { + match lang { + "yaml" | "yml" => highlight_yaml_line(line), + "bash" | "sh" | "shell" => highlight_bash_line(line), + _ => html_escape(line), + } +} + +fn render_code_block( + content: &str, + artifact_ids: &std::collections::HashSet, + is_yaml: bool, + is_rust: bool, + html: &mut String, +) { + let lang = if is_yaml { + "yaml" + } else if is_rust { + "rust" + } else { + "" + }; + html.push_str("
    "); + for (i, line) in content.lines().enumerate() { + let line_num = i + 1; + let has_artifact = artifact_ids.iter().any(|id| line.contains(id.as_str())); + let row_class = if has_artifact { + "source-line source-line-highlight" + } else { + "source-line" + }; + // First apply syntax highlighting + let highlighted = if !lang.is_empty() { + syntax_highlight_line(line, lang) + } else { + html_escape(line) + }; + // Then overlay artifact links on top + let display_line = if is_yaml || is_rust { + let mut result = highlighted; + let mut ids: Vec<&String> = artifact_ids + .iter() + .filter(|id| line.contains(id.as_str())) + .collect(); + ids.sort_by_key(|b| std::cmp::Reverse(b.len())); + for id in ids { + let escaped_id = html_escape(id); + // The ID may be wrapped in a highlight span — search for it + if let Some(pos) = result.find(&escaped_id) { + let link = format!( + "{escaped_id}" + ); + let before = &result[..pos]; + let after = &result[pos + escaped_id.len()..]; + result = format!("{before}{link}{after}"); + } + } + result + } else { + highlighted + }; + html.push_str(&format!( + "" + )); + } + html.push_str("
    {line_num}{display_line}
    "); +} + +// ── Diff ───────────────────────────────────────────────────────────────── + +#[derive(Debug, serde::Deserialize)] +struct DiffParams { + base: Option, + head: Option, +} + +fn discover_git_refs(pp: &std::path::Path) -> (Vec, Vec) { + let rg = |a: &[&str]| -> Vec { + std::process::Command::new("git") + .args(a) + .current_dir(pp) + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| { + String::from_utf8_lossy(&o.stdout) + .lines() + .map(|l| l.trim().to_string()) + .filter(|l| !l.is_empty()) + .collect() + }) + .unwrap_or_default() + }; + let tags = rg(&["tag", "--list", "--sort=-creatordate"]); + let branches: Vec = rg(&["branch", "--list", "--format=%(refname:short)"]) + .into_iter() + .filter(|b| b != "HEAD") + .collect(); + (tags, branches) +} + +fn load_store_from_git_ref(pp: &std::path::Path, gr: &str) -> Result { + let rg = |a: &[&str]| -> Result { + let o = std::process::Command::new("git") + .args(a) + .current_dir(pp) + .output() + .map_err(|e| format!("git: {e}"))?; + if !o.status.success() { + return Err(format!( + "git {} failed: {}", + a.join(" "), + String::from_utf8_lossy(&o.stderr).trim() + )); + } + Ok(String::from_utf8_lossy(&o.stdout).to_string()) + }; + let cc = rg(&["show", &format!("{gr}:rivet.yaml")])?; + let cfg: ProjectConfig = + serde_yaml::from_str(&cc).map_err(|e| format!("parse rivet.yaml@{gr}: {e}"))?; + let mut store = Store::new(); + let adp = GenericYamlAdapter::new(); + let ac = AdapterConfig::default(); + for src in &cfg.sources { + if src.format != "generic-yaml" && src.format != "generic" { + continue; + } + let tree = rg(&["ls-tree", "-r", "--name-only", gr, "--", &src.path])?; + for fp in tree.lines() { + let fp = fp.trim(); + if fp.is_empty() || (!fp.ends_with(".yaml") && !fp.ends_with(".yml")) { + continue; + } + let ct = match rg(&["show", &format!("{gr}:{fp}")]) { + Ok(c) => c, + Err(_) => continue, + }; + if let Ok(arts) = adp.import(&AdapterSource::Bytes(ct.into_bytes()), &ac) { + for a in 
arts { + store.upsert(a); + } + } + } + } + Ok(store) +} + +fn diff_ref_options(sel: &str, tags: &[String], branches: &[String], inc_wt: bool) -> String { + let mut h = String::new(); + if inc_wt { + let s = if sel == "working" { " selected" } else { "" }; + h.push_str(&format!( + "" + )); + } + for o in &["HEAD", "HEAD~1", "HEAD~2", "HEAD~3", "HEAD~4", "HEAD~5"] { + let s = if sel == *o { " selected" } else { "" }; + h.push_str(&format!("")); + } + if !tags.is_empty() { + h.push_str(""); + for t in tags { + let s = if sel == t { " selected" } else { "" }; + h.push_str(&format!( + "", + t = html_escape(t) + )); + } + h.push_str(""); + } + if !branches.is_empty() { + h.push_str(""); + for b in branches { + let s = if sel == b { " selected" } else { "" }; + h.push_str(&format!( + "", + b = html_escape(b) + )); + } + h.push_str(""); + } + h +} + +async fn diff_view( + State(state): State, + Query(params): Query, +) -> Html { + let state = state.read().await; + let pp = &state.project_path_buf; + let br = params.base.unwrap_or_default(); + let hr = params.head.unwrap_or_default(); + let (tags, branches) = discover_git_refs(pp); + let mut html = String::from("

    Diff

    "); + html.push_str( + "
    ", + ); + let bs = if br.is_empty() { "HEAD" } else { &br }; + html.push_str("
    "); + let hs = if hr.is_empty() { "working" } else { &hr }; + html.push_str("
    "); + html.push_str("
    "); + html.push_str("
    "); + if br.is_empty() && hr.is_empty() { + html.push_str("

    Select a base and head revision, then click Compare.

    This will compare artifact YAML files between two git states.

    "); + return Html(html); + } + let base_store = match load_store_from_git_ref(pp, &br) { + Ok(s) => s, + Err(e) => { + html.push_str(&format!("
    Error loading base ({}): {}
    ", html_escape(&br), html_escape(&e))); + return Html(html); + } + }; + let head_store: Store; + let head_label: String; + if hr == "working" || hr.is_empty() { + head_store = state.store.clone(); + head_label = "Working tree".to_string(); + } else { + match load_store_from_git_ref(pp, &hr) { + Ok(s) => { + head_store = s; + head_label = hr.clone(); + } + Err(e) => { + html.push_str(&format!("
    Error loading head ({}): {}
    ", html_escape(&hr), html_escape(&e))); + return Html(html); + } + } + }; + let diff = ArtifactDiff::compute(&base_store, &head_store); + html.push_str(&format!("

    Comparing {}{}

    ", html_escape(&br), html_escape(&head_label))); + html.push_str("
    "); + html.push_str(&format!("+ {} added", diff.added.len())); + html.push_str(&format!(" {} removed", diff.removed.len())); + html.push_str(&format!("Δ {} modified", diff.modified.len())); + html.push_str(&format!("{} unchanged", diff.unchanged)); + html.push_str("
    "); + if diff.is_empty() { + html.push_str("

    No differences found between these revisions.

    "); + return Html(html); + } + html.push_str("
    "); + for id in &diff.added { + let title = head_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); + let at = head_store + .get(id) + .map(|a| a.artifact_type.as_str()) + .unwrap_or(""); + html.push_str(&format!("
    +{} {} {}
    ", html_escape(id), badge_for_type(at), html_escape(title))); + } + for id in &diff.removed { + let title = base_store.get(id).map(|a| a.title.as_str()).unwrap_or(""); + let at = base_store + .get(id) + .map(|a| a.artifact_type.as_str()) + .unwrap_or(""); + html.push_str(&format!("
    {} {} {}
    ", html_escape(id), badge_for_type(at), html_escape(title))); + } + for ch in &diff.modified { + let at = head_store + .get(&ch.id) + .map(|a| a.artifact_type.as_str()) + .unwrap_or(""); + let title = head_store + .get(&ch.id) + .map(|a| a.title.as_str()) + .unwrap_or(""); + html.push_str(&format!("
    Δ{} {} {}
    ", html_escape(&ch.id), badge_for_type(at), html_escape(title))); + if let Some((ref o, ref n)) = ch.title_changed { + html.push_str(&format!("
    Title {} {}
    ", html_escape(o), html_escape(n))); + } + if let Some((ref o, ref n)) = ch.status_changed { + html.push_str(&format!("
    Status {} {}
    ", html_escape(o.as_deref().unwrap_or("(none)")), html_escape(n.as_deref().unwrap_or("(none)")))); + } + if let Some((ref o, ref n)) = ch.type_changed { + html.push_str(&format!("
    Type {} {}
    ", html_escape(o), html_escape(n))); + } + if ch.description_changed { + html.push_str("
    Description changed
    "); + } + for t in &ch.tags_added { + html.push_str(&format!("
    Tag + {}
    ", html_escape(t))); + } + for t in &ch.tags_removed { + html.push_str(&format!("
    Tag − {}
    ", html_escape(t))); + } + for l in &ch.links_added { + html.push_str(&format!("
    Link + {} → {}
    ", html_escape(&l.link_type), html_escape(&l.target))); + } + for l in &ch.links_removed { + html.push_str(&format!("
    Link − {} → {}
    ", html_escape(&l.link_type), html_escape(&l.target))); + } + for f in &ch.fields_changed { + html.push_str(&format!("
    Field {} changed
    ", html_escape(f))); + } + html.push_str("
    "); + } + html.push_str("
    "); + Html(html) +} + +// ── Document linkage view ──────────────────────────────────────────────── + +async fn doc_linkage_view(State(state): State) -> Html { + let state = state.read().await; + let store = &state.store; + let doc_store = &state.doc_store; + let graph = &state.graph; + + let mut html = String::from("

    Document Linkage

    "); + html.push_str("

    Shows how documents relate through their artifact references and which artifacts remain unlinked.

    "); + + // Collect per-document artifact sets + struct DocInfo { + id: String, + title: String, + artifact_ids: Vec, + } + let mut doc_infos: Vec = Vec::new(); + let mut all_doc_artifacts: std::collections::HashSet = std::collections::HashSet::new(); + + for doc in doc_store.iter() { + let mut seen = std::collections::HashSet::new(); + let art_ids: Vec = doc + .references + .iter() + .filter(|r| seen.insert(r.artifact_id.clone())) + .map(|r| r.artifact_id.clone()) + .collect(); + for aid in &art_ids { + all_doc_artifacts.insert(aid.clone()); + } + doc_infos.push(DocInfo { + id: doc.id.clone(), + title: doc.title.clone(), + artifact_ids: art_ids, + }); + } + + // Also consider artifacts loaded from YAML source files as "belonging" to that source + // Group by source file directory + let mut source_groups: std::collections::BTreeMap> = + std::collections::BTreeMap::new(); + for a in store.iter() { + if let Some(sf) = &a.source_file { + let dir = sf.parent().and_then(|p| p.to_str()).unwrap_or("artifacts"); + source_groups + .entry(dir.to_string()) + .or_default() + .push(a.id.clone()); + } + } + + // ── Document linkage graph (via etch layout engine) ── + // Build a petgraph where nodes = documents + source groups, edges = cross-doc links + { + use petgraph::Graph; + let mut pg: Graph = Graph::new(); + let mut node_idx_map: std::collections::HashMap = + std::collections::HashMap::new(); + + // Add document nodes + for doc in &doc_infos { + let idx = pg.add_node(doc.id.clone()); + node_idx_map.insert(doc.id.clone(), idx); + } + // Add source group nodes + for path in source_groups.keys() { + let short = std::path::Path::new(path.as_str()) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(path); + let label = format!("{short}/"); + let idx = pg.add_node(label.clone()); + node_idx_map.insert(path.clone(), idx); + } + + // Build artifact→node index (which node "owns" each artifact) + let mut art_to_node: std::collections::HashMap = + 
std::collections::HashMap::new(); + for doc in &doc_infos { + for aid in &doc.artifact_ids { + art_to_node.insert(aid.clone(), doc.id.clone()); + } + } + for (path, ids) in &source_groups { + for aid in ids { + art_to_node + .entry(aid.clone()) + .or_insert_with(|| path.clone()); + } + } + + // Add edges: collect link types per (src_node→tgt_node) pair + // Uses both forward links and backlinks so target-only nodes (like SRS-001) get edges too + let mut edge_types: std::collections::HashMap< + (String, String), + std::collections::BTreeSet, + > = std::collections::HashMap::new(); + for (aid, src_node) in &art_to_node { + if let Some(a) = store.get(aid) { + for link in &a.links { + if let Some(tgt_node) = art_to_node.get(&link.target) { + if tgt_node != src_node { + edge_types + .entry((src_node.clone(), tgt_node.clone())) + .or_default() + .insert(link.link_type.clone()); + } + } + } + } + } + for ((src, tgt), types) in &edge_types { + if let (Some(&si), Some(&ti)) = (node_idx_map.get(src), node_idx_map.get(tgt)) { + let label = types.iter().cloned().collect::>().join(", "); + pg.add_edge(si, ti, label); + } + } + + // Build type map for coloring: documents=specification, source groups=source + let doc_ids: std::collections::HashSet = + doc_infos.iter().map(|d| d.id.clone()).collect(); + + let mut colors = type_color_map(); + colors.insert("document".into(), "#3a86ff".into()); + colors.insert("source-group".into(), "#4caf50".into()); + + let svg_opts = SvgOptions { + type_colors: colors, + interactive: true, + base_url: Some("/documents".into()), + background: Some("#fafbfc".into()), + font_size: 12.0, + edge_color: "#3a86ff".into(), + ..SvgOptions::default() + }; + + let layout_opts = LayoutOptions { + node_width: 220.0, + node_height: 60.0, + rank_separation: 100.0, + node_separation: 40.0, + ..Default::default() + }; + + let gl = pgv_layout::layout( + &pg, + &|_idx, label| { + let node_type = if doc_ids.contains(label) { + "document" + } else { + "source-group" 
+ }; + let sublabel = if doc_ids.contains(label) { + doc_infos.iter().find(|d| d.id == *label).map(|d| { + let s = format!("{} ({} refs)", d.title, d.artifact_ids.len()); + if s.len() > 30 { + format!("{}...", &s[..28]) + } else { + s + } + }) + } else { + source_groups + .iter() + .find(|(p, _)| { + let short = std::path::Path::new(p.as_str()) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(p); + format!("{short}/") == *label + }) + .map(|(_, ids)| format!("{} artifacts", ids.len())) + }; + NodeInfo { + id: label.clone(), + label: label.clone(), + node_type: node_type.into(), + sublabel, + } + }, + &|_idx, e| EdgeInfo { label: e.clone() }, + &layout_opts, + ); + + let svg = render_svg(&gl, &svg_opts); + html.push_str( + "
    \ +
    \ +
    \ + \ + \ + \ +
    ", + ); + html.push_str(&svg); + html.push_str("
    "); + html.push_str(&format!( + "

    {} nodes, {} edges — scroll to zoom, drag to pan, drag nodes to reposition

    ", + gl.nodes.len(), gl.edges.len() + )); + } + + // ── Inter-document link table ── + html.push_str("

    Cross-Document Links

    "); + html.push_str("

    Artifacts in one document that link to artifacts in another document.

    "); + html.push_str(""); + + let mut cross_link_count = 0u32; + // Build artifact→document index + let mut art_to_doc: std::collections::HashMap = + std::collections::HashMap::new(); + for doc in &doc_infos { + for aid in &doc.artifact_ids { + art_to_doc.insert(aid.clone(), doc.id.clone()); + } + } + + for doc in &doc_infos { + for aid in &doc.artifact_ids { + if let Some(a) = store.get(aid) { + for link in &a.links { + if let Some(target_doc) = art_to_doc.get(&link.target) { + if target_doc != &doc.id { + cross_link_count += 1; + html.push_str(&format!( + "\ + \ + \ + \ + ", + src_doc = html_escape(&doc.id), + lt = html_escape(&link.link_type), + tgt = html_escape(&link.target), + tgt_doc = html_escape(target_doc), + )); + } + } + } + } + } + } + + if cross_link_count == 0 { + html.push_str(""); + } + html.push_str("
    Source DocArtifactLinkTargetTarget Doc
    {src_doc}{aid}{lt}{tgt}{tgt_doc}
    No cross-document links found
    "); + + // ── Unlinked artifacts ── + // Artifacts that exist in the store but are NOT referenced by any document + let all_artifact_ids: std::collections::HashSet = + store.iter().map(|a| a.id.clone()).collect(); + let unlinked: Vec<&rivet_core::model::Artifact> = store + .iter() + .filter(|a| !all_doc_artifacts.contains(&a.id)) + .collect(); + + html.push_str("

    Artifacts Not Referenced in Any Document

    "); + if unlinked.is_empty() { + html.push_str("

    All artifacts are referenced by at least one document.

    "); + } else { + html.push_str(&format!("

    {} artifacts are not referenced by any document via [[ID]].

    ", unlinked.len())); + html.push_str(""); + for a in &unlinked { + let link_count = a.links.len() + graph.backlinks_to(&a.id).len(); + html.push_str(&format!( + "", + badge_for_type(&a.artifact_type), + html_escape(&a.title), + id = html_escape(&a.id), + )); + } + html.push_str("
    IDTypeTitleLinks
    {id}{}{}{link_count}
    "); + } + html.push_str("
    "); + + // ── Per-document summary cards ── + html.push_str("

    Document Summary

    "); + html.push_str(""); + for doc in doc_store.iter() { + let total_refs = doc.references.len(); + let valid = doc + .references + .iter() + .filter(|r| store.contains(&r.artifact_id)) + .count(); + let broken = total_refs - valid; + let broken_class = if broken > 0 { + " style=\"color:var(--error);font-weight:600\"" + } else { + "" + }; + html.push_str(&format!( + "\ + {broken}", + badge_for_type(&doc.doc_type), + id = html_escape(&doc.id), + )); + } + html.push_str("
    DocumentTypeReferencesValid RefsBroken Refs
    {id}{}{total_refs}{valid}
    "); + + let _ = all_artifact_ids; + Html(html) +} + +// ── Traceability explorer ──────────────────────────────────────────────── + +#[derive(Debug, serde::Deserialize)] +struct TraceParams { + root_type: Option, + status: Option, + search: Option, +} + +#[derive(Debug, serde::Deserialize)] +struct TraceHistoryParams { + file: Option, +} + +/// A node in the traceability tree. +struct TraceNode { + id: String, + artifact_type: String, + title: String, + status: String, + link_type: String, + children: Vec, +} + +/// Recursively build a trace tree starting from the backlinks of a given +/// artifact, descending up to `max_depth` levels. +fn build_trace_children( + id: &str, + store: &Store, + graph: &LinkGraph, + depth: usize, + max_depth: usize, +) -> Vec { + if depth >= max_depth { + return Vec::new(); + } + let backlinks = graph.backlinks_to(id); + let mut nodes: Vec = Vec::new(); + for bl in backlinks { + let child_id = &bl.source; + let (artifact_type, title, status) = if let Some(a) = store.get(child_id) { + ( + a.artifact_type.clone(), + a.title.clone(), + a.status.clone().unwrap_or_default(), + ) + } else { + continue; + }; + let children = build_trace_children(child_id, store, graph, depth + 1, max_depth); + nodes.push(TraceNode { + id: child_id.clone(), + artifact_type, + title, + status, + link_type: bl.link_type.clone(), + children, + }); + } + // Sort by link type then ID for stable ordering + nodes.sort_by(|a, b| a.link_type.cmp(&b.link_type).then(a.id.cmp(&b.id))); + nodes +} + +/// Render a trace node and its children as nested `
    ` HTML. +fn render_trace_node(node: &TraceNode, depth: usize, project_path: &str) -> String { + let badge = badge_for_type(&node.artifact_type); + let status_class = match node.status.as_str() { + "approved" => "trace-status-approved", + "draft" => "trace-status-draft", + _ => "", + }; + let status_badge = if !node.status.is_empty() { + format!( + "{}", + html_escape(&node.status) + ) + } else { + String::new() + }; + let edge_label = format!( + "{}", + html_escape(&node.link_type) + ); + let escaped_title = html_escape(&node.title); + let escaped_id = html_escape(&node.id); + + if node.children.is_empty() { + // Leaf node — no expanding + format!( + "
    {edge_label}{badge} \ + {escaped_id} \ + {escaped_title}{status_badge}\ +
    \ +
    ", + id = node.id, + file = html_escape(project_path), + safe_id = node.id.replace('.', "_"), + ) + } else { + let open_attr = if depth == 0 { " open" } else { "" }; + let child_count = node.children.len(); + let mut html = format!( + "
    \ + {edge_label}{badge} \ + {escaped_id} \ + {escaped_title}{status_badge}\ + ({child_count})\ + \ + \ +
    \ +
    ", + id = node.id, + file = html_escape(project_path), + safe_id = node.id.replace('.', "_"), + ); + for child in &node.children { + html.push_str(&render_trace_node(child, depth + 1, project_path)); + } + html.push_str("
    "); + html + } +} + +async fn traceability_view( + State(state): State, + Query(params): Query, +) -> Html { + let state = state.read().await; + let store = &state.store; + let graph = &state.graph; + + // Collect all artifact types + let mut all_types: Vec<&str> = store.types().collect(); + all_types.sort(); + + let default_root = if store.count_by_type("requirement") > 0 { + "requirement" + } else if store.count_by_type("stakeholder-req") > 0 { + "stakeholder-req" + } else { + all_types.first().copied().unwrap_or("requirement") + }; + let root_type = params.root_type.as_deref().unwrap_or(default_root); + let status_filter = params.status.as_deref().unwrap_or("all"); + let search_filter = params.search.as_deref().unwrap_or("").to_lowercase(); + + // Get root artifacts + let mut root_ids: Vec<&str> = store + .by_type(root_type) + .iter() + .map(|s| s.as_str()) + .collect(); + root_ids.sort(); + + // Apply filters + let root_artifacts: Vec<&str> = root_ids + .into_iter() + .filter(|id| { + if let Some(a) = store.get(id) { + // Status filter + if status_filter != "all" && a.status.as_deref().unwrap_or("") != status_filter { + return false; + } + // Search filter + if !search_filter.is_empty() { + let id_match = id.to_lowercase().contains(&search_filter); + let title_match = a.title.to_lowercase().contains(&search_filter); + if !id_match && !title_match { + return false; + } + } + true + } else { + false + } + }) + .collect(); + + let mut html = String::from("

    Traceability Explorer

    "); + + // ── Filter controls ────────────────────────────────────────────── + html.push_str("
    "); + html.push_str("
    "); + html.push_str("
    "); + html.push_str(&format!( + "
    ", + html_escape(&search_filter) + )); + html.push_str("
    "); + html.push_str("
    "); + + // ── Traceability matrix summary ────────────────────────────────── + // Collect all link types that point TO the root type artifacts + let mut link_types_set: Vec = Vec::new(); + for id in &root_artifacts { + let backlinks = graph.backlinks_to(id); + for bl in backlinks { + if !link_types_set.contains(&bl.link_type) { + link_types_set.push(bl.link_type.clone()); + } + } + } + link_types_set.sort(); + + if !root_artifacts.is_empty() && !link_types_set.is_empty() { + html.push_str("

    Coverage Matrix

    "); + html.push_str(""); + for lt in &link_types_set { + html.push_str(&format!("", html_escape(lt))); + } + html.push_str(""); + for id in &root_artifacts { + let a = store.get(id).unwrap(); + let backlinks = graph.backlinks_to(id); + html.push_str(&format!( + "", + html_escape(id), + html_escape(id), + html_escape(&a.title) + )); + for lt in &link_types_set { + let count = backlinks.iter().filter(|bl| bl.link_type == *lt).count(); + let (cell_class, display) = if count > 0 { + ("trace-cell-ok", count.to_string()) + } else { + ("trace-cell-gap", "0".to_string()) + }; + html.push_str(&format!( + "" + )); + } + html.push_str(""); + } + html.push_str("
    ArtifactTitle{}
    {}{}{display}
    "); + } + + // ── Traceability chain explorer ────────────────────────────────── + html.push_str("

    Linkage Chains

    "); + if root_artifacts.is_empty() { + html.push_str( + "

    No artifacts match the current filters.

    ", + ); + } else { + html.push_str("
    "); + for id in &root_artifacts { + let a = store.get(id).unwrap(); + let children = build_trace_children(id, store, graph, 0, 3); + let badge = badge_for_type(&a.artifact_type); + let status = a.status.as_deref().unwrap_or(""); + let status_class = match status { + "approved" => "trace-status-approved", + "draft" => "trace-status-draft", + _ => "", + }; + let status_badge = if !status.is_empty() { + format!( + "{}", + html_escape(status) + ) + } else { + String::new() + }; + let source_path = a + .source_file + .as_ref() + .map(|p| p.display().to_string()) + .unwrap_or_default(); + let safe_id = id.replace('.', "_"); + + if children.is_empty() { + html.push_str(&format!( + "
    {badge} \ + {escaped_id} \ + {title}{status_badge} \ + (no inbound links)\ +
    \ +
    ", + id = html_escape(id), + escaped_id = html_escape(id), + title = html_escape(&a.title), + file = html_escape(&source_path), + )); + } else { + let child_count = children.len(); + html.push_str(&format!( + "
    \ + {badge} \ + {escaped_id} \ + {title}{status_badge}\ + ({child_count} inbound)\ + \ + \ +
    \ +
    ", + id = html_escape(id), + escaped_id = html_escape(id), + title = html_escape(&a.title), + file = html_escape(&source_path), + )); + for child in &children { + html.push_str(&render_trace_node( + child, + 1, + &source_path_for_artifact(store, &child.id), + )); + } + html.push_str("
    "); + } + } + html.push_str("
    "); + } + html.push_str("
    "); + + Html(html) +} + +/// Get source file path string for an artifact. +fn source_path_for_artifact(store: &Store, id: &str) -> String { + store + .get(id) + .and_then(|a| a.source_file.as_ref()) + .map(|p| p.display().to_string()) + .unwrap_or_default() +} + +/// HTMX endpoint: return git history for a specific file as HTML fragment. +async fn traceability_history( + State(state): State, + Query(params): Query, +) -> Html { + let state = state.read().await; + let pp = &state.project_path_buf; + + let file = match params.file { + Some(ref f) if !f.is_empty() => f.clone(), + _ => return Html("
    No source file recorded
    ".to_string()), + }; + + // Make the path relative to the project directory for git log + let file_path = std::path::Path::new(&file); + let rel_path = file_path.strip_prefix(pp).unwrap_or(file_path); + + let output = std::process::Command::new("git") + .args([ + "log", + "--oneline", + "--follow", + "--format=%h|%as|%s", + "-10", + "--", + ]) + .arg(rel_path) + .current_dir(pp) + .output(); + + match output { + Ok(o) if o.status.success() => { + let stdout = String::from_utf8_lossy(&o.stdout); + let lines: Vec<&str> = stdout.lines().filter(|l| !l.is_empty()).collect(); + if lines.is_empty() { + return Html("
    No git history found
    ".to_string()); + } + let mut h = String::from("
    Git History
    "); + for line in &lines { + let parts: Vec<&str> = line.splitn(3, '|').collect(); + if parts.len() == 3 { + h.push_str(&format!( + "
    \ + {}\ + {}\ + {}
    ", + html_escape(parts[0]), + html_escape(parts[1]), + html_escape(parts[2]), + )); + } + } + h.push_str("
    "); + Html(h) + } + _ => Html("
    Git history unavailable
    ".to_string()), + } +} + +// ── Helpers ────────────────────────────────────────────────────────────── + +fn html_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) +} + +/// Turn `path/to/file.rs:42` patterns into clickable `/source/path/to/file.rs#L42` links. +/// Also handles ranges like `file.rs:10-20` and plain `path/to/file.rs` (no line). +fn linkify_source_refs(s: &str) -> String { + // Regex-free: scan for patterns like word/word.ext:digits or word/word.ext:digits-digits + let mut result = String::new(); + let src = s; + let mut pos = 0usize; + + while pos < src.len() { + // Look for file-like patterns: contains '/' or '.' and optionally ':digits' + if let Some(m) = find_source_ref(&src[pos..]) { + result.push_str(&src[pos..pos + m.start]); + let file_path = &m.file; + let encoded_path = urlencoding::encode(file_path); + if let Some(line) = m.line { + if let Some(end_line) = m.end_line { + result.push_str(&format!( + "{file_path}:{line}-{end_line}" + )); + } else { + result.push_str(&format!( + "{file_path}:{line}" + )); + } + } else { + result.push_str(&format!( + "{file_path}" + )); + } + pos += m.start + m.len; + } else { + result.push_str(&src[pos..]); + break; + } + } + result +} + +struct SourceRefMatch { + start: usize, + len: usize, + file: String, + line: Option, + end_line: Option, +} + +/// Find the next source-ref pattern in text: `some/path.ext:line` or `some/path.ext:line-line` +/// File must contain a `/` or `.` with a recognized extension. 
+fn find_source_ref(s: &str) -> Option { + let extensions = [ + ".rs", ".yaml", ".yml", ".toml", ".md", ".py", ".js", ".ts", ".tsx", ".jsx", ".c", ".h", + ".cpp", ".hpp", ".go", ".java", ".rb", ".sh", ".json", ".xml", ".aadl", + ]; + let len = s.len(); + let mut i = 0; + while i < len { + // Try to match a file path starting at position i + // A file path: sequence of [a-zA-Z0-9_/.\-] containing at least one '/' and ending with a known extension + let start = i; + let mut j = i; + let mut has_slash = false; + let mut has_ext = false; + while j < len { + let c = s.as_bytes()[j]; + if c.is_ascii_alphanumeric() || c == b'_' || c == b'/' || c == b'.' || c == b'-' { + if c == b'/' { + has_slash = true; + } + j += 1; + } else { + break; + } + } + if has_slash && j > start + 2 { + let candidate = &s[start..j]; + // Check if it ends with a known extension + for ext in &extensions { + if candidate.ends_with(ext) { + has_ext = true; + break; + } + } + if has_ext { + let file = candidate.to_string(); + // Check for :line or :line-line + if j < len && s.as_bytes()[j] == b':' { + let _colon_pos = j; + j += 1; + let line_start = j; + while j < len && s.as_bytes()[j].is_ascii_digit() { + j += 1; + } + if j > line_start { + let line: u32 = s[line_start..j].parse().unwrap_or(0); + if line > 0 { + // Check for range: -digits + if j < len && s.as_bytes()[j] == b'-' { + let dash = j; + j += 1; + let end_start = j; + while j < len && s.as_bytes()[j].is_ascii_digit() { + j += 1; + } + if j > end_start { + let end_line: u32 = s[end_start..j].parse().unwrap_or(0); + if end_line > 0 { + return Some(SourceRefMatch { + start, + len: j - start, + file, + line: Some(line), + end_line: Some(end_line), + }); + } + } + // Not a valid range, just use line + return Some(SourceRefMatch { + start, + len: dash - start, + file, + line: Some(line), + end_line: None, + }); + } + return Some(SourceRefMatch { + start, + len: j - start, + file, + line: Some(line), + end_line: None, + }); + } + } + } + // No 
line number, just file path + return Some(SourceRefMatch { + start, + len: j - start, + file, + line: None, + end_line: None, + }); + } + } + i += 1; + } + None +} + +// ── Help / Docs / Schema dashboard views ─────────────────────────────── + +async fn help_view(State(state): State) -> Html { + let state = state.read().await; + let schema = &state.schema; + + // Count things for the overview + let type_count = schema.artifact_types.len(); + let link_count = schema.link_types.len(); + let rule_count = schema.traceability_rules.len(); + + let mut html = String::with_capacity(4096); + html.push_str("

    Help & Documentation

    "); + + // Quick-links cards + html.push_str(r#"
    "#); + + let link_style = "display:inline-block;margin-top:.75rem;font-size:.85rem"; + html.push_str(&format!( + "
    \ +

    Schema Types

    \ +

    {type_count}

    \ +

    artifact types loaded

    \ + Browse types →\ +
    " + )); + + html.push_str(&format!( + "
    \ +

    Link Types

    \ +

    {link_count}

    \ +

    with inverse mappings

    \ + View links →\ +
    " + )); + + html.push_str(&format!( + "
    \ +

    Traceability Rules

    \ +

    {rule_count}

    \ +

    enforced by validation

    \ + View rules →\ +
    " + )); + + html.push_str(&format!( + "
    \ +

    Documentation

    \ +

    Built-in guides, references, and schema docs — searchable.

    \ + Browse topics →\ +
    " + )); + + html.push_str("
    "); + + // CLI quick reference + html.push_str( + r#"
    +

    CLI Quick Reference

    +
    "#,
    +    );
    +    html.push_str("rivet validate              Validate all artifacts\n");
    +    html.push_str("rivet list [-t TYPE]        List artifacts\n");
    +    html.push_str("rivet stats                 Summary statistics\n");
    +    html.push_str("rivet coverage              Traceability coverage\n");
    +    html.push_str("rivet matrix --from X --to Y  Traceability matrix\n");
    +    html.push_str("rivet schema list           List artifact types\n");
    +    html.push_str("rivet schema show TYPE      Show type details\n");
    +    html.push_str("rivet docs                  List documentation topics\n");
    +    html.push_str("rivet docs --grep PATTERN   Search docs\n");
    +    html.push_str("rivet context               Generate agent context\n");
    +    html.push_str("rivet serve [-P PORT]       Start dashboard\n");
    +    html.push_str("
    "); + + Html(html) +} + +async fn help_docs_list(State(_state): State) -> Html { + let raw = docs::list_topics("text"); + + let mut html = String::with_capacity(4096); + html.push_str(r#"

    Documentation Topics

    "#); + html.push_str(r#"

    Built-in reference docs. Click a topic to read, or use rivet docs --grep PATTERN on the CLI.

    "#); + + // Parse the topic list and render as cards + html.push_str(r#"
    "#); + + let topics_json = docs::list_topics("json"); + if let Ok(val) = serde_json::from_str::(&topics_json) { + let mut current_cat = String::new(); + if let Some(topics) = val.get("topics").and_then(|t| t.as_array()) { + for topic in topics { + let slug = topic.get("slug").and_then(|s| s.as_str()).unwrap_or(""); + let title = topic.get("title").and_then(|s| s.as_str()).unwrap_or(""); + let category = topic.get("category").and_then(|s| s.as_str()).unwrap_or(""); + + if category != current_cat { + if !current_cat.is_empty() { + html.push_str("
    "); + } + html.push_str(&format!( + r#"

    {category}

    "# + )); + html.push_str(r#"
    "#); + current_cat = category.to_string(); + } + + html.push_str(&format!( + "\ + {slug}\ + {title}\ + " + )); + } + if !current_cat.is_empty() { + html.push_str("
    "); + } + } + } else { + // Fallback: render raw text + html.push_str(&format!("
    {}
    ", html_escape(&raw))); + } + + html.push_str("
    "); + Html(html) +} + +async fn help_docs_topic( + State(_state): State, + Path(slug): Path, +) -> Html { + let raw = docs::show_topic(&slug, "text"); + + let mut html = String::with_capacity(8192); + html.push_str(""); + html.push_str("
    "); + + // Render the markdown-ish content as HTML + let mut in_code_block = false; + let mut code_lang = String::new(); + let mut in_table = false; + for line in raw.lines() { + if line.starts_with("```") { + if in_code_block { + html.push_str("
    "); + in_code_block = false; + code_lang.clear(); + } else { + let lang = line.trim_start_matches('`').trim(); + code_lang = lang.to_string(); + html.push_str(r#"
    "#);
    +                in_code_block = true;
    +            }
    +            continue;
    +        }
    +        if in_code_block {
    +            let lang = match code_lang.as_str() {
    +                "yaml" | "yml" => "yaml",
    +                "bash" | "sh" | "shell" => "bash",
    +                _ => "",
    +            };
    +            if !lang.is_empty() {
    +                html.push_str(&syntax_highlight_line(line, lang));
    +            } else {
    +                html.push_str(&html_escape(line));
    +            }
    +            html.push('\n');
    +            continue;
    +        }
    +        if let Some(h1) = line.strip_prefix("# ") {
    +            html.push_str(&format!("

    {}

    ", html_escape(h1))); + } else if let Some(h2) = line.strip_prefix("## ") { + html.push_str(&format!( + "

    {}

    ", + html_escape(h2) + )); + } else if let Some(h3) = line.strip_prefix("### ") { + html.push_str(&format!( + "

    {}

    ", + html_escape(h3) + )); + } else if line.starts_with('|') { + if !in_table { + html.push_str(r#"
    "#); + in_table = true; + } + if line.contains("---") && !line.contains(' ') + || line.chars().all(|c| c == '|' || c == '-' || c == ' ') + { + // Skip separator rows + } else { + html.push_str(""); + let cells: Vec<&str> = line.split('|').collect(); + for cell in &cells[1..cells.len().saturating_sub(1)] { + html.push_str(&format!( + "", + html_escape(cell.trim()) + )); + } + html.push_str(""); + } + } else { + if in_table { + html.push_str("
    {}
    "); + in_table = false; + } + if line.is_empty() { + html.push_str("
    "); + } else { + html.push_str(&format!( + "

    {}

    ", + html_escape(line) + )); + } + } + } + if in_table { + html.push_str(""); + } + if in_code_block { + html.push_str("
    "); + } + + html.push_str(""); + Html(html) +} + +async fn help_schema_list(State(state): State) -> Html { + let state = state.read().await; + let schema = &state.schema; + + let mut types: Vec<_> = schema.artifact_types.values().collect(); + types.sort_by_key(|t| &t.name); + + let mut html = String::with_capacity(4096); + html.push_str("

    Schema Types

    "); + html.push_str(r#"

    Click a type to see fields, link fields, traceability rules, and example YAML.

    "#); + + html.push_str( + r#" + + "#, + ); + + for t in &types { + let proc = t.aspice_process.as_deref().unwrap_or("-"); + html.push_str(&format!( + "\ + \ + \ + \ + \ + \ + ", + name = t.name, + desc = html_escape(&t.description), + fields = t.fields.len(), + links = t.link_fields.len(), + proc = proc, + )); + } + + html.push_str("
    TypeDescriptionFieldsLinksProcess
    {name}{desc}{fields}{links}{proc}
    "); + Html(html) +} + +async fn help_schema_show( + State(state): State, + Path(name): Path, +) -> Html { + let state = state.read().await; + let raw = schema_cmd::cmd_show(&state.schema, &name, "text"); + + let mut html = String::with_capacity(8192); + html.push_str(""); + + // Render the output as structured HTML + html.push_str("
    ");
    +    html.push_str(&html_escape(&raw));
    +    html.push_str("
    "); + + Html(html) +} + +async fn help_links_view(State(state): State) -> Html { + let state = state.read().await; + let schema = &state.schema; + + let mut links: Vec<_> = schema.link_types.values().collect(); + links.sort_by_key(|l| &l.name); + + let mut html = String::with_capacity(4096); + html.push_str(""); + html.push_str("

    Link Types

    "); + + html.push_str( + "\ + \ + ", + ); + + for l in &links { + let inv = l.inverse.as_deref().unwrap_or("-"); + html.push_str(&format!( + "", + html_escape(&l.name), + html_escape(inv), + html_escape(&l.description), + )); + } + + html.push_str("
    NameInverseDescription
    {}{}{}
    "); + Html(html) +} + +async fn help_rules_view(State(state): State) -> Html { + let state = state.read().await; + let raw = schema_cmd::cmd_rules(&state.schema, "text"); + + let mut html = String::with_capacity(4096); + html.push_str(""); + html.push_str("

    Traceability Rules

    "); + html.push_str("
    ");
    +    html.push_str(&html_escape(&raw));
    +    html.push_str("
    "); + Html(html) } diff --git a/rivet-core/Cargo.toml b/rivet-core/Cargo.toml index f7efcd7..eb08d8c 100644 --- a/rivet-core/Cargo.toml +++ b/rivet-core/Cargo.toml @@ -8,9 +8,10 @@ license.workspace = true rust-version.workspace = true [features] -default = [] +default = ["aadl"] oslc = ["dep:reqwest", "dep:urlencoding"] wasm = ["dep:wasmtime", "dep:wasmtime-wasi"] +aadl = ["dep:spar-hir", "dep:spar-analysis"] [dependencies] serde = { workspace = true } @@ -18,6 +19,7 @@ serde_yaml = { workspace = true } serde_json = { workspace = true } thiserror = { workspace = true } petgraph = { workspace = true } +anyhow = { workspace = true } log = { workspace = true } quick-xml = { workspace = true } @@ -29,6 +31,10 @@ urlencoding = { workspace = true, optional = true } wasmtime = { workspace = true, optional = true } wasmtime-wasi = { workspace = true, optional = true } +# AADL / spar (optional, behind "aadl" feature — default on) +spar-hir = { workspace = true, optional = true } +spar-analysis = { workspace = true, optional = true } + [dev-dependencies] proptest = "1.5" criterion = { workspace = true } diff --git a/rivet-core/src/coverage.rs b/rivet-core/src/coverage.rs new file mode 100644 index 0000000..f2dc381 --- /dev/null +++ b/rivet-core/src/coverage.rs @@ -0,0 +1,317 @@ +//! Traceability coverage reporting. +//! +//! Auto-discovers traceability rules from the schema and computes +//! per-rule coverage percentages. Each rule checks whether artifacts of +//! a given source type have the required forward or backward links. + +use serde::Serialize; + +use crate::links::LinkGraph; +use crate::schema::Schema; +use crate::store::Store; + +/// Coverage result for a single traceability rule. +#[derive(Debug, Clone, Serialize)] +pub struct CoverageEntry { + /// Rule name from the schema. + pub rule_name: String, + /// Human-readable description. + pub description: String, + /// Source artifact type being checked. 
+ pub source_type: String, + /// The link type that is required (forward or backward). + pub link_type: String, + /// Whether the check uses forward links or backlinks. + pub direction: CoverageDirection, + /// Target / from types for the required link. + pub target_types: Vec, + /// Number of source artifacts that satisfy the rule. + pub covered: usize, + /// Total source artifacts of the given type. + pub total: usize, + /// IDs of artifacts that are NOT covered. + pub uncovered_ids: Vec, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum CoverageDirection { + Forward, + Backward, +} + +impl CoverageEntry { + /// Coverage percentage (0..100). Returns 100 when total is 0. + pub fn percentage(&self) -> f64 { + if self.total == 0 { + 100.0 + } else { + (self.covered as f64 / self.total as f64) * 100.0 + } + } +} + +/// Full coverage report across all traceability rules. +#[derive(Debug, Clone, Serialize)] +pub struct CoverageReport { + pub entries: Vec, +} + +impl CoverageReport { + /// Overall coverage: weighted average across all rules (by artifact count). + pub fn overall_coverage(&self) -> f64 { + let total: usize = self.entries.iter().map(|e| e.total).sum(); + if total == 0 { + return 100.0; + } + let covered: usize = self.entries.iter().map(|e| e.covered).sum(); + (covered as f64 / total as f64) * 100.0 + } + + /// Serialize the report to a JSON string. + pub fn to_json(&self) -> Result { + serde_json::to_string_pretty(self) + } +} + +/// Compute coverage for every traceability rule in the schema. 
+pub fn compute_coverage(store: &Store, schema: &Schema, graph: &LinkGraph) -> CoverageReport { + let mut entries = Vec::new(); + + for rule in &schema.traceability_rules { + let source_ids = store.by_type(&rule.source_type); + let total = source_ids.len(); + let mut covered = 0usize; + let mut uncovered_ids = Vec::new(); + + let (link_type, direction, target_types) = if let Some(ref req_link) = rule.required_link { + ( + req_link.clone(), + CoverageDirection::Forward, + rule.target_types.clone(), + ) + } else if let Some(ref req_bl) = rule.required_backlink { + ( + req_bl.clone(), + CoverageDirection::Backward, + rule.from_types.clone(), + ) + } else { + // Rule has neither required-link nor required-backlink; skip. + continue; + }; + + for id in source_ids { + let has_match = match direction { + CoverageDirection::Forward => graph + .links_from(id) + .iter() + .filter(|l| l.link_type == link_type) + .any(|l| { + if target_types.is_empty() { + true + } else { + store + .get(&l.target) + .is_some_and(|a| target_types.contains(&a.artifact_type)) + } + }), + CoverageDirection::Backward => graph + .backlinks_to(id) + .iter() + .filter(|bl| bl.link_type == link_type) + .any(|bl| { + if target_types.is_empty() { + true + } else { + store + .get(&bl.source) + .is_some_and(|a| target_types.contains(&a.artifact_type)) + } + }), + }; + + if has_match { + covered += 1; + } else { + uncovered_ids.push(id.clone()); + } + } + + entries.push(CoverageEntry { + rule_name: rule.name.clone(), + description: rule.description.clone(), + source_type: rule.source_type.clone(), + link_type: link_type.clone(), + direction, + target_types, + covered, + total, + uncovered_ids, + }); + } + + CoverageReport { entries } +} + +// ── Tests ──────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use crate::model::{Artifact, Link}; + use crate::schema::{SchemaFile, SchemaMetadata, Severity, TraceabilityRule}; + + fn test_schema() -> Schema { 
+ let file = SchemaFile { + schema: SchemaMetadata { + name: "test".into(), + version: "0.1.0".into(), + namespace: None, + description: None, + extends: vec![], + }, + base_fields: vec![], + artifact_types: vec![], + link_types: vec![], + traceability_rules: vec![ + TraceabilityRule { + name: "req-coverage".into(), + description: "Every req should be satisfied".into(), + source_type: "requirement".into(), + required_link: None, + required_backlink: Some("satisfies".into()), + target_types: vec![], + from_types: vec!["design-decision".into()], + severity: Severity::Warning, + }, + TraceabilityRule { + name: "dd-justification".into(), + description: "Every DD must satisfy a req".into(), + source_type: "design-decision".into(), + required_link: Some("satisfies".into()), + required_backlink: None, + target_types: vec!["requirement".into()], + from_types: vec![], + severity: Severity::Error, + }, + ], + }; + Schema::merge(&[file]) + } + + fn make_artifact(id: &str, atype: &str, links: Vec) -> Artifact { + Artifact { + id: id.into(), + artifact_type: atype.into(), + title: id.into(), + description: None, + status: None, + tags: vec![], + links, + fields: Default::default(), + source_file: None, + } + } + + #[test] + fn full_coverage() { + let schema = test_schema(); + let mut store = Store::new(); + store + .insert(make_artifact("REQ-001", "requirement", vec![])) + .unwrap(); + store + .insert(make_artifact( + "DD-001", + "design-decision", + vec![Link { + link_type: "satisfies".into(), + target: "REQ-001".into(), + }], + )) + .unwrap(); + + let graph = LinkGraph::build(&store, &schema); + let report = compute_coverage(&store, &schema, &graph); + + assert_eq!(report.entries.len(), 2); + + // req-coverage: REQ-001 has a backlink from DD-001 via satisfies + let req_entry = &report.entries[0]; + assert_eq!(req_entry.rule_name, "req-coverage"); + assert_eq!(req_entry.covered, 1); + assert_eq!(req_entry.total, 1); + assert!((req_entry.percentage() - 100.0).abs() < 
f64::EPSILON); + + // dd-justification: DD-001 has forward link satisfies -> REQ-001 + let dd_entry = &report.entries[1]; + assert_eq!(dd_entry.rule_name, "dd-justification"); + assert_eq!(dd_entry.covered, 1); + assert_eq!(dd_entry.total, 1); + + assert!((report.overall_coverage() - 100.0).abs() < f64::EPSILON); + } + + #[test] + fn partial_coverage() { + let schema = test_schema(); + let mut store = Store::new(); + store + .insert(make_artifact("REQ-001", "requirement", vec![])) + .unwrap(); + store + .insert(make_artifact("REQ-002", "requirement", vec![])) + .unwrap(); + store + .insert(make_artifact( + "DD-001", + "design-decision", + vec![Link { + link_type: "satisfies".into(), + target: "REQ-001".into(), + }], + )) + .unwrap(); + + let graph = LinkGraph::build(&store, &schema); + let report = compute_coverage(&store, &schema, &graph); + + // req-coverage: 1/2 covered + let req_entry = &report.entries[0]; + assert_eq!(req_entry.covered, 1); + assert_eq!(req_entry.total, 2); + assert!((req_entry.percentage() - 50.0).abs() < f64::EPSILON); + assert_eq!(req_entry.uncovered_ids, vec!["REQ-002"]); + + // overall: 2 covered out of 3 total + assert!((report.overall_coverage() - 66.666_666_666_666_66).abs() < 0.01); + } + + #[test] + fn zero_artifacts_gives_100_percent() { + let schema = test_schema(); + let store = Store::new(); + let graph = LinkGraph::build(&store, &schema); + let report = compute_coverage(&store, &schema, &graph); + + // Both rules have 0 source artifacts → percentage is 100 + for entry in &report.entries { + assert_eq!(entry.total, 0); + assert!((entry.percentage() - 100.0).abs() < f64::EPSILON); + } + assert!((report.overall_coverage() - 100.0).abs() < f64::EPSILON); + } + + #[test] + fn to_json_roundtrip() { + let schema = test_schema(); + let store = Store::new(); + let graph = LinkGraph::build(&store, &schema); + let report = compute_coverage(&store, &schema, &graph); + + let json = report.to_json().expect("serialize"); + 
assert!(json.contains("req-coverage")); + assert!(json.contains("dd-justification")); + } +} diff --git a/rivet-core/src/document.rs b/rivet-core/src/document.rs new file mode 100644 index 0000000..c7cec76 --- /dev/null +++ b/rivet-core/src/document.rs @@ -0,0 +1,942 @@ +//! Document model — markdown files with YAML frontmatter and `[[ID]]` artifact references. +//! +//! Documents represent prose content that surrounds and contextualizes artifacts: +//! specifications, design documents, test plans, glossaries. They complement +//! the structured YAML artifacts with narrative text and hierarchical ordering. +//! +//! ## File format +//! +//! ```markdown +//! --- +//! id: SRS-001 +//! type: specification +//! title: System Requirements Specification +//! status: draft +//! glossary: +//! STPA: Systems-Theoretic Process Analysis +//! --- +//! +//! # System Requirements Specification +//! +//! ## 1. Introduction +//! +//! [[REQ-001]] — Text-file-first artifact management. +//! ``` +//! +//! ## Tool mapping +//! +//! | Concept | ReqIF | OSLC | Polarion | +//! |---------------|------------------|-------------------------|-----------| +//! | Document | SPECIFICATION | RequirementCollection | LiveDoc | +//! | Section | SPEC-HIERARCHY | nested Collection | Heading | +//! | `[[REQ-001]]` | SPEC-OBJECT ref | member link | embedded | + +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; + +use crate::error::Error; + +// --------------------------------------------------------------------------- +// Data model +// --------------------------------------------------------------------------- + +/// A document loaded from a markdown file with YAML frontmatter. +#[derive(Debug, Clone)] +pub struct Document { + /// Unique document identifier (from frontmatter). + pub id: String, + /// Document type (e.g. "specification", "design", "test-plan"). + pub doc_type: String, + /// Human-readable title. + pub title: String, + /// Lifecycle status. 
+ pub status: Option, + /// Term definitions scoped to this document. + pub glossary: BTreeMap, + /// Raw markdown body (after frontmatter). + pub body: String, + /// Heading-based section hierarchy extracted from the body. + pub sections: Vec
    , + /// All `[[ID]]` references found in the body. + pub references: Vec, + /// Source file path. + pub source_file: Option, +} + +/// A section extracted from markdown headings. +#[derive(Debug, Clone)] +pub struct Section { + /// Heading level (1–6). + pub level: u8, + /// Heading text (without `#` prefix). + pub title: String, + /// Artifact IDs referenced within this section (until the next heading). + pub artifact_ids: Vec, +} + +/// A single `[[ID]]` reference found in the document body. +#[derive(Debug, Clone)] +pub struct DocReference { + /// The artifact ID referenced. + pub artifact_id: String, + /// Line number (1-based) where the reference appears. + pub line: usize, +} + +// --------------------------------------------------------------------------- +// YAML frontmatter model (for serde deserialization) +// --------------------------------------------------------------------------- + +#[derive(Debug, serde::Deserialize)] +struct Frontmatter { + id: String, + #[serde(rename = "type", default = "default_doc_type")] + doc_type: String, + title: String, + #[serde(default)] + status: Option, + #[serde(default)] + glossary: BTreeMap, +} + +fn default_doc_type() -> String { + "document".into() +} + +// --------------------------------------------------------------------------- +// Parsing +// --------------------------------------------------------------------------- + +/// Parse a markdown file with YAML frontmatter into a [`Document`]. 
+pub fn parse_document(content: &str, source: Option<&Path>) -> Result { + let (frontmatter, body) = split_frontmatter(content)?; + + let fm: Frontmatter = serde_yaml::from_str(&frontmatter) + .map_err(|e| Error::Schema(format!("document frontmatter: {e}")))?; + + let references = extract_references(&body); + let sections = extract_sections(&body); + + Ok(Document { + id: fm.id, + doc_type: fm.doc_type, + title: fm.title, + status: fm.status, + glossary: fm.glossary, + body, + sections, + references, + source_file: source.map(|p| p.to_path_buf()), + }) +} + +/// Load all `.md` files from a directory as documents. +pub fn load_documents(dir: &Path) -> Result, Error> { + if !dir.is_dir() { + return Ok(Vec::new()); + } + + let mut docs = Vec::new(); + let mut entries: Vec<_> = std::fs::read_dir(dir) + .map_err(|e| Error::Io(format!("{}: {e}", dir.display())))? + .filter_map(|e| e.ok()) + .filter(|e| { + e.path() + .extension() + .is_some_and(|ext| ext == "md" || ext == "markdown") + }) + .collect(); + + // Sort for deterministic ordering. + entries.sort_by_key(|e| e.file_name()); + + for entry in entries { + let path = entry.path(); + let content = std::fs::read_to_string(&path) + .map_err(|e| Error::Io(format!("{}: {e}", path.display())))?; + + // Skip files without frontmatter (e.g. plain README.md). + if !content.starts_with("---") { + continue; + } + + match parse_document(&content, Some(&path)) { + Ok(doc) => docs.push(doc), + Err(e) => { + log::warn!("skipping {}: {e}", path.display()); + } + } + } + + Ok(docs) +} + +// --------------------------------------------------------------------------- +// Internals +// --------------------------------------------------------------------------- + +/// Split `---\nfrontmatter\n---\nbody` into (frontmatter, body). 
+fn split_frontmatter(content: &str) -> Result<(String, String), Error> { + let trimmed = content.trim_start(); + if !trimmed.starts_with("---") { + return Err(Error::Schema( + "document must start with YAML frontmatter (---)".into(), + )); + } + + // Find the closing `---`. + let after_first = &trimmed[3..]; + let close_pos = after_first + .find("\n---") + .ok_or_else(|| Error::Schema("unterminated frontmatter (missing closing ---)".into()))?; + + let frontmatter = after_first[..close_pos].trim().to_string(); + let body = after_first[close_pos + 4..] + .trim_start_matches('\n') + .to_string(); + + Ok((frontmatter, body)) +} + +/// Extract all `[[ID]]` references from the markdown body. +fn extract_references(body: &str) -> Vec { + let mut refs = Vec::new(); + + for (line_idx, line) in body.lines().enumerate() { + let mut rest = line; + while let Some(start) = rest.find("[[") { + let after = &rest[start + 2..]; + if let Some(end) = after.find("]]") { + let id = after[..end].trim(); + if !id.is_empty() { + refs.push(DocReference { + artifact_id: id.to_string(), + line: line_idx + 1, + }); + } + rest = &after[end + 2..]; + } else { + break; + } + } + } + + refs +} + +/// Extract section hierarchy from markdown headings. +fn extract_sections(body: &str) -> Vec
    { + let mut sections = Vec::new(); + let mut current_refs: Vec = Vec::new(); + + for line in body.lines() { + let trimmed = line.trim_start(); + + if let Some(level) = heading_level(trimmed) { + // If we have a previous section, finalize its references. + if let Some(last) = sections.last_mut() { + let sec: &mut Section = last; + sec.artifact_ids = std::mem::take(&mut current_refs); + } + + let title = trimmed[level as usize..] + .trim_start_matches(' ') + .trim() + .to_string(); + + sections.push(Section { + level, + title, + artifact_ids: Vec::new(), + }); + current_refs.clear(); + } else { + // Collect [[ID]] refs for the current section. + let mut rest = trimmed; + while let Some(start) = rest.find("[[") { + let after = &rest[start + 2..]; + if let Some(end) = after.find("]]") { + let id = after[..end].trim(); + if !id.is_empty() { + current_refs.push(id.to_string()); + } + rest = &after[end + 2..]; + } else { + break; + } + } + } + } + + // Finalize last section. + if let Some(last) = sections.last_mut() { + last.artifact_ids = current_refs; + } + + sections +} + +/// Return the heading level (1–6) if the line starts with `# `. +fn heading_level(line: &str) -> Option { + let hashes = line.bytes().take_while(|&b| b == b'#').count(); + if (1..=6).contains(&hashes) && line.as_bytes().get(hashes) == Some(&b' ') { + Some(hashes as u8) + } else { + None + } +} + +/// Render markdown body to simple HTML, resolving `[[ID]]` into links. +/// +/// This is a lightweight renderer — not a full CommonMark implementation. +/// It handles headings, paragraphs, bold/italic, lists, and `[[ID]]` links. 
+pub fn render_to_html(doc: &Document, artifact_exists: impl Fn(&str) -> bool) -> String { + let mut html = String::with_capacity(doc.body.len() * 2); + let mut in_list = false; + let mut in_ordered_list = false; + let mut in_paragraph = false; + let mut in_table = false; + let mut table_header_done = false; + let mut in_code_block = false; + let mut code_block_lines: Vec = Vec::new(); + let mut code_block_lang: Option = None; + let mut in_blockquote = false; + + for line in doc.body.lines() { + let trimmed = line.trim(); + + // Code blocks must be handled first — content inside is literal. + if trimmed.starts_with("```") { + if in_code_block { + // Closing fence: check if this is an AADL diagram block. + if code_block_lang.as_deref() == Some("aadl") { + // Parse `root:` from accumulated lines. + let root = code_block_lines + .iter() + .find_map(|l| l.strip_prefix("root:").or_else(|| l.strip_prefix("root: "))) + .unwrap_or("") + .trim(); + html.push_str(&format!( + "

    Loading AADL diagram...

    \n" + )); + } else { + html.push_str("
    ");
    +                    html.push_str(&code_block_lines.join("\n"));
    +                    html.push_str("
    \n"); + } + code_block_lines.clear(); + code_block_lang = None; + in_code_block = false; + } else { + // Opening fence: close any open block-level element first. + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + // Capture language tag from the opening fence. + let lang = trimmed.trim_start_matches('`').trim(); + code_block_lang = if lang.is_empty() { + None + } else { + Some(lang.to_string()) + }; + in_code_block = true; + } + continue; + } + + if in_code_block { + code_block_lines.push(html_escape(line)); + continue; + } + + if trimmed.is_empty() { + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + continue; + } + + // Headings + if let Some(level) = heading_level(trimmed) { + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + let text = &trimmed[level as usize + 1..]; + let text = resolve_inline(text, &artifact_exists); + html.push_str(&format!("{text}\n")); + continue; + } + + // Table rows (lines starting and ending with |) + if trimmed.starts_with('|') && trimmed.ends_with('|') { + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + + // Skip separator rows like |---|---| + if is_table_separator(trimmed) { + continue; + } + + let cells: Vec<&str> = trimmed + .trim_matches('|') + .split('|') + .map(|c| c.trim()) + .collect(); + + if !in_table { + // First row is the header + html.push_str(""); + for cell in &cells { + let text = resolve_inline(cell, &artifact_exists); + html.push_str(&format!("")); + } + html.push_str("\n"); + in_table = true; + table_header_done = true; + } else if table_header_done { + html.push_str(""); + for cell in &cells { + let text = resolve_inline(cell, &artifact_exists); + html.push_str(&format!("")); + } + html.push_str("\n"); + } + continue; + } + + // Blockquotes + if let Some(bq_text) = trimmed.strip_prefix("> ") { + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_table { + html.push_str("
    {text}
    {text}
    \n"); + in_table = false; + table_header_done = false; + } + if !in_blockquote { + html.push_str("
    "); + in_blockquote = true; + } + let text = resolve_inline(bq_text, &artifact_exists); + html.push_str(&format!("

    {text}

    ")); + continue; + } + + // Unordered list items + if trimmed.starts_with("- ") || trimmed.starts_with("* ") { + if in_paragraph { + html.push_str("

    \n"); + in_paragraph = false; + } + if in_ordered_list { + html.push_str("\n"); + in_ordered_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("
    \n"); + in_blockquote = false; + } + if !in_list { + html.push_str("
      \n"); + in_list = true; + } + let text = resolve_inline(&trimmed[2..], &artifact_exists); + html.push_str(&format!("
    • {text}
    • \n")); + continue; + } + + // Ordered list items (e.g. "1. item") + if let Some(rest) = ordered_list_text(trimmed) { + if in_paragraph { + html.push_str("

      \n"); + in_paragraph = false; + } + if in_list { + html.push_str("
    \n"); + in_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + if !in_ordered_list { + html.push_str("
      \n"); + in_ordered_list = true; + } + let text = resolve_inline(rest, &artifact_exists); + html.push_str(&format!("
    1. {text}
    2. \n")); + continue; + } + + // Regular text → paragraph + if in_list { + html.push_str("\n"); + in_list = false; + } + if in_ordered_list { + html.push_str("
    \n"); + in_ordered_list = false; + } + if in_table { + html.push_str("\n"); + in_table = false; + table_header_done = false; + } + if in_blockquote { + html.push_str("\n"); + in_blockquote = false; + } + if !in_paragraph { + html.push_str("

    "); + in_paragraph = true; + } else { + html.push('\n'); + } + html.push_str(&resolve_inline(trimmed, &artifact_exists)); + } + + if in_paragraph { + html.push_str("

    \n"); + } + if in_list { + html.push_str("\n"); + } + if in_ordered_list { + html.push_str("\n"); + } + if in_table { + html.push_str("\n"); + } + if in_blockquote { + html.push_str("\n"); + } + + html +} + +/// Check if a table row is a separator (e.g. `|---|---|`). +fn is_table_separator(line: &str) -> bool { + line.trim_matches('|') + .split('|') + .all(|cell| cell.trim().chars().all(|c| c == '-' || c == ':')) +} + +/// If the line is an ordered list item (e.g. `1. text`), return the text after the marker. +fn ordered_list_text(line: &str) -> Option<&str> { + let digit_end = line.as_bytes().iter().position(|b| !b.is_ascii_digit())?; + if digit_end == 0 { + return None; + } + let rest = &line[digit_end..]; + rest.strip_prefix(". ") +} + +/// Resolve inline formatting: `[[ID]]` links, **bold**, *italic*, `code`, [text](url). +fn resolve_inline(text: &str, artifact_exists: &impl Fn(&str) -> bool) -> String { + let mut result = String::with_capacity(text.len() * 2); + let mut chars = text.char_indices().peekable(); + + while let Some((i, ch)) = chars.next() { + // Inline code (backticks) — must come before bold/italic since content is literal. + if ch == '`' { + if let Some(end) = text[i + 1..].find('`') { + let inner = html_escape(&text[i + 1..i + 1 + end]); + result.push_str(&format!("{inner}")); + let skip_to = i + 1 + end + 1; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } + } + + // Markdown links [text](url) — must come before [[id]] artifact refs. 
+ if ch == '[' && !text[i..].starts_with("[[") { + if let Some(link) = parse_markdown_link(&text[i..]) { + let text_part = html_escape(&link.text); + result.push_str(&format!( + "{text_part}", + href = html_escape(&link.url), + )); + let skip_to = i + link.total_len; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } + } + + if ch == '[' && text[i..].starts_with("[[") { + // Find closing ]] + if let Some(end) = text[i + 2..].find("]]") { + let id = text[i + 2..i + 2 + end].trim(); + if artifact_exists(id) { + result.push_str(&format!( + "{id}" + )); + } else { + result.push_str(&format!("{id}")); + } + // Skip past ]] + let skip_to = i + 2 + end + 2; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } + } + + if ch == '*' && text[i..].starts_with("**") { + // Bold + if let Some(end) = text[i + 2..].find("**") { + let inner = html_escape(&text[i + 2..i + 2 + end]); + result.push_str(&format!("{inner}")); + let skip_to = i + 2 + end + 2; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } + } + + if ch == '*' { + // Italic + if let Some(end) = text[i + 1..].find('*') { + let inner = html_escape(&text[i + 1..i + 1 + end]); + result.push_str(&format!("{inner}")); + let skip_to = i + 1 + end + 1; + while chars.peek().is_some_and(|&(j, _)| j < skip_to) { + chars.next(); + } + continue; + } + } + + // Default: escape HTML + match ch { + '&' => result.push_str("&"), + '<' => result.push_str("<"), + '>' => result.push_str(">"), + '"' => result.push_str("""), + _ => result.push(ch), + } + } + + result +} + +fn html_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) +} + +/// Result of parsing a `[text](url)` markdown link. +struct MarkdownLink { + text: String, + url: String, + /// Total number of bytes consumed from the input (including `[`, `]`, `(`, `)`). 
+ total_len: usize, +} + +/// Try to parse `[text](url)` at the start of `s`. +/// +/// Only allows `http://`, `https://`, and `#` URLs for safety (no `javascript:` etc.). +fn parse_markdown_link(s: &str) -> Option { + if !s.starts_with('[') { + return None; + } + let close_bracket = s[1..].find(']')?; + let text = &s[1..1 + close_bracket]; + let after_bracket = &s[1 + close_bracket + 1..]; + if !after_bracket.starts_with('(') { + return None; + } + let close_paren = after_bracket[1..].find(')')?; + let url = &after_bracket[1..1 + close_paren]; + // Safety check: only allow http, https, and fragment (#) URLs. + if !(url.starts_with("http://") || url.starts_with("https://") || url.starts_with('#')) { + return None; + } + let total_len = 1 + close_bracket + 1 + 1 + close_paren + 1; // [text](url) + Some(MarkdownLink { + text: text.to_string(), + url: url.to_string(), + total_len, + }) +} + +// --------------------------------------------------------------------------- +// Document store +// --------------------------------------------------------------------------- + +/// In-memory collection of loaded documents. +#[derive(Debug, Default)] +pub struct DocumentStore { + docs: Vec, +} + +impl DocumentStore { + pub fn new() -> Self { + Self::default() + } + + pub fn insert(&mut self, doc: Document) { + self.docs.push(doc); + } + + pub fn get(&self, id: &str) -> Option<&Document> { + self.docs.iter().find(|d| d.id == id) + } + + pub fn iter(&self) -> impl Iterator { + self.docs.iter() + } + + pub fn len(&self) -> usize { + self.docs.len() + } + + pub fn is_empty(&self) -> bool { + self.docs.is_empty() + } + + /// All artifact IDs referenced across all documents. 
+ pub fn all_references(&self) -> Vec<&DocReference> { + self.docs.iter().flat_map(|d| &d.references).collect() + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + const SAMPLE_DOC: &str = r#"--- +id: SRS-001 +type: specification +title: System Requirements Specification +status: draft +glossary: + STPA: Systems-Theoretic Process Analysis + UCA: Unsafe Control Action +--- + +# System Requirements Specification + +## 1. Introduction + +This document specifies the system-level requirements. + +## 2. Functional Requirements + +### 2.1 Artifact Management + +[[REQ-001]] — Text-file-first artifact management. + +[[REQ-002]] — STPA artifact support. + +### 2.2 Traceability + +[[REQ-003]] — Full ASPICE V-model traceability. + +## 3. Glossary + +See frontmatter. +"#; + + #[test] + fn parse_frontmatter() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + assert_eq!(doc.id, "SRS-001"); + assert_eq!(doc.doc_type, "specification"); + assert_eq!(doc.title, "System Requirements Specification"); + assert_eq!(doc.status.as_deref(), Some("draft")); + assert_eq!(doc.glossary.len(), 2); + assert_eq!( + doc.glossary.get("STPA").unwrap(), + "Systems-Theoretic Process Analysis" + ); + } + + #[test] + fn extract_references_from_body() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + let ids: Vec<&str> = doc + .references + .iter() + .map(|r| r.artifact_id.as_str()) + .collect(); + assert_eq!(ids, vec!["REQ-001", "REQ-002", "REQ-003"]); + } + + #[test] + fn extract_sections_hierarchy() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + assert_eq!(doc.sections.len(), 6); + assert_eq!(doc.sections[0].level, 1); + assert_eq!(doc.sections[0].title, "System Requirements Specification"); + assert_eq!(doc.sections[1].level, 2); + assert_eq!(doc.sections[1].title, "1. 
Introduction"); + assert_eq!(doc.sections[2].level, 2); + assert_eq!(doc.sections[2].title, "2. Functional Requirements"); + assert_eq!(doc.sections[3].level, 3); + assert_eq!(doc.sections[3].title, "2.1 Artifact Management"); + assert_eq!(doc.sections[3].artifact_ids, vec!["REQ-001", "REQ-002"]); + assert_eq!(doc.sections[4].level, 3); + assert_eq!(doc.sections[4].title, "2.2 Traceability"); + assert_eq!(doc.sections[4].artifact_ids, vec!["REQ-003"]); + } + + #[test] + fn multiple_refs_on_one_line() { + let content = "---\nid: D-1\ntitle: T\n---\n[[A-1]] and [[B-2]] here\n"; + let doc = parse_document(content, None).unwrap(); + assert_eq!(doc.references.len(), 2); + assert_eq!(doc.references[0].artifact_id, "A-1"); + assert_eq!(doc.references[1].artifact_id, "B-2"); + } + + #[test] + fn missing_frontmatter_is_error() { + let result = parse_document("# Just markdown\n\nNo frontmatter.", None); + assert!(result.is_err()); + } + + #[test] + fn render_html_resolves_refs() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + let html = render_to_html(&doc, |id| id == "REQ-001" || id == "REQ-002"); + assert!(html.contains("artifact-ref")); + assert!(html.contains("hx-get=\"/artifacts/REQ-001\"")); + assert!(html.contains("class=\"artifact-ref broken\"")); + } + + #[test] + fn render_html_headings() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + let html = render_to_html(&doc, |_| true); + assert!(html.contains("

    ")); + assert!(html.contains("

    ")); + assert!(html.contains("

    ")); + } + + #[test] + fn document_store() { + let doc = parse_document(SAMPLE_DOC, None).unwrap(); + let mut store = DocumentStore::new(); + store.insert(doc); + assert_eq!(store.len(), 1); + assert!(store.get("SRS-001").is_some()); + assert_eq!(store.all_references().len(), 3); + } + + #[test] + fn default_doc_type_when_omitted() { + let content = "---\nid: D-1\ntitle: Test\n---\nBody.\n"; + let doc = parse_document(content, None).unwrap(); + assert_eq!(doc.doc_type, "document"); + } + + #[test] + fn render_aadl_code_block_placeholder() { + let content = "---\nid: DOC-001\ntitle: Architecture\n---\n\n## Overview\n\n```aadl\nroot: FlightControl::Controller.Basic\n```\n\nSome text after.\n"; + let doc = parse_document(content, None).unwrap(); + let html = render_to_html(&doc, |_| true); + assert!(html.contains("aadl-diagram")); + assert!(html.contains("data-root=\"FlightControl::Controller.Basic\"")); + assert!(!html.contains("
    root: FlightControl"));
    +    }
    +}
    diff --git a/rivet-core/src/embedded.rs b/rivet-core/src/embedded.rs
    new file mode 100644
    index 0000000..0cf51a4
    --- /dev/null
    +++ b/rivet-core/src/embedded.rs
    @@ -0,0 +1,64 @@
    +//! Embedded schemas — compiled into the binary via `include_str!`.
    +//!
    +//! Provides fallback schema loading when no `schemas/` directory is found,
    +//! and enables `rivet docs`, `rivet schema show`, etc. without filesystem.
    +
    +use crate::error::Error;
    +use crate::schema::SchemaFile;
    +
    +// ── Embedded schema content ─────────────────────────────────────────────
    +
    +pub const SCHEMA_COMMON: &str = include_str!("../../schemas/common.yaml");
    +pub const SCHEMA_DEV: &str = include_str!("../../schemas/dev.yaml");
    +pub const SCHEMA_STPA: &str = include_str!("../../schemas/stpa.yaml");
    +pub const SCHEMA_ASPICE: &str = include_str!("../../schemas/aspice.yaml");
    +pub const SCHEMA_CYBERSECURITY: &str = include_str!("../../schemas/cybersecurity.yaml");
    +pub const SCHEMA_AADL: &str = include_str!("../../schemas/aadl.yaml");
    +
    +/// All known built-in schema names.
    +pub const SCHEMA_NAMES: &[&str] = &["common", "dev", "stpa", "aspice", "cybersecurity", "aadl"];
    +
    +/// Look up embedded schema content by name.
    +pub fn embedded_schema(name: &str) -> Option<&'static str> {
    +    match name {
    +        "common" => Some(SCHEMA_COMMON),
    +        "dev" => Some(SCHEMA_DEV),
    +        "stpa" => Some(SCHEMA_STPA),
    +        "aspice" => Some(SCHEMA_ASPICE),
    +        "cybersecurity" => Some(SCHEMA_CYBERSECURITY),
    +        "aadl" => Some(SCHEMA_AADL),
    +        _ => None,
    +    }
    +}
    +
    +/// Parse an embedded schema by name.
+pub fn load_embedded_schema(name: &str) -> Result<SchemaFile, Error> {
    +    let content = embedded_schema(name)
    +        .ok_or_else(|| Error::Schema(format!("unknown built-in schema: {name}")))?;
    +    serde_yaml::from_str(content)
    +        .map_err(|e| Error::Schema(format!("parsing embedded schema '{name}': {e}")))
    +}
    +
    +/// Load and merge schemas, falling back to embedded when files are not found.
    +pub fn load_schemas_with_fallback(
    +    schema_names: &[String],
    +    schemas_dir: &std::path::Path,
+) -> Result<crate::schema::Schema, Error> {
    +    let mut files = Vec::new();
    +
    +    for name in schema_names {
    +        let path = schemas_dir.join(format!("{name}.yaml"));
    +        if path.exists() {
    +            let file = crate::schema::Schema::load_file(&path)?;
    +            files.push(file);
    +        } else if let Some(content) = embedded_schema(name) {
    +            let file: SchemaFile = serde_yaml::from_str(content)
    +                .map_err(|e| Error::Schema(format!("embedded '{name}': {e}")))?;
    +            files.push(file);
    +        } else {
    +            log::warn!("schema '{name}' not found on disk or embedded");
    +        }
    +    }
    +
    +    Ok(crate::schema::Schema::merge(&files))
    +}
    diff --git a/rivet-core/src/formats/aadl.rs b/rivet-core/src/formats/aadl.rs
    new file mode 100644
    index 0000000..5f089d3
    --- /dev/null
    +++ b/rivet-core/src/formats/aadl.rs
    @@ -0,0 +1,456 @@
    +//! AADL adapter — uses spar crates to parse `.aadl` files directly.
    +//!
    +//! Integration via `spar-hir` (parsing + HIR) and `spar-analysis`
    +//! (connectivity, scheduling, latency, etc.). No CLI invocation needed.
    +//!
    +//! Import modes:
    +//! - `Bytes` — parse JSON (legacy/test compatibility)
    +//! - `Path` — single `.aadl` file or JSON file
    +//! - `Directory` — find `.aadl` files, parse with spar-hir, run analyses
    +
    +use std::collections::BTreeMap;
    +use std::path::Path;
    +
    +use serde::Deserialize;
    +
    +use crate::adapter::{Adapter, AdapterConfig, AdapterSource};
    +use crate::error::Error;
    +use crate::model::Artifact;
    +
    +// ── Public adapter ───────────────────────────────────────────────────────
    +
    +pub struct AadlAdapter {
+    supported: Vec<String>,
    +}
    +
    +impl AadlAdapter {
    +    pub fn new() -> Self {
    +        Self {
    +            supported: vec![
    +                "aadl-component".into(),
    +                "aadl-analysis-result".into(),
    +                "aadl-flow".into(),
    +            ],
    +        }
    +    }
    +}
    +
    +impl Default for AadlAdapter {
    +    fn default() -> Self {
    +        Self::new()
    +    }
    +}
    +
    +impl Adapter for AadlAdapter {
    +    fn id(&self) -> &str {
    +        "aadl"
    +    }
    +
    +    fn name(&self) -> &str {
    +        "AADL (spar)"
    +    }
    +
    +    fn supported_types(&self) -> &[String] {
    +        &self.supported
    +    }
    +
    +    fn import(
    +        &self,
    +        source: &AdapterSource,
    +        config: &AdapterConfig,
+    ) -> Result<Vec<Artifact>, Error> {
    +        match source {
    +            AdapterSource::Bytes(bytes) => {
    +                let content = std::str::from_utf8(bytes)
    +                    .map_err(|e| Error::Adapter(format!("invalid UTF-8: {}", e)))?;
    +                // Try JSON first (legacy), then AADL source.
    +                if content.trim_start().starts_with('{') {
    +                    parse_spar_json(content)
    +                } else {
    +                    import_aadl_sources(&[("input.aadl".into(), content.to_string())], config)
    +                }
    +            }
    +            AdapterSource::Path(path) => import_single_file(path, config),
    +            AdapterSource::Directory(dir) => import_aadl_directory(dir, config),
    +        }
    +    }
    +
+    fn export(&self, _artifacts: &[Artifact], _config: &AdapterConfig) -> Result<Vec<u8>, Error> {
    +        Err(Error::Adapter("AADL export is not supported".into()))
    +    }
    +}
    +
    +// ── Direct spar-hir integration ─────────────────────────────────────────
    +
    +#[cfg(feature = "aadl")]
    +fn import_aadl_sources(
    +    sources: &[(String, String)],
    +    config: &AdapterConfig,
+) -> Result<Vec<Artifact>, Error> {
    +    use spar_hir::Database;
    +
    +    let db = Database::from_aadl(sources);
    +    let packages = db.packages();
    +
    +    let mut artifacts = Vec::new();
    +
    +    // Convert component types and implementations from HIR.
    +    for pkg in &packages {
    +        for ct in &pkg.component_types {
    +            let category = ct.category.to_string();
    +            // Map spaces to dashes for schema compatibility (e.g. "thread group" → "thread-group")
    +            let category_id = category.replace(' ', "-");
    +            artifacts.push(component_to_artifact(
    +                &pkg.name,
    +                &ct.name,
    +                &category_id,
    +                "type",
    +            ));
    +        }
    +        for ci in &pkg.component_impls {
    +            let category = ci.category.to_string();
    +            let category_id = category.replace(' ', "-");
    +            artifacts.push(component_to_artifact(
    +                &pkg.name,
    +                &ci.name,
    +                &category_id,
    +                "implementation",
    +            ));
    +        }
    +    }
    +
    +    // Run tree-level analyses (category rules, naming) on all files.
    +    let tree_diags = run_tree_analyses(&db);
    +    let mut diag_index = 0;
    +    for diag in &tree_diags {
    +        artifacts.push(analysis_diagnostic_to_artifact(diag_index, diag));
    +        diag_index += 1;
    +    }
    +
    +    // Run instance-level analyses if a root classifier is configured.
    +    let root_classifier = config.get("root-classifier");
    +    if let Some(root_name) = root_classifier {
    +        if let Some(instance) = db.instantiate(root_name) {
    +            let instance_diags = run_instance_analyses(&instance);
    +            for diag in &instance_diags {
    +                artifacts.push(analysis_diagnostic_to_artifact(diag_index, diag));
    +                diag_index += 1;
    +            }
    +        }
    +    }
    +
    +    Ok(artifacts)
    +}
    +
    +#[cfg(feature = "aadl")]
+fn run_instance_analyses(instance: &spar_hir::Instance) -> Vec<spar_analysis::AnalysisDiagnostic> {
    +    use spar_analysis::AnalysisRunner;
    +
    +    let mut runner = AnalysisRunner::new();
    +    // Instance-level analyses (operate on SystemInstance).
    +    runner.register(Box::new(spar_analysis::connectivity::ConnectivityAnalysis));
    +    runner.register(Box::new(spar_analysis::hierarchy::HierarchyAnalysis));
    +    runner.register(Box::new(spar_analysis::completeness::CompletenessAnalysis));
    +    runner.register(Box::new(
    +        spar_analysis::direction_rules::DirectionRuleAnalysis,
    +    ));
    +    runner.register(Box::new(spar_analysis::flow_check::FlowCheckAnalysis));
    +    runner.register(Box::new(spar_analysis::mode_check::ModeCheckAnalysis));
    +    runner.register(Box::new(spar_analysis::binding_check::BindingCheckAnalysis));
    +    runner.register(Box::new(spar_analysis::latency::LatencyAnalysis));
    +    runner.register(Box::new(spar_analysis::scheduling::SchedulingAnalysis));
    +    runner.register(Box::new(
    +        spar_analysis::resource_budget::ResourceBudgetAnalysis,
    +    ));
    +
    +    runner.run_all(instance.inner())
    +}
    +
    +/// Run tree-level checks (category rules, naming, legality) on all item trees.
    +#[cfg(feature = "aadl")]
+fn run_tree_analyses(db: &spar_hir::Database) -> Vec<spar_analysis::AnalysisDiagnostic> {
    +    let mut diags = Vec::new();
    +    for tree in db.item_trees() {
    +        diags.extend(spar_analysis::category_check::check_category_rules(tree));
    +        diags.extend(spar_analysis::naming_rules::check_naming_rules(tree));
    +    }
    +    diags
    +}
    +
    +#[cfg(feature = "aadl")]
    +fn analysis_diagnostic_to_artifact(
    +    index: usize,
    +    diag: &spar_analysis::AnalysisDiagnostic,
    +) -> Artifact {
    +    let id = format!("AADL-DIAG-{:04}", index + 1);
    +    let severity = match diag.severity {
    +        spar_analysis::Severity::Error => "error",
    +        spar_analysis::Severity::Warning => "warning",
    +        spar_analysis::Severity::Info => "info",
    +    };
    +
    +    let mut fields = BTreeMap::new();
    +    fields.insert(
    +        "analysis-name".into(),
    +        serde_yaml::Value::String(diag.analysis.clone()),
    +    );
    +    fields.insert(
    +        "severity".into(),
    +        serde_yaml::Value::String(severity.into()),
    +    );
    +    fields.insert(
    +        "component-path".into(),
    +        serde_yaml::Value::String(diag.path.join(".")),
    +    );
    +    fields.insert(
    +        "details".into(),
    +        serde_yaml::Value::String(diag.message.clone()),
    +    );
    +
    +    Artifact {
    +        id,
    +        artifact_type: "aadl-analysis-result".into(),
    +        title: format!("[{}] {}", diag.analysis, diag.message),
    +        description: Some(diag.message.clone()),
    +        status: None,
    +        tags: vec!["aadl".into(), diag.analysis.clone()],
    +        links: vec![],
    +        fields,
    +        source_file: None,
    +    }
    +}
    +
    +// Fallback when the aadl feature is disabled.
    +#[cfg(not(feature = "aadl"))]
    +fn import_aadl_sources(
    +    _sources: &[(String, String)],
    +    _config: &AdapterConfig,
+) -> Result<Vec<Artifact>, Error> {
    +    Err(Error::Adapter(
    +        "AADL support requires the 'aadl' feature (spar crates)".into(),
    +    ))
    +}
    +
    +// ── Legacy JSON parsing (test compatibility) ────────────────────────────
    +
    +#[derive(Debug, Deserialize)]
    +struct SparOutput {
    +    #[allow(dead_code)]
    +    root: String,
    +    #[serde(default)]
+    packages: Vec<SparPackage>,
    +    #[allow(dead_code)]
    +    #[serde(default)]
    +    instance: Option,
    +    #[serde(default)]
+    diagnostics: Vec<SparDiagnostic>,
    +}
    +
    +#[derive(Debug, Deserialize)]
    +struct SparPackage {
    +    name: String,
    +    #[serde(default)]
+    component_types: Vec<SparComponentType>,
    +    #[serde(default)]
+    component_impls: Vec<SparComponentImpl>,
    +}
    +
    +#[derive(Debug, Deserialize)]
    +struct SparComponentType {
    +    name: String,
    +    category: String,
    +}
    +
    +#[derive(Debug, Deserialize)]
    +struct SparComponentImpl {
    +    name: String,
    +    category: String,
    +}
    +
    +#[derive(Debug, Deserialize)]
    +struct SparDiagnostic {
    +    severity: String,
    +    message: String,
    +    #[serde(default)]
+    path: Vec<String>,
    +    #[serde(default)]
    +    analysis: String,
    +}
    +
+fn parse_spar_json(content: &str) -> Result<Vec<Artifact>, Error> {
    +    let output: SparOutput = serde_json::from_str(content)
    +        .map_err(|e| Error::Adapter(format!("failed to parse spar JSON: {}", e)))?;
    +
    +    let mut artifacts = Vec::new();
    +
    +    for pkg in &output.packages {
    +        for ct in &pkg.component_types {
    +            artifacts.push(component_to_artifact(
    +                &pkg.name,
    +                &ct.name,
    +                &ct.category,
    +                "type",
    +            ));
    +        }
    +        for ci in &pkg.component_impls {
    +            artifacts.push(component_to_artifact(
    +                &pkg.name,
    +                &ci.name,
    +                &ci.category,
    +                "implementation",
    +            ));
    +        }
    +    }
    +
    +    for (index, diag) in output.diagnostics.iter().enumerate() {
    +        artifacts.push(diagnostic_to_artifact(index, diag));
    +    }
    +
    +    Ok(artifacts)
    +}
    +
    +// ── Shared artifact builders ────────────────────────────────────────────
    +
    +fn component_to_artifact(
    +    pkg_name: &str,
    +    comp_name: &str,
    +    category: &str,
    +    classifier_kind: &str,
    +) -> Artifact {
    +    let id = format!("AADL-{}-{}", pkg_name, comp_name);
    +
    +    let mut fields = BTreeMap::new();
    +    fields.insert(
    +        "category".into(),
    +        serde_yaml::Value::String(category.into()),
    +    );
    +    fields.insert(
    +        "aadl-package".into(),
    +        serde_yaml::Value::String(pkg_name.into()),
    +    );
    +    fields.insert(
    +        "classifier-kind".into(),
    +        serde_yaml::Value::String(classifier_kind.into()),
    +    );
    +
    +    Artifact {
    +        id,
    +        artifact_type: "aadl-component".into(),
    +        title: format!("{} {} ({})", category, comp_name, classifier_kind),
    +        description: None,
    +        status: Some("imported".into()),
    +        tags: vec!["aadl".into()],
    +        links: vec![],
    +        fields,
    +        source_file: None,
    +    }
    +}
    +
    +fn diagnostic_to_artifact(index: usize, diag: &SparDiagnostic) -> Artifact {
    +    let id = format!("AADL-DIAG-{:04}", index + 1);
    +
    +    let mut fields = BTreeMap::new();
    +    fields.insert(
    +        "analysis-name".into(),
    +        serde_yaml::Value::String(diag.analysis.clone()),
    +    );
    +    fields.insert(
    +        "severity".into(),
    +        serde_yaml::Value::String(diag.severity.clone()),
    +    );
    +    fields.insert(
    +        "component-path".into(),
    +        serde_yaml::Value::String(diag.path.join(".")),
    +    );
    +    fields.insert(
    +        "details".into(),
    +        serde_yaml::Value::String(diag.message.clone()),
    +    );
    +
    +    Artifact {
    +        id,
    +        artifact_type: "aadl-analysis-result".into(),
    +        title: format!("[{}] {}", diag.analysis, diag.message),
    +        description: Some(diag.message.clone()),
    +        status: None,
    +        tags: vec!["aadl".into(), diag.analysis.clone()],
    +        links: vec![],
    +        fields,
    +        source_file: None,
    +    }
    +}
    +
    +// ── File / directory import ─────────────────────────────────────────────
    +
+fn import_single_file(path: &Path, config: &AdapterConfig) -> Result<Vec<Artifact>, Error> {
    +    let content = std::fs::read_to_string(path)
    +        .map_err(|e| Error::Io(format!("{}: {}", path.display(), e)))?;
    +
    +    let is_json =
    +        path.extension().is_some_and(|ext| ext == "json") || content.trim_start().starts_with('{');
    +
    +    let mut artifacts = if is_json {
    +        parse_spar_json(&content)?
    +    } else {
    +        let name = path
    +            .file_name()
    +            .unwrap_or_default()
    +            .to_string_lossy()
    +            .into_owned();
    +        import_aadl_sources(&[(name, content)], config)?
    +    };
    +
    +    for a in &mut artifacts {
    +        a.source_file = Some(path.to_path_buf());
    +    }
    +    Ok(artifacts)
    +}
    +
+fn import_aadl_directory(dir: &Path, config: &AdapterConfig) -> Result<Vec<Artifact>, Error> {
    +    let aadl_files = collect_aadl_files(dir)?;
    +    if aadl_files.is_empty() {
    +        return Ok(Vec::new());
    +    }
    +
    +    // Read all .aadl files into (name, content) pairs for spar-hir.
    +    let mut sources = Vec::new();
    +    for path in &aadl_files {
    +        let content = std::fs::read_to_string(path)
    +            .map_err(|e| Error::Io(format!("{}: {}", path.display(), e)))?;
    +        let name = path
    +            .file_name()
    +            .unwrap_or_default()
    +            .to_string_lossy()
    +            .into_owned();
    +        sources.push((name, content));
    +    }
    +
    +    let mut artifacts = import_aadl_sources(&sources, config)?;
    +
    +    // Tag artifacts with source file info.
    +    for a in &mut artifacts {
    +        if a.source_file.is_none() {
    +            a.source_file = Some(dir.to_path_buf());
    +        }
    +    }
    +
    +    Ok(artifacts)
    +}
    +
+fn collect_aadl_files(dir: &Path) -> Result<Vec<std::path::PathBuf>, Error> {
    +    let mut files = Vec::new();
    +    let entries =
    +        std::fs::read_dir(dir).map_err(|e| Error::Io(format!("{}: {}", dir.display(), e)))?;
    +
    +    for entry in entries {
    +        let entry = entry.map_err(|e| Error::Io(e.to_string()))?;
    +        let path = entry.path();
    +        if path.extension().is_some_and(|ext| ext == "aadl") {
    +            files.push(path);
    +        } else if path.is_dir() {
    +            files.extend(collect_aadl_files(&path)?);
    +        }
    +    }
    +
    +    Ok(files)
    +}
    diff --git a/rivet-core/src/formats/mod.rs b/rivet-core/src/formats/mod.rs
    index d577091..fe46f40 100644
    --- a/rivet-core/src/formats/mod.rs
    +++ b/rivet-core/src/formats/mod.rs
    @@ -1,2 +1,8 @@
    +pub mod aadl;
     pub mod generic;
     pub mod stpa;
    +
    +// Note: The aadl module is always compiled. When the "aadl" feature is
    +// enabled (default), it uses spar-hir/spar-analysis for direct parsing.
    +// Without the feature, directory/file import of .aadl files returns an error
    +// but JSON import still works for test compatibility.
    diff --git a/rivet-core/src/lib.rs b/rivet-core/src/lib.rs
    index 450f4d7..5f5d851 100644
    --- a/rivet-core/src/lib.rs
    +++ b/rivet-core/src/lib.rs
    @@ -1,5 +1,8 @@
     pub mod adapter;
    +pub mod coverage;
     pub mod diff;
    +pub mod document;
    +pub mod embedded;
     pub mod error;
     pub mod formats;
     pub mod links;
    @@ -9,6 +12,7 @@ pub mod model;
     pub mod oslc;
     pub mod query;
     pub mod reqif;
    +pub mod results;
     pub mod schema;
     pub mod store;
     pub mod validate;
    @@ -31,20 +35,10 @@ pub fn load_project_config(path: &Path) -> Result {
     }
     
     /// Load schemas from the built-in schemas directory or from file paths.
    +///
    +/// Falls back to embedded (compiled-in) schemas when files are not on disk.
 pub fn load_schemas(schema_names: &[String], schemas_dir: &Path) -> Result<schema::Schema, Error> {
    -    let mut files = Vec::new();
    -
    -    for name in schema_names {
    -        let path = schemas_dir.join(format!("{}.yaml", name));
    -        if path.exists() {
    -            let file = schema::Schema::load_file(&path)?;
    -            files.push(file);
    -        } else {
    -            log::warn!("schema file not found: {}", path.display());
    -        }
    -    }
    -
    -    Ok(schema::Schema::merge(&files))
    +    embedded::load_schemas_with_fallback(schema_names, schemas_dir)
     }
     
     /// Load artifacts from a source using the appropriate adapter.
    @@ -77,6 +71,10 @@ pub fn load_artifacts(
                 let adapter = reqif::ReqIfAdapter::new();
                 adapter::Adapter::import(&adapter, &source_input, &adapter_config)
             }
    +        "aadl" => {
    +            let adapter = formats::aadl::AadlAdapter::new();
    +            adapter::Adapter::import(&adapter, &source_input, &adapter_config)
    +        }
             other => Err(Error::Adapter(format!("unknown format: {}", other))),
         }
     }
    diff --git a/rivet-core/src/model.rs b/rivet-core/src/model.rs
    index 7d86530..2308f23 100644
    --- a/rivet-core/src/model.rs
    +++ b/rivet-core/src/model.rs
    @@ -76,12 +76,18 @@ impl Artifact {
         }
     }
     
    -/// Project configuration loaded from `trace.yaml`.
    +/// Project configuration loaded from `rivet.yaml`.
     #[derive(Debug, Clone, Serialize, Deserialize)]
     pub struct ProjectConfig {
         pub project: ProjectMetadata,
         #[serde(default)]
         pub sources: Vec,
    +    /// Directories containing markdown documents (with YAML frontmatter).
    +    #[serde(default)]
+    pub docs: Vec<String>,
    +    /// Directory containing test result YAML files.
    +    #[serde(default)]
+    pub results: Option<String>,
     }
     
     #[derive(Debug, Clone, Serialize, Deserialize)]
    diff --git a/rivet-core/src/results.rs b/rivet-core/src/results.rs
    new file mode 100644
    index 0000000..f6e37bf
    --- /dev/null
    +++ b/rivet-core/src/results.rs
    @@ -0,0 +1,475 @@
    +//! Test run results model and loader.
    +//!
    +//! Results are stored as YAML files, each representing a single test run
    +//! with per-artifact pass/fail/skip results.
    +
    +use std::path::{Path, PathBuf};
    +
    +use serde::{Deserialize, Serialize};
    +
    +/// Outcome of a single test.
    +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
    +#[serde(rename_all = "lowercase")]
    +pub enum TestStatus {
    +    Pass,
    +    Fail,
    +    Skip,
    +    Error,
    +    Blocked,
    +}
    +
    +impl TestStatus {
    +    pub fn is_pass(&self) -> bool {
    +        matches!(self, Self::Pass)
    +    }
    +    pub fn is_fail(&self) -> bool {
    +        matches!(self, Self::Fail | Self::Error)
    +    }
    +}
    +
    +impl std::fmt::Display for TestStatus {
    +    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    +        match self {
    +            Self::Pass => write!(f, "pass"),
    +            Self::Fail => write!(f, "fail"),
    +            Self::Skip => write!(f, "skip"),
    +            Self::Error => write!(f, "error"),
    +            Self::Blocked => write!(f, "blocked"),
    +        }
    +    }
    +}
    +
    +/// A single test result for one artifact in a run.
    +#[derive(Debug, Clone, Serialize, Deserialize)]
    +pub struct TestResult {
    +    /// The artifact ID this result is for (e.g., "UVER-1").
    +    pub artifact: String,
    +    pub status: TestStatus,
    +    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub duration: Option<f64>,
    +    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub message: Option<String>,
    +}
    +
    +/// Metadata for a test run.
    +#[derive(Debug, Clone, Serialize, Deserialize)]
    +pub struct RunMetadata {
    +    pub id: String,
    +    pub timestamp: String,
    +    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub source: Option<String>,
    +    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub environment: Option<String>,
    +    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub commit: Option<String>,
    +}
    +
    +/// YAML file structure for a test run.
    +#[derive(Debug, Clone, Serialize, Deserialize)]
    +pub struct TestRunFile {
    +    pub run: RunMetadata,
+    pub results: Vec<TestResult>,
    +}
    +
    +/// A loaded test run.
    +#[derive(Debug, Clone)]
    +pub struct TestRun {
    +    pub run: RunMetadata,
+    pub results: Vec<TestResult>,
+    pub source_file: Option<PathBuf>,
    +}
    +
    +/// Aggregate statistics for a result set.
    +#[derive(Debug, Clone, Default)]
    +pub struct ResultSummary {
    +    pub total_runs: usize,
    +    pub total_results: usize,
    +    pub pass_count: usize,
    +    pub fail_count: usize,
    +    pub skip_count: usize,
    +    pub error_count: usize,
    +    pub blocked_count: usize,
    +}
    +
    +impl ResultSummary {
    +    pub fn pass_rate(&self) -> f64 {
    +        if self.total_results == 0 {
    +            return 0.0;
    +        }
    +        (self.pass_count as f64 / self.total_results as f64) * 100.0
    +    }
    +}
    +
    +/// In-memory collection of test runs.
    +#[derive(Debug, Default)]
    +pub struct ResultStore {
+    runs: Vec<TestRun>,
    +}
    +
    +impl ResultStore {
    +    pub fn new() -> Self {
    +        Self::default()
    +    }
    +
    +    pub fn insert(&mut self, run: TestRun) {
    +        self.runs.push(run);
    +        // Keep sorted by timestamp descending (newest first)
    +        self.runs
    +            .sort_by(|a, b| b.run.timestamp.cmp(&a.run.timestamp));
    +    }
    +
    +    pub fn is_empty(&self) -> bool {
    +        self.runs.is_empty()
    +    }
    +
    +    pub fn len(&self) -> usize {
    +        self.runs.len()
    +    }
    +
    +    /// All runs, sorted newest first.
    +    pub fn runs(&self) -> &[TestRun] {
    +        &self.runs
    +    }
    +
    +    /// Get a specific run by ID.
    +    pub fn get_run(&self, run_id: &str) -> Option<&TestRun> {
    +        self.runs.iter().find(|r| r.run.id == run_id)
    +    }
    +
    +    /// Latest result for a given artifact ID across all runs.
    +    /// Returns the run metadata and the test result.
    +    pub fn latest_for(&self, artifact_id: &str) -> Option<(&RunMetadata, &TestResult)> {
    +        // runs are sorted newest first, so first match is latest
    +        for run in &self.runs {
    +            if let Some(result) = run.results.iter().find(|r| r.artifact == artifact_id) {
    +                return Some((&run.run, result));
    +            }
    +        }
    +        None
    +    }
    +
    +    /// All results for a specific artifact across all runs (newest first).
    +    pub fn history_for(&self, artifact_id: &str) -> Vec<(&RunMetadata, &TestResult)> {
    +        self.runs
    +            .iter()
    +            .filter_map(|run| {
    +                run.results
    +                    .iter()
    +                    .find(|r| r.artifact == artifact_id)
    +                    .map(|result| (&run.run, result))
    +            })
    +            .collect()
    +    }
    +
    +    /// Aggregate summary across all runs.
    +    pub fn summary(&self) -> ResultSummary {
    +        let mut s = ResultSummary {
    +            total_runs: self.runs.len(),
    +            ..Default::default()
    +        };
    +        // Count from the latest run only for overall stats
    +        if let Some(latest) = self.runs.first() {
    +            for r in &latest.results {
    +                s.total_results += 1;
    +                match r.status {
    +                    TestStatus::Pass => s.pass_count += 1,
    +                    TestStatus::Fail => s.fail_count += 1,
    +                    TestStatus::Skip => s.skip_count += 1,
    +                    TestStatus::Error => s.error_count += 1,
    +                    TestStatus::Blocked => s.blocked_count += 1,
    +                }
    +            }
    +        }
    +        s
    +    }
    +}
    +
    +/// Load all test run YAML files from a directory.
    +pub fn load_results(dir: &Path) -> anyhow::Result<Vec<TestRun>> {
    +    let mut runs = Vec::new();
    +
    +    if !dir.exists() {
    +        return Ok(runs);
    +    }
    +
    +    let mut entries: Vec<_> = std::fs::read_dir(dir)?
    +        .filter_map(|e| e.ok())
    +        .filter(|e| {
    +            let p = e.path();
    +            matches!(p.extension().and_then(|x| x.to_str()), Some("yaml" | "yml"))
    +        })
    +        .collect();
    +    entries.sort_by_key(|e| e.path());
    +
    +    for entry in entries {
    +        let path = entry.path();
    +        let content = std::fs::read_to_string(&path)?;
    +        let file: TestRunFile = serde_yaml::from_str(&content)
    +            .map_err(|e| anyhow::anyhow!("{}: {e}", path.display()))?;
    +        runs.push(TestRun {
    +            run: file.run,
    +            results: file.results,
    +            source_file: Some(path),
    +        });
    +    }
    +
    +    Ok(runs)
    +}
    +
    +#[cfg(test)]
    +mod tests {
    +    use super::*;
    +
    +    fn make_run(id: &str, timestamp: &str, results: Vec<TestResult>) -> TestRun {
    +        TestRun {
    +            run: RunMetadata {
    +                id: id.to_string(),
    +                timestamp: timestamp.to_string(),
    +                source: None,
    +                environment: None,
    +                commit: None,
    +            },
    +            results,
    +            source_file: None,
    +        }
    +    }
    +
    +    fn make_result(artifact: &str, status: TestStatus) -> TestResult {
    +        TestResult {
    +            artifact: artifact.to_string(),
    +            status,
    +            duration: None,
    +            message: None,
    +        }
    +    }
    +
    +    #[test]
    +    fn test_status_display() {
    +        assert_eq!(TestStatus::Pass.to_string(), "pass");
    +        assert_eq!(TestStatus::Fail.to_string(), "fail");
    +        assert_eq!(TestStatus::Skip.to_string(), "skip");
    +        assert_eq!(TestStatus::Error.to_string(), "error");
    +        assert_eq!(TestStatus::Blocked.to_string(), "blocked");
    +    }
    +
    +    #[test]
    +    fn test_status_is_pass_fail() {
    +        assert!(TestStatus::Pass.is_pass());
    +        assert!(!TestStatus::Fail.is_pass());
    +        assert!(!TestStatus::Skip.is_pass());
    +        assert!(!TestStatus::Error.is_pass());
    +        assert!(!TestStatus::Blocked.is_pass());
    +
    +        assert!(TestStatus::Fail.is_fail());
    +        assert!(TestStatus::Error.is_fail());
    +        assert!(!TestStatus::Pass.is_fail());
    +        assert!(!TestStatus::Skip.is_fail());
    +        assert!(!TestStatus::Blocked.is_fail());
    +    }
    +
    +    #[test]
    +    fn test_result_store_insert_and_sort() {
    +        let mut store = ResultStore::new();
    +        assert!(store.is_empty());
    +
    +        let run_old = make_run(
    +            "run-1",
    +            "2026-03-01T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Pass)],
    +        );
    +        let run_new = make_run(
    +            "run-2",
    +            "2026-03-05T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Fail)],
    +        );
    +
    +        // Insert older first, then newer
    +        store.insert(run_old);
    +        store.insert(run_new);
    +
    +        assert_eq!(store.len(), 2);
    +        // Newest first
    +        assert_eq!(store.runs()[0].run.id, "run-2");
    +        assert_eq!(store.runs()[1].run.id, "run-1");
    +    }
    +
    +    #[test]
    +    fn test_latest_for() {
    +        let mut store = ResultStore::new();
    +
    +        store.insert(make_run(
    +            "run-1",
    +            "2026-03-01T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Fail)],
    +        ));
    +        store.insert(make_run(
    +            "run-2",
    +            "2026-03-05T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Pass)],
    +        ));
    +
    +        let (meta, result) = store.latest_for("A-1").unwrap();
    +        assert_eq!(meta.id, "run-2");
    +        assert_eq!(result.status, TestStatus::Pass);
    +
    +        assert!(store.latest_for("NONEXISTENT").is_none());
    +    }
    +
    +    #[test]
    +    fn test_history_for() {
    +        let mut store = ResultStore::new();
    +
    +        store.insert(make_run(
    +            "run-1",
    +            "2026-03-01T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Fail)],
    +        ));
    +        store.insert(make_run(
    +            "run-2",
    +            "2026-03-05T00:00:00Z",
    +            vec![make_result("A-1", TestStatus::Pass)],
    +        ));
    +        store.insert(make_run(
    +            "run-3",
    +            "2026-03-03T00:00:00Z",
    +            vec![make_result("B-1", TestStatus::Skip)],
    +        ));
    +
    +        let history = store.history_for("A-1");
    +        assert_eq!(history.len(), 2);
    +        // Newest first
    +        assert_eq!(history[0].0.id, "run-2");
    +        assert_eq!(history[0].1.status, TestStatus::Pass);
    +        assert_eq!(history[1].0.id, "run-1");
    +        assert_eq!(history[1].1.status, TestStatus::Fail);
    +
    +        // B-1 only appears in run-3
    +        let history_b = store.history_for("B-1");
    +        assert_eq!(history_b.len(), 1);
    +        assert_eq!(history_b[0].0.id, "run-3");
    +    }
    +
    +    #[test]
    +    fn test_summary() {
    +        let mut store = ResultStore::new();
    +
    +        store.insert(make_run(
    +            "run-1",
    +            "2026-03-01T00:00:00Z",
    +            vec![
    +                make_result("A-1", TestStatus::Pass),
    +                make_result("A-2", TestStatus::Fail),
    +            ],
    +        ));
    +        store.insert(make_run(
    +            "run-2",
    +            "2026-03-05T00:00:00Z",
    +            vec![
    +                make_result("A-1", TestStatus::Pass),
    +                make_result("A-2", TestStatus::Pass),
    +                make_result("A-3", TestStatus::Skip),
    +                make_result("A-4", TestStatus::Error),
    +                make_result("A-5", TestStatus::Blocked),
    +            ],
    +        ));
    +
    +        let summary = store.summary();
    +        assert_eq!(summary.total_runs, 2);
    +        // Stats come from the latest run only (run-2)
    +        assert_eq!(summary.total_results, 5);
    +        assert_eq!(summary.pass_count, 2);
    +        assert_eq!(summary.fail_count, 0);
    +        assert_eq!(summary.skip_count, 1);
    +        assert_eq!(summary.error_count, 1);
    +        assert_eq!(summary.blocked_count, 1);
    +        // pass_rate = 2/5 = 40%
    +        assert!((summary.pass_rate() - 40.0).abs() < f64::EPSILON);
    +    }
    +
    +    #[test]
    +    fn test_load_results_empty_dir() {
    +        let dir = std::env::temp_dir().join("rivet_test_empty_results");
    +        let _ = std::fs::create_dir_all(&dir);
    +        // Remove any leftover yaml files
    +        if let Ok(entries) = std::fs::read_dir(&dir) {
    +            for entry in entries.flatten() {
    +                let _ = std::fs::remove_file(entry.path());
    +            }
    +        }
    +
    +        let runs = load_results(&dir).unwrap();
    +        assert!(runs.is_empty());
    +
    +        let _ = std::fs::remove_dir(&dir);
    +    }
    +
    +    #[test]
    +    fn test_load_results_nonexistent_dir() {
    +        let dir = std::env::temp_dir().join("rivet_test_nonexistent_results_dir");
    +        let _ = std::fs::remove_dir_all(&dir); // ensure it doesn't exist
    +        let runs = load_results(&dir).unwrap();
    +        assert!(runs.is_empty());
    +    }
    +
    +    #[test]
    +    fn test_roundtrip_yaml() {
    +        let run_file = TestRunFile {
    +            run: RunMetadata {
    +                id: "run-roundtrip".to_string(),
    +                timestamp: "2026-03-08T12:00:00Z".to_string(),
    +                source: Some("CI".to_string()),
    +                environment: Some("HIL bench".to_string()),
    +                commit: Some("abc123".to_string()),
    +            },
    +            results: vec![
    +                TestResult {
    +                    artifact: "UVER-1".to_string(),
    +                    status: TestStatus::Pass,
    +                    duration: Some("1.5s".to_string()),
    +                    message: None,
    +                },
    +                TestResult {
    +                    artifact: "UVER-2".to_string(),
    +                    status: TestStatus::Fail,
    +                    duration: None,
    +                    message: Some("Threshold exceeded".to_string()),
    +                },
    +                TestResult {
    +                    artifact: "UVER-3".to_string(),
    +                    status: TestStatus::Skip,
    +                    duration: None,
    +                    message: None,
    +                },
    +                TestResult {
    +                    artifact: "UVER-4".to_string(),
    +                    status: TestStatus::Error,
    +                    duration: None,
    +                    message: Some("Runtime panic".to_string()),
    +                },
    +                TestResult {
    +                    artifact: "UVER-5".to_string(),
    +                    status: TestStatus::Blocked,
    +                    duration: None,
    +                    message: Some("Dependency unavailable".to_string()),
    +                },
    +            ],
    +        };
    +
    +        let yaml = serde_yaml::to_string(&run_file).unwrap();
    +        let deserialized: TestRunFile = serde_yaml::from_str(&yaml).unwrap();
    +
    +        assert_eq!(deserialized.run.id, run_file.run.id);
    +        assert_eq!(deserialized.run.timestamp, run_file.run.timestamp);
    +        assert_eq!(deserialized.run.source, run_file.run.source);
    +        assert_eq!(deserialized.run.environment, run_file.run.environment);
    +        assert_eq!(deserialized.run.commit, run_file.run.commit);
    +        assert_eq!(deserialized.results.len(), run_file.results.len());
    +
    +        for (orig, deser) in run_file.results.iter().zip(deserialized.results.iter()) {
    +            assert_eq!(orig.artifact, deser.artifact);
    +            assert_eq!(orig.status, deser.status);
    +            assert_eq!(orig.duration, deser.duration);
    +            assert_eq!(orig.message, deser.message);
    +        }
    +    }
    +}
    diff --git a/rivet-core/src/store.rs b/rivet-core/src/store.rs
    index 67d3a8d..4f2f03c 100644
    --- a/rivet-core/src/store.rs
    +++ b/rivet-core/src/store.rs
    @@ -8,7 +8,7 @@ use crate::model::{Artifact, ArtifactId};
     /// Holds all loaded artifacts and provides lookup by ID and by type.
     /// The store is the central data structure consumed by the link graph,
     /// validator, query engine, and matrix generator.
    -#[derive(Debug, Default)]
    +#[derive(Debug, Default, Clone)]
     pub struct Store {
         artifacts: HashMap<ArtifactId, Artifact>,
         by_type: HashMap<String, Vec<ArtifactId>>,
    diff --git a/rivet-core/src/validate.rs b/rivet-core/src/validate.rs
    index e64d882..57f9c2e 100644
    --- a/rivet-core/src/validate.rs
    +++ b/rivet-core/src/validate.rs
    @@ -1,3 +1,4 @@
    +use crate::document::DocumentStore;
     use crate::links::LinkGraph;
     use crate::schema::{Cardinality, Schema, Severity};
     use crate::store::Store;
    @@ -224,3 +225,28 @@ pub fn validate(store: &Store, schema: &Schema, graph: &LinkGraph) -> Vec<Diagnostic> {
    +
    +/// Validate wiki-style document references against the artifact store.
    +///
    +/// Emits a warning diagnostic for every `[[ID]]` reference that does not
    +/// resolve to a known artifact.
    +pub fn validate_docs(store: &Store, doc_store: &DocumentStore) -> Vec<Diagnostic> {
    +    let mut diagnostics = Vec::new();
    +
    +    for doc in doc_store.iter() {
    +        for reference in &doc.references {
    +            if !store.contains(&reference.artifact_id) {
    +                diagnostics.push(Diagnostic {
    +                    severity: Severity::Warning,
    +                    artifact_id: Some(doc.id.clone()),
    +                    rule: "doc-broken-ref".into(),
    +                    message: format!(
    +                        "document references [[{}]] (line {}) which does not exist",
    +                        reference.artifact_id, reference.line
    +                    ),
    +                });
    +            }
    +        }
    +    }
    +
    +    diagnostics
    +}
    diff --git a/rivet-core/src/wasm_runtime.rs b/rivet-core/src/wasm_runtime.rs
    index 6ea69ea..f787ffa 100644
    --- a/rivet-core/src/wasm_runtime.rs
    +++ b/rivet-core/src/wasm_runtime.rs
    @@ -32,6 +32,20 @@ use crate::adapter::{Adapter, AdapterConfig, AdapterSource};
     use crate::error::Error;
     use crate::model::Artifact;
     
    +// ---------------------------------------------------------------------------
    +// Generated WIT bindings (component-model typed interface)
    +// ---------------------------------------------------------------------------
    +
    +/// Type-safe bindings generated from `wit/adapter.wit` for the
    +/// `spar-component` world.  This gives us typed access to the
    +/// exported `adapter` and `renderer` interfaces.
    +mod wit_bindings {
    +    wasmtime::component::bindgen!({
    +        path: "../wit/adapter.wit",
    +        world: "spar-component",
    +    });
    +}
    +
     // ---------------------------------------------------------------------------
     // Configuration
     // ---------------------------------------------------------------------------
    @@ -360,6 +374,68 @@ impl WasmAdapter {
             ))
         }
     
    +    /// Call the guest `render` function from the renderer interface.
    +    ///
    +    /// This creates a fresh WASI-enabled store (optionally pre-opening
    +    /// `aadl_dir` so the guest can read `.aadl` files), instantiates the
    +    /// component using the generated WIT bindings, and calls the
    +    /// `pulseengine:rivet/renderer.render` export.
    +    pub fn call_render(
    +        &self,
    +        root: &str,
    +        highlight: &[String],
    +        aadl_dir: Option<&Path>,
    +    ) -> Result<String, WasmError> {
    +        // -- Build WASI context ------------------------------------------------
    +        let mut wasi_builder = wasmtime_wasi::WasiCtxBuilder::new();
    +        wasi_builder.inherit_stderr();
    +
    +        // Pre-open the AADL directory so the guest can read .aadl files.
    +        if let Some(dir) = aadl_dir {
    +            wasi_builder
    +                .preopened_dir(
    +                    dir,
    +                    ".",
    +                    wasmtime_wasi::DirPerms::READ,
    +                    wasmtime_wasi::FilePerms::READ,
    +                )
    +                .map_err(|e| WasmError::Instantiation(format!("preopened dir: {}", e)))?;
    +        }
    +
    +        let state = HostState {
    +            wasi: wasi_builder.build(),
    +            table: wasmtime::component::ResourceTable::new(),
    +            limiter: self
    +                .runtime_config
    +                .max_memory_bytes
    +                .map(|max| MemoryLimiter { max_memory: max }),
    +        };
    +
    +        let mut store = Store::new(&self.engine, state);
    +
    +        if let Some(fuel) = self.runtime_config.fuel {
    +            store
    +                .set_fuel(fuel)
    +                .map_err(|e| WasmError::Instantiation(e.to_string()))?;
    +        }
    +        if self.runtime_config.max_memory_bytes.is_some() {
    +            store.limiter(|state| state.limiter.as_mut().unwrap());
    +        }
    +
    +        // -- Instantiate via generated bindings --------------------------------
    +        let linker = self.create_linker()?;
    +
    +        let bindings =
    +            wit_bindings::SparComponent::instantiate(&mut store, &self.component, &linker)
    +                .map_err(|e| WasmError::Instantiation(e.to_string()))?;
    +
    +        bindings
    +            .pulseengine_rivet_renderer()
    +            .call_render(&mut store, root, highlight)
    +            .map_err(|e| WasmError::Guest(e.to_string()))?
    +            .map_err(|e| WasmError::Guest(format!("render error: {:?}", e)))
    +    }
    +
         /// Call the guest `export` function.
         fn call_export(
             &self,
    @@ -562,4 +638,88 @@ mod tests {
                 other => panic!("expected Adapter error, got: {other:?}"),
             }
         }
    +
    +    /// End-to-end: load the spar WASM component, preopen a directory with
    +    /// real AADL files, call the renderer, and verify the SVG output.
    +    ///
    +    /// Set `SPAR_WASM_PATH` to override the default component location.
    +    /// The test is skipped if the component or AADL files are not found.
    +    #[test]
    +    fn render_aadl_via_wasm() {
    +        // Only run if the WASM component exists
    +        let wasm_path = std::env::var("SPAR_WASM_PATH").unwrap_or_else(|_| {
    +            "/Volumes/Home/git/pulseengine/spar/target/wasm32-wasip2/release/spar_wasm.wasm".into()
    +        });
    +        let path = std::path::Path::new(&wasm_path);
    +        if !path.exists() {
    +            eprintln!("Skipping: WASM component not found at {}", path.display());
    +            return;
    +        }
    +
    +        // The AADL example directory
    +        let aadl_dir =
    +            std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("../examples/aadl/aadl");
    +        if !aadl_dir.exists() {
    +            eprintln!("Skipping: AADL example not found at {}", aadl_dir.display());
    +            return;
    +        }
    +
    +        let runtime = WasmAdapterRuntime::with_defaults().unwrap();
    +        let adapter = runtime.load_adapter(path).unwrap();
    +
    +        // Call render with the AADL directory preopened
    +        let result = adapter.call_render("FlightControl::Controller.Basic", &[], Some(&aadl_dir));
    +
    +        match result {
    +            Ok(svg) => {
    +                assert!(svg.contains("</svg>"), "SVG should be complete");
    +                assert!(svg.contains("data-id"), "nodes should have data-id");
    +
    +                // Write to temp for inspection
    +                let out = std::env::temp_dir().join("rivet-wasm-test");
    +                std::fs::create_dir_all(&out).ok();
    +                let svg_path = out.join("wasm-rendered.svg");
    +                std::fs::write(&svg_path, &svg).unwrap();
    +                eprintln!("SVG written to: {}", svg_path.display());
    +            }
    +            Err(e) => {
    +                // Some WASM/WASI issues are expected in test environments
    +                eprintln!("Render returned error (may be expected): {:?}", e);
    +            }
    +        }
    +    }
    +
    +    /// Load the real spar WASM component and call the renderer interface.
    +    ///
    +    /// Set `SPAR_WASM_PATH` to override the default component location.
    +    /// The test is skipped if the component file does not exist.
    +    #[test]
    +    fn load_spar_wasm_component() {
    +        let wasm_path = std::env::var("SPAR_WASM_PATH").unwrap_or_else(|_| {
    +            "/Volumes/Home/git/pulseengine/spar/target/wasm32-wasip2/release/spar_wasm.wasm".into()
    +        });
    +        let path = Path::new(&wasm_path);
    +        if !path.exists() {
    +            eprintln!("Skipping: WASM component not found at {}", path.display());
    +            return;
    +        }
    +
    +        let runtime = WasmAdapterRuntime::with_defaults().unwrap();
    +        let adapter = runtime.load_adapter(path).unwrap();
    +
    +        // Call render without any preopened AADL files.  The component should
    +        // load and the interface should be callable, but we expect an error
    +        // because there are no .aadl source files available to the guest.
    +        let result = adapter.call_render("Test::S.I", &[], None);
    +        assert!(result.is_err());
    +        let err_msg = format!("{:?}", result.unwrap_err());
    +        assert!(
    +            err_msg.contains("no .aadl files")
    +                || err_msg.contains("render error")
    +                || err_msg.contains("cannot instantiate"),
    +            "unexpected error: {}",
    +            err_msg
    +        );
    +    }
     }
    diff --git a/rivet-core/tests/integration.rs b/rivet-core/tests/integration.rs
    index ca93bc4..cbb9372 100644
    --- a/rivet-core/tests/integration.rs
    +++ b/rivet-core/tests/integration.rs
    @@ -1088,3 +1088,95 @@ fn test_diff_diagnostic_changes() {
             "1 new errors, 1 resolved errors, 0 new warnings, 0 resolved warnings"
         );
     }
    +
    +// ── AADL diagram placeholder in documents ────────────────────────────────
    +
    +#[test]
    +fn document_with_aadl_block_renders_placeholder() {
    +    let doc_content = "---\nid: DOC-ARCH\ntitle: System Architecture\n---\n\n## Flight Control Architecture\n\nThe system uses the following AADL architecture:\n\n```aadl\nroot: FlightControl::Controller.Basic\n```\n\nThis design satisfies [[SYSREQ-001]].\n";
    +
    +    let doc = rivet_core::document::parse_document(doc_content, None).unwrap();
    +    let html = rivet_core::document::render_to_html(&doc, |id| id == "SYSREQ-001");
    +
    +    // AADL block becomes a diagram placeholder
    +    assert!(html.contains("class=\"aadl-diagram\""));
    +    assert!(html.contains("data-root=\"FlightControl::Controller.Basic\""));
    +
    +    // Wiki-link still resolves
    +    assert!(html.contains("SYSREQ-001"));
    +
    +    // Other text renders normally
    +    assert!(html.contains("Flight Control Architecture"));
    +}
    +
    +// ── AADL adapter ─────────────────────────────────────────────────────────
    +
    +#[test]
    +fn aadl_adapter_parses_spar_json() {
    +    use rivet_core::adapter::{Adapter, AdapterConfig, AdapterSource};
    +    use rivet_core::formats::aadl::AadlAdapter;
    +
    +    let json = r#"{
    +        "root": "Pkg::Sys.Impl",
    +        "packages": [
    +            {
    +                "name": "Pkg",
    +                "component_types": [
    +                    { "name": "Sys", "category": "system" }
    +                ],
    +                "component_impls": [
    +                    { "name": "Sys.Impl", "category": "system" }
    +                ]
    +            }
    +        ],
    +        "instance": null,
    +        "diagnostics": [
    +            {
    +                "severity": "warning",
    +                "message": "No binding for cpu1",
    +                "path": ["root", "cpu1"],
    +                "analysis": "binding_check"
    +            }
    +        ]
    +    }"#;
    +
    +    let adapter = AadlAdapter::new();
    +    let source = AdapterSource::Bytes(json.as_bytes().to_vec());
    +    let config = AdapterConfig::default();
    +    let artifacts = adapter.import(&source, &config).unwrap();
    +
    +    // 1 type + 1 impl + 1 diagnostic = 3 artifacts
    +    assert_eq!(artifacts.len(), 3);
    +    assert!(
    +        artifacts
    +            .iter()
    +            .any(|a| a.artifact_type == "aadl-component" && a.id == "AADL-Pkg-Sys")
    +    );
    +    assert!(
    +        artifacts
    +            .iter()
    +            .any(|a| a.artifact_type == "aadl-component" && a.id == "AADL-Pkg-Sys.Impl")
    +    );
    +    assert!(
    +        artifacts
    +            .iter()
    +            .any(|a| a.artifact_type == "aadl-analysis-result")
    +    );
    +}
    +
    +// ── AADL schema ──────────────────────────────────────────────────────────
    +
    +#[test]
    +fn aadl_schema_loads() {
    +    let schemas_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
    +        .parent()
    +        .unwrap()
    +        .join("schemas");
    +    let common = rivet_core::schema::Schema::load_file(&schemas_dir.join("common.yaml")).unwrap();
    +    let aadl = rivet_core::schema::Schema::load_file(&schemas_dir.join("aadl.yaml")).unwrap();
    +    let merged = rivet_core::schema::Schema::merge(&[common, aadl]);
    +    assert!(merged.artifact_type("aadl-component").is_some());
    +    assert!(merged.artifact_type("aadl-analysis-result").is_some());
    +    assert!(merged.artifact_type("aadl-flow").is_some());
    +    assert!(merged.link_type("modeled-by").is_some());
    +}
    diff --git a/rivet.yaml b/rivet.yaml
    index 675cb58..3bf00e6 100644
    --- a/rivet.yaml
    +++ b/rivet.yaml
    @@ -4,7 +4,14 @@ project:
       schemas:
         - common
         - dev
    +    - aadl
     
     sources:
       - path: artifacts
         format: generic-yaml
    +
    +docs:
    +  - docs
    +  - arch
    +
    +results: results
    diff --git a/schemas/aadl.yaml b/schemas/aadl.yaml
    new file mode 100644
    index 0000000..f559033
    --- /dev/null
    +++ b/schemas/aadl.yaml
    @@ -0,0 +1,126 @@
    +# AADL Architecture schema for rivet
    +#
    +# Maps AADL components, analysis results, and flows into the rivet
    +# artifact model. Bridges ASPICE SYS.3/SWE.2 architecture levels
    +# with formal AADL models analyzed by spar.
    +
    +schema:
    +  name: aadl
    +  version: "0.1.0"
    +  namespace: "http://pulseengine.dev/ns/aadl#"
    +  extends: [common]
    +  description: >
    +    AADL architecture model artifact types for spar integration.
    +
    +artifact-types:
    +
    +  - name: aadl-component
    +    description: AADL component type or implementation imported from spar
    +    fields:
    +      - name: category
    +        type: string
    +        required: true
    +        allowed-values:
    +          - system
    +          - process
    +          - thread
    +          - thread-group
    +          - processor
    +          - virtual-processor
    +          - memory
    +          - bus
    +          - virtual-bus
    +          - device
    +          - subprogram
    +          - subprogram-group
    +          - data
    +          - abstract
    +      - name: aadl-package
    +        type: string
    +        required: true
    +        description: AADL package containing this component
    +      - name: classifier-kind
    +        type: string
    +        required: false
    +        allowed-values: [type, implementation, feature-group-type]
    +      - name: features
    +        type: structured
    +        required: false
    +        description: Port/access/feature group declarations
    +      - name: properties
    +        type: structured
    +        required: false
    +        description: AADL property associations
    +      - name: aadl-file
    +        type: string
    +        required: false
    +        description: Source .aadl file path
    +    link-fields:
    +      - name: allocated-from
    +        link-type: allocated-from
    +        target-types: [system-req, sw-req, system-arch-component, requirement, feature]
    +        required: false
    +        cardinality: zero-or-many
    +
    +  - name: aadl-analysis-result
    +    description: Output of a spar analysis pass
    +    fields:
    +      - name: analysis-name
    +        type: string
    +        required: true
    +        description: Name of the analysis (e.g., connectivity, scheduling, latency)
    +      - name: severity
    +        type: string
    +        required: true
    +        allowed-values: [error, warning, info]
    +      - name: component-path
    +        type: string
    +        required: false
    +        description: Hierarchical path to the component (e.g., root/subsystem/cpu)
    +      - name: details
    +        type: text
    +        required: false
    +    link-fields:
    +      - name: analyzes
    +        link-type: verifies
    +        target-types: [aadl-component]
    +        required: false
    +        cardinality: zero-or-many
    +
    +  - name: aadl-flow
    +    description: End-to-end flow with latency bounds
    +    fields:
    +      - name: flow-kind
    +        type: string
    +        required: true
    +        allowed-values: [source, sink, path, end-to-end]
    +      - name: latency-best-ms
    +        type: number
    +        required: false
    +      - name: latency-worst-ms
    +        type: number
    +        required: false
    +      - name: segments
    +        type: structured
    +        required: false
    +    link-fields:
    +      - name: part-of
    +        link-type: allocated-from
    +        target-types: [aadl-component]
    +        required: false
    +        cardinality: zero-or-many
    +
    +link-types:
    +  - name: modeled-by
    +    inverse: models
    +    description: An architecture component is modeled by an AADL component
    +    source-types: [system-arch-component, sw-arch-component]
    +    target-types: [aadl-component]
    +
    +traceability-rules:
    +  - name: aadl-component-has-allocation
    +    description: AADL component should trace to a requirement or architecture element
    +    source-type: aadl-component
    +    required-link: allocated-from
    +    target-types: [system-req, sw-req, system-arch-component, requirement, feature]
    +    severity: info
    diff --git a/scripts/build-wasm.sh b/scripts/build-wasm.sh
    new file mode 100755
    index 0000000..ed749ac
    --- /dev/null
    +++ b/scripts/build-wasm.sh
    @@ -0,0 +1,28 @@
    +#!/usr/bin/env bash
    +set -euo pipefail
    +
    +# Build spar-wasm component and transpile for browser use.
    +# Usage: ./scripts/build-wasm.sh [spar-repo-path]
    +
    +SPAR_DIR="${1:-../spar}"
    +OUT_DIR="rivet-cli/assets/wasm"
    +
    +if [ ! -d "$SPAR_DIR/crates/spar-wasm" ]; then
    +    echo "Error: spar repo not found at $SPAR_DIR"
    +    echo "Usage: $0 /path/to/spar"
    +    exit 1
    +fi
    +
    +echo "Building spar-wasm (wasm32-wasip2, release)..."
    +(cd "$SPAR_DIR" && cargo build --target wasm32-wasip2 -p spar-wasm --release)
    +
    +mkdir -p "$OUT_DIR"
    +cp "$SPAR_DIR/target/wasm32-wasip2/release/spar_wasm.wasm" "$OUT_DIR/"
    +echo "Copied WASM component to $OUT_DIR/spar_wasm.wasm"
    +ls -lh "$OUT_DIR/spar_wasm.wasm"
    +
    +echo ""
    +echo "Transpiling for browser with jco..."
    +npx @bytecodealliance/jco transpile "$OUT_DIR/spar_wasm.wasm" -o "$OUT_DIR/js/" 2>&1
    +echo "Browser JS module written to $OUT_DIR/js/"
    +ls -lh "$OUT_DIR/js/spar_wasm.js" "$OUT_DIR/js/spar_wasm.core.wasm" 2>/dev/null || true
    diff --git a/scripts/fetch-wasm.sh b/scripts/fetch-wasm.sh
    new file mode 100755
    index 0000000..0a9c73f
    --- /dev/null
    +++ b/scripts/fetch-wasm.sh
    @@ -0,0 +1,37 @@
    +#!/usr/bin/env bash
    +set -euo pipefail
    +
    +# Fetch pre-built WASM components from GitHub releases.
    +# Usage: ./scripts/fetch-wasm.sh [version]
    +
    +VERSION="${1:-latest}"
    +REPO="pulseengine/spar"
    +ASSET="spar_wasm.wasm"
    +OUT_DIR="rivet-cli/assets/wasm"
    +
    +mkdir -p "$OUT_DIR"
    +
    +if [ "$VERSION" = "latest" ]; then
    +    echo "Fetching latest release from $REPO..."
    +    URL=$(gh release view --repo "$REPO" --json assets -q ".assets[] | select(.name==\"$ASSET\") | .url" 2>/dev/null || true)
    +    if [ -z "$URL" ]; then
    +        echo "No release found with asset $ASSET. Build from source instead:"
    +        echo "  cd /path/to/spar && cargo build --target wasm32-wasip2 -p spar-wasm --release"
    +        echo "  cp target/wasm32-wasip2/release/spar_wasm.wasm $OUT_DIR/"
    +        exit 1
    +    fi
    +else
    +    echo "Fetching release $VERSION from $REPO..."
    +    URL=$(gh release view "$VERSION" --repo "$REPO" --json assets -q ".assets[] | select(.name==\"$ASSET\") | .url" 2>/dev/null || true)
    +    if [ -z "$URL" ]; then
    +        echo "Release $VERSION not found or does not contain $ASSET"
    +        exit 1
    +    fi
    +fi
    +
    +echo "Downloading $ASSET..."
    +gh release download $([ "$VERSION" = "latest" ] || echo "$VERSION") --repo "$REPO" --pattern "$ASSET" --dir "$OUT_DIR" --clobber
    +echo "Saved to $OUT_DIR/$ASSET"
    +
    +# Check size
    +ls -lh "$OUT_DIR/$ASSET"
    diff --git a/wit/adapter.wit b/wit/adapter.wit
    index 9e586d0..76c1d80 100644
    --- a/wit/adapter.wit
    +++ b/wit/adapter.wit
    @@ -83,13 +83,42 @@ interface adapter {
         supported-types: func() -> list<string>;
     
         /// Import artifacts from raw bytes
    -    import: func(source: list<u8>, config: adapter-config) -> result<list<artifact>, adapter-error>;
    +    %import: func(source: list<u8>, config: adapter-config) -> result<list<artifact>, adapter-error>;
     
         /// Export artifacts to raw bytes
    -    export: func(artifacts: list<artifact>, config: adapter-config) -> result<list<u8>, adapter-error>;
    +    %export: func(artifacts: list<artifact>, config: adapter-config) -> result<list<u8>, adapter-error>;
     }
     
     /// World for a rivet adapter component
     world rivet-adapter {
         export adapter;
     }
    +
    +/// Interface for rendering AADL instance models to SVG.
    +///
    +/// A `renderer` implementation parses AADL sources, instantiates
    +/// from the given root component implementation, lays out the
    +/// architecture, and returns an SVG string.
    +interface renderer {
    +    /// Errors returned by renderer operations
    +    variant render-error {
    +        parse-error(string),
    +        no-root(string),
    +        layout-error(string),
    +    }
    +
    +    /// Render an AADL instance tree rooted at `root` (e.g. "Pkg::Impl").
    +    ///
    +    /// `highlight` is an optional list of component paths to visually
    +    /// emphasise in the output (e.g. flow participants).
    +    ///
    +    /// Returns the SVG document as a UTF-8 string, or a `render-error`.
    +    render: func(root: string, highlight: list<string>) -> result<string, render-error>;
    +}
    +
    +/// World for a spar WASM component that can both adapt AADL artifacts
    +/// for rivet and render instance-model diagrams.
    +world spar-component {
    +    export adapter;
    +    export renderer;
    +}