diff --git a/.githooks/commit-msg b/.githooks/commit-msg new file mode 100644 index 0000000..179c856 --- /dev/null +++ b/.githooks/commit-msg @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -euo pipefail +FILE="${1:-}" +if [[ -z "$FILE" || ! -f "$FILE" ]]; then + echo "[commit-msg] Missing commit message file. Usage: commit-msg <commit-msg-file>" >&2 + exit 1 +fi + +SUBJECT=$(head -n1 "$FILE") +if [[ -z "${SUBJECT//[[:space:]]/}" ]]; then + echo "[commit-msg] Empty subject line is not allowed." >&2 + exit 1 +fi + +# Discourage WIP commits (built-in regex; avoid spawning grep) +if [[ "$SUBJECT" =~ ^[[:space:]]*[Ww][Ii][Pp]([^[:alnum:]_]|$) ]]; then + echo "[commit-msg] Please avoid 'WIP' in commit subjects. Tell a short story instead." >&2 + exit 1 +fi + +# Soft length guidance (warn only) +LEN=${#SUBJECT} +if (( LEN > 72 )); then + echo "[commit-msg] Note: subject exceeds 72 chars ($LEN). Consider wrapping for readability." >&2 +fi + +exit 0 diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 2c97a1f..976ed50 100644 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -1,53 +1,80 @@ #!/usr/bin/env bash set -euo pipefail -# Enforce coupling between PRNG algorithm/version and golden regression vector. - -PRNG_FILE="crates/rmg-core/src/math/prng.rs" - -# Only run if the PRNG file is staged -if !
git diff --cached --name-only | grep -qx "$PRNG_FILE"; then +if [[ "${SKIP_HOOKS:-}" == 1 ]]; then exit 0 fi -DIFF=$(git diff --cached -- "$PRNG_FILE" || true) - -# Heuristics to detect algorithm changes: edits to these functions imply behavior change -if echo "$DIFF" | grep -E '^(\+|-)\s*(fn\s+next_u64|fn\s+from_seed_u64|fn\s+from_seed\(|fn\s+next_int\()' >/dev/null; then - ALGO_CHANGED=1 -else - ALGO_CHANGED=0 +# 1) PRNG coupling guard (existing logic) +PRNG_FILE="crates/rmg-core/src/math/prng.rs" +if git diff --cached --name-only | grep -qx "$PRNG_FILE"; then + DIFF=$(git diff --cached -- "$PRNG_FILE" || true) + if echo "$DIFF" | grep -E '^(\+|-)\s*(fn\s+next_u64|fn\s+from_seed_u64|fn\s+from_seed\(|fn\s+next_int\()' >/dev/null; then + ALGO_CHANGED=1 + else + ALGO_CHANGED=0 + fi + if echo "$DIFF" | grep -E 'PRNG_ALGO_VERSION' >/dev/null; then + VERSION_CHANGED=1 + else + VERSION_CHANGED=0 + fi + if echo "$DIFF" | grep -E 'next_int_golden_regression|assert_eq!\(values,\s*vec!\[' >/dev/null; then + GOLDEN_CHANGED=1 + else + GOLDEN_CHANGED=0 + fi + FAIL=0 + if [[ "$ALGO_CHANGED" -eq 1 && "$VERSION_CHANGED" -eq 0 ]]; then + echo "pre-commit: PRNG algorithm changed but PRNG_ALGO_VERSION was not bumped." >&2 + FAIL=1 + fi + if [[ "$VERSION_CHANGED" -eq 1 && "$GOLDEN_CHANGED" -eq 0 ]]; then + echo "pre-commit: PRNG_ALGO_VERSION bumped but golden regression vector was not updated." >&2 + FAIL=1 + fi + if [[ "$FAIL" -eq 1 ]]; then + echo "pre-commit: Refusing commit. Update algorithm version and golden regression together." >&2 + exit 1 + fi fi -# Version bump present? 
-if echo "$DIFF" | grep -E 'PRNG_ALGO_VERSION' >/dev/null; then - VERSION_CHANGED=1 -else - VERSION_CHANGED=0 +# 2) Enforce toolchain pin (matches rust-toolchain.toml) +if command -v rustup >/dev/null 2>&1; then + PINNED=$(awk -F '"' '/^channel/ {print $2}' rust-toolchain.toml 2>/dev/null || echo "") + ACTIVE=$(rustup show active-toolchain 2>/dev/null | awk '{print $1}') + if [[ -n "$PINNED" && "$ACTIVE" != "$PINNED"* ]]; then + echo "pre-commit: Active toolchain '$ACTIVE' != pinned '$PINNED'. Run: rustup override set $PINNED" >&2 + exit 1 + fi fi -# Golden regression vector updated? -if echo "$DIFF" | grep -E 'next_int_golden_regression|assert_eq!\(values,\s*vec!\[' >/dev/null; then - GOLDEN_CHANGED=1 -else - GOLDEN_CHANGED=0 -fi +# 3) Format check (fast) +cargo fmt --all -- --check -FAIL=0 -if [[ "$ALGO_CHANGED" -eq 1 && "$VERSION_CHANGED" -eq 0 ]]; then - echo "pre-commit: PRNG algorithm changed but PRNG_ALGO_VERSION was not bumped." >&2 - FAIL=1 +# 4) Docs guard (scaled): only require docs when core public API changed +STAGED=$(git diff --cached --name-only) +CORE_API_CHANGED=$(echo "$STAGED" | grep -E '^crates/rmg-core/src/.*\.rs$' | grep -v '/tests/' || true) +if [[ -n "$CORE_API_CHANGED" ]]; then + echo "$STAGED" | grep -Fx 'docs/execution-plan.md' >/dev/null || { echo 'pre-commit: docs/execution-plan.md must be updated when core API changes.' >&2; exit 1; } + echo "$STAGED" | grep -Fx 'docs/decision-log.md' >/dev/null || { echo 'pre-commit: docs/decision-log.md must be updated when core API changes.' >&2; exit 1; } fi -if [[ "$VERSION_CHANGED" -eq 1 && "$GOLDEN_CHANGED" -eq 0 ]]; then - echo "pre-commit: PRNG_ALGO_VERSION bumped but golden regression vector was not updated." 
>&2 - FAIL=1 +# 5) Lockfile guard: ensure lockfile version is v3 (compatible with MSRV cargo) +if [[ -f Cargo.lock ]]; then + VER_LINE=$(grep -n '^version = ' Cargo.lock | head -n1 | awk -F'= ' '{print $2}') + if [[ "$VER_LINE" != "3" && "$VER_LINE" != $'3\r' ]]; then + echo "pre-commit: Cargo.lock must be generated with Cargo 1.68 (lockfile v3)." >&2 + echo "Run: cargo +1.68.0 generate-lockfile" >&2 + exit 1 + fi fi -if [[ "$FAIL" -eq 1 ]]; then - echo "pre-commit: Refusing commit. Update algorithm version and golden regression together." >&2 - exit 1 -fi +# 6) Targeted clippy + check for changed crates (fast-ish) +CRATES=$(echo "$STAGED" | sed -n 's#^crates/\([^/]*\)/.*#\1#p' | sort -u) +for c in $CRATES; do + cargo clippy -p "$c" --all-targets -- -D warnings -D missing_docs + cargo check -p "$c" --quiet +done exit 0 - diff --git a/.githooks/pre-push b/.githooks/pre-push new file mode 100644 index 0000000..edfe707 --- /dev/null +++ b/.githooks/pre-push @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +set -euo pipefail +PINNED="${PINNED:-1.68.0}" + +if [[ "${SKIP_HOOKS:-}" == 1 ]]; then + exit 0 +fi + +for cmd in cargo rustup rg; do + if ! command -v "$cmd" >/dev/null 2>&1; then + echo "[pre-push] missing dependency: $cmd. Install it and retry." >&2 + exit 1 + fi +done + +echo "🐰 BunBun 🐇" + +echo "[pre-push] fmt" +cargo +"$PINNED" fmt --all -- --check + +echo "[pre-push] clippy (workspace)" +cargo +"$PINNED" clippy --all-targets -- -D warnings -D missing_docs + +echo "[pre-push] tests (workspace)" +cargo +"$PINNED" test --workspace + +# MSRV check for rmg-core +echo "[pre-push] MSRV check (rmg-core @ $PINNED)" +if rustup run "$PINNED" cargo -V >/dev/null 2>&1; then + cargo +"$PINNED" check -p rmg-core --all-targets +else + echo "[pre-push] MSRV toolchain $PINNED not installed.
Install via: rustup toolchain install $PINNED" >&2 + exit 1 +fi + +# Rustdoc warnings guard (core API) +echo "[pre-push] rustdoc warnings gate (rmg-core)" +RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p rmg-core --no-deps + +# Banned patterns +echo "[pre-push] scanning banned patterns" +# Match any crate-level allow(...) that includes missing_docs; exclude telemetry.rs explicitly +if rg -n '#!\[allow\([^]]*missing_docs[^]]*\)\]' --glob '!crates/rmg-core/src/telemetry.rs' crates >/dev/null; then + echo "pre-push: crate-level allow(missing_docs) is forbidden (except telemetry.rs)." >&2 + rg -n '#!\[allow\([^]]*missing_docs[^]]*\)\]' --glob '!crates/rmg-core/src/telemetry.rs' crates | cat >&2 || true + exit 1 +fi +if rg -n "\#\[unsafe\(no_mangle\)\]" crates >/dev/null; then + echo "pre-push: #[unsafe(no_mangle)] is invalid; use #[no_mangle]." >&2 + rg -n "\#\[unsafe\(no_mangle\)\]" crates | cat >&2 || true + exit 1 +fi + +exit 0 diff --git a/.githooks/pre-rebase b/.githooks/pre-rebase new file mode 100644 index 0000000..d453539 --- /dev/null +++ b/.githooks/pre-rebase @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +echo "[pre-rebase] Rebase is disallowed for this repository. Use merge instead." >&2 +exit 1 + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b1dd3d7..453257f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,7 @@ jobs: with: submodules: false - uses: dtolnay/rust-toolchain@stable + - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: workspaces: | @@ -31,11 +32,18 @@ jobs: with: submodules: false - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + components: clippy + - name: rustup override stable + run: rustup toolchain install stable && rustup override set stable - uses: Swatinem/rust-cache@v2 with: workspaces: | . 
- name: cargo clippy + env: + RUSTUP_TOOLCHAIN: stable run: cargo clippy --all-targets -- -D warnings -D missing_docs test: @@ -46,11 +54,17 @@ jobs: with: submodules: false - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + - name: rustup override stable + run: rustup toolchain install stable && rustup override set stable - uses: Swatinem/rust-cache@v2 with: workspaces: | . - name: cargo test + env: + RUSTUP_TOOLCHAIN: stable run: cargo test - name: PRNG golden regression (rmg-core) run: cargo test -p rmg-core --features golden_prng -- tests::next_int_golden_regression @@ -86,3 +100,36 @@ jobs: echo 'docs/decision-log.md must be updated when non-doc files change.'; exit 1; } + + msrv: + name: MSRV (rmg-core @ 1.68) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: false + - uses: dtolnay/rust-toolchain@1.68.0 + - uses: Swatinem/rust-cache@v2 + with: + workspaces: | + . + - name: cargo check (rmg-core) + run: cargo check -p rmg-core --all-targets + + rustdoc: + name: Rustdoc (rmg-core warnings gate) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: false + - uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + - name: rustup override stable + run: rustup toolchain install stable && rustup override set stable + - uses: Swatinem/rust-cache@v2 + - name: rustdoc warnings gate + env: + RUSTUP_TOOLCHAIN: stable + run: RUSTDOCFLAGS="-D warnings" cargo doc -p rmg-core --no-deps diff --git a/Cargo.lock b/Cargo.lock index aa0e4ad..9e64f64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 4 +version = 3 [[package]] name = "arrayref" @@ -41,9 +41,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.41" +version = "1.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7" +checksum = "739eb0f94557554b3ca9a86d2d37bebd49c5e6d0c1d2bda35ba5bdac830befc2" dependencies = [ "find-msvc-tools", "shlex", @@ -77,6 +77,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "itoa" version = "1.0.15" @@ -85,20 +91,14 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", ] -[[package]] -name = "log" -version = "0.4.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" - [[package]] name = "memchr" version = "2.7.6" @@ -149,6 +149,7 @@ version = "0.1.0" dependencies = [ "blake3", "bytes", + "hex", "once_cell", "serde", "serde_json", @@ -292,9 +293,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" +checksum = 
"da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", @@ -303,25 +304,11 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - [[package]] name = "wasm-bindgen-futures" -version = "0.4.54" +version = "0.4.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" dependencies = [ "cfg-if", "js-sys", @@ -332,9 +319,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -342,31 +329,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", "syn", - "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.104" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" 
dependencies = [ "unicode-ident", ] [[package]] name = "wasm-bindgen-test" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e381134e148c1062f965a42ed1f5ee933eef2927c3f70d1812158f711d39865" +checksum = "bfc379bfb624eb59050b509c13e77b4eb53150c350db69628141abce842f2373" dependencies = [ "js-sys", "minicov", @@ -377,9 +364,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.54" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b673bca3298fe582aeef8352330ecbad91849f85090805582400850f8270a2e8" +checksum = "085b2df989e1e6f9620c1311df6c996e83fe16f57792b272ce1e024ac16a90f1" dependencies = [ "proc-macro2", "quote", @@ -388,9 +375,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.81" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/crates/rmg-cli/Cargo.toml b/crates/rmg-cli/Cargo.toml index 437298f..3e86bf6 100644 --- a/crates/rmg-cli/Cargo.toml +++ b/crates/rmg-cli/Cargo.toml @@ -2,5 +2,12 @@ name = "rmg-cli" version = "0.1.0" edition = "2021" +rust-version = "1.68" +description = "Echo CLI: demos, benches, inspector launcher (future)" +license = "Apache-2.0" +repository = "https://github.com/flyingrobots/echo" +readme = "README.md" +keywords = ["echo", "cli", "ecs"] +categories = ["command-line-utilities"] [dependencies] diff --git a/crates/rmg-cli/README.md b/crates/rmg-cli/README.md new file mode 100644 index 0000000..9afef19 --- /dev/null +++ b/crates/rmg-cli/README.md @@ -0,0 +1,6 @@ +# rmg-cli + +Placeholder CLI for Echo tooling. Subcommands will be added as the engine matures. + +See the repository root `README.md` for project context. 
+ diff --git a/crates/rmg-core/Cargo.toml b/crates/rmg-core/Cargo.toml index 60e6800..5e1b126 100644 --- a/crates/rmg-core/Cargo.toml +++ b/crates/rmg-core/Cargo.toml @@ -2,19 +2,34 @@ name = "rmg-core" version = "0.1.0" edition = "2021" +rust-version = "1.68" +description = "Echo core: deterministic typed graph rewriting engine" +license = "Apache-2.0" +repository = "https://github.com/flyingrobots/echo" +readme = "README.md" +keywords = ["echo", "ecs", "deterministic", "graph"] +categories = ["game-engines", "data-structures"] +build = "build.rs" [dependencies] -blake3 = "1" -bytes = "1" -thiserror = "1" +blake3 = "1.0" +bytes = "1.0" +thiserror = "1.0" +hex = { version = "0.4", optional = true } +serde = { version = "1.0", features = ["derive"], optional = true } +serde_json = { version = "1.0", optional = true } [dev-dependencies] -once_cell = "1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" +once_cell = "1.19" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" [features] default = [] # Optional regression check for PRNG sequences; off by default to avoid # freezing algorithm choices. Used only in tests guarded with `cfg(feature)`. golden_prng = [] +telemetry = ["serde", "serde_json", "hex"] + +[build-dependencies] +blake3 = "1.0" diff --git a/crates/rmg-core/README.md b/crates/rmg-core/README.md new file mode 100644 index 0000000..6ee9f0f --- /dev/null +++ b/crates/rmg-core/README.md @@ -0,0 +1,6 @@ +# rmg-core + +Deterministic typed graph rewriting engine used by Echo. + +This crate is the Rust core. See the repository root `README.md` for the full project vision and documentation index. 
+ diff --git a/crates/rmg-core/build.rs b/crates/rmg-core/build.rs new file mode 100644 index 0000000..18ad39f --- /dev/null +++ b/crates/rmg-core/build.rs @@ -0,0 +1,21 @@ +#![allow(missing_docs)] +use std::env; +use std::fs; +use std::path::PathBuf; + +fn main() { + // Generate canonical rule ids (domain-separated) for zero-CPU runtime. + let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); + let dest = out_dir.join("rule_ids.rs"); + + // Motion rule id: blake3("rule:motion/update") + let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:motion/update"); + let bytes: [u8; 32] = hasher.finalize().into(); + + let generated = format!( + "/// Canonical family id for `rule:motion/update` (BLAKE3).\npub const MOTION_UPDATE_FAMILY_ID: [u8; 32] = {:?};\n", + bytes + ); + fs::write(dest, generated).expect("write rule_ids.rs"); +} diff --git a/crates/rmg-core/src/demo/mod.rs b/crates/rmg-core/src/demo/mod.rs new file mode 100644 index 0000000..aab3283 --- /dev/null +++ b/crates/rmg-core/src/demo/mod.rs @@ -0,0 +1,3 @@ +//! Demo rules and helpers used by tests and examples. +pub mod motion; +pub mod ports; diff --git a/crates/rmg-core/src/demo/motion.rs b/crates/rmg-core/src/demo/motion.rs new file mode 100644 index 0000000..60af060 --- /dev/null +++ b/crates/rmg-core/src/demo/motion.rs @@ -0,0 +1,187 @@ +//! Demo motion rule: advances position by velocity stored in payload. + +use crate::engine_impl::Engine; +use crate::footprint::{Footprint, IdSet}; +use crate::graph::GraphStore; +use crate::ident::{make_node_id, make_type_id, Hash, NodeId}; +use crate::payload::{decode_motion_payload, encode_motion_payload}; +use crate::record::NodeRecord; +use crate::rule::{ConflictPolicy, PatternGraph, RewriteRule}; +// Build-time generated canonical ids (domain-separated). +include!(concat!(env!("OUT_DIR"), "/rule_ids.rs")); + +/// Rule name constant for the built-in motion update rule. 
+/// +/// Pass this name to [`Engine::apply`] to execute the motion update rule, +/// which advances an entity's position by its velocity. Operates on nodes +/// whose payload is a valid 24-byte motion encoding (position + velocity as +/// 6 × f32 little-endian). +/// +/// Example usage (in tests): +/// ```ignore +/// let mut engine = build_motion_demo_engine(); +/// let entity_id = make_node_id("entity"); +/// // ... insert entity and payload ... +/// let tx = engine.begin(); +/// engine.apply(tx, MOTION_RULE_NAME, &entity_id)?; +/// ``` +pub const MOTION_RULE_NAME: &str = "motion/update"; + +fn motion_executor(store: &mut GraphStore, scope: &NodeId) { + if let Some(node) = store.node_mut(scope) { + if let Some(payload) = &mut node.payload { + if let Some((mut pos, vel)) = decode_motion_payload(payload) { + pos[0] += vel[0]; + pos[1] += vel[1]; + pos[2] += vel[2]; + *payload = encode_motion_payload(pos, vel); + } + } + } +} + +fn motion_matcher(store: &GraphStore, scope: &NodeId) -> bool { + store + .node(scope) + .and_then(|n| n.payload.as_ref()) + .and_then(decode_motion_payload) + .is_some() +} + +/// Deterministic rule id bytes for `rule:motion/update`. +const MOTION_RULE_ID: Hash = MOTION_UPDATE_FAMILY_ID; + +/// Returns a rewrite rule that updates entity positions based on velocity. +/// +/// This rule matches any node containing a valid 24-byte motion payload +/// (position + velocity encoded as 6 × f32 little-endian) and updates the +/// position by adding the velocity component-wise. +/// +/// Register this rule with [`Engine::register_rule`], then apply it with +/// [`Engine::apply`] using [`MOTION_RULE_NAME`]. +/// +/// Returns a [`RewriteRule`] with deterministic id, empty pattern (relies on +/// the matcher), and the motion update executor. 
+#[must_use] +pub fn motion_rule() -> RewriteRule { + RewriteRule { + id: MOTION_RULE_ID, + name: MOTION_RULE_NAME, + left: PatternGraph { nodes: vec![] }, + matcher: motion_matcher, + executor: motion_executor, + compute_footprint: compute_motion_footprint, + factor_mask: 0, + conflict_policy: ConflictPolicy::Abort, + join_fn: None, + } +} + +fn compute_motion_footprint(store: &GraphStore, scope: &NodeId) -> Footprint { + // Motion updates the payload on the scoped node only (write), no edges/ports. + let mut n_write = IdSet::default(); + if store.node(scope).is_some() { + n_write.insert_node(scope); + } + Footprint { + n_read: IdSet::default(), + n_write, + e_read: IdSet::default(), + e_write: IdSet::default(), + b_in: crate::footprint::PortSet::default(), + b_out: crate::footprint::PortSet::default(), + factor_mask: 0, + } +} + +/// Constructs a demo [`Engine`] with a world-root node and motion rule pre-registered. +/// +/// Creates a [`GraphStore`] with a single root node (id: "world-root", type: +/// "world"), initializes an [`Engine`] with that root, and registers the +/// [`motion_rule`]. Ready for immediate use in tests and demos. +/// +/// Returns an [`Engine`] with the motion rule registered and an empty +/// world‑root node. +/// +/// # Panics +/// Panics if rule registration fails (should not happen in a fresh engine). 
+#[must_use] +#[allow(clippy::expect_used)] +pub fn build_motion_demo_engine() -> Engine { + let mut store = GraphStore::default(); + let root_id = make_node_id("world-root"); + let root_type = make_type_id("world"); + store.insert_node( + root_id, + NodeRecord { + ty: root_type, + payload: None, + }, + ); + + let mut engine = Engine::new(store, root_id); + engine + .register_rule(motion_rule()) + .expect("motion rule should register successfully in fresh engine"); + engine +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn motion_rule_id_matches_domain_separated_name() { + // Our build.rs generates the family id using a domain separator: + // blake3("rule:" ++ MOTION_RULE_NAME) + let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:"); + hasher.update(MOTION_RULE_NAME.as_bytes()); + let expected: Hash = hasher.finalize().into(); + assert_eq!( + MOTION_RULE_ID, expected, + "MOTION_RULE_ID must equal blake3(\"rule:\" ++ MOTION_RULE_NAME)" + ); + } + + #[test] + fn motion_executor_updates_position_and_bytes() { + let mut store = GraphStore::default(); + let ent = make_node_id("entity-motion-bytes"); + let ty = make_type_id("entity"); + let pos = [10.0, -2.0, 3.5]; + let vel = [0.125, 2.0, -1.5]; + let payload = encode_motion_payload(pos, vel); + store.insert_node( + ent, + NodeRecord { + ty, + payload: Some(payload), + }, + ); + + // Run executor directly and validate position math and encoded bytes. + motion_executor(&mut store, &ent); + let Some(rec) = store.node(&ent) else { + unreachable!("entity present"); + }; + let Some(bytes) = rec.payload.as_ref() else { + unreachable!("payload present"); + }; + let Some((new_pos, new_vel)) = decode_motion_payload(bytes) else { + unreachable!("payload decode"); + }; + // Compare component-wise using exact bit equality for deterministic values. 
+ for i in 0..3 { + assert_eq!(new_vel[i].to_bits(), vel[i].to_bits()); + let expected = (pos[i] + vel[i]).to_bits(); + assert_eq!(new_pos[i].to_bits(), expected); + } + // Encoding round-trip should match re-encoding of updated values exactly. + let expected_bytes = encode_motion_payload(new_pos, new_vel); + let Some(bytes) = rec.payload.as_ref() else { + unreachable!("payload present after executor"); + }; + assert_eq!(bytes, &expected_bytes); + } +} diff --git a/crates/rmg-core/src/demo/ports.rs b/crates/rmg-core/src/demo/ports.rs new file mode 100644 index 0000000..b57fb74 --- /dev/null +++ b/crates/rmg-core/src/demo/ports.rs @@ -0,0 +1,123 @@ +//! Demo rule that reserves a boundary input port, used to exercise the +//! reservation gate and independence checks. + +use crate::engine_impl::Engine; +use crate::footprint::{pack_port_key, Footprint, IdSet, PortSet}; +use crate::graph::GraphStore; +use crate::ident::{make_node_id, make_type_id, Hash, NodeId}; +use crate::payload::{decode_motion_payload, encode_motion_payload}; +use crate::record::NodeRecord; +use crate::rule::{ConflictPolicy, PatternGraph, RewriteRule}; + +/// Public identifier for the port demo rule. 
+pub const PORT_RULE_NAME: &str = "demo/port_nop"; + +fn port_matcher(_: &GraphStore, _: &NodeId) -> bool { + true +} + +fn port_executor(store: &mut GraphStore, scope: &NodeId) { + if let Some(node) = store.node_mut(scope) { + // Use motion payload layout; increment pos.x by 1.0 + if let Some(bytes) = &mut node.payload { + if let Some((mut pos, vel)) = decode_motion_payload(bytes) { + pos[0] += 1.0; + *bytes = encode_motion_payload(pos, vel); + } + } else { + let pos = [1.0, 0.0, 0.0]; + let vel = [0.0, 0.0, 0.0]; + node.payload = Some(encode_motion_payload(pos, vel)); + } + } +} + +fn compute_port_footprint(store: &GraphStore, scope: &NodeId) -> Footprint { + let mut n_write = IdSet::default(); + let mut b_in = PortSet::default(); + if store.node(scope).is_some() { + n_write.insert_node(scope); + b_in.insert(pack_port_key(scope, 0, true)); + } + Footprint { + n_read: IdSet::default(), + n_write, + e_read: IdSet::default(), + e_write: IdSet::default(), + b_in, + b_out: PortSet::default(), + factor_mask: 0, + } +} + +/// Returns a demo rewrite rule that reserves a boundary input port. +/// +/// This rule always matches and increments the x component of the scoped +/// node's motion payload by 1.0 (or initializes to `[1.0, 0.0, 0.0]` if +/// absent). Its footprint reserves a single boundary input port (port 0, +/// direction=in) on the scoped node, used to test port-based independence +/// checks. +/// +/// Register with [`Engine::register_rule`], then apply with [`Engine::apply`] +/// using [`PORT_RULE_NAME`]. Returns a [`RewriteRule`] with a runtime-computed +/// id (BLAKE3 of the name for the spike), empty pattern, and +/// [`ConflictPolicy::Abort`]. +#[must_use] +pub fn port_rule() -> RewriteRule { + // Family id will be generated later via build.rs when promoted to a stable demo. + // For the spike, derive from a domain-separated name at runtime. 
+ let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:"); + hasher.update(PORT_RULE_NAME.as_bytes()); + let id: Hash = hasher.finalize().into(); + RewriteRule { + id, + name: PORT_RULE_NAME, + left: PatternGraph { nodes: vec![] }, + matcher: port_matcher, + executor: port_executor, + compute_footprint: compute_port_footprint, + factor_mask: 0, + conflict_policy: ConflictPolicy::Abort, + join_fn: None, + } +} + +/// Builds an engine with a world root for port-rule tests. +/// +/// # Panics +/// Panics if registering the port rule fails (should not occur in a fresh engine). +#[must_use] +#[allow(clippy::expect_used)] +pub fn build_port_demo_engine() -> Engine { + let mut store = GraphStore::default(); + let root_id = make_node_id("world-root-ports"); + let root_type = make_type_id("world"); + store.insert_node( + root_id, + NodeRecord { + ty: root_type, + payload: None, + }, + ); + let mut engine = Engine::new(store, root_id); + engine + .register_rule(port_rule()) + .expect("port rule should register successfully in fresh engine"); + engine +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn port_rule_id_is_domain_separated() { + let rule = port_rule(); + let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:"); + hasher.update(PORT_RULE_NAME.as_bytes()); + let expected: Hash = hasher.finalize().into(); + assert_eq!(rule.id, expected); + } +} diff --git a/crates/rmg-core/src/engine_impl.rs b/crates/rmg-core/src/engine_impl.rs new file mode 100644 index 0000000..817918f --- /dev/null +++ b/crates/rmg-core/src/engine_impl.rs @@ -0,0 +1,296 @@ +//! Core rewrite engine implementation. 
+use std::collections::{HashMap, HashSet}; + +use blake3::Hasher; +use thiserror::Error; + +use crate::graph::GraphStore; +use crate::ident::{CompactRuleId, Hash, NodeId}; +use crate::record::NodeRecord; +use crate::rule::{ConflictPolicy, RewriteRule}; +use crate::scheduler::{DeterministicScheduler, PendingRewrite, RewritePhase}; +use crate::snapshot::{compute_snapshot_hash, Snapshot}; +use crate::tx::TxId; + +/// Result of calling [`Engine::apply`]. +#[derive(Debug)] +pub enum ApplyResult { + /// The rewrite matched and was enqueued for execution. + Applied, + /// The rewrite did not match the provided scope. + NoMatch, +} + +/// Errors emitted by the engine. +#[derive(Debug, Error)] +pub enum EngineError { + /// The supplied transaction identifier did not exist or was already closed. + #[error("transaction not active")] + UnknownTx, + /// A rule was requested that has not been registered with the engine. + #[error("rule not registered: {0}")] + UnknownRule(String), + /// Attempted to register a rule with a duplicate name. + #[error("duplicate rule name: {0}")] + DuplicateRuleName(&'static str), + /// Attempted to register a rule with a duplicate ID. + #[error("duplicate rule id: {0:?}")] + DuplicateRuleId(Hash), + /// Conflict policy Join requires a join function. + #[error("missing join function for ConflictPolicy::Join")] + MissingJoinFn, + /// Internal invariant violated (engine state corruption). + #[error("internal invariant violated: {0}")] + InternalCorruption(&'static str), +} + +/// Core rewrite engine used by the spike. +/// +/// It owns a `GraphStore`, the registered rules, and the deterministic +/// scheduler. Snapshot determinism is provided by the snapshot hashing routine: +/// includes the root id, all nodes in ascending `NodeId` order, and all +/// outbound edges per node sorted by `EdgeId`. All length prefixes are 8-byte +/// little-endian and ids are raw 32-byte values. 
Changing any of these rules is +/// a breaking change to snapshot identity and must be recorded in the +/// determinism spec and tests. +pub struct Engine { + store: GraphStore, + rules: HashMap<&'static str, RewriteRule>, + rules_by_id: HashMap, + compact_rule_ids: HashMap, + rules_by_compact: HashMap, + scheduler: DeterministicScheduler, + tx_counter: u64, + live_txs: HashSet, + current_root: NodeId, + last_snapshot: Option, +} + +impl Engine { + /// Constructs a new engine with the supplied backing store and root node id. + pub fn new(store: GraphStore, root: NodeId) -> Self { + Self { + store, + rules: HashMap::new(), + rules_by_id: HashMap::new(), + compact_rule_ids: HashMap::new(), + rules_by_compact: HashMap::new(), + scheduler: DeterministicScheduler::default(), + tx_counter: 0, + live_txs: HashSet::new(), + current_root: root, + last_snapshot: None, + } + } + + /// Registers a rewrite rule so it can be referenced by name. + /// + /// # Errors + /// Returns [`EngineError::DuplicateRuleName`] if a rule with the same + /// name has already been registered, or [`EngineError::DuplicateRuleId`] + /// if a rule with the same id was previously registered. 
+ pub fn register_rule(&mut self, rule: RewriteRule) -> Result<(), EngineError> { + if self.rules.contains_key(rule.name) { + return Err(EngineError::DuplicateRuleName(rule.name)); + } + if self.rules_by_id.contains_key(&rule.id) { + return Err(EngineError::DuplicateRuleId(rule.id)); + } + if matches!(rule.conflict_policy, ConflictPolicy::Join) && rule.join_fn.is_none() { + return Err(EngineError::MissingJoinFn); + } + self.rules_by_id.insert(rule.id, rule.name); + debug_assert!( + self.compact_rule_ids.len() < u32::MAX as usize, + "too many rules to assign a compact id" + ); + #[allow(clippy::cast_possible_truncation)] + let next = CompactRuleId(self.compact_rule_ids.len() as u32); + let compact = *self.compact_rule_ids.entry(rule.id).or_insert(next); + self.rules_by_compact.insert(compact, rule.name); + self.rules.insert(rule.name, rule); + Ok(()) + } + + /// Begins a new transaction and returns its identifier. + #[must_use] + pub fn begin(&mut self) -> TxId { + // Increment with wrap and ensure we never produce 0 (reserved invalid). + self.tx_counter = self.tx_counter.wrapping_add(1); + if self.tx_counter == 0 { + self.tx_counter = 1; + } + self.live_txs.insert(self.tx_counter); + TxId::from_raw(self.tx_counter) + } + + /// Queues a rewrite for execution if it matches the provided scope. + /// + /// # Errors + /// Returns [`EngineError::UnknownTx`] if the transaction is invalid, or + /// [`EngineError::UnknownRule`] if the named rule is not registered. + /// + /// # Panics + /// Panics only if internal rule tables are corrupted (should not happen + /// when rules are registered via `register_rule`). 
+    pub fn apply(
+        &mut self,
+        tx: TxId,
+        rule_name: &str,
+        scope: &NodeId,
+    ) -> Result<ApplyResult, EngineError> {
+        if tx.value() == 0 || !self.live_txs.contains(&tx.value()) {
+            return Err(EngineError::UnknownTx);
+        }
+        let Some(rule) = self.rules.get(rule_name) else {
+            return Err(EngineError::UnknownRule(rule_name.to_owned()));
+        };
+        let matches = (rule.matcher)(&self.store, scope);
+        if !matches {
+            return Ok(ApplyResult::NoMatch);
+        }
+
+        let scope_fp = scope_hash(rule, scope);
+        let footprint = (rule.compute_footprint)(&self.store, scope);
+        let Some(&compact_rule) = self.compact_rule_ids.get(&rule.id) else {
+            return Err(EngineError::InternalCorruption(
+                "missing compact rule id for a registered rule",
+            ));
+        };
+        self.scheduler.pending.entry(tx).or_default().insert(
+            (scope_fp, rule.id),
+            PendingRewrite {
+                rule_id: rule.id,
+                compact_rule,
+                scope_hash: scope_fp,
+                scope: *scope,
+                footprint,
+                phase: RewritePhase::Matched,
+            },
+        );
+
+        Ok(ApplyResult::Applied)
+    }
+
+    /// Executes all pending rewrites for the transaction and produces a snapshot.
+    ///
+    /// # Errors
+    /// - Returns [`EngineError::UnknownTx`] if `tx` does not refer to a live transaction.
+    /// - Returns [`EngineError::InternalCorruption`] if internal rule tables are
+    ///   corrupted (e.g., a reserved rewrite references a missing rule).
+    pub fn commit(&mut self, tx: TxId) -> Result<Snapshot, EngineError> {
+        if tx.value() == 0 || !self.live_txs.contains(&tx.value()) {
+            return Err(EngineError::UnknownTx);
+        }
+        // Reserve phase: enforce independence against active frontier.
+        let mut reserved: Vec<PendingRewrite> = Vec::new();
+        for mut rewrite in self.scheduler.drain_for_tx(tx) {
+            if self.scheduler.reserve(tx, &mut rewrite) {
+                reserved.push(rewrite);
+            }
+        }
+        for rewrite in reserved {
+            let id = rewrite.compact_rule;
+            let Some(rule) = self.rule_by_compact(id) else {
+                debug_assert!(false, "missing rule for compact id: {id:?}");
+                return Err(EngineError::InternalCorruption(
+                    "missing rule for compact id during commit",
+                ));
+            };
+            (rule.executor)(&mut self.store, &rewrite.scope);
+        }
+
+        let hash = compute_snapshot_hash(&self.store, &self.current_root);
+        let snapshot = Snapshot {
+            root: self.current_root,
+            hash,
+            parent: self.last_snapshot.as_ref().map(|s| s.hash),
+            tx,
+        };
+        self.last_snapshot = Some(snapshot.clone());
+        // Mark transaction as closed/inactive and finalize scheduler accounting.
+        self.live_txs.remove(&tx.value());
+        self.scheduler.finalize_tx(tx);
+        Ok(snapshot)
+    }
+
+    /// Returns a snapshot for the current graph state without executing rewrites.
+    #[must_use]
+    pub fn snapshot(&self) -> Snapshot {
+        let hash = compute_snapshot_hash(&self.store, &self.current_root);
+        Snapshot {
+            root: self.current_root,
+            hash,
+            parent: self.last_snapshot.as_ref().map(|s| s.hash),
+            tx: TxId::from_raw(self.tx_counter),
+        }
+    }
+
+    /// Returns a shared view of a node when it exists.
+    #[must_use]
+    pub fn node(&self, id: &NodeId) -> Option<&NodeRecord> {
+        self.store.node(id)
+    }
+
+    /// Inserts or replaces a node directly inside the store.
+    ///
+    /// The spike uses this to create motion entities prior to executing rewrites.
+ pub fn insert_node(&mut self, id: NodeId, record: NodeRecord) { + self.store.insert_node(id, record); + } +} + +impl Engine { + fn rule_by_compact(&self, id: CompactRuleId) -> Option<&RewriteRule> { + let name = self.rules_by_compact.get(&id)?; + self.rules.get(name) + } +} + +fn scope_hash(rule: &RewriteRule, scope: &NodeId) -> Hash { + let mut hasher = Hasher::new(); + hasher.update(&rule.id); + hasher.update(&scope.0); + hasher.finalize().into() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{demo::motion::motion_rule, ident::make_node_id}; + + #[test] + fn scope_hash_stable_for_rule_and_scope() { + let rule = motion_rule(); + let scope = make_node_id("scope-hash-entity"); + let h1 = super::scope_hash(&rule, &scope); + // Recompute expected value manually using the same inputs. + let mut hasher = blake3::Hasher::new(); + hasher.update(&rule.id); + hasher.update(&scope.0); + let expected: Hash = hasher.finalize().into(); + assert_eq!(h1, expected); + } + + #[test] + fn register_rule_join_requires_join_fn() { + // Build a rule that declares Join but provides no join_fn. + let bad = RewriteRule { + id: [0u8; 32], + name: "bad/join", + left: crate::rule::PatternGraph { nodes: vec![] }, + matcher: |_s, _n| true, + executor: |_s, _n| {}, + compute_footprint: |_s, _n| crate::footprint::Footprint::default(), + factor_mask: 0, + conflict_policy: crate::rule::ConflictPolicy::Join, + join_fn: None, + }; + let mut engine = Engine::new(GraphStore::default(), make_node_id("r")); + let res = engine.register_rule(bad); + assert!( + matches!(res, Err(EngineError::MissingJoinFn)), + "expected MissingJoinFn, got {res:?}" + ); + } +} diff --git a/crates/rmg-core/src/footprint.rs b/crates/rmg-core/src/footprint.rs new file mode 100644 index 0000000..64ec02f --- /dev/null +++ b/crates/rmg-core/src/footprint.rs @@ -0,0 +1,193 @@ +//! Footprints and independence checks for MWMR scheduling. +//! +//! 
A footprint summarises the read/write sets of a pending rewrite over nodes,
+//! edges, and boundary ports (typed interfaces), plus a coarse-grained
+//! `factor_mask` used as an O(1) prefilter for spatial or subsystem
+//! partitioning.
+//!
+//! This module intentionally uses simple set types for clarity; a future
+//! optimisation replaces them with block‑sparse bitmaps and SIMD kernels.
+
+use std::collections::BTreeSet;
+
+use crate::ident::{EdgeId, Hash, NodeId};
+
+/// Packed 64‑bit key for a boundary port.
+///
+/// This is an opaque, caller-supplied stable identifier used to detect
+/// conflicts on boundary interfaces. The engine only requires stable equality
+/// and ordering; it does not rely on a specific bit layout.
+///
+/// For demos/tests, use [`pack_port_key`] to derive a deterministic 64‑bit key
+/// from a [`NodeId`], a `port_id`, and a direction flag.
+pub type PortKey = u64;
+
+/// Simple ordered set of 256‑bit ids based on `BTreeSet` for deterministic
+/// iteration. Optimised representations (Roaring + SIMD) can back this API in
+/// the future without changing call‑sites.
+#[derive(Debug, Clone, Default)]
+pub struct IdSet(BTreeSet<Hash>);
+
+impl IdSet {
+    /// Inserts an identifier.
+    pub fn insert_node(&mut self, id: &NodeId) {
+        self.0.insert(id.0);
+    }
+    /// Inserts an identifier.
+    pub fn insert_edge(&mut self, id: &EdgeId) {
+        self.0.insert(id.0);
+    }
+    /// Returns true if any element is shared with `other`.
+    pub fn intersects(&self, other: &Self) -> bool {
+        // Early‑exit by zipping ordered sets.
+        let mut a = self.0.iter();
+        let mut b = other.0.iter();
+        let mut va = a.next();
+        let mut vb = b.next();
+        while let (Some(x), Some(y)) = (va, vb) {
+            match x.cmp(y) {
+                core::cmp::Ordering::Less => va = a.next(),
+                core::cmp::Ordering::Greater => vb = b.next(),
+                core::cmp::Ordering::Equal => return true,
+            }
+        }
+        false
+    }
+}
+
+/// Ordered set of boundary ports.
+#[derive(Debug, Clone, Default)]
+pub struct PortSet(BTreeSet<PortKey>);
+
+impl PortSet {
+    /// Inserts a port key.
+    pub fn insert(&mut self, key: PortKey) {
+        let _ = self.0.insert(key);
+    }
+    /// Returns true if any element is shared with `other`.
+    pub fn intersects(&self, other: &Self) -> bool {
+        let mut a = self.0.iter();
+        let mut b = other.0.iter();
+        let mut va = a.next();
+        let mut vb = b.next();
+        while let (Some(x), Some(y)) = (va, vb) {
+            match x.cmp(y) {
+                core::cmp::Ordering::Less => va = a.next(),
+                core::cmp::Ordering::Greater => vb = b.next(),
+                core::cmp::Ordering::Equal => return true,
+            }
+        }
+        false
+    }
+}
+
+/// Footprint capturing the read/write sets and factor mask of a rewrite.
+#[derive(Debug, Clone, Default)]
+pub struct Footprint {
+    /// Nodes read by the rewrite.
+    pub n_read: IdSet,
+    /// Nodes written/created/deleted by the rewrite.
+    pub n_write: IdSet,
+    /// Edges read by the rewrite.
+    pub e_read: IdSet,
+    /// Edges written/created/deleted by the rewrite.
+    pub e_write: IdSet,
+    /// Boundary input ports touched.
+    pub b_in: PortSet,
+    /// Boundary output ports touched.
+    pub b_out: PortSet,
+    /// Coarse partition mask; used as an O(1) prefilter.
+    pub factor_mask: u64,
+}
+
+impl Footprint {
+    /// Returns `true` when this footprint is independent of `other`.
+    ///
+    /// Fast path checks the factor mask; then boundary ports; then edges and
+    /// nodes. The check is symmetric but implemented with early exits.
+    /// Disjoint `factor_mask` values guarantee independence by construction
+    /// (the mask is a coarse superset of touched partitions).
+ pub fn independent(&self, other: &Self) -> bool { + if (self.factor_mask & other.factor_mask) == 0 { + return true; + } + if self.b_in.intersects(&other.b_in) + || self.b_in.intersects(&other.b_out) + || self.b_out.intersects(&other.b_in) + || self.b_out.intersects(&other.b_out) + { + return false; + } + if self.e_write.intersects(&other.e_write) + || self.e_write.intersects(&other.e_read) + || other.e_write.intersects(&self.e_read) + { + return false; + } + if self.n_write.intersects(&other.n_write) + || self.n_write.intersects(&other.n_read) + || other.n_write.intersects(&self.n_read) + { + return false; + } + true + } +} + +/// Helper to derive a deterministic [`PortKey`] from node, port id, and direction. +/// +/// Layout used by this helper: +/// - bits 63..32: lower 32 bits of the node's first 8 bytes (LE) — a stable +/// per-node fingerprint, not reversible +/// - bits 31..2: `port_id` (u30; must be < 2^30) +/// - bit 1: reserved (0) +/// - bit 0: direction flag (1 = input, 0 = output) +/// +/// This is sufficient for tests and demos; production code may adopt a +/// different stable scheme as long as equality and ordering are preserved. 
+#[inline] +pub fn pack_port_key(node: &NodeId, port_id: u32, dir_in: bool) -> PortKey { + let mut first8 = [0u8; 8]; + first8.copy_from_slice(&node.0[0..8]); + let node_fingerprint = u64::from_le_bytes(first8) & 0xFFFF_FFFF; + let dir_bit = u64::from(dir_in); + debug_assert!(port_id < (1 << 30), "port_id must fit in 30 bits"); + let port30 = u64::from(port_id & 0x3FFF_FFFF); + (node_fingerprint << 32) | (port30 << 2) | dir_bit +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn pack_port_key_is_stable_and_distinct_by_inputs() { + let a = NodeId(blake3::hash(b"node-a").into()); + let b = NodeId(blake3::hash(b"node-b").into()); + let k1 = pack_port_key(&a, 0, true); + let k2 = pack_port_key(&a, 1, true); + let k3 = pack_port_key(&a, 0, false); + let k4 = pack_port_key(&b, 0, true); + assert_ne!(k1, k2); + assert_ne!(k1, k3); + assert_ne!(k1, k4); + // Stability + assert_eq!(k1, pack_port_key(&a, 0, true)); + } + + #[test] + fn pack_port_key_masks_port_id_to_u30() { + let a = NodeId(blake3::hash(b"node-a").into()); + let hi = (1u32 << 30) - 1; + let k_ok = pack_port_key(&a, hi, true); + if !cfg!(debug_assertions) { + // Same node/dir; port_id above u30 must not alter higher fields. + let k_over = pack_port_key(&a, hi + 1, true); + assert_eq!( + k_ok & !0b11, + k_over & !0b11, + "overflow must not spill into fingerprint" + ); + } + } +} diff --git a/crates/rmg-core/src/graph.rs b/crates/rmg-core/src/graph.rs new file mode 100644 index 0000000..d1f8a8c --- /dev/null +++ b/crates/rmg-core/src/graph.rs @@ -0,0 +1,51 @@ +//! Minimal in-memory graph store used by the rewrite executor and tests. +use std::collections::BTreeMap; + +use crate::ident::NodeId; +use crate::record::{EdgeRecord, NodeRecord}; + +/// In-memory graph storage for the spike. +/// +/// The production engine will eventually swap in a content-addressed store, +/// but this structure keeps the motion rewrite spike self-contained. 
+#[derive(Default, Clone)]
+pub struct GraphStore {
+    /// Mapping from node identifiers to their materialised records.
+    pub(crate) nodes: BTreeMap<NodeId, NodeRecord>,
+    /// Mapping from source node to outbound edge records.
+    pub(crate) edges_from: BTreeMap<NodeId, Vec<EdgeRecord>>,
+}
+
+impl GraphStore {
+    /// Returns a shared reference to a node when it exists.
+    pub fn node(&self, id: &NodeId) -> Option<&NodeRecord> {
+        self.nodes.get(id)
+    }
+
+    /// Returns an iterator over edges that originate from the provided node.
+    ///
+    /// Edges are yielded in insertion order. For deterministic traversal
+    /// (e.g., snapshot hashing), callers must sort by `EdgeId`.
+    pub fn edges_from(&self, id: &NodeId) -> impl Iterator<Item = &EdgeRecord> {
+        self.edges_from.get(id).into_iter().flatten()
+    }
+
+    /// Returns a mutable reference to a node when it exists.
+    pub fn node_mut(&mut self, id: &NodeId) -> Option<&mut NodeRecord> {
+        self.nodes.get_mut(id)
+    }
+
+    /// Inserts or replaces a node in the store.
+    pub fn insert_node(&mut self, id: NodeId, record: NodeRecord) {
+        self.nodes.insert(id, record);
+    }
+
+    /// Inserts a directed edge into the store in insertion order.
+    ///
+    /// Ordering note: The underlying vector preserves insertion order. When
+    /// deterministic ordering is required (e.g., snapshot hashing), callers
+    /// must sort by `EdgeId` explicitly.
+    pub fn insert_edge(&mut self, from: NodeId, edge: EdgeRecord) {
+        self.edges_from.entry(from).or_default().push(edge);
+    }
+}
diff --git a/crates/rmg-core/src/ident.rs b/crates/rmg-core/src/ident.rs
new file mode 100644
index 0000000..a876d5f
--- /dev/null
+++ b/crates/rmg-core/src/ident.rs
@@ -0,0 +1,72 @@
+//! Identifier and hashing utilities.
+use blake3::Hasher;
+
+/// Canonical 256-bit hash used throughout the engine for addressing nodes,
+/// types, snapshots, and rewrite rules.
+pub type Hash = [u8; 32];
+
+/// Strongly typed identifier for a registered entity or structural node.
+/// +/// `NodeId` values are obtained from [`make_node_id`] and remain stable across +/// runs because they are derived from a BLAKE3 hash of a string label. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct NodeId(pub Hash); + +/// Strongly typed identifier for the logical kind of a node or component. +/// +/// `TypeId` values are produced by [`make_type_id`] which hashes a label; using +/// a dedicated wrapper prevents accidental mixing of node and type identifiers. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub struct TypeId(pub Hash); + +/// Identifier for a directed edge within the graph. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub struct EdgeId(pub Hash); + +/// Produces a stable, domain‑separated type identifier (prefix `b"type:"`) using BLAKE3. +pub fn make_type_id(label: &str) -> TypeId { + let mut hasher = Hasher::new(); + hasher.update(b"type:"); + hasher.update(label.as_bytes()); + TypeId(hasher.finalize().into()) +} + +/// Produces a stable, domain‑separated node identifier (prefix `b"node:"`) using BLAKE3. +pub fn make_node_id(label: &str) -> NodeId { + let mut hasher = Hasher::new(); + hasher.update(b"node:"); + hasher.update(label.as_bytes()); + NodeId(hasher.finalize().into()) +} + +/// Compact, process-local rule identifier used on hot paths. +/// +/// The engine maps canonical 256-bit rule ids (family ids) to compact u32 +/// handles at registration time. These handles are never serialized; they are +/// purely an in-process acceleration. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct CompactRuleId(pub u32); + +/// Produces a stable, domain‑separated edge identifier (prefix `b"edge:"`) using BLAKE3. 
+pub fn make_edge_id(label: &str) -> EdgeId { + let mut hasher = Hasher::new(); + hasher.update(b"edge:"); + hasher.update(label.as_bytes()); + EdgeId(hasher.finalize().into()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn domain_separation_prevents_cross_type_collisions() { + let lbl = "foo"; + let t = make_type_id(lbl).0; + let n = make_node_id(lbl).0; + let e = make_edge_id(lbl).0; + assert_ne!(t, n); + assert_ne!(t, e); + assert_ne!(n, e); + } +} diff --git a/crates/rmg-core/src/lib.rs b/crates/rmg-core/src/lib.rs index 8022a49..01213e0 100644 --- a/crates/rmg-core/src/lib.rs +++ b/crates/rmg-core/src/lib.rs @@ -3,567 +3,76 @@ //! The current implementation executes queued rewrites deterministically via the //! motion-rule spike utilities. Broader storage and scheduling features will //! continue to land over subsequent phases. -#![deny(missing_docs)] - -use std::collections::{BTreeMap, HashMap}; - -use blake3::Hasher; -use bytes::Bytes; -use thiserror::Error; - +#![forbid(unsafe_code)] +#![deny(missing_docs, rust_2018_idioms, unused_must_use)] +#![deny( + clippy::all, + clippy::pedantic, + clippy::nursery, + clippy::cargo, + clippy::unwrap_used, + clippy::expect_used, + clippy::panic, + clippy::todo, + clippy::unimplemented, + clippy::dbg_macro, + clippy::print_stdout, + clippy::print_stderr +)] +#![allow( + clippy::must_use_candidate, + clippy::return_self_not_must_use, + clippy::unreadable_literal, + clippy::missing_const_for_fn, + clippy::suboptimal_flops, + clippy::redundant_pub_crate, + clippy::many_single_char_names, + clippy::module_name_repetitions, + clippy::use_self +)] + +/// Deterministic math subsystem (Vec3, Mat4, Quat, PRNG). pub mod math; -const POSITION_VELOCITY_BYTES: usize = 24; -/// Public identifier for the built-in motion update rule. -pub const MOTION_RULE_NAME: &str = "motion/update"; - -/// Canonical 256-bit hash used throughout the engine for addressing nodes, -/// types, snapshots, and rewrite rules. 
-pub type Hash = [u8; 32]; - -/// Strongly typed identifier for a registered entity or structural node. -/// -/// `NodeId` values are obtained from `make_node_id` and remain stable across -/// runs because they are derived from a BLAKE3 hash of a string label. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub struct NodeId(pub Hash); - -/// Strongly typed identifier for the logical kind of a node or component. -/// -/// `TypeId` values are produced by `make_type_id` which hashes a label; using -/// a dedicated wrapper prevents accidental mixing of node and type identifiers. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub struct TypeId(pub Hash); - -/// Identifier for a directed edge within the graph. -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct EdgeId(pub Hash); - -/// Materialised record for a single node stored in the graph. -/// -/// The optional `payload` carries domain-specific bytes (component data, -/// attachments, etc) and is interpreted by higher layers. -#[derive(Clone, Debug)] -pub struct NodeRecord { - /// Type identifier describing the node. - pub ty: TypeId, - /// Optional payload owned by the node (component data, attachments, etc.). - pub payload: Option, -} - -/// Materialised record for a single edge stored in the graph. -#[derive(Clone, Debug)] -pub struct EdgeRecord { - /// Stable identifier for the edge. - pub id: EdgeId, - /// Source node identifier. - pub from: NodeId, - /// Destination node identifier. - pub to: NodeId, - /// Type identifier describing the edge. - pub ty: TypeId, - /// Optional payload owned by the edge. - pub payload: Option, -} - -/// Minimal in-memory graph store used by the rewrite executor tests. -/// -/// The production engine will eventually swap in a content-addressed store, -/// but this structure keeps the motion rewrite spike self-contained. -#[derive(Default)] -pub struct GraphStore { - /// Mapping from node identifiers to their materialised records. 
- pub nodes: BTreeMap, - /// Mapping from source node to outbound edge records. - pub edges_from: BTreeMap>, -} - -impl GraphStore { - /// Returns a shared reference to a node when it exists. - pub fn node(&self, id: &NodeId) -> Option<&NodeRecord> { - self.nodes.get(id) - } - - /// Returns an iterator over edges that originate from the provided node. - pub fn edges_from(&self, id: &NodeId) -> impl Iterator { - self.edges_from.get(id).into_iter().flatten() - } - - /// Returns a mutable reference to a node when it exists. - pub fn node_mut(&mut self, id: &NodeId) -> Option<&mut NodeRecord> { - self.nodes.get_mut(id) - } - - /// Inserts or replaces a node in the store. - pub fn insert_node(&mut self, id: NodeId, record: NodeRecord) { - self.nodes.insert(id, record); - } -} - -/// Pattern metadata used by a rewrite rule to describe the input graph shape. -#[derive(Debug)] -pub struct PatternGraph { - /// Ordered list of type identifiers that make up the pattern. - pub nodes: Vec, -} - -/// Function pointer used to determine whether a rule matches the provided scope. -pub type MatchFn = fn(&GraphStore, &NodeId) -> bool; - -/// Function pointer that applies a rewrite to the given scope. -pub type ExecuteFn = fn(&mut GraphStore, &NodeId); - -/// Descriptor for a rewrite rule registered with the engine. -/// -/// Each rule owns: -/// * a deterministic identifier (`id`) -/// * a human-readable name -/// * a left pattern (currently unused by the spike) -/// * callbacks for matching and execution -pub struct RewriteRule { - /// Deterministic identifier for the rewrite rule. - pub id: Hash, - /// Human-readable name for logs and debugging. - pub name: &'static str, - /// Pattern used to describe the left-hand side of the rule. - pub left: PatternGraph, - /// Callback that determines whether the rule matches a given scope. - pub matcher: MatchFn, - /// Callback that applies the rewrite to the given scope. 
- pub executor: ExecuteFn, -} - -/// Thin wrapper around an auto-incrementing transaction identifier. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct TxId(pub u64); - -/// Snapshot returned after a successful commit. +/// Demo implementations showcasing engine capabilities (e.g., motion rule). +pub mod demo; +mod engine_impl; +mod footprint; +mod graph; +mod ident; +mod payload; +mod record; +mod rule; +mod scheduler; +mod snapshot; +mod tx; + +// Re-exports for stable public API +/// Demo helpers and constants for the motion rule. +pub use demo::motion::{build_motion_demo_engine, motion_rule, MOTION_RULE_NAME}; +/// Rewrite engine and error types. +pub use engine_impl::{ApplyResult, Engine, EngineError}; +/// Footprint utilities for MWMR independence checks. +/// `pack_port_key(node, port_id, dir_in)` packs a 64‑bit key as: +/// - upper 32 bits: low 32 bits of the `NodeId` (LE) +/// - bits 31..2: `port_id` (must be < 2^30) +/// - bit 1: reserved (0) +/// - bit 0: direction flag (`1` = input, `0` = output) /// -/// The `hash` value is deterministic and reflects the entire canonicalised -/// graph state (root + payloads). -#[derive(Debug, Clone)] -pub struct Snapshot { - /// Node identifier that serves as the root of the snapshot. - pub root: NodeId, - /// Canonical hash derived from the entire graph state. - pub hash: Hash, - /// Optional parent snapshot hash (if one exists). - pub parent: Option, - /// Transaction identifier associated with the snapshot. - pub tx: TxId, -} - -/// Ordering queue that guarantees rewrites execute deterministically. -#[derive(Debug, Default)] -pub struct DeterministicScheduler { - pending: HashMap>, -} - -/// Internal representation of a rewrite waiting to be applied. -#[derive(Debug)] -pub struct PendingRewrite { - /// Transaction identifier that enqueued the rewrite. - pub tx: TxId, - /// Identifier of the rule to execute. - pub rule_id: Hash, - /// Scope node supplied when `apply` was invoked. 
- pub scope: NodeId, -} - -/// Result of calling `Engine::apply`. -#[derive(Debug)] -pub enum ApplyResult { - /// The rewrite matched and was enqueued for execution. - Applied, - /// The rewrite did not match the provided scope. - NoMatch, -} - -/// Errors emitted by the engine. -#[derive(Debug, Error)] -pub enum EngineError { - /// The supplied transaction identifier did not exist or was already closed. - #[error("transaction not found")] - UnknownTx, - /// A rule was requested that has not been registered with the engine. - #[error("rule not registered: {0}")] - UnknownRule(String), -} - -/// Core rewrite engine used by the spike. -/// -/// It owns a `GraphStore`, the registered rules, and the deterministic scheduler. -pub struct Engine { - store: GraphStore, - rules: HashMap<&'static str, RewriteRule>, - scheduler: DeterministicScheduler, - tx_counter: u64, - current_root: NodeId, - last_snapshot: Option, -} - -impl Engine { - /// Constructs a new engine with the supplied backing store and root node id. - pub fn new(store: GraphStore, root: NodeId) -> Self { - Self { - store, - rules: HashMap::new(), - scheduler: DeterministicScheduler::default(), - tx_counter: 0, - current_root: root, - last_snapshot: None, - } - } - - /// Registers a rewrite rule so it can be referenced by name. - pub fn register_rule(&mut self, rule: RewriteRule) { - self.rules.insert(rule.name, rule); - } - - /// Begins a new transaction and returns its identifier. - pub fn begin(&mut self) -> TxId { - self.tx_counter += 1; - TxId(self.tx_counter) - } - - /// Queues a rewrite for execution if it matches the provided scope. 
- pub fn apply( - &mut self, - tx: TxId, - rule_name: &str, - scope: &NodeId, - ) -> Result { - if tx.0 == 0 || tx.0 > self.tx_counter { - return Err(EngineError::UnknownTx); - } - let rule = match self.rules.get(rule_name) { - Some(rule) => rule, - None => return Err(EngineError::UnknownRule(rule_name.to_owned())), - }; - let matches = (rule.matcher)(&self.store, scope); - if !matches { - return Ok(ApplyResult::NoMatch); - } - - let scope_hash = scope_hash(rule, scope); - self.scheduler.pending.entry(tx).or_default().insert( - (scope_hash, rule.id), - PendingRewrite { - tx, - rule_id: rule.id, - scope: *scope, - }, - ); - - Ok(ApplyResult::Applied) - } - - /// Executes all pending rewrites for the transaction and produces a snapshot. - pub fn commit(&mut self, tx: TxId) -> Result { - if tx.0 == 0 || tx.0 > self.tx_counter { - return Err(EngineError::UnknownTx); - } - let pending = self.scheduler.drain_for_tx(tx); - for rewrite in pending { - if let Some(rule) = self.rule_by_id(&rewrite.rule_id) { - (rule.executor)(&mut self.store, &rewrite.scope); - } - } - - let hash = compute_snapshot_hash(&self.store, &self.current_root); - let snapshot = Snapshot { - root: self.current_root, - hash, - parent: self.last_snapshot.as_ref().map(|s| s.hash), - tx, - }; - self.last_snapshot = Some(snapshot.clone()); - Ok(snapshot) - } - - /// Returns a snapshot for the current graph state without executing rewrites. - pub fn snapshot(&self) -> Snapshot { - let hash = compute_snapshot_hash(&self.store, &self.current_root); - Snapshot { - root: self.current_root, - hash, - parent: self.last_snapshot.as_ref().map(|s| s.hash), - tx: TxId(self.tx_counter), - } - } - - /// Returns a shared view of a node when it exists. - pub fn node(&self, id: &NodeId) -> Option<&NodeRecord> { - self.store.node(id) - } - - /// Inserts or replaces a node directly inside the store. - /// - /// The spike uses this to create motion entities prior to executing rewrites. 
- pub fn insert_node(&mut self, id: NodeId, record: NodeRecord) { - self.store.insert_node(id, record); - } -} - -impl Engine { - fn rule_by_id(&self, id: &Hash) -> Option<&RewriteRule> { - self.rules.values().find(|rule| &rule.id == id) - } -} - -fn scope_hash(rule: &RewriteRule, scope: &NodeId) -> Hash { - let mut hasher = Hasher::new(); - hasher.update(&rule.id); - hasher.update(&scope.0); - hasher.finalize().into() -} - -fn compute_snapshot_hash(store: &GraphStore, root: &NodeId) -> Hash { - let mut hasher = Hasher::new(); - hasher.update(&root.0); - for (node_id, node) in &store.nodes { - hasher.update(&node_id.0); - hasher.update(&(node.ty).0); - match &node.payload { - Some(payload) => { - hasher.update(&(payload.len() as u64).to_le_bytes()); - hasher.update(payload); - } - None => { - hasher.update(&0u64.to_le_bytes()); - } - } - } - for (from, edges) in &store.edges_from { - hasher.update(&from.0); - hasher.update(&(edges.len() as u64).to_le_bytes()); - let mut sorted_edges: Vec<&EdgeRecord> = edges.iter().collect(); - sorted_edges.sort_by(|a, b| a.id.0.cmp(&b.id.0)); - for edge in sorted_edges { - hasher.update(&(edge.id).0); - hasher.update(&(edge.ty).0); - hasher.update(&(edge.to).0); - match &edge.payload { - Some(payload) => { - hasher.update(&(payload.len() as u64).to_le_bytes()); - hasher.update(payload); - } - None => { - hasher.update(&0u64.to_le_bytes()); - } - } - } - } - hasher.finalize().into() -} - -impl DeterministicScheduler { - fn drain_for_tx(&mut self, tx: TxId) -> Vec { - self.pending - .remove(&tx) - .map(|map| map.into_values().collect()) - .unwrap_or_default() - } -} - -/// Serialises a 3D position + velocity vector pair into the canonical payload. 
-pub fn encode_motion_payload(position: [f32; 3], velocity: [f32; 3]) -> Bytes { - let mut buf = Vec::with_capacity(POSITION_VELOCITY_BYTES); - for value in position.into_iter().chain(velocity.into_iter()) { - buf.extend_from_slice(&value.to_le_bytes()); - } - Bytes::from(buf) -} - -/// Deserialises a canonical motion payload into (position, velocity) slices. -pub fn decode_motion_payload(bytes: &Bytes) -> Option<([f32; 3], [f32; 3])> { - if bytes.len() != POSITION_VELOCITY_BYTES { - return None; - } - let mut floats = [0f32; 6]; - for (index, chunk) in bytes.chunks_exact(4).enumerate() { - floats[index] = f32::from_le_bytes(chunk.try_into().ok()?); - } - let position = [floats[0], floats[1], floats[2]]; - let velocity = [floats[3], floats[4], floats[5]]; - Some((position, velocity)) -} - -/// Convenience helper for deriving `TypeId` values from human-readable labels. -pub fn make_type_id(label: &str) -> TypeId { - TypeId(hash_label(label)) -} - -/// Convenience helper for deriving `NodeId` values from human-readable labels. -pub fn make_node_id(label: &str) -> NodeId { - NodeId(hash_label(label)) -} - -fn hash_label(label: &str) -> Hash { - let mut hasher = Hasher::new(); - hasher.update(label.as_bytes()); - hasher.finalize().into() -} - -fn add_vec(a: [f32; 3], b: [f32; 3]) -> [f32; 3] { - [a[0] + b[0], a[1] + b[1], a[2] + b[2]] -} - -/// Executor that updates the encoded position in the entity payload. -fn motion_executor(store: &mut GraphStore, scope: &NodeId) { - if let Some(record) = store.node_mut(scope) { - if let Some(payload) = &record.payload { - if let Some((position, velocity)) = decode_motion_payload(payload) { - let updated = encode_motion_payload(add_vec(position, velocity), velocity); - record.payload = Some(updated); - } - } - } -} - -/// Matcher used by the motion rule to ensure the payload is well-formed. 
-fn motion_matcher(store: &GraphStore, scope: &NodeId) -> bool { - store - .node(scope) - .and_then(|record| record.payload.as_ref()) - .and_then(decode_motion_payload) - .is_some() -} - -/// Returns the built-in motion rule used by the spike. -/// -/// The rule advances an entity's position by its velocity; it is deliberately -/// deterministic so hash comparisons stay stable across independent executions. -pub fn motion_rule() -> RewriteRule { - let mut hasher = Hasher::new(); - hasher.update(MOTION_RULE_NAME.as_bytes()); - let id = hasher.finalize().into(); - RewriteRule { - id, - name: MOTION_RULE_NAME, - left: PatternGraph { nodes: vec![] }, - matcher: motion_matcher, - executor: motion_executor, - } -} - -/// Builds an engine with the default world root and the motion rule registered. -pub fn build_motion_demo_engine() -> Engine { - let mut store = GraphStore::default(); - let root_id = make_node_id("world-root"); - let root_type = make_type_id("world"); - store.insert_node( - root_id, - NodeRecord { - ty: root_type, - payload: None, - }, - ); - - let mut engine = Engine::new(store, root_id); - engine.register_rule(motion_rule()); - engine -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn motion_rule_updates_position_deterministically() { - let entity = make_node_id("entity-1"); - let entity_type = make_type_id("entity"); - let payload = encode_motion_payload([1.0, 2.0, 3.0], [0.5, -1.0, 0.25]); - - let mut store = GraphStore::default(); - store.insert_node( - entity, - NodeRecord { - ty: entity_type, - payload: Some(payload), - }, - ); - - let mut engine = Engine::new(store, entity); - engine.register_rule(motion_rule()); - - let tx = engine.begin(); - let apply = engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); - assert!(matches!(apply, ApplyResult::Applied)); - - let snap = engine.commit(tx).expect("commit"); - let hash_after_first_apply = snap.hash; - - // Run a second engine with identical initial state and ensure hashes match. 
- let mut store_b = GraphStore::default(); - let payload_b = encode_motion_payload([1.0, 2.0, 3.0], [0.5, -1.0, 0.25]); - store_b.insert_node( - entity, - NodeRecord { - ty: entity_type, - payload: Some(payload_b), - }, - ); - - let mut engine_b = Engine::new(store_b, entity); - engine_b.register_rule(motion_rule()); - let tx_b = engine_b.begin(); - let apply_b = engine_b.apply(tx_b, MOTION_RULE_NAME, &entity).unwrap(); - assert!(matches!(apply_b, ApplyResult::Applied)); - let snap_b = engine_b.commit(tx_b).expect("commit B"); - - assert_eq!(hash_after_first_apply, snap_b.hash); - - // Ensure the position actually moved. - let node = engine - .node(&entity) - .expect("entity exists") - .payload - .as_ref() - .and_then(decode_motion_payload) - .expect("payload decode"); - assert_eq!(node.0, [1.5, 1.0, 3.25]); - } - - #[test] - fn motion_rule_no_match_on_missing_payload() { - let entity = make_node_id("entity-2"); - let entity_type = make_type_id("entity"); - - let mut store = GraphStore::default(); - store.insert_node( - entity, - NodeRecord { - ty: entity_type, - payload: None, - }, - ); - - let mut engine = Engine::new(store, entity); - engine.register_rule(motion_rule()); - - let tx = engine.begin(); - let apply = engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); - assert!(matches!(apply, ApplyResult::NoMatch)); - } - - #[test] - fn apply_unknown_rule_returns_error() { - let entity = make_node_id("entity-unknown-rule"); - let entity_type = make_type_id("entity"); - - let mut store = GraphStore::default(); - store.insert_node( - entity, - NodeRecord { - ty: entity_type, - payload: Some(encode_motion_payload([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])), - }, - ); - - let mut engine = Engine::new(store, entity); - let tx = engine.begin(); - let result = engine.apply(tx, "missing-rule", &entity); - assert!(matches!(result, Err(EngineError::UnknownRule(rule)) if rule == "missing-rule")); - } -} +/// Collisions are possible across nodes that share the same low 32‑bit +/// 
fingerprint; choose ids/ports accordingly. +pub use footprint::{pack_port_key, Footprint, PortKey}; +/// In-memory graph store used by the engine spike. +pub use graph::GraphStore; +/// Core identifier types and constructors for nodes, types, and edges. +pub use ident::{make_edge_id, make_node_id, make_type_id, EdgeId, Hash, NodeId, TypeId}; +/// Motion payload encoding/decoding helpers. +pub use payload::{decode_motion_payload, encode_motion_payload}; +/// Graph node and edge record types. +pub use record::{EdgeRecord, NodeRecord}; +/// Rule primitives for pattern/match/execute. +pub use rule::{ConflictPolicy, ExecuteFn, MatchFn, PatternGraph, RewriteRule}; +/// Immutable deterministic snapshot. +pub use snapshot::Snapshot; +/// Transaction identifier type. +pub use tx::TxId; diff --git a/crates/rmg-core/src/math/mat4.rs b/crates/rmg-core/src/math/mat4.rs index ee7e8de..6891fe3 100644 --- a/crates/rmg-core/src/math/mat4.rs +++ b/crates/rmg-core/src/math/mat4.rs @@ -1,4 +1,4 @@ -use crate::math::Vec3; +use crate::math::{Quat, Vec3}; /// Column-major 4×4 matrix matching Echo’s deterministic math layout. /// @@ -11,6 +11,120 @@ pub struct Mat4 { } impl Mat4 { + /// Returns the identity matrix. + /// + /// The identity is the multiplicative neutral element for matrices: + /// `M * I = I * M = M`. Use it as a no‑op transform or as a starting + /// point for composing transforms. + pub const fn identity() -> Self { + Self { + data: [ + 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, + ], + } + } + + /// Builds a translation matrix in meters. + /// + /// Constructs a 4×4 homogeneous translation matrix intended for + /// transforming points in world space (positioning objects). When using + /// [`Mat4::transform_point`], the translation is applied; when using + /// [`Mat4::transform_direction`], translation is ignored (only the upper‑left + /// 3×3 linear part is used). Matrices are column‑major and the bottom‑right + /// element is `1.0`. 
+ pub const fn translation(tx: f32, ty: f32, tz: f32) -> Self { + Self { + data: [ + 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, tx, ty, tz, 1.0, + ], + } + } + + /// Builds a non-uniform scale matrix. + /// + /// Invariants: + /// - Determinant is `sx * sy * sz`. Any zero component produces a + /// degenerate (non-invertible) matrix. + /// - A negative component reflects about the corresponding axis; an odd + /// number of negative components flips handedness. + pub const fn scale(sx: f32, sy: f32, sz: f32) -> Self { + Self { + data: [ + sx, 0.0, 0.0, 0.0, 0.0, sy, 0.0, 0.0, 0.0, 0.0, sz, 0.0, 0.0, 0.0, 0.0, 1.0, + ], + } + } + + /// Builds a rotation matrix around the X axis by `angle` radians. + /// + /// Right‑handed convention: positive angles rotate counter‑clockwise when + /// looking down the +X axis toward the origin. See + /// [`Mat4::rotation_from_euler`] for the full convention. + pub fn rotation_x(angle: f32) -> Self { + let (s, c) = angle.sin_cos(); + Self::new([ + 1.0, 0.0, 0.0, 0.0, 0.0, c, s, 0.0, 0.0, -s, c, 0.0, 0.0, 0.0, 0.0, 1.0, + ]) + } + + /// Builds a rotation matrix around the Y axis by `angle` radians. + /// + /// Right‑handed convention: positive angles rotate counter‑clockwise when + /// looking down the +Y axis toward the origin. See + /// [`Mat4::rotation_from_euler`] for the full convention. + pub fn rotation_y(angle: f32) -> Self { + let (s, c) = angle.sin_cos(); + Self::new([ + c, 0.0, -s, 0.0, 0.0, 1.0, 0.0, 0.0, s, 0.0, c, 0.0, 0.0, 0.0, 0.0, 1.0, + ]) + } + + /// Builds a rotation matrix around the Z axis by `angle` radians. + /// + /// Right‑handed convention: positive angles rotate counter‑clockwise when + /// looking down the +Z axis toward the origin. See + /// [`Mat4::rotation_from_euler`] for the full convention. 
+ pub fn rotation_z(angle: f32) -> Self { + let (s, c) = angle.sin_cos(); + Self::new([ + c, s, 0.0, 0.0, -s, c, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, + ]) + } + + /// Builds a rotation matrix from Euler angles in radians. + /// + /// Convention and order: + /// - Constructs `R = R_y(yaw) * R_x(pitch) * R_z(roll)`. + /// - Matrix multiplication in the code is performed left-to-right in this + /// same order, so the rightmost rotation (`R_z`) is applied first when + /// transforming a vector. + /// - Matrices are intended for column vectors with transforms of the form + /// `M * v` (column-major storage; no implicit transpose). + pub fn rotation_from_euler(yaw: f32, pitch: f32, roll: f32) -> Self { + Self::rotation_y(yaw) + .multiply(&Self::rotation_x(pitch)) + .multiply(&Self::rotation_z(roll)) + } + + /// Constructs a rotation matrix from an axis and angle in radians. + /// + /// The `axis` argument does not need to be pre‑normalised; it is + /// normalised internally. If a zero‑length axis is supplied, the identity + /// matrix is returned (behaviour delegated to + /// [`Quat::from_axis_angle`](crate::math::Quat::from_axis_angle)). + pub fn rotation_axis_angle(axis: Vec3, angle: f32) -> Self { + Self::from_quat(&Quat::from_axis_angle(axis, angle)) + } + + /// Constructs a rotation matrix from a quaternion. + /// + /// Expects a unit (normalised) quaternion for a pure rotation. Passing an + /// unnormalised quaternion scales the resulting matrix. Component order is + /// `(x, y, z, w)` to match [`Quat`]. See [`Quat`] for construction and + /// normalisation helpers. + pub fn from_quat(q: &Quat) -> Self { + q.to_mat4() + } /// Creates a matrix from column-major array data. /// /// Callers must supply 16 finite values already laid out column-major. @@ -19,7 +133,7 @@ impl Mat4 { } /// Returns the matrix as a column-major array. 
- pub fn to_array(self) -> [f32; 16] { + pub const fn to_array(self) -> [f32; 16] { self.data } @@ -30,7 +144,7 @@ impl Mat4 { /// Multiplies the matrix with another matrix (`self * rhs`). /// /// Multiplication follows column-major semantics (`self` on the left, - /// [`rhs`] on the right) to mirror GPU-style transforms. + /// rhs on the right) to mirror GPU-style transforms. pub fn multiply(&self, rhs: &Self) -> Self { let mut out = [0.0; 16]; for row in 0..4 { @@ -83,3 +197,37 @@ impl From<[f32; 16]> for Mat4 { Self { data: value } } } + +impl core::ops::Mul for Mat4 { + type Output = Self; + fn mul(self, rhs: Self) -> Self::Output { + self.multiply(&rhs) + } +} + +impl core::ops::Mul<&Mat4> for &Mat4 { + type Output = Mat4; + fn mul(self, rhs: &Mat4) -> Self::Output { + self.multiply(rhs) + } +} + +impl core::ops::Mul<&Mat4> for Mat4 { + type Output = Mat4; + fn mul(self, rhs: &Mat4) -> Self::Output { + self.multiply(rhs) + } +} + +impl core::ops::Mul for &Mat4 { + type Output = Mat4; + fn mul(self, rhs: Mat4) -> Self::Output { + self.multiply(&rhs) + } +} + +impl Default for Mat4 { + fn default() -> Self { + Self::identity() + } +} diff --git a/crates/rmg-core/src/math/mod.rs b/crates/rmg-core/src/math/mod.rs index fd97902..de28c16 100644 --- a/crates/rmg-core/src/math/mod.rs +++ b/crates/rmg-core/src/math/mod.rs @@ -37,7 +37,7 @@ pub const EPSILON: f32 = 1e-6; /// ensure inputs are finite if deterministic behavior is required. pub fn clamp(value: f32, min: f32, max: f32) -> f32 { assert!(min <= max, "invalid clamp range: {min} > {max}"); - value.max(min).min(max) + value.clamp(min, max) } /// Converts degrees to radians with float32 precision. diff --git a/crates/rmg-core/src/math/prng.rs b/crates/rmg-core/src/math/prng.rs index 3f7c622..741e0a7 100644 --- a/crates/rmg-core/src/math/prng.rs +++ b/crates/rmg-core/src/math/prng.rs @@ -34,7 +34,7 @@ impl Prng { Self { state } } - /// Constructs a PRNG from a single 64-bit seed via SplitMix64 expansion. 
+ /// Constructs a PRNG from a single 64-bit seed via `SplitMix64` expansion. pub fn from_seed_u64(seed: u64) -> Self { fn splitmix64(state: &mut u64) -> u64 { *state = state.wrapping_add(0x9e37_79b9_7f4a_7c15); @@ -81,6 +81,11 @@ impl Prng { /// /// Uses rejection sampling with a power-of-two fast path to avoid modulo /// bias, and supports the full `i32` span. + #[allow( + clippy::cast_sign_loss, + clippy::cast_possible_wrap, + clippy::cast_possible_truncation + )] pub fn next_int(&mut self, min: i32, max: i32) -> i32 { assert!(min <= max, "invalid range: {min}..={max}"); let span = (i64::from(max) - i64::from(min)) as u64 + 1; diff --git a/crates/rmg-core/src/math/quat.rs b/crates/rmg-core/src/math/quat.rs index 97c68e1..69e6d3b 100644 --- a/crates/rmg-core/src/math/quat.rs +++ b/crates/rmg-core/src/math/quat.rs @@ -16,11 +16,16 @@ impl Quat { /// In debug builds this asserts that all components are finite; in release /// builds construction is unchecked. Prefer [`Quat::from_axis_angle`] for /// axis/angle construction when possible. - pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self { + pub fn new(x: f32, y: f32, z: f32, w: f32) -> Self { debug_assert!(x.is_finite() && y.is_finite() && z.is_finite() && w.is_finite()); Self { data: [x, y, z, w] } } + /// Const constructor without debug checks for use in other const fns. + pub const fn new_unchecked(x: f32, y: f32, z: f32, w: f32) -> Self { + Self { data: [x, y, z, w] } + } + /// Returns the quaternion as an array `[x, y, z, w]`. pub fn to_array(self) -> [f32; 4] { self.data @@ -125,7 +130,7 @@ impl Quat { /// Represents no rotation (the multiplicative identity for quaternion /// multiplication). pub const fn identity() -> Self { - Self::new(0.0, 0.0, 0.0, 1.0) + Self::new_unchecked(0.0, 0.0, 0.0, 1.0) } /// Converts the quaternion to a 4×4 rotation matrix in column‑major order. 
diff --git a/crates/rmg-core/src/math/vec3.rs b/crates/rmg-core/src/math/vec3.rs index 4245978..451279e 100644 --- a/crates/rmg-core/src/math/vec3.rs +++ b/crates/rmg-core/src/math/vec3.rs @@ -14,6 +14,10 @@ pub struct Vec3 { } impl Vec3 { + /// Standard zero vector (0, 0, 0). + pub const ZERO: Self = Self { + data: [0.0, 0.0, 0.0], + }; /// Unit vector pointing along the positive X axis. pub const UNIT_X: Self = Self::new(1.0, 0.0, 0.0); @@ -31,8 +35,13 @@ impl Vec3 { Self { data: [x, y, z] } } + /// Constructs the zero vector. + pub const fn zero() -> Self { + Self::ZERO + } + /// Returns the components as an array. - pub fn to_array(self) -> [f32; 3] { + pub const fn to_array(self) -> [f32; 3] { self.data } @@ -122,3 +131,107 @@ impl From<[f32; 3]> for Vec3 { Self { data: value } } } + +impl From for [f32; 3] { + fn from(v: Vec3) -> Self { + v.to_array() + } +} + +impl core::ops::Add for Vec3 { + type Output = Self; + fn add(self, rhs: Self) -> Self::Output { + Self::new( + self.component(0) + rhs.component(0), + self.component(1) + rhs.component(1), + self.component(2) + rhs.component(2), + ) + } +} + +impl core::ops::Add<&Vec3> for &Vec3 { + type Output = Vec3; + fn add(self, rhs: &Vec3) -> Self::Output { + Vec3::new( + self.component(0) + rhs.component(0), + self.component(1) + rhs.component(1), + self.component(2) + rhs.component(2), + ) + } +} + +impl core::ops::Sub for Vec3 { + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { + Self::new( + self.component(0) - rhs.component(0), + self.component(1) - rhs.component(1), + self.component(2) - rhs.component(2), + ) + } +} + +impl core::ops::Sub<&Vec3> for &Vec3 { + type Output = Vec3; + fn sub(self, rhs: &Vec3) -> Self::Output { + Vec3::new( + self.component(0) - rhs.component(0), + self.component(1) - rhs.component(1), + self.component(2) - rhs.component(2), + ) + } +} + +impl core::ops::Mul for Vec3 { + type Output = Self; + fn mul(self, rhs: f32) -> Self::Output { + Self::new( + 
self.component(0) * rhs, + self.component(1) * rhs, + self.component(2) * rhs, + ) + } +} + +impl core::ops::Mul for f32 { + type Output = Vec3; + fn mul(self, rhs: Vec3) -> Self::Output { + rhs * self + } +} + +impl core::ops::Mul for &Vec3 { + type Output = Vec3; + fn mul(self, rhs: f32) -> Self::Output { + Vec3::new( + self.component(0) * rhs, + self.component(1) * rhs, + self.component(2) * rhs, + ) + } +} + +impl<'a> core::ops::Mul<&'a Vec3> for f32 { + type Output = Vec3; + fn mul(self, rhs: &'a Vec3) -> Self::Output { + rhs * self + } +} + +impl core::ops::AddAssign for Vec3 { + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs; + } +} + +impl core::ops::SubAssign for Vec3 { + fn sub_assign(&mut self, rhs: Self) { + *self = *self - rhs; + } +} + +impl core::ops::MulAssign for Vec3 { + fn mul_assign(&mut self, rhs: f32) { + *self = *self * rhs; + } +} diff --git a/crates/rmg-core/src/payload.rs b/crates/rmg-core/src/payload.rs new file mode 100644 index 0000000..1bbc57d --- /dev/null +++ b/crates/rmg-core/src/payload.rs @@ -0,0 +1,75 @@ +//! Canonical payload encoding for the motion demo. +use bytes::Bytes; + +const POSITION_VELOCITY_BYTES: usize = 24; + +/// Serialises a 3D position + velocity pair into the canonical payload. +/// +/// Note: Values are encoded verbatim as `f32` little‑endian bytes; callers are +/// responsible for ensuring finiteness if deterministic behaviour is required +/// (NaN bit patterns compare unequal across some platforms). +/// +/// Layout (little‑endian): +/// - bytes 0..12: position [x, y, z] as 3 × f32 +/// - bytes 12..24: velocity [vx, vy, vz] as 3 × f32 +/// Always 24 bytes. 
+#[inline] +pub fn encode_motion_payload(position: [f32; 3], velocity: [f32; 3]) -> Bytes { + let mut buf = Vec::with_capacity(POSITION_VELOCITY_BYTES); + for value in position.into_iter().chain(velocity.into_iter()) { + buf.extend_from_slice(&value.to_le_bytes()); + } + Bytes::from(buf) +} + +/// Deserialises a canonical motion payload into `(position, velocity)` arrays. +/// +/// Expects exactly 24 bytes laid out as six little-endian `f32` values in +/// the order: position `[x, y, z]` followed by velocity `[vx, vy, vz]`. +/// +/// Returns `None` if `bytes.len() != 24` or if any 4-byte chunk cannot be +/// converted into an `f32` (invalid input). On success, returns two `[f32; 3]` +/// arrays representing position and velocity respectively. +pub fn decode_motion_payload(bytes: &Bytes) -> Option<([f32; 3], [f32; 3])> { + if bytes.len() != POSITION_VELOCITY_BYTES { + return None; + } + let mut floats = [0f32; 6]; + for (index, chunk) in bytes.chunks_exact(4).enumerate() { + floats[index] = f32::from_le_bytes(chunk.try_into().ok()?); + } + let position = [floats[0], floats[1], floats[2]]; + let velocity = [floats[3], floats[4], floats[5]]; + Some((position, velocity)) +} + +#[cfg(test)] +#[allow( + clippy::panic, + clippy::expect_used, + clippy::unwrap_used, + clippy::float_cmp +)] +mod tests { + use super::*; + + #[test] + fn round_trip_ok() { + let pos = [1.0, 2.0, 3.0]; + let vel = [0.5, -1.0, 0.25]; + let bytes = encode_motion_payload(pos, vel); + let (p, v) = decode_motion_payload(&bytes).expect("24-byte payload"); + for i in 0..3 { + assert_eq!(p[i].to_bits(), pos[i].to_bits()); + assert_eq!(v[i].to_bits(), vel[i].to_bits()); + } + } + + #[test] + fn reject_wrong_len() { + let b = Bytes::from_static(&[0u8; 23]); + assert!(decode_motion_payload(&b).is_none()); + let b = Bytes::from_static(&[0u8; 25]); + assert!(decode_motion_payload(&b).is_none()); + } +} diff --git a/crates/rmg-core/src/record.rs b/crates/rmg-core/src/record.rs new file mode 100644 index 
0000000..4b49466 --- /dev/null +++ b/crates/rmg-core/src/record.rs @@ -0,0 +1,31 @@ +//! Graph record types: nodes and edges. +use bytes::Bytes; + +use crate::ident::{EdgeId, NodeId, TypeId}; + +/// Materialised record for a single node stored in the graph. +/// +/// The optional `payload` carries domain-specific bytes (component data, +/// attachments, etc) and is interpreted by higher layers. +#[derive(Clone, Debug)] +pub struct NodeRecord { + /// Type identifier describing the node. + pub ty: TypeId, + /// Optional payload owned by the node (component data, attachments, etc.). + pub payload: Option, +} + +/// Materialised record for a single edge stored in the graph. +#[derive(Clone, Debug)] +pub struct EdgeRecord { + /// Stable identifier for the edge. + pub id: EdgeId, + /// Source node identifier. + pub from: NodeId, + /// Destination node identifier. + pub to: NodeId, + /// Type identifier describing the edge. + pub ty: TypeId, + /// Optional payload owned by the edge. + pub payload: Option, +} diff --git a/crates/rmg-core/src/rule.rs b/crates/rmg-core/src/rule.rs new file mode 100644 index 0000000..43f1aa8 --- /dev/null +++ b/crates/rmg-core/src/rule.rs @@ -0,0 +1,82 @@ +//! Rewrite rule definitions. +use crate::footprint::Footprint; +use crate::graph::GraphStore; +use crate::ident::{Hash, NodeId, TypeId}; + +/// Pattern metadata used by a rewrite rule to describe the input graph shape. +#[derive(Debug)] +pub struct PatternGraph { + /// Ordered list of type identifiers that make up the pattern. + pub nodes: Vec, +} + +/// Function pointer used to determine whether a rule matches the provided scope. +pub type MatchFn = fn(&GraphStore, &NodeId) -> bool; + +/// Function pointer that applies a rewrite to the given scope. +pub type ExecuteFn = fn(&mut GraphStore, &NodeId); + +/// Function pointer that computes a rewrite footprint at the provided scope. 
+pub type FootprintFn = fn(&GraphStore, &NodeId) -> Footprint; + +/// Conflict resolution policies for independence failures. +#[derive(Debug, Clone, Copy)] +pub enum ConflictPolicy { + /// Abort the rewrite when a conflict is detected. + Abort, + /// Retry (re-match) against the latest state. + Retry, + /// Attempt a join using a rule-provided strategy. + /// + /// Requires the rule's [`RewriteRule::join_fn`] field to be `Some`; rules + /// specifying `Join` without providing a join function will be rejected at + /// registration time in future revisions. + Join, +} + +/// Optional join strategy used when `conflict_policy == ConflictPolicy::Join`. +/// +/// The spike does not use joins yet; the signature is kept minimal until +/// pending rewrite metadata stabilises across modules. +pub type JoinFn = fn(/* left */ &NodeId, /* right */ &NodeId) -> bool; + +/// Descriptor for a rewrite rule registered with the engine. +/// +/// Each rule owns: +/// * a deterministic identifier (`id`) +/// * a human-readable name +/// * a left pattern (currently unused by the spike) +/// * callbacks for matching and execution +pub struct RewriteRule { + /// Deterministic identifier for the rewrite rule. + pub id: Hash, + /// Human-readable name for logs and debugging. + pub name: &'static str, + /// Pattern used to describe the left-hand side of the rule. + pub left: PatternGraph, + /// Callback used to determine if the rule matches the provided scope. + pub matcher: MatchFn, + /// Callback that applies the rewrite to the provided scope. + pub executor: ExecuteFn, + /// Callback that computes a footprint for independence checks. + pub compute_footprint: FootprintFn, + /// Spatial partition bitmask used as an O(1) prefilter. + pub factor_mask: u64, + /// Conflict resolution policy when independence fails. + pub conflict_policy: ConflictPolicy, + /// Join function required when `conflict_policy == ConflictPolicy::Join`. 
+ /// + /// Invariant: If `conflict_policy` is `ConflictPolicy::Join`, this field + /// must be `Some`. Rules that violate this invariant are subject to being + /// rejected by `Engine::register_rule` in future revisions. + pub join_fn: Option, +} + +impl core::fmt::Debug for RewriteRule { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("RewriteRule") + .field("id", &self.id) + .field("name", &self.name) + .finish_non_exhaustive() + } +} diff --git a/crates/rmg-core/src/scheduler.rs b/crates/rmg-core/src/scheduler.rs new file mode 100644 index 0000000..feec645 --- /dev/null +++ b/crates/rmg-core/src/scheduler.rs @@ -0,0 +1,116 @@ +//! Deterministic rewrite scheduler and pending queue. +//! +//! Ordering invariant +//! - Rewrites for a transaction are executed in ascending lexicographic order +//! of `(scope_hash, rule_id)`. This ordering is stable across platforms and +//! runs and is enforced before returning the pending queue to callers. +use std::collections::{BTreeMap, HashMap}; + +use crate::footprint::Footprint; +use crate::ident::{CompactRuleId, Hash, NodeId}; +#[cfg(feature = "telemetry")] +use crate::telemetry; +use crate::tx::TxId; + +/// Ordering queue that guarantees rewrites execute deterministically. +#[derive(Debug, Default)] +pub(crate) struct DeterministicScheduler { + pub(crate) pending: HashMap>, + pub(crate) active: HashMap>, // Reserved/Committed frontier + #[cfg(feature = "telemetry")] + pub(crate) counters: HashMap, // (reserved, conflict) +} + +/// Internal representation of a rewrite waiting to be applied. +#[derive(Debug)] +pub(crate) struct PendingRewrite { + /// Identifier of the rule to execute. + #[cfg_attr(not(feature = "telemetry"), allow(dead_code))] + pub rule_id: Hash, + /// Compact in-process rule handle used on hot paths. + #[allow(dead_code)] + pub compact_rule: CompactRuleId, + /// Scope hash used for deterministic ordering together with `rule_id`. 
+ #[allow(dead_code)] + pub scope_hash: Hash, + /// Scope node supplied when `apply` was invoked. + pub scope: NodeId, + /// Footprint used for independence checks and conflict resolution. + #[allow(dead_code)] + pub footprint: Footprint, + /// State machine phase for the rewrite. + #[allow(dead_code)] + pub phase: RewritePhase, +} + +/// Phase of a pending rewrite in the lock-free scheduler. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum RewritePhase { + /// Match found and footprint computed. + Matched, + /// Passed independence checks and reserved. + #[allow(dead_code)] + Reserved, + /// Successfully applied. + #[allow(dead_code)] + Committed, + /// Aborted due to conflict or validation failure. + #[allow(dead_code)] + Aborted, +} + +impl DeterministicScheduler { + /// Removes and returns all pending rewrites for `tx`, ordered by + /// `(scope_hash, rule_id)` in ascending lexicographic order. + pub(crate) fn drain_for_tx(&mut self, tx: TxId) -> Vec { + self.pending + .remove(&tx) + .map(|map| map.into_values().collect()) + .unwrap_or_default() + } + + /// Attempts to reserve a rewrite by checking independence against the + /// active frontier for `tx`. On success, pushes the footprint into the + /// frontier and transitions the phase to `Reserved`. + /// + /// Current implementation: O(n) scan of the active frontier. For large + /// transaction sizes, consider spatial indexing or hierarchical structures + /// to reduce reservation cost. 
+ pub(crate) fn reserve(&mut self, tx: TxId, pr: &mut PendingRewrite) -> bool { + let frontier = self.active.entry(tx).or_default(); + for fp in frontier.iter() { + if !pr.footprint.independent(fp) { + pr.phase = RewritePhase::Aborted; + #[cfg(feature = "telemetry")] + { + let entry = self.counters.entry(tx).or_default(); + entry.1 += 1; + } + #[cfg(feature = "telemetry")] + telemetry::conflict(tx, &pr.rule_id); + return false; + } + } + pr.phase = RewritePhase::Reserved; + frontier.push(pr.footprint.clone()); + #[cfg(feature = "telemetry")] + { + let entry = self.counters.entry(tx).or_default(); + entry.0 += 1; + } + #[cfg(feature = "telemetry")] + telemetry::reserved(tx, &pr.rule_id); + true + } + + /// Finalizes accounting for `tx`: emits a telemetry summary when enabled + /// and clears the active frontier and counters for the transaction. + pub(crate) fn finalize_tx(&mut self, tx: TxId) { + #[cfg(feature = "telemetry")] + if let Some((reserved, conflict)) = self.counters.remove(&tx) { + telemetry::summary(tx, reserved, conflict); + } + self.active.remove(&tx); + self.pending.remove(&tx); + } +} diff --git a/crates/rmg-core/src/snapshot.rs b/crates/rmg-core/src/snapshot.rs new file mode 100644 index 0000000..c2156fb --- /dev/null +++ b/crates/rmg-core/src/snapshot.rs @@ -0,0 +1,123 @@ +//! Snapshot type and hash computation. +//! +//! Determinism contract +//! - The snapshot hash is a BLAKE3 digest over a canonical byte stream that +//! encodes the entire reachable graph state for the current root. +//! - Ordering is explicit and stable: nodes are visited in ascending `NodeId` +//! order (lexicographic over 32-byte ids). For each node, outbound edges are +//! sorted by ascending `EdgeId` before being encoded. +//! - Encoding is fixed-size and architecture-independent: +//! - All ids (`NodeId`, `TypeId`, `EdgeId`) are raw 32-byte values. +//! - Payloads are prefixed by an 8-byte little-endian length, followed by the +//! 
exact payload bytes (or length `0` with no payload). +//! - The root id is included first to bind the subgraph identity. +//! +//! Notes +//! - Little-endian was chosen for length fields to match the rest of the code +//! base; changing endianness would change hash values and must be treated as a +//! breaking change. If we decide to adopt big-endian, update the encoding +//! here and add a migration note in the determinism spec. +//! - The in-memory store uses `BTreeMap`, which guarantees deterministic key +//! iteration. For vectors (edge lists), we sort explicitly by `EdgeId`. +use std::collections::{BTreeSet, VecDeque}; + +use blake3::Hasher; + +use crate::graph::GraphStore; +use crate::ident::{Hash, NodeId}; +use crate::record::EdgeRecord; +use crate::tx::TxId; + +/// Snapshot returned after a successful commit. +/// +/// The `hash` value is deterministic and reflects the entire canonicalised +/// graph state (root + payloads). +#[derive(Debug, Clone)] +pub struct Snapshot { + /// Node identifier that serves as the root of the snapshot. + pub root: NodeId, + /// Canonical hash derived from the entire graph state. + pub hash: Hash, + /// Optional parent snapshot hash (if one exists). + pub parent: Option, + /// Transaction identifier associated with the snapshot. + pub tx: TxId, +} + +/// Computes a canonical hash for the current graph state. +/// +/// Algorithm +/// 1) Update with `root` id bytes. +/// 2) For each `(node_id, node)` in `store.nodes` (ascending by `node_id`): +/// - Update with `node_id`, `node.ty`. +/// - Update with 8-byte LE payload length, then payload bytes (if any). +/// 3) For each `(from, edges)` in `store.edges_from` (ascending by `from`): +/// - Update with `from` id and edge count (8-byte LE). +/// - Sort `edges` by `edge.id` ascending and for each edge: +/// - Update with `edge.id`, `edge.ty`, `edge.to`. +/// - Update with 8-byte LE payload length, then payload bytes (if any). 
+pub(crate) fn compute_snapshot_hash(store: &GraphStore, root: &NodeId) -> Hash { + // 1) Determine reachable subgraph using a deterministic BFS over outgoing edges. + let mut reachable: BTreeSet = BTreeSet::new(); + let mut queue: VecDeque = VecDeque::new(); + reachable.insert(*root); + queue.push_back(*root); + while let Some(current) = queue.pop_front() { + for edge in store.edges_from(¤t) { + if reachable.insert(edge.to) { + queue.push_back(edge.to); + } + } + } + + let mut hasher = Hasher::new(); + hasher.update(&root.0); + + // 2) Hash nodes in ascending NodeId order but only if reachable. + for (node_id, node) in &store.nodes { + if !reachable.contains(node_id) { + continue; + } + hasher.update(&node_id.0); + hasher.update(&(node.ty).0); + match &node.payload { + Some(payload) => { + hasher.update(&(payload.len() as u64).to_le_bytes()); + hasher.update(payload); + } + None => { + hasher.update(&0u64.to_le_bytes()); + } + } + } + + // 3) Hash outgoing edges per reachable source, sorted by EdgeId, and only + // include edges whose destination is also reachable. + for (from, edges) in &store.edges_from { + if !reachable.contains(from) { + continue; + } + // Filter to reachable targets first; length counts included edges only. 
+ let mut sorted_edges: Vec<&EdgeRecord> = + edges.iter().filter(|e| reachable.contains(&e.to)).collect(); + sorted_edges.sort_by(|a, b| a.id.0.cmp(&b.id.0)); + + hasher.update(&from.0); + hasher.update(&(sorted_edges.len() as u64).to_le_bytes()); + for edge in sorted_edges { + hasher.update(&(edge.id).0); + hasher.update(&(edge.ty).0); + hasher.update(&(edge.to).0); + match &edge.payload { + Some(payload) => { + hasher.update(&(payload.len() as u64).to_le_bytes()); + hasher.update(payload); + } + None => { + hasher.update(&0u64.to_le_bytes()); + } + } + } + } + hasher.finalize().into() +} diff --git a/crates/rmg-core/src/telemetry.rs b/crates/rmg-core/src/telemetry.rs new file mode 100644 index 0000000..414ad6c --- /dev/null +++ b/crates/rmg-core/src/telemetry.rs @@ -0,0 +1,102 @@ +// Telemetry helpers for JSONL logging when the `telemetry` feature is enabled. + +#[cfg(feature = "telemetry")] +use serde::Serialize; + +use crate::ident::Hash; +use crate::tx::TxId; + +#[cfg(feature = "telemetry")] +#[derive(Serialize)] +struct Event<'a> { + timestamp_micros: u128, + tx_id: u64, + event: &'a str, + rule_id_short: String, +} + +#[inline] +fn short_id(h: &Hash) -> String { + #[cfg(feature = "telemetry")] + { + let mut short = [0u8; 8]; + short.copy_from_slice(&h[0..8]); + return hex::encode(short); + } + #[allow(unreachable_code)] + String::new() +} + +#[cfg(feature = "telemetry")] +fn ts_micros() -> u128 { + use std::time::{SystemTime, UNIX_EPOCH}; + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_micros() +} + +#[cfg(feature = "telemetry")] +fn emit(kind: &str, tx: TxId, rule: &Hash) { + let ev = Event { + timestamp_micros: ts_micros(), + tx_id: tx.value(), + event: kind, + rule_id_short: short_id(rule), + }; + // Best-effort stdout with a single locked write sequence to avoid interleaving. 
+ let mut out = std::io::stdout().lock(); + let _ = serde_json::to_writer(&mut out, &ev); + use std::io::Write as _; + let _ = out.write_all(b"\n"); +} + +/// Emits a conflict telemetry event when a rewrite fails independence checks. +/// +/// Logs the transaction id and rule id (shortened) as a JSON line to stdout +/// when the `telemetry` feature is enabled. Best-effort: I/O errors are +/// ignored and timestamps fall back to 0 on clock errors. +#[cfg(feature = "telemetry")] +pub fn conflict(tx: TxId, rule: &Hash) { + emit("conflict", tx, rule); +} + +/// Emits a reserved telemetry event when a rewrite passes independence checks. +/// +/// Logs the transaction id and rule id (shortened) as a JSON line to stdout +/// when the `telemetry` feature is enabled. Best-effort: I/O errors are +/// ignored and timestamps fall back to 0 on clock errors. +#[cfg(feature = "telemetry")] +pub fn reserved(tx: TxId, rule: &Hash) { + emit("reserved", tx, rule); +} + +/// Emits a summary telemetry event with transaction statistics. +/// +/// Logs the transaction id, reserved count, and conflict count as a JSON line +/// to stdout when the `telemetry` feature is enabled. Called at transaction +/// finalization. Best-effort: I/O errors are ignored and timestamps may fall +/// back to 0 on clock errors. 
+#[cfg(feature = "telemetry")] +pub fn summary(tx: TxId, reserved_count: u64, conflict_count: u64) { + use serde::Serialize; + #[derive(Serialize)] + struct Summary { + timestamp_micros: u128, + tx_id: u64, + event: &'static str, + reserved: u64, + conflicts: u64, + } + let s = Summary { + timestamp_micros: ts_micros(), + tx_id: tx.value(), + event: "summary", + reserved: reserved_count, + conflicts: conflict_count, + }; + let mut out = std::io::stdout().lock(); + let _ = serde_json::to_writer(&mut out, &s); + use std::io::Write as _; + let _ = out.write_all(b"\n"); +} diff --git a/crates/rmg-core/src/tx.rs b/crates/rmg-core/src/tx.rs new file mode 100644 index 0000000..d32e6c8 --- /dev/null +++ b/crates/rmg-core/src/tx.rs @@ -0,0 +1,45 @@ +//! Transaction identifier types. + +/// Thin wrapper around a transaction identifier. +/// +/// The engine issues monotonically increasing identifiers via +/// [`crate::Engine::begin`]. External bindings may construct `TxId` values for +/// FFI/Wasm interop using [`TxId::from_raw`]. +/// +/// # Invariants +/// - The underlying `u64` may wrap at `u64::MAX` (wrapping is intentional). +/// When wrapping occurs, the engine resumes at `1` (skipping zero). +/// - Zero (`TxId(0)`) is reserved as invalid. [`crate::Engine::begin`] never returns zero. +/// - External callers using [`TxId::from_raw`] must not construct `TxId(0)` unless +/// they have a valid reason (e.g., sentinel in FFI); using invalid ids with engine +/// operations returns [`EngineError::UnknownTx`]. +/// +/// The `#[repr(transparent)]` attribute ensures FFI ABI compatibility: `TxId` has +/// the same memory layout as `u64` across the FFI/Wasm boundary. +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] +pub struct TxId(u64); + +impl TxId { + /// Constructs a `TxId` from a raw `u64` value. + /// + /// # Safety Note + /// Callers must not construct `TxId(0)` as it is reserved as invalid. 
+ /// Using an invalid `TxId` with engine operations returns [`EngineError::UnknownTx`]. + #[must_use] + pub const fn from_raw(value: u64) -> Self { + Self(value) + } + + /// Returns the underlying raw value. + #[must_use] + pub const fn value(self) -> u64 { + self.0 + } +} + +impl core::fmt::Display for TxId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/crates/rmg-core/tests/common/mod.rs b/crates/rmg-core/tests/common/mod.rs new file mode 100644 index 0000000..e3fd815 --- /dev/null +++ b/crates/rmg-core/tests/common/mod.rs @@ -0,0 +1,6 @@ +use rmg_core::{Engine, GraphStore, NodeId}; + +pub fn snapshot_hash_of(store: GraphStore, root: NodeId) -> [u8; 32] { + let engine = Engine::new(store, root); + engine.snapshot().hash +} diff --git a/crates/rmg-core/tests/engine_motion_tests.rs b/crates/rmg-core/tests/engine_motion_tests.rs new file mode 100644 index 0000000..a855311 --- /dev/null +++ b/crates/rmg-core/tests/engine_motion_tests.rs @@ -0,0 +1,161 @@ +#![allow(missing_docs)] +use rmg_core::{ + decode_motion_payload, encode_motion_payload, make_node_id, make_type_id, ApplyResult, Engine, + EngineError, GraphStore, NodeRecord, MOTION_RULE_NAME, +}; + +#[test] +fn motion_rule_updates_position_deterministically() { + let entity = make_node_id("entity-1"); + let entity_type = make_type_id("entity"); + let payload = encode_motion_payload([1.0, 2.0, 3.0], [0.5, -1.0, 0.25]); + + let mut store = GraphStore::default(); + store.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: Some(payload), + }, + ); + + let mut engine = Engine::new(store, entity); + engine + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + + let tx = engine.begin(); + let apply = engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); + assert!(matches!(apply, ApplyResult::Applied)); + + let snap = engine.commit(tx).expect("commit"); + let hash_after_first_apply = snap.hash; + + // Run a
second engine with identical initial state and ensure hashes match. + let mut store_b = GraphStore::default(); + let payload_b = encode_motion_payload([1.0, 2.0, 3.0], [0.5, -1.0, 0.25]); + store_b.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: Some(payload_b), + }, + ); + + let mut engine_b = Engine::new(store_b, entity); + engine_b + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + let tx_b = engine_b.begin(); + let apply_b = engine_b.apply(tx_b, MOTION_RULE_NAME, &entity).unwrap(); + assert!(matches!(apply_b, ApplyResult::Applied)); + let snap_b = engine_b.commit(tx_b).expect("commit B"); + + assert_eq!(hash_after_first_apply, snap_b.hash); + + // Ensure the position actually moved. + let node = engine + .node(&entity) + .expect("entity exists") + .payload + .as_ref() + .and_then(decode_motion_payload) + .expect("payload decode"); + assert_eq!(node.0, [1.5, 1.0, 3.25]); + assert_eq!(node.1, [0.5, -1.0, 0.25]); +} + +#[test] +fn motion_rule_no_match_on_missing_payload() { + let entity = make_node_id("entity-2"); + let entity_type = make_type_id("entity"); + + let mut store = GraphStore::default(); + store.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: None, + }, + ); + + let mut engine = Engine::new(store, entity); + engine + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + + // Capture hash before any tx + let before = engine.snapshot().hash; + let tx = engine.begin(); + let apply = engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); + assert!(matches!(apply, ApplyResult::NoMatch)); + // Commit should be a no-op for state; hash remains identical and payload stays None. 
+ let snap = engine.commit(tx).expect("no-op commit"); + assert_eq!(snap.hash, before); + assert!(engine.node(&entity).unwrap().payload.is_none()); +} + +#[test] +fn motion_rule_twice_is_deterministic_across_engines() { + let entity = make_node_id("entity-1-twice"); + let entity_type = make_type_id("entity"); + let payload = encode_motion_payload([1.0, 2.0, 3.0], [0.5, -1.0, 0.25]); + + let mut store_a = GraphStore::default(); + store_a.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: Some(payload.clone()), + }, + ); + let mut engine_a = Engine::new(store_a, entity); + engine_a + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + for _ in 0..2 { + let tx = engine_a.begin(); + engine_a.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); + engine_a.commit(tx).unwrap(); + } + + let mut store_b = GraphStore::default(); + store_b.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: Some(payload), + }, + ); + let mut engine_b = Engine::new(store_b, entity); + engine_b + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + for _ in 0..2 { + let tx = engine_b.begin(); + engine_b.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); + engine_b.commit(tx).unwrap(); + } + + assert_eq!(engine_a.snapshot().hash, engine_b.snapshot().hash); +} + +#[test] +fn apply_unknown_rule_returns_error() { + let entity = make_node_id("entity-unknown-rule"); + let entity_type = make_type_id("entity"); + + let mut store = GraphStore::default(); + store.insert_node( + entity, + NodeRecord { + ty: entity_type, + payload: Some(encode_motion_payload([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])), + }, + ); + + let mut engine = Engine::new(store, entity); + let tx = engine.begin(); + let result = engine.apply(tx, "missing-rule", &entity); + assert!(matches!(result, Err(EngineError::UnknownRule(rule)) if rule == "missing-rule")); +} diff --git a/crates/rmg-core/tests/footprint_independence_tests.rs 
b/crates/rmg-core/tests/footprint_independence_tests.rs new file mode 100644 index 0000000..9dfe426 --- /dev/null +++ b/crates/rmg-core/tests/footprint_independence_tests.rs @@ -0,0 +1,95 @@ +#![allow(missing_docs)] +use rmg_core::{make_node_id, Footprint, NodeId, PortKey}; + +fn pack_port(node: &NodeId, port_id: u32, dir_in: bool) -> PortKey { + // Test-only packer: use the leading 8 bytes of NodeId for a stable key. + let mut node_hi = [0u8; 8]; + node_hi.copy_from_slice(&node.0[0..8]); + let node_bits = u64::from_le_bytes(node_hi); + let dir_bit = if dir_in { 1u64 } else { 0u64 }; + (node_bits << 32) | ((port_id as u64) << 2) | dir_bit +} + +#[test] +fn disjoint_factors_are_independent() { + let a = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + let b = Footprint { + factor_mask: 0b0010, + ..Default::default() + }; + assert!(a.independent(&b)); +} + +#[test] +fn overlapping_node_writes_conflict() { + let mut a = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + let n = make_node_id("n"); + a.n_write.insert_node(&n); + + let mut b = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + b.n_write.insert_node(&n); + + assert!(!a.independent(&b)); +} + +#[test] +fn write_read_conflict() { + let mut a = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + let n = make_node_id("n"); + a.n_write.insert_node(&n); + + let mut b = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + b.n_read.insert_node(&n); + + assert!(!a.independent(&b)); +} + +#[test] +fn independent_nodes_no_conflict() { + let mut a = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + a.n_write.insert_node(&make_node_id("a")); + + let mut b = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + b.n_write.insert_node(&make_node_id("b")); + + assert!(a.independent(&b)); +} + +#[test] +fn port_conflict_detected() { + let node = make_node_id("p"); + let mut a = Footprint { + factor_mask: 0b0001, + 
..Default::default() + }; + a.b_in.insert(pack_port(&node, 0, true)); + + let mut b = Footprint { + factor_mask: 0b0001, + ..Default::default() + }; + b.b_in.insert(pack_port(&node, 0, true)); + + assert!(!a.independent(&b)); +} diff --git a/crates/rmg-core/tests/mat4_mul_tests.rs b/crates/rmg-core/tests/mat4_mul_tests.rs new file mode 100644 index 0000000..534265e --- /dev/null +++ b/crates/rmg-core/tests/mat4_mul_tests.rs @@ -0,0 +1,25 @@ +#![allow(missing_docs)] +use rmg_core::math::Mat4; + +const EPS: f32 = 1e-6; + +fn approx_eq16(a: [f32; 16], b: [f32; 16]) { + for i in 0..16 { + assert!((a[i] - b[i]).abs() <= EPS, "index {i}: {a:?} vs {b:?}"); + } +} + +#[test] +fn mat4_mul_operator_matches_method() { + let s = Mat4::scale(2.0, 3.0, 4.0); + let id = Mat4::identity(); + // operator + let op = id * s; + // method + let meth = id.multiply(&s); + approx_eq16(op.to_array(), meth.to_array()); + // also verify the opposite order + let op2 = s * id; + let meth2 = s.multiply(&id); + approx_eq16(op2.to_array(), meth2.to_array()); +} diff --git a/crates/rmg-core/tests/math_additional_tests.rs b/crates/rmg-core/tests/math_additional_tests.rs new file mode 100644 index 0000000..b0d6a59 --- /dev/null +++ b/crates/rmg-core/tests/math_additional_tests.rs @@ -0,0 +1,34 @@ +#![allow(missing_docs)] +use rmg_core::math::{self, Mat4, Quat, Vec3}; + +fn approx_eq(a: f32, b: f32) { + let diff = (a - b).abs(); + assert!(diff <= 1e-6, "expected {b}, got {a} (diff {diff})"); +} + +#[test] +fn vec3_normalize_degenerate_returns_zero() { + let v = Vec3::new(1e-12, -1e-12, 0.0); + let n = v.normalize(); + assert_eq!(n.to_array(), [0.0, 0.0, 0.0]); +} + +#[test] +fn quat_identity_properties() { + let id = Quat::identity(); + // identity * identity == identity + let composed = id.multiply(&id); + assert_eq!(composed.to_array(), id.to_array()); + // to_mat4(identity) == Mat4::identity() + let m = id.to_mat4(); + assert_eq!(m.to_array(), Mat4::identity().to_array()); +} + +#[test] +fn 
deg_rad_roundtrip_basic_angles() { + for deg in [0.0f32, 45.0, 90.0, 180.0, -90.0] { + let rad = math::deg_to_rad(deg); + let back = math::rad_to_deg(rad); + approx_eq(back, deg); + } +} diff --git a/crates/rmg-core/tests/math_convenience_tests.rs b/crates/rmg-core/tests/math_convenience_tests.rs new file mode 100644 index 0000000..dc04444 --- /dev/null +++ b/crates/rmg-core/tests/math_convenience_tests.rs @@ -0,0 +1,51 @@ +//! Focused tests for math convenience constructors to boost coverage +//! and ensure expected semantics for identity/translation/scale and +//! vector basis constants. + +use rmg_core::math::{Mat4, Vec3}; + +#[test] +fn identity_multiply_is_noop() { + // A matrix multiplied by identity should equal the original. + let a = Mat4::from([ + 1.0, 0.0, 0.0, 0.0, // col 0 + 0.0, 0.0, -1.0, 0.0, // col 1 + 0.0, 1.0, 0.0, 0.0, // col 2 + 5.0, -3.0, 2.0, 1.0, // col 3 + ]); + let id = Mat4::identity(); + assert_eq!(a.multiply(&id).to_array(), a.to_array()); + assert_eq!(id.multiply(&a).to_array(), a.to_array()); +} + +#[test] +fn translation_affects_points_but_not_directions() { + let t = Mat4::translation(5.0, -3.0, 2.0); + let p = Vec3::new(2.0, 4.0, -1.0); + let d = Vec3::new(2.0, 4.0, -1.0); + + let p2 = t.transform_point(&p); + let d2 = t.transform_direction(&d); + + assert_eq!(p2.to_array(), [7.0, 1.0, 1.0]); + assert_eq!(d2.to_array(), d.to_array()); +} + +#[test] +fn scale_stretches_points_and_directions() { + let s = Mat4::scale(2.0, 3.0, -1.0); + let v = Vec3::new(1.0, -2.0, 0.5); + let p = s.transform_point(&v); + let d = s.transform_direction(&v); + assert_eq!(p.to_array(), [2.0, -6.0, -0.5]); + assert_eq!(d.to_array(), [2.0, -6.0, -0.5]); +} + +#[test] +fn vec3_basis_and_zero() { + assert_eq!(Vec3::ZERO.to_array(), [0.0, 0.0, 0.0]); + assert_eq!(Vec3::UNIT_X.to_array(), [1.0, 0.0, 0.0]); + assert_eq!(Vec3::UNIT_Y.to_array(), [0.0, 1.0, 0.0]); + assert_eq!(Vec3::UNIT_Z.to_array(), [0.0, 0.0, 1.0]); + assert_eq!(Vec3::zero().to_array(), [0.0, 
0.0, 0.0]); +} diff --git a/crates/rmg-core/tests/math_rotation_tests.rs b/crates/rmg-core/tests/math_rotation_tests.rs new file mode 100644 index 0000000..033aa2d --- /dev/null +++ b/crates/rmg-core/tests/math_rotation_tests.rs @@ -0,0 +1,76 @@ +#![allow(missing_docs)] +use core::f32::consts::FRAC_PI_2; +use rmg_core::math::{Mat4, Vec3}; + +fn approx_eq3(a: [f32; 3], b: [f32; 3]) { + const ABS_TOL: f32 = 1e-7; + const REL_TOL: f32 = 1e-6; + for i in 0..3 { + let ai = a[i]; + let bi = b[i]; + let diff = (ai - bi).abs(); + let scale = ai.abs().max(bi.abs()); + let tol = ABS_TOL.max(REL_TOL * scale); + assert!( + diff <= tol, + "index {i}: {a:?} vs {b:?}, diff={diff}, tol={tol} (scale={scale})" + ); + } +} + +#[test] +fn rot_z_maps_x_to_y() { + let y = Mat4::rotation_z(FRAC_PI_2).transform_direction(&Vec3::UNIT_X); + approx_eq3(y.to_array(), [0.0, 1.0, 0.0]); +} + +#[test] +fn rot_y_maps_z_to_x() { + let x = Mat4::rotation_y(FRAC_PI_2).transform_direction(&Vec3::UNIT_Z); + approx_eq3(x.to_array(), [1.0, 0.0, 0.0]); +} + +#[test] +fn rot_x_maps_y_to_z() { + let z = Mat4::rotation_x(FRAC_PI_2).transform_direction(&Vec3::UNIT_Y); + approx_eq3(z.to_array(), [0.0, 0.0, 1.0]); +} + +#[test] +fn axis_angle_matches_axis_specific_rotation() { + // Y-rotation via axis-angle should match rotation_y. 
+ let aa = Mat4::rotation_axis_angle(Vec3::UNIT_Y, FRAC_PI_2); + let ry = Mat4::rotation_y(FRAC_PI_2); + let v = Vec3::UNIT_Z; + approx_eq3( + aa.transform_direction(&v).to_array(), + ry.transform_direction(&v).to_array(), + ); +} + +#[test] +fn euler_matches_axis_specific_rotations() { + // Yaw only + let e = Mat4::rotation_from_euler(FRAC_PI_2, 0.0, 0.0); + let y = Mat4::rotation_y(FRAC_PI_2); + approx_eq3( + e.transform_direction(&Vec3::UNIT_Z).to_array(), + y.transform_direction(&Vec3::UNIT_Z).to_array(), + ); + + // Pitch only + let e = Mat4::rotation_from_euler(0.0, FRAC_PI_2, 0.0); + let x = Mat4::rotation_x(FRAC_PI_2); + approx_eq3( + e.transform_direction(&Vec3::UNIT_Y).to_array(), + x.transform_direction(&Vec3::UNIT_Y).to_array(), + ); + + // Roll only + let e = Mat4::rotation_from_euler(0.0, 0.0, FRAC_PI_2); + let z = Mat4::rotation_z(FRAC_PI_2); + approx_eq3( + e.transform_direction(&Vec3::UNIT_X).to_array(), + z.transform_direction(&Vec3::UNIT_X).to_array(), + ); +} diff --git a/crates/rmg-core/tests/math_validation.rs b/crates/rmg-core/tests/math_validation.rs index a762d83..4aed8fe 100644 --- a/crates/rmg-core/tests/math_validation.rs +++ b/crates/rmg-core/tests/math_validation.rs @@ -11,9 +11,17 @@ use rmg_core::math::{self, Mat4, Prng, Quat, Vec3}; const FIXTURE_PATH: &str = "crates/rmg-core/tests/fixtures/math-fixtures.json"; static RAW_FIXTURES: &str = include_str!("fixtures/math-fixtures.json"); +#[allow(clippy::expect_fun_call)] static FIXTURES: Lazy = Lazy::new(|| { - let fixtures: MathFixtures = serde_json::from_str(RAW_FIXTURES) - .unwrap_or_else(|err| panic!("failed to parse math fixtures at {FIXTURE_PATH}: {err}")); + let fixtures: MathFixtures = { + #[allow(clippy::expect_fun_call)] + { + serde_json::from_str(RAW_FIXTURES).expect(&format!( + "failed to parse math fixtures at {}", + FIXTURE_PATH + )) + } + }; fixtures.validate(); fixtures }); @@ -32,9 +40,11 @@ struct MathFixtures { impl MathFixtures { fn validate(&self) { fn ensure(name: 
&str, slice: &[T]) { - if slice.is_empty() { - panic!("math fixtures set '{name}' must not be empty"); - } + assert!( + !slice.is_empty(), + "math fixtures set '{name}' must not be empty (len={})", + slice.len() + ); } ensure("scalars.clamp", &self.scalars.clamp); @@ -298,6 +308,13 @@ fn scalar_fixtures_all_match() { } } +#[test] +fn clamp_propagates_nan() { + let nan = f32::NAN; + let clamped = math::clamp(nan, -1.0, 1.0); + assert!(clamped.is_nan(), "clamp should propagate NaN"); +} + #[test] fn vec3_fixtures_cover_operations() { let tol = &FIXTURES.tolerance; diff --git a/crates/rmg-core/tests/permutation_commute_tests.rs b/crates/rmg-core/tests/permutation_commute_tests.rs new file mode 100644 index 0000000..b5d4b98 --- /dev/null +++ b/crates/rmg-core/tests/permutation_commute_tests.rs @@ -0,0 +1,72 @@ +#![allow(missing_docs)] +use rmg_core::{encode_motion_payload, make_node_id, make_type_id, GraphStore, NodeRecord}; +mod common; +use common::snapshot_hash_of; + +#[test] +fn n_permutation_commute_n3_and_n4() { + for &n in &[3usize, 4usize] { + // Build initial graph: root + n entities with unique velocities. + let root = make_node_id("world-root-perm"); + let world_ty = make_type_id("world"); + let ent_ty = make_type_id("entity"); + let mut store = GraphStore::default(); + store.insert_node( + root, + NodeRecord { + ty: world_ty, + payload: None, + }, + ); + let mut scopes = Vec::new(); + for i in 0..n { + let id = make_node_id(&format!("entity-{i}")); + let v = match i { + 0 => [1.0, 0.0, 0.0], + 1 => [0.0, 1.0, 0.0], + 2 => [0.0, 0.0, 1.0], + 3 => [1.0, 1.0, 0.0], + _ => unreachable!(), + }; + store.insert_node( + id, + NodeRecord { + ty: ent_ty, + payload: Some(encode_motion_payload([0.0, 0.0, 0.0], v)), + }, + ); + // Connect entity to root so snapshot reachability includes it. 
+ let edge = rmg_core::EdgeRecord { + id: rmg_core::make_edge_id(&format!("root-to-entity-{i}")), + from: root, + to: id, + ty: make_type_id("contains"), + payload: None, + }; + store.insert_edge(root, edge); + scopes.push(id); + } + let rule = rmg_core::motion_rule(); + + // Enumerate a few permutations deterministically (not all for n=4 to keep runtime low). + let perms: Vec> = match n { + 3 => vec![vec![0, 1, 2], vec![2, 1, 0], vec![1, 2, 0], vec![0, 2, 1]], + 4 => vec![vec![0, 1, 2, 3], vec![3, 2, 1, 0], vec![1, 3, 0, 2]], + _ => unreachable!(), + }; + + let mut baseline: Option<[u8; 32]> = None; + for p in perms { + let mut s = store.clone(); + for &idx in &p { + (rule.executor)(&mut s, &scopes[idx]); + } + let h = snapshot_hash_of(s, root); + if let Some(b) = baseline { + assert_eq!(b, h, "commutation failed for n={n} perm={p:?}"); + } else { + baseline = Some(h); + } + } + } +} diff --git a/crates/rmg-core/tests/property_commute_tests.rs b/crates/rmg-core/tests/property_commute_tests.rs new file mode 100644 index 0000000..b5b957c --- /dev/null +++ b/crates/rmg-core/tests/property_commute_tests.rs @@ -0,0 +1,75 @@ +#![allow(missing_docs)] +use rmg_core::{encode_motion_payload, make_node_id, make_type_id, GraphStore, NodeRecord}; +mod common; +use common::snapshot_hash_of; + +#[test] +fn independent_motion_rewrites_commute_on_distinct_nodes() { + // Build initial store with root and two entities that each have motion payloads. 
+ let root = make_node_id("world-root-commute"); + let world_ty = make_type_id("world"); + let ent_ty = make_type_id("entity"); + let a = make_node_id("entity-a"); + let b = make_node_id("entity-b"); + + let mut store1 = GraphStore::default(); + store1.insert_node( + root, + NodeRecord { + ty: world_ty, + payload: None, + }, + ); + store1.insert_node( + a, + NodeRecord { + ty: ent_ty, + payload: Some(encode_motion_payload([0.0, 0.0, 0.0], [1.0, 0.0, 0.0])), + }, + ); + store1.insert_node( + b, + NodeRecord { + ty: ent_ty, + payload: Some(encode_motion_payload([0.0, 0.0, 0.0], [0.0, 1.0, 0.0])), + }, + ); + // Make entities reachable from root via edges so snapshots include them. + let edge_ty = make_type_id("edge"); + use rmg_core::{make_edge_id, EdgeRecord}; + store1.insert_edge( + root, + EdgeRecord { + id: make_edge_id("root->a"), + from: root, + to: a, + ty: edge_ty, + payload: None, + }, + ); + store1.insert_edge( + root, + EdgeRecord { + id: make_edge_id("root->b"), + from: root, + to: b, + ty: edge_ty, + payload: None, + }, + ); + let mut store2 = store1.clone(); + + let rule = rmg_core::motion_rule(); + + // Order 1: apply to A then B + (rule.executor)(&mut store1, &a); + (rule.executor)(&mut store1, &b); + let h1 = snapshot_hash_of(store1, root); + + // Order 2: apply to B then A + (rule.executor)(&mut store2, &b); + (rule.executor)(&mut store2, &a); + let h2 = snapshot_hash_of(store2, root); + + assert_eq!(h1, h2, "independent rewrites must commute"); +} diff --git a/crates/rmg-core/tests/reserve_gate_tests.rs b/crates/rmg-core/tests/reserve_gate_tests.rs new file mode 100644 index 0000000..6127a77 --- /dev/null +++ b/crates/rmg-core/tests/reserve_gate_tests.rs @@ -0,0 +1,37 @@ +#![allow(missing_docs)] +use rmg_core::{decode_motion_payload, make_node_id, make_type_id, NodeRecord}; + +#[test] +fn reserve_gate_aborts_second_on_port_conflict() { + // Engine with a single entity; register the port rule; apply it twice on same scope in one tx. 
+ let mut engine = rmg_core::demo::ports::build_port_demo_engine(); + + // Create an entity node under root that we’ll target. + let entity = make_node_id("reserve-entity"); + let entity_ty = make_type_id("entity"); + engine.insert_node( + entity, + NodeRecord { + ty: entity_ty, + payload: None, + }, + ); + + let tx = engine.begin(); + let _ = engine.apply(tx, rmg_core::demo::ports::PORT_RULE_NAME, &entity); + let _ = engine.apply(tx, rmg_core::demo::ports::PORT_RULE_NAME, &entity); + let _snap = engine.commit(tx).expect("commit"); + + // Exactly one executor should have run: pos.x == 1.0 + let (pos, _vel) = engine + .node(&entity) + .unwrap() + .payload + .as_ref() + .and_then(decode_motion_payload) + .expect("payload present"); + assert!( + (pos[0] - 1.0).abs() < 1e-6, + "expected exactly one reservation to succeed" + ); +} diff --git a/crates/rmg-core/tests/rule_id_domain_tests.rs b/crates/rmg-core/tests/rule_id_domain_tests.rs new file mode 100644 index 0000000..cfa76cf --- /dev/null +++ b/crates/rmg-core/tests/rule_id_domain_tests.rs @@ -0,0 +1,10 @@ +#![allow(missing_docs)] + +#[test] +fn motion_rule_family_id_uses_domain_separation() { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:motion/update"); + let expected: [u8; 32] = hasher.finalize().into(); + // Access const exposed via the motion demo module. 
+ assert_eq!(expected, rmg_core::demo::motion::MOTION_UPDATE_FAMILY_ID); +} diff --git a/crates/rmg-core/tests/snapshot_reachability_tests.rs b/crates/rmg-core/tests/snapshot_reachability_tests.rs new file mode 100644 index 0000000..d51e499 --- /dev/null +++ b/crates/rmg-core/tests/snapshot_reachability_tests.rs @@ -0,0 +1,91 @@ +#![allow(missing_docs)] +use rmg_core::{ + encode_motion_payload, make_edge_id, make_node_id, make_type_id, GraphStore, NodeRecord, +}; + +fn snapshot_hash(store: GraphStore, root: rmg_core::NodeId) -> [u8; 32] { + let engine = rmg_core::Engine::new(store, root); + engine.snapshot().hash +} + +#[test] +fn unreachable_nodes_do_not_affect_hash() { + // Root world + let root = make_node_id("root"); + let world_ty = make_type_id("world"); + + let mut store_a = GraphStore::default(); + store_a.insert_node( + root, + NodeRecord { + ty: world_ty, + payload: None, + }, + ); + + let hash_a = snapshot_hash(store_a, root); + + // Add an unreachable entity elsewhere; hash should remain identical. 
+ let mut store_b = GraphStore::default(); + store_b.insert_node( + root, + NodeRecord { + ty: world_ty, + payload: None, + }, + ); + let unreachable = make_node_id("ghost-entity"); + let ent_ty = make_type_id("entity"); + store_b.insert_node( + unreachable, + NodeRecord { + ty: ent_ty, + payload: Some(encode_motion_payload([0.0, 0.0, 0.0], [0.0, 0.0, 0.0])), + }, + ); + + let hash_b = snapshot_hash(store_b, root); + assert_eq!(hash_a, hash_b); +} + +#[test] +fn reachable_edges_affect_hash() { + let root = make_node_id("root2"); + let world_ty = make_type_id("world"); + let mut store = GraphStore::default(); + store.insert_node( + root, + NodeRecord { + ty: world_ty, + payload: None, + }, + ); + + // Initially only root is reachable; hash0 + let hash0 = snapshot_hash(store.clone(), root); + + // Add a reachable child entity and a typed edge from root -> child + let child = make_node_id("child"); + let ent_ty = make_type_id("entity"); + let edge_ty = make_type_id("has"); + store.insert_node( + child, + NodeRecord { + ty: ent_ty, + payload: None, + }, + ); + store.insert_edge( + root, + rmg_core::EdgeRecord { + id: make_edge_id("root->child"), + from: root, + to: child, + ty: edge_ty, + payload: None, + }, + ); + + let hash1 = snapshot_hash(store, root); + assert_ne!(hash0, hash1); +} diff --git a/crates/rmg-core/tests/tx_lifecycle_tests.rs b/crates/rmg-core/tests/tx_lifecycle_tests.rs new file mode 100644 index 0000000..2478810 --- /dev/null +++ b/crates/rmg-core/tests/tx_lifecycle_tests.rs @@ -0,0 +1,38 @@ +#![allow(missing_docs)] +use rmg_core::{ + encode_motion_payload, make_node_id, make_type_id, EngineError, GraphStore, NodeRecord, + MOTION_RULE_NAME, +}; + +#[test] +fn tx_invalid_after_commit() { + let entity = make_node_id("tx-lifecycle-entity"); + let entity_type = make_type_id("entity"); + let payload = encode_motion_payload([0.0, 0.0, 0.0], [0.0, 0.0, 0.0]); + + let mut store = GraphStore::default(); + store.insert_node( + entity, + NodeRecord { + ty: 
entity_type, + payload: Some(payload), + }, + ); + + let mut engine = rmg_core::Engine::new(store, entity); + engine + .register_rule(rmg_core::motion_rule()) + .expect("duplicate rule name"); + + let tx = engine.begin(); + // Valid apply then commit + engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap(); + engine.commit(tx).unwrap(); + + // Reusing the same tx should be rejected + let err = engine.apply(tx, MOTION_RULE_NAME, &entity).unwrap_err(); + match err { + EngineError::UnknownTx => {} + other => panic!("unexpected error: {other:?}"), + } +} diff --git a/crates/rmg-core/tests/vec3_ops_tests.rs b/crates/rmg-core/tests/vec3_ops_tests.rs new file mode 100644 index 0000000..ecba7bd --- /dev/null +++ b/crates/rmg-core/tests/vec3_ops_tests.rs @@ -0,0 +1,26 @@ +#![allow(missing_docs)] +use rmg_core::math::Vec3; + +#[test] +fn add_sub_mul_ops_work() { + let a = Vec3::new(1.0, -2.0, 0.5); + let b = Vec3::new(-3.0, 4.0, 1.5); + assert_eq!((a + b).to_array(), [-2.0, 2.0, 2.0]); + assert_eq!((a - b).to_array(), [4.0, -6.0, -1.0]); + assert_eq!((a * 2.0).to_array(), [2.0, -4.0, 1.0]); + assert_eq!((2.0 * a).to_array(), [2.0, -4.0, 1.0]); + // Negative scalar multiply (both orders) + assert_eq!((a * -2.0).to_array(), [-2.0, 4.0, -1.0]); + assert_eq!((-2.0 * a).to_array(), [-2.0, 4.0, -1.0]); +} + +#[test] +fn add_assign_sub_assign_mul_assign_work() { + let mut v = Vec3::new(1.0, 2.0, 3.0); + v += Vec3::new(-1.0, 1.0, 0.0); + assert_eq!(v.to_array(), [0.0, 3.0, 3.0]); + v -= Vec3::new(0.0, 1.0, 1.0); + assert_eq!(v.to_array(), [0.0, 2.0, 2.0]); + v *= 0.5; + assert_eq!(v.to_array(), [0.0, 1.0, 1.0]); +} diff --git a/crates/rmg-ffi/Cargo.toml b/crates/rmg-ffi/Cargo.toml index 096d9f4..85d001b 100644 --- a/crates/rmg-ffi/Cargo.toml +++ b/crates/rmg-ffi/Cargo.toml @@ -2,6 +2,13 @@ name = "rmg-ffi" version = "0.1.0" edition = "2021" +rust-version = "1.68" +description = "Echo FFI: C ABI for host integrations (Lua/C/etc.)" +license = "Apache-2.0" +repository = 
"https://github.com/flyingrobots/echo" +readme = "README.md" +keywords = ["echo", "ffi", "ecs", "deterministic"] +categories = ["external-ffi-bindings", "game-engines"] [lib] crate-type = ["rlib", "cdylib", "staticlib"] diff --git a/crates/rmg-ffi/README.md b/crates/rmg-ffi/README.md new file mode 100644 index 0000000..e3ffc5f --- /dev/null +++ b/crates/rmg-ffi/README.md @@ -0,0 +1,68 @@ +# rmg-ffi + +Thin C ABI bindings for the `rmg-core` deterministic graph rewriting engine. + +This crate produces a C-callable library for embedding Echo’s core in other runtimes (C/C++, Lua, etc.). It exposes a minimal, stable surface: engine creation, rule registration by name, apply/commit, and snapshot hash retrieval. + +## Platforms and Toolchain + +- Rust: 1.68 (pinned via `rust-toolchain.toml`) +- Targets: macOS (aarch64/x86_64), Linux (x86_64). Windows support is planned. + +## Building + +Build static and shared libraries: + +``` +cargo build -p rmg-ffi --release +``` + +Artifacts (platform-dependent): + +- `target/release/librmg_ffi.a` (static) +- `target/release/librmg_ffi.dylib` or `librmg_ffi.so` (shared) + +## Linking + +Example (clang): + +``` +clang -o demo demo.c -L target/release -lrmg_ffi -Wl,-rpath,@executable_path/../lib +``` + +Ensure the library search path includes `target/release` (or install path) at runtime. + +## API Overview + +Headers are generated in a follow-up task; the intended functions mirror `rmg-core`: + +- `rmg_engine_new(...) -> rmg_engine*` +- `rmg_engine_free(rmg_engine*)` +- `rmg_engine_register_rule(rmg_engine*, const char* name) -> int` (0 = ok) +- `rmg_engine_begin(rmg_engine*) -> uint64_t` +- `rmg_engine_apply(rmg_engine*, uint64_t tx, const char* rule_name, const rmg_node_id* scope) -> int` +- `rmg_engine_commit(rmg_engine*, uint64_t tx, rmg_snapshot* out) -> int` + +Snapshots expose a 32-byte BLAKE3 hash and root id. See `docs/spec-mwmr-concurrency.md` for determinism rules. 
+ +## Quick Start (Pseudo‑C) + +```c +rmg_engine* eng = rmg_engine_new(); +rmg_engine_register_rule(eng, "motion/update"); +uint64_t tx = rmg_engine_begin(eng); +rmg_node_id scope = rmg_make_node_id("entity-1"); +int applied = rmg_engine_apply(eng, tx, "motion/update", &scope); +rmg_snapshot snap; +rmg_engine_commit(eng, tx, &snap); +``` + +## Troubleshooting + +- Undefined symbols at link: verify `-L` and `-l` flags and that `cargo build --release` produced the library. +- Snapshot hashes differ across runs: confirm identical state and rule registrations; see determinism invariants in `docs/determinism-invariants.md`. + +## More Documentation + +- Root docs: see repository `README.md` for the architecture and links. +- Engine surface: `crates/rmg-core/src/lib.rs` (re‑exports) and rustdoc. diff --git a/crates/rmg-ffi/src/lib.rs b/crates/rmg-ffi/src/lib.rs index ffd1fc9..ae54e5d 100644 --- a/crates/rmg-ffi/src/lib.rs +++ b/crates/rmg-ffi/src/lib.rs @@ -48,8 +48,7 @@ pub struct rmg_snapshot { /// # Safety /// The caller assumes ownership of the returned pointer and must release it /// via [`rmg_engine_free`] to avoid leaking memory. -// Rust 2024 requires `#[unsafe(no_mangle)]` as `no_mangle` is an unsafe attribute. -#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_new() -> *mut RmgEngine { Box::into_raw(Box::new(RmgEngine { inner: build_motion_demo_engine(), @@ -61,7 +60,7 @@ pub unsafe extern "C" fn rmg_engine_new() -> *mut RmgEngine { /// # Safety /// `engine` must be a pointer previously returned by [`rmg_engine_new`] that /// has not already been freed. -#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_free(engine: *mut RmgEngine) { if engine.is_null() { return; @@ -76,7 +75,7 @@ pub unsafe extern "C" fn rmg_engine_free(engine: *mut RmgEngine) { /// # Safety /// `engine`, `label`, and `out_handle` must be valid pointers. `label` must /// reference a null-terminated UTF-8 string. 
-#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_spawn_motion_entity( engine: *mut RmgEngine, label: *const c_char, @@ -120,21 +119,21 @@ pub unsafe extern "C" fn rmg_engine_spawn_motion_entity( /// /// # Safety /// `engine` must be a valid pointer created by [`rmg_engine_new`]. -#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_begin(engine: *mut RmgEngine) -> rmg_tx_id { if engine.is_null() { return rmg_tx_id { value: 0 }; } let engine = unsafe { &mut *engine }; let tx = engine.inner.begin(); - rmg_tx_id { value: tx.0 } + rmg_tx_id { value: tx.value() } } /// Applies the motion rewrite to the provided entity within transaction `tx`. /// /// # Safety /// All pointers must be valid. `tx` must reference an active transaction. -#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_apply_motion( engine: *mut RmgEngine, tx: rmg_tx_id, @@ -153,7 +152,7 @@ pub unsafe extern "C" fn rmg_engine_apply_motion( }; match engine .inner - .apply(TxId(tx.value), MOTION_RULE_NAME, &node_id) + .apply(TxId::from_raw(tx.value), MOTION_RULE_NAME, &node_id) { Ok(ApplyResult::Applied) => true, Ok(ApplyResult::NoMatch) => false, @@ -165,7 +164,7 @@ pub unsafe extern "C" fn rmg_engine_apply_motion( /// /// # Safety /// Pointers must be valid; `tx` must correspond to a live transaction. -#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_commit( engine: *mut RmgEngine, tx: rmg_tx_id, @@ -175,7 +174,7 @@ pub unsafe extern "C" fn rmg_engine_commit( return false; } let engine = unsafe { &mut *engine }; - match engine.inner.commit(TxId(tx.value)) { + match engine.inner.commit(TxId::from_raw(tx.value)) { Ok(snapshot) => { unsafe { (*out_snapshot).hash = snapshot.hash; @@ -190,7 +189,7 @@ pub unsafe extern "C" fn rmg_engine_commit( /// /// # Safety /// Pointers must be valid; output buffers must have length at least three. 
-#[unsafe(no_mangle)] +#[no_mangle] pub unsafe extern "C" fn rmg_engine_read_motion( engine: *mut RmgEngine, node_handle: *const rmg_node_id, diff --git a/crates/rmg-wasm/Cargo.toml b/crates/rmg-wasm/Cargo.toml index 81a12d3..4e298c0 100644 --- a/crates/rmg-wasm/Cargo.toml +++ b/crates/rmg-wasm/Cargo.toml @@ -2,6 +2,13 @@ name = "rmg-wasm" version = "0.1.0" edition = "2021" +rust-version = "1.68" +description = "Echo WASM: wasm-bindgen bindings for tools and web" +license = "Apache-2.0" +repository = "https://github.com/flyingrobots/echo" +readme = "README.md" +keywords = ["echo", "wasm", "ecs", "deterministic"] +categories = ["wasm", "game-engines"] [lib] crate-type = ["cdylib"] diff --git a/crates/rmg-wasm/README.md b/crates/rmg-wasm/README.md new file mode 100644 index 0000000..be9ed16 --- /dev/null +++ b/crates/rmg-wasm/README.md @@ -0,0 +1,6 @@ +# rmg-wasm + +wasm-bindgen bindings for the `rmg-core` engine for tooling and web environments. + +See the repository root `README.md` for the full overview. + diff --git a/crates/rmg-wasm/src/lib.rs b/crates/rmg-wasm/src/lib.rs index f1d2acc..21f7f33 100644 --- a/crates/rmg-wasm/src/lib.rs +++ b/crates/rmg-wasm/src/lib.rs @@ -166,7 +166,7 @@ impl WasmEngine { #[wasm_bindgen] /// Begins a new transaction and returns its identifier. 
pub fn begin(&self) -> u64 { - self.inner.borrow_mut().begin().0 + self.inner.borrow_mut().begin().value() } #[wasm_bindgen] @@ -184,7 +184,7 @@ impl WasmEngine { None => return false, }; let mut engine = self.inner.borrow_mut(); - match engine.apply(TxId(tx_id), MOTION_RULE_NAME, &node_id) { + match engine.apply(TxId::from_raw(tx_id), MOTION_RULE_NAME, &node_id) { Ok(ApplyResult::Applied) => true, Ok(ApplyResult::NoMatch) => false, Err(_) => false, @@ -198,7 +198,7 @@ impl WasmEngine { return None; } let mut engine = self.inner.borrow_mut(); - let snapshot = engine.commit(TxId(tx_id)).ok()?; + let snapshot = engine.commit(TxId::from_raw(tx_id)).ok()?; Some(snapshot.hash.to_vec()) } diff --git a/docs/decision-log.md b/docs/decision-log.md index b74a1b6..ee34cd7 100644 --- a/docs/decision-log.md +++ b/docs/decision-log.md @@ -14,3 +14,6 @@ | 2025-10-26 | EPI bundle | Adopt entropy, plugin, inspector, runtime config specs (Phase 0.75) | Close causality & extensibility gap | Phase 1 implementation backlog defined | | 2025-10-26 | RMG + Confluence | Adopt RMG v2 (typed DPOi engine) and Confluence synchronization as core architecture | Unify runtime/persistence/tooling on deterministic rewrites | Launch Rust workspace (rmg-core/ffi/wasm/cli), port ECS rules, set up Confluence networking | | 2025-10-27 | Core math split | Split `rmg-core` math into focused submodules (`vec3`, `mat4`, `quat`, `prng`) replacing monolithic `math.rs`. | Improves readability, testability, and aligns with strict linting. | Update imports; no behavior changes intended; follow-up determinism docs in snapshot hashing. | +| 2025-10-27 | PR #7 prep | Extracted math + engine spike into `rmg-core` (split-core-math-engine); added inline rustdoc on canonical snapshot hashing (node/edge order, payload encoding). | Land the isolated, reviewable portion now; keep larger geometry/broad‑phase work split for follow-ups. | After docs update, run fmt/clippy/tests; merge is a fast‑forward over `origin/main`. 
| +| 2025-10-28 | PR #7 finalize | Reachability-only snapshot hashing; ports demo registers rule; guarded ports footprint; scheduler `finalize_tx()` clears `pending`; `PortKey` u30 mask; hooks+CI hardened (toolchain pin, rustdoc fixes). | Determinism + memory hygiene; remove test footguns; pass CI with stable toolchain while keeping rmg-core MSRV=1.68. | Merge PR #7 after green CI; queue MWMR Phase 2 perf tests + retry policy work. | +| 2025-10-27 | MWMR reserve gate | Engine calls `scheduler.finalize_tx()` at commit; compact rule id used on execute path; per‑tx telemetry summary behind feature. | Enforce independence and clear active frontier deterministically; keep ordering stable with `(scope_hash, family_id)`. | Toolchain pinned to Rust 1.68; add design note for telemetry graph snapshot replay. | diff --git a/docs/execution-plan.md b/docs/execution-plan.md index cfe0d6e..3795d60 100644 --- a/docs/execution-plan.md +++ b/docs/execution-plan.md @@ -33,11 +33,33 @@ This is Codex’s working map for building Echo. Update it relentlessly—each s ## Today’s Intent +> 2025-10-28 — PR #7 finalize and land + +- Focus: close out review nits on engine/scheduler/footprint/demo; ensure CI/hook stability; keep scope tight. +- Done: + - Snapshot hashes: reachable-only BFS; sorted edges by `EdgeId`. + - Scheduler: `finalize_tx()` clears `active` and `pending` to avoid leaks; reserve gate wired. + - Demo parity: `build_port_demo_engine()` registers its rule; ports footprint guards writes/ports on existing nodes. + - Footprint: `PortKey` bit layout documented (bits 31..2: port_id u30; bit 1: reserved; bit 0: dir) + u30 masking with debug-assert; factor_mask invariant documented. + - Hooks/CI: pinned pre-push toolchain, robust banned-pattern scan, adjusted docs-guard to core API; fixed rustdoc links. + - MSRV: rmg-core stays at 1.68; workspace uses stable for wasm dependencies. 
+ - Engine/tests: enforce `join_fn` invariant for `ConflictPolicy::Join`; remove `expect` panic in `apply()` corruption path; add NaN-propagation test for `clamp`; do not push yet (waiting for more feedback). + > 2025-10-27 — Core math modularization (PR #5) - **Focus**: Split `rmg-core` math into focused submodules (`vec3`, `mat4`, `quat`, `prng`). - **Definition of done**: CI passes; decision log updated; no behavior changes (pure refactor). +> 2025-10-27 — PR #7 (echo/split-core-math-engine) merge prep + +- **Focus**: Land the extracted math + engine spike; add doc guard updates and preflight fmt/clippy/tests. +- **Definition of done**: `docs/decision-log.md` + `docs/execution-plan.md` updated; `cargo fmt --check`, `cargo clippy -D warnings -D missing_docs`, and `cargo test` pass; branch is fast‑forward mergeable into `main`. + +> 2025-10-27 — MWMR reserve gate + telemetry wiring + +- **Focus**: Enforce `reserve()` gate (independence), add compact rule id execution path, and emit per‑tx telemetry summary; pin toolchain. +- **Definition of done**: Scheduler `finalize_tx()` called by `Engine::commit`, compact‑id → rule lookup used on execute path, `rust-toolchain.toml` added and `rust-version = 1.68` set in crates; tests remain green. + --- ## Immediate Backlog diff --git a/docs/roadmap-mwmr-mini-epic.md b/docs/roadmap-mwmr-mini-epic.md new file mode 100644 index 0000000..8627bf6 --- /dev/null +++ b/docs/roadmap-mwmr-mini-epic.md @@ -0,0 +1,90 @@ +# MWMR Concurrency Mini‑Epic Roadmap (Footprints, Reserve Gate, Telemetry) + +Status: Active • Owner: rmg-core • Created: 2025-10-27 + + +## Outcomes + +- Enforce MWMR determinism via independence checks (footprints + ports + factor masks). +- Keep the hot path zero‑overhead (compact u32 rule ids; domain‑separated family ids only at boundaries). +- Prove commutation with property tests (N‑permutation) and add basic telemetry for conflict rates. 
+ +--- + + +## Phase 0.5 — Foundations (Done / In‑Progress) + +- [x] Footprint type with ports and factor mask (IdSet/PortSet; deterministic intersects) +- [x] RewriteRule surface extended with `compute_footprint`, `factor_mask`, `ConflictPolicy` +- [x] PendingRewrite carries `footprint` + `phase` +- [x] Property test: 2 independent motion rewrites commute (equal snapshot hash) +- [x] Spec doc: `docs/spec-mwmr-concurrency.md` + +--- + + +## Phase 1 — Reservation Gate & Compact IDs + +- [x] CompactRuleId(u32) and rule table mapping family_id → compact id (in Engine) +- [x] DeterministicScheduler::reserve(tx, &mut PendingRewrite) → bool (active frontier per tx) +- [x] Engine commit() wires the reserve gate (execute only Reserved rewrites) +- [x] Feature‑gated JSONL telemetry (reserved/conflict) with timestamp, tx_id, short rule id +- [ ] Use CompactRuleId in PendingRewrite and internal execution paths (leave family id for ordering/disk/wire) + +--- + + +## Phase 2 — Proof & Performance + +- [ ] Property test: N‑permutation commutation (N = 3..6 independent rewrites) +- [ ] Reserve gate smoke tests (same PortKey ⇒ conflict; disjoint ports ⇒ reserve) +- [ ] Criterion bench: independence checks (10/100/1k rewrites) — target < 1 ms @ 100 +- [ ] Telemetry counters per tick (conflict_rate, retry_count, reservation_latency_ms, epoch_flip_ms) +- [ ] Add Retry with randomized backoff (behind flag) once telemetry lands; keep default Abort + +--- + + +## Phase 3 — Rule Identity & Hot‑Load + +- [x] build.rs generates const family id for `rule:motion/update` (domain‑separated) +- [ ] Generalize generator (src/gen/rule_ids.rs) and runtime assert test to catch drift +- [ ] Lua FFI registration: `register_rule{name, match, exec, ?id, ?revision}`; engine computes if omitted +- [ ] Revision ID = blake3("rule-rev::canon-ast-v1" || canonical AST bytes) + +--- + + +## Phase 4 — Storage & Epochs (Scoping/Design) + +- [ ] Offset‑graph arena + mmap view (zero‑copy snapshots) +- [ ] 
Double‑buffered planes (attachments/skeleton), lazy epoch flips, grace‑period reclamation +- [ ] Optional Merkle overlays for partial verification + +--- + + +## Guardrails & Invariants + +- Deterministic planning key = (scope_hash, family_id); execution may be parallel, ordering stays stable. +- Footprint independence order: factor_mask → ports → edges → nodes; fail fast on ports. +- Keep |L| ≤ 5–10; split rules or seed from rare types if larger. +- Never serialize CompactRuleId; boundary formats carry family id + (optional) revision id. + +--- + + +## Telemetry (dev feature) + +- Events: `reserved`, `conflict` (ts_micros, tx_id, rule_id_short) +- Counters per tick: conflict_rate, retry_count, reservation_latency_ms, epoch_flip_ms, bitmap_blocks_checked + +--- + + +## Links + +- Spec: `docs/spec-mwmr-concurrency.md` +- Tests: `crates/rmg-core/tests/footprint_independence_tests.rs`, `crates/rmg-core/tests/property_commute_tests.rs` +- Engine: `crates/rmg-core/src/engine_impl.rs`, `crates/rmg-core/src/scheduler.rs` +- Build: `crates/rmg-core/build.rs` diff --git a/docs/spec-mwmr-concurrency.md b/docs/spec-mwmr-concurrency.md new file mode 100644 index 0000000..596c736 --- /dev/null +++ b/docs/spec-mwmr-concurrency.md @@ -0,0 +1,116 @@ +# RMG MWMR Concurrency Spec (Footprints, Ports, Factor Masks) + +Status: Draft • Date: 2025-10-27 • Owner: rmg-core + +## Why + +We want lock-free multi-writer/multi-reader (MWMR) deterministic rewriting. Under DPOI semantics, if matches are pairwise independent and the no-delete-under-descent invariant holds, a batch’s result is unique up to typed open-graph isomorphism independent of order. This doc fixes the runtime model, data structures, and perf plan. 
+ +## Runtime Model + +State ⟨G, epoch_att, epoch_skel, P⟩ +- G: working graph (skeleton + attachments) +- epoch_att / epoch_skel: monotonically increasing u64 counters (attachments, skeleton) +- P: pending rewrites ⟨rule, match, footprint, stamp, phase⟩ + +Phases +- MATCH: compute monic match m: L ↪ G; gluing tests; compute footprint F; enqueue Matched +- RESERVE (lock-free OCC): allowed iff independent(F, Y.F) for all Y with phase∈{Reserved,Committed}; then phase := Reserved +- COMMIT (bounded CAS): + - (a) skeleton edits (N/E) with release-stores + - (b) port occupancy (B) with release-stores + - publish journals; if any P_write ⇒ epoch_att++; if any N/E_write ⇒ epoch_skel++ +- ABORT/RETRY/JOIN on independence failure or validation error + +Reader isolation +- Readers acquire both epochs at entry and never see torn state; flips happen only after publication. Reclamation after a grace period. + +## Footprints & Independence + +Footprint F = (N_read, N_write, E_read, E_write, B_in, B_out; factor_mask) +- N_*: node bitmaps; E_*: edge bitmaps +- B_in/B_out: boundary port occupancy bitmaps; port key = `(node_id << 32) | (port_id << 2) | dir_bits` +- factor_mask: u64 coarse partition (room/shard/system factor) + +Independence(F1,F2) iff +- (F1.N_write ∪ F1.E_write ∪ F1.B_in ∪ F1.B_out) is disjoint from all read/write sets of F2, and symmetrically; and +- (F1.factor_mask & F2.factor_mask) == 0 + +Ordering & determinism +- Physical execution is parallel; planning/logs use a stable key `(scope_hash, rule_id, stamp)`; results are order-independent by Theorem A. + +## Scheduler & Batching + +- Build maximal independent sets (MIS) from Matched. +- Reserve MIS entries; commit them in parallel. +- Conflicts ⇒ RETRY or JOIN (precomputed join) per rule policy. +- Priorities: physics > gameplay > cosmetic (configurable); fairness via randomized backoff. 
+ +## Data Structures + +- Bitmaps: block-sparse (Roaring-style) with SIMD kernels for AND-isZero/OR (AVX2/NEON); scalar fallback. +- Ports: two bitmaps B_in/B_out keyed by packed port id; hot path for interface conflicts. +- Factor masks: O(1) precheck before bitmaps. +- Compact ids: internal `CompactRuleId(u32)`; wire/disk keeps canonical `Hash256`. +- Node/Edge indices: `NodeIx/EdgeIx`; hash ids for global identity. + +## Two-Plane Publish + +- Enforce no-delete-under-descent: attachment positions touched cannot be deleted by concurrent skeleton rewrites. +- Publish attachments, then skeleton; epochs per plane; pointer swaps/double-buffered sections; readers pinned by epoch. +- Lazy flips: new readers bind to new epochs immediately; old readers finish on old epochs; reclamation after grace period. + +## Zero-Copy Storage Alignment + +- Snapshot = page-aligned slabs: headers, NodeEntry[], EdgeEntry[], payload arena. +- Load via mmap; base+offset arithmetic; zero decode. +- Snapshot hash = BLAKE3 over canonical slabs; optional Merkle overlays for partial verify. + +## Rule Identity & Hot-Load + +- Family ID (stable): `blake3("rule-family:v1" || fully_qualified_name)` — compile-time const in Rust; computed once on load in Lua. +- Revision ID (dynamic): `blake3("rule-rev::canon-ast-v1" || canonical AST graph bytes)` — flips on semantic changes; used for hot‑reload/peer compatibility; not in scheduling keys. 
+ +## Performance Targets + +Baseline demo (Phase 1): +- 1k nodes; 10 concurrent rewrites/tick @ 60 FPS +- Independence + commit ≤ 2 ms; matching ≤ 8 ms (typed, local, incremental optional) + +Stretch demo (Phase 2): +- 10k nodes; 100 concurrent rewrites/tick; SIMD bitmaps + factor masks + incremental caches + +## Telemetry (JSONL) + +- `conflict_rate`, `retry_count`, `join_success`, `reservation_latency_ms`, `commit_latency_ms` +- `epoch_flip_latency_ms`, `reader_epoch_lifetime_ms_p50/p95/p99` +- `bitmap_and_checked`, `bitmap_and_short_circuits`, `factor_mask_elided` +- `matches_found`, `matches_invalidated`, `match_time_ms` + +## Risks & Mitigations + +- Matching cost: constrain |L| ≤ 5–10; typed seeds; local neighborhoods; incremental rematch near diffs; only add incremental when matching > 50% frame time. +- Conflict storms: finer factor masks (per-room/per-type/per-port); join catalog; priority scheduling. +- Epoch stalls: double-buffer planes; lazy flips; grace period reclamation. +- Port bottleneck: versioned ports; batch reservations; separate factor masks for input/output/internal ports. 
+ +## Roadmap & Deliverables + +Phase 0 (Tick determinism) +- Footprint + independence (ports/nodes/edges/factor) +- MIS batch planner; permutation test for isomorphic results +- Two-plane commutation harness under no-delete-under-descent + +Phase 1 (Baseline performance) +- SIMD bitmaps; factor masks; CompactRuleId(u32); basic telemetry +- Bench 1k×10 @ 60 FPS; independence+commit ≤ 2 ms + +Phase 2 (Optimization) +- Spatial indexing/sharding; incremental matching; join catalog; Merkle overlays +- Bench 10k×100; independence ≤ 2 ms; matching ≤ 8 ms + +Phase 3 (Real demo) +- Multiplayer confluence demo (zero desync), time‑travel fork/merge, inspector visualization of footprints/conflicts + +References: confluence skeleton v5, RMG math confluence, offset-graph arena notes + diff --git a/docs/telemetry-graph-replay.md b/docs/telemetry-graph-replay.md new file mode 100644 index 0000000..98d1d84 --- /dev/null +++ b/docs/telemetry-graph-replay.md @@ -0,0 +1,64 @@ +# Telemetry: Graph Snapshot for Repro/Replay (Design Note) + +Status: Draft • Scope: rmg-core (dev-only feature) + +## Problem + +When a conflict or unexpected outcome occurs during a transaction, logs with counts are helpful but insufficient for reproduction. We want the option to capture a minimal, deterministic snapshot of the reachable subgraph from `root` at key points (e.g., pre-commit or on conflict) so we can replay locally and bisect. + +## Approach + +- Add a feature-gated telemetry event `graph_snapshot` that emits the canonical, stable serialization of the reachable subgraph. +- Trigger points (feature-controlled): + - On first conflict within a tx (sampled or rate-limited) + - On commit (debug builds only) +- Consumers can store the JSONL stream and later reconstruct the exact state to reproduce behavior. + +## Constraints + +- Deterministic ordering and bytes: leverage the existing snapshot hash traversal and encoding rules. Do NOT invent a second ordering. 
+- Size control: + - Emit only the reachable subgraph from `root`. + - Optionally redact payloads or cap payload size via a `telemetry_max_payload_bytes` knob. + - Allow sampling (e.g., `N` per minute) to keep overhead bounded. +- Security: feature must be off by default; never ship in production. Payloads may contain domain data. + +## Event Shape (JSONL) + +``` +{ + "timestamp_micros": 1234567890, + "tx_id": 42, + "event": "graph_snapshot", + "root": "", + "snapshot_hash": "", + "nodes": [ + { "id": "", "ty": "", "payload": "" } + ], + "edges": [ + { "id": "", "from": "", "to": "", "ty": "", "payload": "" } + ] +} +``` + +- Ordering: nodes ascending by `NodeId`, edges grouped by `from` with each group ascending by `EdgeId`. +- Payload encoding: identical to runtime wire format (length-prefixed little-endian), then base64 for JSON safety. + +## API Sketch + +- `telemetry::graph_snapshot(tx, &GraphStore, &root, redact_payloads: bool)` +- Compiles behind `feature = "telemetry"` only. +- Reuses internal snapshot traversal to ensure identical reachability set and order. + +## Replay + +- CLI helper (`rmg-cli`) to read JSONL and reconstruct an in-memory `GraphStore` for any `graph_snapshot` event. +- Verify by recomputing the `snapshot_hash` and comparing with the logged value. + +## Next Steps + +- [ ] Add serialization helper that walks the same reachable set as `compute_snapshot_hash`. +- [ ] Feature-gate emitting on conflict (first per tx) and on commit (debug only). +- [ ] CLI command: `rmg-cli replay --from telemetry.jsonl --tx 42`. +- [ ] Document redaction policy and sampling knobs. + diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000..b2cf8c5 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "1.68.0" +components = ["rustfmt", "clippy"]