diff --git a/CHANGELOG.md b/CHANGELOG.md index f0ca06b0..b1927caf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Cross-package release notes for relayburn. Package changelogs contain package-le ## [Unreleased] +- `relayburn-sdk-node` (Rust): napi-rs bindings skeleton — `#[napi]` shims for every public verb in `relayburn-sdk` (`summary`, `sessionCost`, `overhead`, `overheadTrim`, `hotspots`, `search`, `exportLedger`, `exportStamps`, async `ingest`, plus `ledgerOpen`), with u64 token counts surfaced as JS `BigInt`, ISO-8601 timestamps as `String`, async verbs returning `Promise`, and a typed `BurnError` mapping for SDK failures. (#247) - `relayburn-cli` (Rust): introduce the harness substrate — `HarnessAdapter` trait, lazy compile-time `phf` registry (`lookup` / `list_harness_names`), and the shared `pending_stamp::adapter` factory codex + opencode will reuse. Adapter slots in the registry are reserved but empty pending the Wave 2 PRs (#248-d/e/f). `relayburn-sdk` re-exports `start_watch_loop`, `WatchController`, `write_pending_stamp`, `PendingStampHarness`, and friends so the CLI doesn't have to reach into private SDK modules. (#248) - `relayburn-ingest` (Rust): port the per-process gap-warning state machine (`gap` module — `record_session_gap`, `emit_gap_warning`, `count_tool_call_gaps`, `reset_ingest_gap_warnings`, `set_ingest_gap_writer`) and `reingest_missing_content` (`reingest` module). Suppression mirrors the TS surface: one warning per fresh affected session, silent on steady-state, re-fires after the affected set decays back to empty. `relayburn-ledger` adds `Ledger::list_user_turn_session_ids` to power the `reingest_missing_content` skip filter alongside `list_content_session_ids`. (#278) - `relayburn-analyze` (Rust): port the behavioral-pattern detectors (`patterns` module). `detect_patterns` runs retry-loop, failure-run, cancellation-run, compaction-loss, edit-revert, OpenCode skill-recall-dup, OpenCode skill-pruning-protection, OpenCode system-prompt-tax, and edit-heavy detectors against an ordered turn stream, with optional content-sidecar / tool-result-event / user-turn enrichment. Public surface: `detect_patterns`, `DetectPatternsOptions`; per-pattern result structs are re-exported from `findings` (`RetryLoop`, `FailureRun`, `CancellationRun`, `CompactionLoss`, `EditRevertCycle`, `SkillRecallDup`, `SkillPruningProtection`, `SystemPromptTax`, `EditHeavySession`, `SessionPatternSummary`, `PatternsResult`, `PatternEventSource`). (#275) diff --git a/Cargo.lock b/Cargo.lock index 299fe48b..f8909d57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -77,6 +77,15 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "cpufeatures" version = "0.2.17" @@ -96,6 +105,16 @@ dependencies = [ "typenum", ] +[[package]] +name = "ctor" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "digest" version = "0.10.7" @@ -256,6 +275,16 @@ version = "0.2.186" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68ab91017fe16c622486840e4c83c9a37afeff978bd239b5293d61ece587de66" +[[package]] +name = "libloading" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -285,6 +314,66 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +[[package]] +name = "napi" +version = "2.16.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55740c4ae1d8696773c78fdafd5d0e5fe9bc9f1b071c7ba493ba5c413a9184f3" +dependencies = [ + "bitflags", + "ctor", + "napi-derive", + "napi-sys", + "once_cell", + "serde", + "serde_json", + "tokio", +] + +[[package]] +name = "napi-build" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d376940fd5b723c6893cd1ee3f33abbfd86acb1cd1ec079f3ab04a2a3bc4d3b1" + +[[package]] +name = "napi-derive" +version = "2.16.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cbe2585d8ac223f7d34f13701434b9d5f4eb9c332cccce8dee57ea18ab8ab0c" +dependencies = [ + "cfg-if", + "convert_case", + "napi-derive-backend", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "napi-derive-backend" +version = "1.0.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1639aaa9eeb76e91c6ae66da8ce3e89e921cd3885e99ec85f4abacae72fc91bf" +dependencies = [ + "convert_case", + "once_cell", + "proc-macro2", + "quote", + "regex", + "semver", + "syn", +] + +[[package]] +name = "napi-sys" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "427802e8ec3a734331fec1035594a210ce1ff4dc5bc1950530920ab717964ea3" +dependencies = [ + "libloading", +] + [[package]] name = "once_cell" version = "1.21.4" @@ -457,7 +546,14 @@ dependencies = [ name = "relayburn-sdk-node" version = "0.0.0" dependencies = [ + "anyhow", + "napi", + "napi-build", + "napi-derive", "relayburn-sdk", + "serde", + "serde_json", + "tokio", ] [[package]] @@ -644,6 +740,12 @@ version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" +[[package]] +name = "unicode-segmentation" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" + [[package]] name = "unicode-xid" version = "0.2.6" diff --git a/crates/relayburn-sdk-node/Cargo.toml b/crates/relayburn-sdk-node/Cargo.toml index eeead086..1f4c89ac 100644 --- a/crates/relayburn-sdk-node/Cargo.toml +++ b/crates/relayburn-sdk-node/Cargo.toml @@ -8,8 +8,22 @@ repository.workspace = true description = "napi-rs bindings over relayburn-sdk — built in CI to produce the @relayburn/sdk npm artifacts. Not published to crates.io." publish = false +[lib] +crate-type = ["cdylib", "rlib"] + [dependencies] -# napi-rs glue lands in #247; this crate's only direct dep is the SDK. -# Not published to crates.io, so the version pin is loose by design — it -# just keeps `cargo metadata` and the workspace resolver happy. +# napi-rs glue. The Node bindings sit on top of the SDK and expose +# `#[napi]` shims for every public verb. Not published to crates.io — +# this crate is consumed exclusively by the napi-rs CI matrix in #247-b +# to produce the per-platform `.node` artifacts under +# `packages/sdk-node/`. relayburn-sdk = { path = "../relayburn-sdk", version = "0.0" } +napi = { version = "2", default-features = false, features = ["napi6", "tokio_rt", "serde-json"] } +napi-derive = "2" +serde = { workspace = true } +serde_json = { workspace = true } +anyhow = { workspace = true } +tokio = { workspace = true } + +[build-dependencies] +napi-build = "2" diff --git a/crates/relayburn-sdk-node/build.rs b/crates/relayburn-sdk-node/build.rs new file mode 100644 index 00000000..6c4782c6 --- /dev/null +++ b/crates/relayburn-sdk-node/build.rs @@ -0,0 +1,7 @@ +// napi-rs build script. Generates the symbol-export table the Node loader +// needs. See https://napi.rs/docs/cli/build for details. +extern crate napi_build; + +fn main() { + napi_build::setup(); +} diff --git a/crates/relayburn-sdk-node/src/lib.rs b/crates/relayburn-sdk-node/src/lib.rs index e805d098..d1c0b2f8 100644 --- a/crates/relayburn-sdk-node/src/lib.rs +++ b/crates/relayburn-sdk-node/src/lib.rs @@ -1 +1,1155 @@ -// TODO: port `@relayburn/sdk` napi-rs bindings — see issue #247. +//! napi-rs bindings for `relayburn-sdk`. +//! +//! This crate is built in CI by the napi-rs matrix (#247-b) to produce +//! the per-platform `.node` artifacts that ship inside +//! `@relayburn/sdk@2.0`. It is not published to crates.io. +//! +//! # Type-mapping rules +//! +//! The SDK is a Rust API; the Node bindings are a lossy presenter for it +//! the same way the CLI is. The rules below are applied uniformly so the +//! generated `.d.ts` is predictable for TS consumers: +//! +//! - **`u64` token counts → JS `BigInt`.** SDK fields like +//! `Summary::total_tokens`, every `tokens` row in `byTool` / `byModel`, +//! and the `OverheadSection::tokens` field cross the boundary as +//! `napi::bindgen_prelude::BigInt`. JS `number` (f64) cannot losslessly +//! represent the upper end of the u64 range and silently truncates above +//! 2^53; the SDK already deals in u64 internally so the boundary is the +//! right place to surface that. For verbs whose result is too recursive +//! to mirror as a `#[napi(object)]` struct (`overhead`, `overheadTrim`, +//! `hotspots`, `exportLedger`, `exportStamps`), we serialize through +//! serde_json and emit the result via the [`BigIntPromoting`] wrapper, +//! which walks the JSON tree and substitutes `BigInt` for any numeric +//! value sitting under one of the well-known u64 field names listed in +//! [`BIGINT_FIELDS`]. The lighter walker keeps the discriminated-union +//! shape of `HotspotsResult` intact (which is awkward to express as a +//! single typed napi object) and lets the export verbs surface every +//! nested u64 (`turnIndex`, `eventIndex`, `contentLength`, +//! `tokensBeforeCompact`, `byteLen`, `approxTokens`, the six `usage` +//! fields, …) without per-record type plumbing — all while honoring +//! the contract. +//! - **Timestamps → ISO-8601 `String`.** The SDK already speaks ISO +//! strings (`turn.ts`, `since` parameters); we keep that wire format +//! rather than dragging `chrono::DateTime` or `Date` through the FFI. +//! Matches the existing `packages/sdk/index.d.ts` byte-for-byte. +//! - **`async fn` SDK verbs → `Promise` on the JS side.** napi-rs's +//! `tokio_rt` feature drives this; we mark `ingest` `async fn` and the +//! sync verbs (`summary`, `sessionCost`, …) as plain `fn` returning +//! `Result`. +//! - **Errors → typed `BurnError` JS class (sync verbs only).** Domain +//! failures from the SDK (`anyhow::Error`) and argument-shape errors +//! raised at this boundary are surfaced as a `napi::Error` whose +//! `Status` slot carries one of [`SDK_ERROR_CODE`], [`IO_ERROR_CODE`], +//! or [`INVALID_ARGUMENT_ERROR_CODE`]. napi-rs writes that string into +//! the thrown JS Error's `code` property (via `napi_create_error`'s +//! `code` argument), so JS callers get +//! `try { … } catch (e) { if (e.code === 'BURN_SDK') … }`. The +//! [`BurnErrorCode`] enum is exported as a `string_enum` so TS code +//! can reference the codes by name without stringly-typed literals. +//! +//! **Async exception — [`ingest`].** napi-rs 2.x's `async fn` lowering +//! in `napi-derive` runs through `napi::bindgen_prelude::execute_tokio_future` +//! ([`napi-derive-backend`]'s `codegen/fn.rs`), which is hard-typed to +//! `Result>` — and `Status` is a *closed* enum +//! over the predefined NAPI status strings (`GenericFailure`, +//! `InvalidArg`, …). There is no public typed-error escape hatch in +//! napi 2.x: `JsDeferred::reject` only accepts `Error`, and +//! `AsyncTask::reject`'s rejection path likewise funnels through +//! `JsError::from(Error).into_value`. The only way to inject +//! a non-`Status` `code` would be to hand-roll a `JsDeferred` +//! replacement on top of raw `sys::napi_*` calls + a TSFN — see +//! `crates/relayburn-sdk-node/src/lib.rs` git history for the +//! evaluation. We deliberately don't pay that complexity in v1. +//! +//! **Concrete contract for [`ingest`]:** the returned `Promise` +//! rejects with a JS `Error` whose `.code === 'GenericFailure'` and +//! whose `.message` is the rendered `anyhow::Error` chain from the +//! SDK. JS callers branching on `e.code` should match `'GenericFailure'` +//! for ingest failures (or, more robustly, gate on `e.message` +//! substrings if discrimination is required). A future PR can tighten +//! this — likely by upgrading to napi-rs 3.x once the `string_enum` +//! and `BigInt` ergonomics there are validated against the rest of +//! the binding — at which point `e.code` becomes one of the +//! [`BurnErrorCode`] string values for ingest as well. +//! +//! # Surface +//! +//! Every public verb in `relayburn-sdk` (free-function form) is bound +//! here. The `Ledger` / `LedgerHandle` method form is omitted from the JS +//! surface for v1 — the TS sibling `@relayburn/sdk@1.x` only exposes the +//! free-function shape, and a future PR can add a `Ledger` JS class +//! without breaking compatibility. The deferred D9 PR (Wave 2) checks in a +//! `.d.ts` snapshot test against `packages/sdk/index.d.ts`. +//! +//! See `RUST_PORT_WAVE_PLAN.md` section 3 for how this fits the larger +//! port. + +#![allow(clippy::needless_pass_by_value)] + +use std::path::PathBuf; +use std::ptr; + +use napi::bindgen_prelude::{BigInt, Error as NapiError, Result as NapiResult, ToNapiValue}; +use napi::sys; +use napi_derive::napi; +use serde_json::Value as JsonValue; + +use relayburn_sdk as sdk; + +// --------------------------------------------------------------------------- +// Error mapping +// --------------------------------------------------------------------------- + +/// `code` written into the thrown JS Error for failures the SDK raises +/// (typically `anyhow::Error` chains from the query / ingest verbs). +pub const SDK_ERROR_CODE: &str = "BURN_SDK"; + +/// `code` written into the thrown JS Error for I/O failures at the napi +/// boundary itself (path conversions, etc.). +pub const IO_ERROR_CODE: &str = "BURN_IO"; + +/// `code` written into the thrown JS Error when the caller passed an +/// argument shape we can't accept (e.g. a `BigInt` outside the u64 +/// range). +pub const INVALID_ARGUMENT_ERROR_CODE: &str = "BURN_INVALID_ARGUMENT"; + +/// Tagged error code surfaced on the thrown JS Error's `code` property. +/// Exported as a TS `string_enum` so callers can branch on +/// `e.code === BurnErrorCode.Sdk` without stringly-typed literals. +/// +/// The string values match [`SDK_ERROR_CODE`] et al. — napi-rs writes +/// them into the `code` slot via `napi_create_error`'s code argument, so +/// the round-trip from constant → JS code property is byte-identical. +#[napi(string_enum)] +pub enum BurnErrorCode { + /// Catch-all for `anyhow::Error` chains the SDK raises. Refine over + /// time as the SDK's error surface grows typed variants. + #[napi(value = "BURN_SDK")] + Sdk, + /// I/O failures from the napi boundary itself (path conversions, etc.). + #[napi(value = "BURN_IO")] + Io, + /// Caller passed an invalid argument shape (e.g. `since` that isn't a + /// relative range nor an ISO timestamp). + #[napi(value = "BURN_INVALID_ARGUMENT")] + InvalidArgument, +} + +/// `Err` variant used by every verb in the binding. The status slot +/// carries one of the [`BurnErrorCode`] string values; napi-rs threads +/// that string into the thrown JS Error's `code` property. +/// +/// We intentionally keep verb signatures spelled as +/// `Result` (using the literal `Result` name and this +/// alias) rather than aliasing a full `BurnResult` — the napi-rs +/// `#[napi]` macro identifies the result wrapping by syntactic token +/// (`Result<...>`) rather than type-checked unwrap, so a wrapper alias +/// would be silently treated as a regular return type and the macro +/// would skip the `JsError::from(err).throw_into(env)` codepath. +pub type BurnError = NapiError<&'static str>; + +fn sdk_err(e: anyhow::Error) -> NapiError<&'static str> { + // Render the chain so the message is informative; the discriminant + // stays "BURN_SDK" until the SDK's typed error story exists. + NapiError::new(SDK_ERROR_CODE, format!("{e:#}")) +} + +fn invalid_arg(msg: impl Into) -> NapiError<&'static str> { + NapiError::new(INVALID_ARGUMENT_ERROR_CODE, msg.into()) +} + +// --------------------------------------------------------------------------- +// Helpers — small repeating conversions +// --------------------------------------------------------------------------- + +fn u64_to_bigint(v: u64) -> BigInt { + BigInt { + sign_bit: false, + words: vec![v], + } +} + +fn bigint_to_u64(v: BigInt) -> std::result::Result { + let (signed, value, lossless) = v.get_u64(); + if signed { + return Err(invalid_arg("expected non-negative bigint, got signed")); + } + if !lossless { + return Err(invalid_arg("bigint exceeds u64 range")); + } + Ok(value) +} + +fn maybe_path(s: Option) -> Option { + s.map(PathBuf::from) +} + +// --------------------------------------------------------------------------- +// BigIntPromoting — JsonValue → JS value walker that emits BigInt for the +// well-known u64 field names below. +// +// `overhead`, `overheadTrim`, and `hotspots` return shapes that are too +// recursive (or, in `hotspots`'s case, a discriminated union) to mirror +// cleanly as a single `#[napi(object)]` struct. We keep them on the +// `serde_json::Value` boundary but wrap the result so the standard +// number→JsNumber conversion in napi-rs's serde-json bridge gets +// overridden for the named fields. Anything not in this list rides +// through as a plain JS number, matching the existing TS contract. +// --------------------------------------------------------------------------- + +/// Field names that carry `u64` values in the SDK and therefore must be +/// surfaced as JS `BigInt`. Names are camelCased (matching `serde(rename_all +/// = "camelCase")` on the SDK structs); the walker matches these literally +/// against the JSON object's key list. +/// +/// Audit checklist when adding a new u64 field to the SDK: drop its +/// camelCase name here so the napi-rs bindings keep the BigInt contract. +const BIGINT_FIELDS: &[&str] = &[ + // overhead + overhead_trim + "tokens", + "bytes", + "totalLines", + "sessionCount", + "startLine", + "endLine", + "filesAnalyzed", + "filesWithRecommendations", + "totalRecommendations", + "tokensPerSession", + // hotspots aggregations + "callCount", + "distinctCommands", + "ridingTurns", + "firstEmitTurnIndex", + "toolCallCount", + "turnsAnalyzed", + "analyzed", + "excluded", + // export_ledger / export_stamps record bodies — every camelCased + // u64 field on TurnRecord / UserTurnRecord / ToolResultEventRecord / + // CompactionEvent / nested Usage and ToolCall payloads. These values + // already round-trip as u64 inside the SDK; without explicit + // promotion the serde-json bridge emits them as JS `number` (f64) + // and silently truncates anything above 2^53 when crossing the + // napi boundary. + "turnIndex", + "eventIndex", + "callIndex", + "contentLength", + "tokensBeforeCompact", + "byteLen", + "approxTokens", + "retries", + "collapsedCalls", + // nested `usage` shape on TurnRecord / ToolResultEventRecord — + // every field is u64, all six need promotion. + "input", + "output", + "reasoning", + "cacheRead", + "cacheCreate5m", + "cacheCreate1h", +]; + +fn is_bigint_field(name: &str) -> bool { + BIGINT_FIELDS.contains(&name) +} + +/// Wraps a `serde_json::Value` so that, when napi-rs converts it to a JS +/// value, leaf u64 numbers under the [`BIGINT_FIELDS`] keys come out as +/// `BigInt` instead of `number`. Used for the `overhead`, `overheadTrim`, +/// `hotspots`, `exportLedger`, and `exportStamps` verbs whose result +/// shapes are documented in `packages/sdk/index.d.ts`. +pub struct BigIntPromoting(JsonValue); + +impl ToNapiValue for BigIntPromoting { + unsafe fn to_napi_value(env: sys::napi_env, val: Self) -> NapiResult { + promote_value(env, val.0, /*key=*/ None) + } +} + +unsafe fn promote_value( + env: sys::napi_env, + val: JsonValue, + key: Option<&str>, +) -> NapiResult { + match val { + JsonValue::Number(n) => { + if let (Some(k), Some(u)) = (key, n.as_u64()) { + if is_bigint_field(k) { + return BigInt::to_napi_value(env, u64_to_bigint(u)); + } + } + // Fall back to napi-rs's default serde number conversion. + serde_json::Number::to_napi_value(env, n) + } + JsonValue::Object(map) => { + // Build a JS object, recursing per-value with the field name + // so `is_bigint_field` can match. + let mut obj: sys::napi_value = ptr::null_mut(); + napi::check_status!( + sys::napi_create_object(env, &mut obj), + "promote_value: napi_create_object" + )?; + for (k, v) in map.into_iter() { + let child = promote_value(env, v, Some(&k))?; + let key_buf = std::ffi::CString::new(k.as_str()).map_err(|e| { + NapiError::new( + napi::Status::GenericFailure, + format!("invalid object key (contains NUL): {e}"), + ) + })?; + napi::check_status!( + sys::napi_set_named_property(env, obj, key_buf.as_ptr(), child), + "promote_value: napi_set_named_property" + )?; + } + Ok(obj) + } + JsonValue::Array(arr) => { + // Arrays don't carry a key context for their elements — the + // outer object's key (e.g. `sections`) doesn't apply to each + // element's leaf scalars; pass `None` so per-element + // promotion is decided by the inner object's keys. + let mut js_arr: sys::napi_value = ptr::null_mut(); + napi::check_status!( + sys::napi_create_array_with_length(env, arr.len(), &mut js_arr), + "promote_value: napi_create_array_with_length" + )?; + for (i, v) in arr.into_iter().enumerate() { + let child = promote_value(env, v, /*key=*/ None)?; + napi::check_status!( + sys::napi_set_element(env, js_arr, i as u32, child), + "promote_value: napi_set_element" + )?; + } + Ok(js_arr) + } + // Booleans / strings / nulls — defer to napi-rs's standard + // serde_json::Value conversion via the leaf wrappers. + JsonValue::Bool(b) => bool::to_napi_value(env, b), + JsonValue::String(s) => String::to_napi_value(env, s), + JsonValue::Null => napi::bindgen_prelude::Null::to_napi_value(env, napi::bindgen_prelude::Null), + } +} + +// --------------------------------------------------------------------------- +// Ledger open options +// --------------------------------------------------------------------------- + +/// Where on disk a ledger should land. Mirrors +/// `relayburn_sdk::LedgerOpenOptions`. `home` defaults to `RELAYBURN_HOME` +/// (or `~/.relayburn`); `contentHome` overrides only the `content.sqlite` +/// path when it makes sense to park content on different storage. +#[napi(object)] +pub struct LedgerOpenOptions { + pub home: Option, + pub content_home: Option, +} + +fn open_options(home: Option, content_home: Option) -> sdk::LedgerOpenOptions { + sdk::LedgerOpenOptions { + home: maybe_path(home), + content_home: maybe_path(content_home), + } +} + +// --------------------------------------------------------------------------- +// summary +// --------------------------------------------------------------------------- + +#[napi(object)] +pub struct SummaryOptions { + pub session: Option, + pub project: Option, + /// ISO timestamp (e.g. `2026-04-01T00:00:00Z`) or relative range + /// (`24h`, `7d`, `4w`, `2m`). + pub since: Option, + pub ledger_home: Option, +} + +#[napi(object)] +pub struct SummaryToolRow { + pub tool: String, + pub tokens: BigInt, + pub cost: f64, + pub count: BigInt, +} + +#[napi(object)] +pub struct SummaryModelRow { + pub model: String, + pub tokens: BigInt, + pub cost: f64, +} + +#[napi(object)] +pub struct Summary { + pub total_tokens: BigInt, + pub total_cost: f64, + pub turn_count: BigInt, + pub by_tool: Vec, + pub by_model: Vec, +} + +impl From for Summary { + fn from(s: sdk::Summary) -> Self { + Summary { + total_tokens: u64_to_bigint(s.total_tokens), + total_cost: s.total_cost, + turn_count: u64_to_bigint(s.turn_count), + by_tool: s + .by_tool + .into_iter() + .map(|r| SummaryToolRow { + tool: r.tool, + tokens: u64_to_bigint(r.tokens), + cost: r.cost, + count: u64_to_bigint(r.count), + }) + .collect(), + by_model: s + .by_model + .into_iter() + .map(|r| SummaryModelRow { + model: r.model, + tokens: u64_to_bigint(r.tokens), + cost: r.cost, + }) + .collect(), + } + } +} + +#[napi] +pub fn summary(opts: Option) -> Result { + let opts = opts.unwrap_or(SummaryOptions { + session: None, + project: None, + since: None, + ledger_home: None, + }); + let raw = sdk::SummaryOptions { + session: opts.session, + project: opts.project, + since: opts.since, + ledger_home: maybe_path(opts.ledger_home), + }; + sdk::summary(raw).map(Summary::from).map_err(sdk_err) +} + +// --------------------------------------------------------------------------- +// session_cost +// --------------------------------------------------------------------------- + +#[napi(object)] +pub struct SessionCostOptions { + /// Session id to total. Omit for `{ note: 'no session id provided' }`. + pub session: Option, + pub ledger_home: Option, +} + +#[napi(object)] +pub struct SessionCostResult { + pub session_id: Option, + /// Total cost in USD, rounded to 6 decimal places. + pub total_usd: f64, + pub total_tokens: BigInt, + pub turn_count: BigInt, + pub models: Vec, + pub note: Option, +} + +impl From for SessionCostResult { + fn from(r: sdk::SessionCostResult) -> Self { + SessionCostResult { + session_id: r.session_id, + total_usd: r.total_usd, + total_tokens: u64_to_bigint(r.total_tokens), + turn_count: u64_to_bigint(r.turn_count), + models: r.models, + note: r.note, + } + } +} + +/// Compact session-scoped cost shape; powers the MCP `burn__sessionCost` tool. +#[napi(js_name = "sessionCost")] +pub fn session_cost(opts: Option) -> Result { + let opts = opts.unwrap_or(SessionCostOptions { + session: None, + ledger_home: None, + }); + let raw = sdk::SessionCostOptions { + session: opts.session, + ledger_home: maybe_path(opts.ledger_home), + }; + sdk::session_cost(raw) + .map(SessionCostResult::from) + .map_err(sdk_err) +} + +// --------------------------------------------------------------------------- +// overhead + overhead_trim — JsonValue passthrough wrapped in +// BigIntPromoting; see the file header for why we don't mirror these as +// typed `#[napi(object)]` structs. +// --------------------------------------------------------------------------- + +/// Mirror of `sdk::OverheadFileKind`. Wire values match +/// `packages/sdk/index.d.ts`'s `'claude-md' | 'agents-md'` literal union. +#[napi(string_enum = "kebab-case")] +pub enum OverheadFileKind { + ClaudeMd, + AgentsMd, +} + +impl From for sdk::OverheadFileKind { + fn from(k: OverheadFileKind) -> Self { + match k { + OverheadFileKind::ClaudeMd => sdk::OverheadFileKind::ClaudeMd, + OverheadFileKind::AgentsMd => sdk::OverheadFileKind::AgentsMd, + } + } +} + +#[napi(object)] +pub struct OverheadOptions { + /// Project path to inspect; defaults to process.cwd(). + pub project: Option, + pub since: Option, + pub kind: Option, + pub ledger_home: Option, +} + +/// Per-file + per-section overhead cost attribution. Powers `burn overhead`. +/// +/// Returns the attribution result as an `OverheadResult` (see +/// `packages/sdk/index.d.ts`). Numeric u64 fields (`tokens`, `bytes`, +/// `totalLines`, `sessionCount`, `startLine`, `endLine`) cross the +/// boundary as `BigInt`; everything else is plain JS `number` / string. +#[napi(ts_return_type = "import('./index').OverheadResult")] +pub fn overhead(opts: Option) -> Result { + let opts = opts.unwrap_or(OverheadOptions { + project: None, + since: None, + kind: None, + ledger_home: None, + }); + let raw = sdk::OverheadOptions { + project: maybe_path(opts.project), + since: opts.since, + kind: opts.kind.map(Into::into), + ledger_home: maybe_path(opts.ledger_home), + }; + let result = sdk::overhead(raw).map_err(sdk_err)?; + let value = serde_json::to_value(&result).map_err(|e| { + NapiError::new(SDK_ERROR_CODE, format!("serialize overhead: {e}")) + })?; + Ok(BigIntPromoting(value)) +} + +#[napi(object)] +pub struct OverheadTrimOptions { + pub project: Option, + pub since: Option, + pub kind: Option, + pub ledger_home: Option, + /// Recommendations per file. Default 3. + pub top: Option, + /// Include the unified-diff text per recommendation. Default true. + pub include_diff: Option, +} + +/// Trim recommendations for high-cost overhead-file sections. Powers +/// `burn overhead trim`. Returns an `OverheadTrimResult`-shaped JSON +/// object with the same `BigInt` substitutions as [`overhead`]. +#[napi(js_name = "overheadTrim", ts_return_type = "import('./index').OverheadTrimResult")] +pub fn overhead_trim(opts: Option) -> Result { + let opts = opts.unwrap_or(OverheadTrimOptions { + project: None, + since: None, + kind: None, + ledger_home: None, + top: None, + include_diff: None, + }); + let top = match opts.top { + Some(b) => Some(bigint_to_u64(b)?), + None => None, + }; + let raw = sdk::OverheadTrimOptions { + project: maybe_path(opts.project), + since: opts.since, + kind: opts.kind.map(Into::into), + ledger_home: maybe_path(opts.ledger_home), + top, + include_diff: opts.include_diff, + }; + let result = sdk::overhead_trim(raw).map_err(sdk_err)?; + let value = serde_json::to_value(&result).map_err(|e| { + NapiError::new(SDK_ERROR_CODE, format!("serialize overhead_trim: {e}")) + })?; + Ok(BigIntPromoting(value)) +} + +// --------------------------------------------------------------------------- +// hotspots — discriminated union; serialized via serde_json so the +// `kind` discriminant + per-variant rows survive the boundary. The TS +// .d.ts already documents the shape (`HotspotsResult` union). +// --------------------------------------------------------------------------- + +/// Mirror of `sdk::HotspotsGroupBy`. Wire values match +/// `packages/sdk/index.d.ts`'s +/// `'attribution' | 'bash' | 'bash-verb' | 'file' | 'subagent'` literal +/// union. +#[napi(string_enum = "kebab-case")] +pub enum HotspotsGroupBy { + Attribution, + Bash, + BashVerb, + File, + Subagent, +} + +impl From for sdk::HotspotsGroupBy { + fn from(g: HotspotsGroupBy) -> Self { + match g { + HotspotsGroupBy::Attribution => sdk::HotspotsGroupBy::Attribution, + HotspotsGroupBy::Bash => sdk::HotspotsGroupBy::Bash, + HotspotsGroupBy::BashVerb => sdk::HotspotsGroupBy::BashVerb, + HotspotsGroupBy::File => sdk::HotspotsGroupBy::File, + HotspotsGroupBy::Subagent => sdk::HotspotsGroupBy::Subagent, + } + } +} + +#[napi(object)] +pub struct HotspotsOptions { + pub session: Option, + pub project: Option, + pub since: Option, + pub group_by: Option, + pub patterns: Option>, + pub ledger_home: Option, +} + +/// Per-axis hotspot attribution + pattern-finding queries. Returns a +/// JSON-shaped discriminated union — see `HotspotsResult` in +/// `packages/sdk/index.d.ts`. u64 row counts (`callCount`, +/// `distinctCommands`, `ridingTurns`, `firstEmitTurnIndex`, +/// `toolCallCount`, `turnsAnalyzed`, `analyzed`, `excluded`) cross as +/// `BigInt` per the file header rule. +#[napi(ts_return_type = "import('./index').HotspotsResult")] +pub fn hotspots(opts: Option) -> Result { + let opts = opts.unwrap_or(HotspotsOptions { + session: None, + project: None, + since: None, + group_by: None, + patterns: None, + ledger_home: None, + }); + let raw = sdk::HotspotsOptions { + session: opts.session, + project: opts.project, + since: opts.since, + group_by: opts.group_by.map(Into::into), + patterns: opts.patterns, + ledger_home: maybe_path(opts.ledger_home), + }; + let result = sdk::hotspots(raw).map_err(sdk_err)?; + let value = serde_json::to_value(&result).map_err(|e| { + NapiError::new(SDK_ERROR_CODE, format!("serialize hotspots: {e}")) + })?; + Ok(BigIntPromoting(value)) +} + +// --------------------------------------------------------------------------- +// search +// --------------------------------------------------------------------------- + +#[napi(object)] +pub struct SearchQueryOptions { + /// FTS5 query string. Supports phrase (`"out of memory"`), boolean + /// (`a OR b`), and prefix (`mem*`) syntax. + pub query: String, + /// Hit cap. Defaults to 25 when omitted. + pub limit: Option, + /// Restrict to a single session_id. Omit to search all sessions. + pub session_id: Option, + pub ledger_home: Option, +} + +#[napi(object)] +pub struct SearchHit { + pub session_id: String, + pub message_id: String, + pub source: String, + /// FTS5 BM25 rank (lower = better match). + pub rank: f64, + /// ``-highlighted snippet around the matching tokens. + pub snippet: String, +} + +#[napi(object)] +pub struct SearchResult { + pub query: String, + pub hits: Vec, +} + +#[napi] +pub fn search(opts: SearchQueryOptions) -> Result { + let limit = match opts.limit { + Some(b) => Some(bigint_to_u64(b)? as usize), + None => None, + }; + let raw = sdk::SearchQueryOptions { + query: opts.query.clone(), + limit, + session_id: opts.session_id, + ledger_home: maybe_path(opts.ledger_home), + }; + let result = sdk::search(raw).map_err(sdk_err)?; + Ok(SearchResult { + query: result.query, + hits: result + .hits + .into_iter() + .map(|h| SearchHit { + session_id: h.session_id, + message_id: h.message_id, + source: h.source, + rank: h.rank, + snippet: h.snippet, + }) + .collect(), + }) +} + +// --------------------------------------------------------------------------- +// export_ledger / export_stamps +// --------------------------------------------------------------------------- + +#[napi(object)] +pub struct ExportLedgerOptions { + pub ledger_home: Option, +} + +#[napi(object)] +pub struct ExportStampsOptions { + pub ledger_home: Option, +} + +/// Stream every event row as a JSONL-shaped JSON object. Each value has +/// the form `{ v: 1, kind: '', record: }`. +/// +/// Buffered into an array for v1; matches the SDK's +/// `export_ledger() -> impl Iterator` behavior (it's already in-memory +/// today). A streaming variant is a follow-up. +/// +/// The result is wrapped in [`BigIntPromoting`] so u64 fields nested +/// inside each `record` object (`turnIndex`, `eventIndex`, `callIndex`, +/// `contentLength`, `tokensBeforeCompact`, `byteLen`, `approxTokens`, +/// `retries`, `collapsedCalls`, and the `usage` sub-object's six u64 +/// keys) cross as JS `BigInt` instead of being silently truncated above +/// 2^53 by the default serde-json `number` conversion. +#[napi(js_name = "exportLedger", ts_return_type = "unknown[]")] +pub fn export_ledger(opts: Option) -> Result { + let opts = opts.unwrap_or(ExportLedgerOptions { ledger_home: None }); + let raw = sdk::ExportLedgerOptions { + ledger_home: maybe_path(opts.ledger_home), + }; + let iter = sdk::export_ledger(raw).map_err(sdk_err)?; + let values: Vec = iter.collect(); + Ok(BigIntPromoting(JsonValue::Array(values))) +} + +/// Stream every stamp row as a JSONL-shaped JSON object. Sibling of +/// [`export_ledger`]. +/// +/// The result is wrapped in [`BigIntPromoting`] for symmetry with +/// [`export_ledger`]. Stamps don't currently carry u64 fields, but the +/// wrapper is cheap and means a future stamp-shape change that +/// introduces one (e.g. a `byteLen` on a range bound) won't silently +/// regress to f64 truncation. +#[napi(js_name = "exportStamps", ts_return_type = "unknown[]")] +pub fn export_stamps(opts: Option) -> Result { + let opts = opts.unwrap_or(ExportStampsOptions { ledger_home: None }); + let raw = sdk::ExportStampsOptions { + ledger_home: maybe_path(opts.ledger_home), + }; + let iter = sdk::export_stamps(raw).map_err(sdk_err)?; + let values: Vec = iter.collect(); + Ok(BigIntPromoting(JsonValue::Array(values))) +} + +// --------------------------------------------------------------------------- +// ingest — async; returns a Promise on the JS side. +// --------------------------------------------------------------------------- + +#[napi(object)] +pub struct IngestRoots { + /// `~/.claude/projects` override. + pub claude_projects_dir: Option, + /// `~/.codex/sessions` override. + pub codex_sessions_dir: Option, + /// `~/.local/share/opencode/storage` override. + pub opencode_storage_dir: Option, +} + +#[napi(object)] +pub struct IngestOptions { + pub ledger_home: Option, + pub roots: Option, +} + +#[napi(object)] +pub struct IngestReport { + pub scanned_sessions: BigInt, + pub ingested_sessions: BigInt, + pub appended_turns: BigInt, +} + +impl From for IngestReport { + fn from(r: sdk::IngestReport) -> Self { + IngestReport { + scanned_sessions: u64_to_bigint(r.scanned_sessions as u64), + ingested_sessions: u64_to_bigint(r.ingested_sessions as u64), + appended_turns: u64_to_bigint(r.appended_turns as u64), + } + } +} + +/// Discover and ingest unprocessed turns from the configured session +/// stores. Returns a `Promise`. +/// +/// Progress / warning sinks are intentionally not surfaced through the +/// boundary in v1 — the JS surface today doesn't expose them either. +/// Wave 2 D9 picks them up if the conformance gate calls for it. +/// +/// **Error-code contract.** Unlike the synchronous verbs (which reject +/// with `e.code` set to one of [`BurnErrorCode`]'s string values), this +/// async verb's rejection surfaces as `code: 'GenericFailure'`. The +/// rendered SDK error chain is in `e.message`. See the file header for +/// the full rationale (napi-rs 2.x's `execute_tokio_future` is +/// hard-typed to `Result>` and `Status` is a closed +/// enum, so `'BURN_SDK'` cannot be threaded through). The +/// [`ingest_uses_generic_failure_code_runtime_invariant`] test pins +/// this discrepancy so a future napi-rs upgrade or hand-rolled deferred +/// either fixes it or has to update the test in lockstep with the +/// header docs. +#[napi] +pub async fn ingest(opts: Option) -> NapiResult { + let opts = opts.unwrap_or(IngestOptions { + ledger_home: None, + roots: None, + }); + let roots = opts.roots.unwrap_or(IngestRoots { + claude_projects_dir: None, + codex_sessions_dir: None, + opencode_storage_dir: None, + }); + let raw = sdk::IngestOptions { + ledger_home: maybe_path(opts.ledger_home), + roots: sdk::IngestRoots { + claude_projects_dir: maybe_path(roots.claude_projects_dir), + codex_sessions_dir: maybe_path(roots.codex_sessions_dir), + opencode_storage_dir: maybe_path(roots.opencode_storage_dir), + }, + on_progress: None, + on_warn: None, + }; + let report = sdk::ingest(raw) + .await + .map_err(|e| NapiError::from_reason(format!("{e:#}")))?; + Ok(report.into()) +} + +// --------------------------------------------------------------------------- +// Module-level metadata. napi-rs doesn't require a `register_module` +// entry point — `#[napi]` items register themselves via the macros. +// We export the open-options shape under a stable name for wave-2 +// callers that want to construct one explicitly. +// --------------------------------------------------------------------------- + +/// Synchronously open and immediately close a ledger to validate the +/// configured paths. Returns the resolved `home` path. Mirrors the +/// `Ledger.open()` smoke-call shape from `packages/sdk/index.d.ts`; a +/// future PR can add a stateful `Ledger` JS class that holds a handle. +#[napi(js_name = "ledgerOpen")] +pub fn ledger_open(opts: Option) -> Result { + let opts = opts.unwrap_or(LedgerOpenOptions { + home: None, + content_home: None, + }); + let home = opts.home.clone(); + let content_home = opts.content_home.clone(); + let raw = open_options(home, content_home); + // Open + drop. Schema DDL applies on the first open, so this is a + // cheap "is the path writable / migration current?" probe. + let _handle = sdk::Ledger::open(raw).map_err(sdk_err)?; + // Echo the resolved home back so JS callers know which ledger they + // attached to. + Ok(opts + .home + .unwrap_or_else(|| sdk::ledger_home().to_string_lossy().into_owned())) +} + +// --------------------------------------------------------------------------- +// Tests — exercise the helpers that don't need a live napi env. The full +// boundary is covered end-to-end by the conformance test scaffold landing +// in #247-b (Wave 1 D2). +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn u64_to_bigint_round_trip_small() { + let big = u64_to_bigint(42); + assert_eq!(bigint_to_u64(big).unwrap(), 42); + } + + #[test] + fn u64_to_bigint_round_trip_max() { + let big = u64_to_bigint(u64::MAX); + assert_eq!(bigint_to_u64(big).unwrap(), u64::MAX); + } + + #[test] + fn bigint_to_u64_rejects_signed() { + let signed = BigInt { + sign_bit: true, + words: vec![1], + }; + assert!(bigint_to_u64(signed).is_err()); + } + + #[test] + fn bigint_to_u64_rejects_too_wide() { + let two_words = BigInt { + sign_bit: false, + words: vec![0, 1], + }; + assert!(bigint_to_u64(two_words).is_err()); + } + + #[test] + fn maybe_path_threads_string_to_pathbuf() { + assert!(maybe_path(None).is_none()); + assert_eq!( + maybe_path(Some("/tmp/x".into())), + Some(PathBuf::from("/tmp/x")) + ); + } + + #[test] + fn bigint_field_membership_covers_documented_keys() { + // Every camelCased u64 field that crosses the boundary today + // must be in BIGINT_FIELDS so the walker promotes it. + for key in [ + // overhead + hotspots verbs (round-1 set) + "tokens", + "bytes", + "totalLines", + "sessionCount", + "startLine", + "endLine", + "filesAnalyzed", + "filesWithRecommendations", + "totalRecommendations", + "tokensPerSession", + "callCount", + "distinctCommands", + "ridingTurns", + "firstEmitTurnIndex", + "toolCallCount", + "turnsAnalyzed", + "analyzed", + "excluded", + // export_ledger / export_stamps record-body u64s (round-3 set) + "turnIndex", + "eventIndex", + "callIndex", + "contentLength", + "tokensBeforeCompact", + "byteLen", + "approxTokens", + "retries", + "collapsedCalls", + // nested `usage` shape on TurnRecord / ToolResultEventRecord + "input", + "output", + "reasoning", + "cacheRead", + "cacheCreate5m", + "cacheCreate1h", + ] { + assert!(is_bigint_field(key), "{key} missing from BIGINT_FIELDS"); + } + + // Spot-check that f64 / string fields aren't accidentally on + // the list — a regression here would silently turn floats into + // BigInts on the JS side. + for key in [ + "totalCost", + "grandTotal", + "initialTokens", + "persistenceTokens", + "tokenShare", + "perSessionAvg", + "path", + "kind", + // Record envelope fields that live alongside the promoted + // u64 keys but are themselves a schema version (u32 small) + // or a string discriminant — promoting these would corrupt + // the JSONL contract. + "v", + "ts", + "sessionId", + "messageId", + "source", + ] { + assert!( + !is_bigint_field(key), + "{key} unexpectedly present in BIGINT_FIELDS" + ); + } + } + + #[test] + fn export_record_u64_fields_survive_above_2_pow_53() { + // Pin the round-3 fix: the four field names CodeRabbit called + // out, plus the rest of the export-record u64 surface, must be + // in BIGINT_FIELDS so the BigIntPromoting walker hands them to + // JS as `BigInt`. A regression — anyone removing one of these + // and forgetting to update the export verbs — would silently + // truncate values >2^53 in `exportLedger` / `exportStamps`. + // + // This is a static membership test rather than a live napi-env + // round-trip because the napi sys calls require a running JS + // environment (covered end-to-end by the wave-2 D9 conformance + // suite). The walker's behavior given a matched key is already + // exercised structurally — see `is_bigint_field` / the round-1 + // overhead+hotspots tests — so guarding the membership here is + // load-bearing for the contract. + const ABOVE_2_POW_53: u64 = (1u64 << 53) + 1; + // Sanity: this value is precisely the kind we'd lose to f64 + // rounding, so pinning it in the test doc keeps the failure + // mode visible. + assert!(ABOVE_2_POW_53 > (1u64 << 53)); + assert!(ABOVE_2_POW_53 as f64 as u64 != ABOVE_2_POW_53); + + // The exact field names from CodeRabbit's report. + for key in [ + "turnIndex", + "eventIndex", + "contentLength", + "tokensBeforeCompact", + ] { + assert!( + is_bigint_field(key), + "round-3 fix regressed: {key} not in BIGINT_FIELDS" + ); + } + + // The wrapper itself round-trips a JsonValue::Array (the shape + // export_ledger / export_stamps emit). We can't assert on the + // napi conversion here — see comment above — but we *can* + // build the wrapper to confirm the type plumbing compiles and + // accepts a value-above-2^53 inside a record body shaped like + // the live emitter. + let record = serde_json::json!({ + "v": 1, + "kind": "turn", + "record": { + "turnIndex": ABOVE_2_POW_53, + "eventIndex": ABOVE_2_POW_53, + "contentLength": ABOVE_2_POW_53, + "tokensBeforeCompact": ABOVE_2_POW_53, + }, + }); + let wrapped = BigIntPromoting(JsonValue::Array(vec![record.clone()])); + // We exposed the inner JsonValue as the tuple field; make sure + // the value we just wrapped wasn't lossily reshaped before + // hand-off to the walker — `serde_json::Number::as_u64` is what + // the walker calls, and that path returns the original u64. + if let JsonValue::Array(arr) = &wrapped.0 { + let rec = &arr[0]["record"]; + for k in [ + "turnIndex", + "eventIndex", + "contentLength", + "tokensBeforeCompact", + ] { + let n = rec[k].as_u64().expect("u64 survived the JSON round-trip"); + assert_eq!(n, ABOVE_2_POW_53, "{k} value mutated"); + } + } else { + panic!("BigIntPromoting wrapper dropped the array shape"); + } + } + + #[test] + fn burn_error_codes_match_constants() { + // The TS-exported BurnErrorCode variant values must equal the + // string codes we hand to `napi::Error::new`, otherwise JS + // callers comparing `e.code` to `BurnErrorCode.Sdk` would + // silently miss every error. + assert_eq!(SDK_ERROR_CODE, "BURN_SDK"); + assert_eq!(IO_ERROR_CODE, "BURN_IO"); + assert_eq!(INVALID_ARGUMENT_ERROR_CODE, "BURN_INVALID_ARGUMENT"); + } + + /// Runtime invariant pinning the documented `ingest()` error-code + /// discrepancy. The body is a compile-time assertion that the type + /// returned by `ingest`'s rejection path is the default + /// `napi::Error` (`Error`), *not* our typed + /// `Error<&'static str>` (`BurnError`). If a future napi-rs upgrade + /// or hand-rolled deferred replacement makes typed async errors + /// possible, this test will start failing; that's the signal to + /// remove the caveat from the file header + the `ingest()` doc + /// comment in lockstep with switching the signature to + /// `Result`. + /// + /// Why a static-typing check rather than a JS-side assertion: the + /// JS-side end-to-end test is wave-2 D9 territory (it requires a + /// built `.node` artifact); we can still pin the discrepancy *here* + /// by encoding the napi-rs limitation as a type-level fact. The + /// caveat in the header docs is then forced to track the type. + #[test] + fn ingest_uses_generic_failure_code_runtime_invariant() { + use std::any::TypeId; + + // `BurnError` (the typed error used by every sync verb) is + // distinct from the default `napi::Error`. + // `execute_tokio_future` (which `#[napi] async fn ingest` is + // lowered to) is hard-typed to the latter — see the file + // header. If these two ever become assignable, the docs need + // updating. + type DefaultNapiError = napi::Error; + assert_ne!( + TypeId::of::(), + TypeId::of::(), + "BurnError and napi::Error have unified — \ + ingest() can now return BurnError; update the file header \ + docs and switch ingest's return type." + ); + + // Sanity-check the shapes of the codes we *can* deliver vs the + // status code that `ingest()` will surface. These are what + // upstream `JsError::from(Error).into_value(env)` + // writes for the Status::GenericFailure case. + let sync_codes = [SDK_ERROR_CODE, IO_ERROR_CODE, INVALID_ARGUMENT_ERROR_CODE]; + let async_code: &str = napi::Status::GenericFailure.as_ref(); + for c in sync_codes { + assert_ne!( + c, async_code, + "{c} must not collide with the async fallback code {async_code}" + ); + } + assert_eq!(async_code, "GenericFailure"); + } +}