diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..f40a3e0 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,42 @@ +## Summary + + +- + + + +## Motivation + + + +## Changes + + + +| Crate | Change | +|-------|--------| +| | | + +## Public API Changes + + + + + +| Crate | Symbol | Change | +|-------|--------|--------| +| | | | + +## Testing + + + +- [ ] `cargo test --workspace` +- [ ] `cargo test -p peeroxide-cli --test live_commands -- --ignored` +- [ ] `cargo clippy --workspace --all-targets -- -D warnings` + +## Notes + + diff --git a/.gitignore b/.gitignore index 8b66c98..29332a6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,16 +1,17 @@ /target/ tests/node/node_modules/ +.claude .sisyphus/ .vogon_poetry/ docs/book/ +# ignore local MCP config file +.mcp.json + # Task artifacts — planning docs, Ralph Loop prompts, progress trackers, PR checklists. # These should never land in git. If you need to commit one, do it explicitly and it # will still be surfaced by the pre-PR artifact scan described in AGENTS.md. -DOCS_PLAN.md -RALPH_PROMPT.md -REFACTOR_PLAN.md -PR-TODOS.md *_PLAN.md *_PROMPT.md *_TODOS.md +*_TODO.md diff --git a/AGENTS.md b/AGENTS.md index 8ca2386..1885979 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -9,9 +9,9 @@ This is the root of the peeroxide workspace — a Rust implementation of the Hyp | `peeroxide` | High-level swarm management and topic-based peer discovery | crates.io | | `peeroxide-dht` | HyperDHT: Kademlia routing, Noise handshakes, hole-punching, relay | crates.io | | `libudx` | UDX reliable UDP transport with BBR congestion control | crates.io | -| `peeroxide-cli` | CLI toolkit: lookup, announce, ping, cp, deaddrop | binary only | +| `peeroxide-cli` | CLI toolkit (`peeroxide` binary): lookup, announce, ping, cp, dd, chat, init | crates.io + homebrew tap (`rightbracket/peeroxide`) | -The three library crates are published to crates.io and have external users. 
`peeroxide-cli` is a consumer of those libraries, not a library itself. +All four crates are published to crates.io; the `peeroxide` binary is additionally distributed as a prebuilt via the [`rightbracket/peeroxide` homebrew tap](https://github.com/Rightbracket/homebrew-peeroxide). ## Key Files @@ -77,7 +77,7 @@ If you find yourself needing to change a library signature to satisfy a CLI feat "All tests pass" means all three suites: 1. `cargo test --workspace` — unit tests, integration tests, and the Node.js local interop test (`hyperswarm_cross_language_connect`) -2. `cargo test -p peeroxide-cli --test live_commands -- --ignored` — live public HyperDHT network tests (lookup, announce, cp, deaddrop) +2. `cargo test -p peeroxide-cli --test live_commands -- --ignored` — live public HyperDHT network tests (lookup, announce, cp, dd) Do not mark work complete until both suites are green. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 04e94d1..193622b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ This project is a Rust implementation of the Hyperswarm stack, focusing on wire | `libudx` | Reliable UDP transport with BBR congestion control | crates.io | | `peeroxide-dht` | Kademlia DHT, Noise handshakes, hole-punching, relay | crates.io | | `peeroxide` | High-level swarm and topic-based discovery | crates.io | -| `peeroxide-cli` | CLI toolkit: lookup, announce, ping, cp, deaddrop | crates.io (binary) | +| `peeroxide-cli` | CLI toolkit: lookup, announce, ping, cp, dd | crates.io (binary) | ## Development Requirements diff --git a/Cargo.lock b/Cargo.lock index d7fded7..be968e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21,6 +21,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "anstream" version = 
"1.0.0" @@ -71,6 +80,15 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "arc-swap" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a3a1fd6f75306b68087b831f025c712524bcb19aad54e557b1129cfa0a2b207" +dependencies = [ + "rustversion", +] + [[package]] name = "assert_cmd" version = "2.2.1" @@ -145,6 +163,16 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "cc" +version = "1.2.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16d90359e986641506914ba71350897565610e87ce0ad9e6f28569db3dd5c6d" +dependencies = [ + "find-msvc-tools", + "shlex", +] + [[package]] name = "cfg-if" version = "1.0.4" @@ -175,6 +203,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "num-traits", + "windows-link", +] + [[package]] name = "cipher" version = "0.4.4" @@ -261,6 +300,12 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -279,6 +324,32 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "crossterm" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" +dependencies = [ + "bitflags", + "crossterm_winapi", + "futures-core", + "mio", + "parking_lot", + 
"rustix 0.38.44", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + [[package]] name = "crypto-common" version = "0.1.7" @@ -424,6 +495,12 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + [[package]] name = "float-cmp" version = "0.10.0" @@ -433,6 +510,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "futures" version = "0.3.32" @@ -572,6 +659,30 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "indexmap" version = "2.14.0" @@ -660,6 +771,12 @@ 
dependencies = [ "tracing-subscriber", ] +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + [[package]] name = "linux-raw-sys" version = "0.12.1" @@ -696,6 +813,15 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +[[package]] +name = "memmap2" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" +dependencies = [ + "libc", +] + [[package]] name = "mio" version = "1.2.0" @@ -703,6 +829,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" dependencies = [ "libc", + "log", "wasi", "windows-sys 0.61.2", ] @@ -801,34 +928,45 @@ dependencies = [ [[package]] name = "peeroxide-cli" -version = "0.1.0" +version = "0.2.0" dependencies = [ + "arc-swap", "assert_cmd", + "blake2", + "chrono", "clap", "clap_mangen", "crc32c", + "crossterm", + "curve25519-dalek", "dirs", + "ed25519-dalek", + "fs2", "futures", "hex", "indexmap", "indicatif", "libudx", + "memmap2", "peeroxide", "peeroxide-dht", "predicates", "rand", "serde", "serde_json", + "sha2", "tempfile", "tokio", "toml", + "toml_edit", "tracing", "tracing-subscriber", + "xsalsa20poly1305", ] [[package]] name = "peeroxide-dht" -version = "1.2.0" +version = "1.3.0" dependencies = [ "blake2", "chacha20", @@ -1047,6 +1185,19 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + 
[[package]] name = "rustix" version = "1.1.4" @@ -1056,7 +1207,7 @@ dependencies = [ "bitflags", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.12.1", "windows-sys 0.61.2", ] @@ -1159,6 +1310,33 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc" +dependencies = [ + "libc", + "mio", + "signal-hook", +] + [[package]] name = "signal-hook-registry" version = "1.4.8" @@ -1242,7 +1420,7 @@ dependencies = [ "fastrand", "getrandom 0.3.4", "once_cell", - "rustix", + "rustix 1.1.4", "windows-sys 0.61.2", ] @@ -1536,12 +1714,87 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.62.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.59.0" diff --git a/README.md b/README.md index f8bb639..51b1350 100644 --- a/README.md +++ b/README.md @@ -10,12 +10,48 @@ This project is a faithful port targeting full interoperability with the existin ## Architecture ``` -peeroxide — topic-based peer discovery + connection management (Hyperswarm) -└── peeroxide-dht — Kademlia DHT, Noise handshakes, hole-punching, relay (HyperDHT) - └── libudx — reliable UDP transport with BBR congestion control (libudx) +peeroxide-cli 
— command-line toolkit (lookup, announce, ping, cp, dd, chat, init) +└── peeroxide — topic-based peer discovery + connection management (Hyperswarm) + └── peeroxide-dht — Kademlia DHT, Noise handshakes, hole-punching, relay (HyperDHT) + └── libudx — reliable UDP transport with BBR congestion control (libudx) ``` -## Quick Start +## Install the CLI + +The `peeroxide` CLI bundles several subcommands (`lookup`, `announce`, `ping`, `cp`, `dd`, `chat`, `node`, `init`). +The CLI was built as an example of how to use the library, and also serves as a convenient toolkit for interacting with the network from the terminal to test connectivity, share files, or chat with peers. +No Rust toolchain is needed for the prebuilt CLI route. + +**Homebrew (macOS / Linux):** + +```bash +brew install rightbracket/peeroxide/peeroxide +``` + +Homebrew will auto-tap `rightbracket/peeroxide` on first use. Prebuilt binaries are published for macOS (universal Apple Silicon + Intel), Linux x86_64 (glibc), and Linux aarch64 (glibc). + +**Cargo:** + +```bash +cargo install peeroxide-cli +``` + +**Build from upstream `main`:** + +```bash +brew install --HEAD rightbracket/peeroxide/peeroxide +``` + +After install: + +```bash +peeroxide --help +peeroxide chat --help +``` + +Tap details and upgrade / uninstall instructions: <https://github.com/Rightbracket/homebrew-peeroxide>. + +## Quick Start (library) ```rust use peeroxide::{spawn, discovery_key, JoinOpts, SwarmConfig}; @@ -43,6 +79,8 @@ async fn main() -> Result<(), Box> { HyperDHT implementation including Kademlia, hole-punching, and Noise handshakes. - **libudx** [![Crates.io](https://img.shields.io/crates/v/libudx.svg)](https://crates.io/crates/libudx) Pure Rust implementation of the UDX protocol with BBR congestion control. +- **peeroxide-cli** [![Crates.io](https://img.shields.io/crates/v/peeroxide-cli.svg)](https://crates.io/crates/peeroxide-cli) + Command-line toolkit (`peeroxide` binary): `lookup`, `announce`, `ping`, `cp`, `dd`, `chat`, `init`.
## Interoperability diff --git a/SECURITY.md b/SECURITY.md index b3356d7..efbcf9e 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -6,7 +6,7 @@ The following versions are currently supported with security updates: | Version | Supported | | ------- | --------- | -| 1.0.x | Yes | +| 1.3.x | Yes | ## Reporting a Vulnerability @@ -16,7 +16,7 @@ We use GitHub's private vulnerability reporting system. To report a security iss 2. Click "New draft advisory" 3. Fill in the details of the vulnerability -We will acknowledge your report within 48 hours and aim to provide a resolution within 90 days. +We will acknowledge your report and aim to provide a resolution within 90 days. ## Scope diff --git a/docs/AGENTS.md b/docs/AGENTS.md index edfdc9b..c2f7d91 100644 --- a/docs/AGENTS.md +++ b/docs/AGENTS.md @@ -10,12 +10,14 @@ docs/ └── src/ ├── SUMMARY.md — Chapter outline and navigation tree ├── introduction.md + ├── init/ — peeroxide init (config + man-page install) documentation ├── concepts/ — Shared conceptual background ├── lookup/ — lookup command documentation ├── announce/ — announce command documentation (echo protocol defined here) ├── ping/ — ping command documentation (cross-refs echo protocol) ├── cp/ — cp command documentation - ├── deaddrop/ — deaddrop command documentation + ├── dd/ — dd (Dead Drop) command documentation (v1 + v2 protocols) + ├── chat/ — chat subsystem (user guide, TUI, wire format, protocol, reference) └── appendices/ — Security model, limits & performance ``` @@ -40,7 +42,7 @@ Output goes to `docs/book/` (gitignored). - Cross-references use relative `[text](../path/to/file.md)` links (mdBook requirement). - Human output examples go on **stderr**; structured JSON output goes on **stdout**. - The Echo Protocol is defined exactly once in `src/announce/echo-protocol.md`. All other chapters that reference it must link there rather than re-documenting it. -- `deaddrop/future-direction.md` describes v2 (not yet implemented) — keep clearly labeled. 
+- Both `dd` v1 (`0x01`, single linked chain) and v2 (`0x02`, tree-indexed) protocols are shipped; `dd/future-direction.md` is a short pointer noting that there is no current speculative roadmap. ## Deployment diff --git a/docs/ascii_art.txt b/docs/ascii_art.txt new file mode 100644 index 0000000..400a278 --- /dev/null +++ b/docs/ascii_art.txt @@ -0,0 +1,9 @@ +,____ _____ _____ ____ _____ _____ ___ ,______ +| _ \| ____| ____| _ \ / _ \ \/ /_ _| _ \| ____| +| |_) | _| | _| | |_) | | | \ / | || | | | _| +| __/| |___| |___| _ <| |_| / \ | || |_| | |___ +|_| |_____|_____|_| \_\\___/_/\_\___|____/|_____| + +ENCRYPTED BY DEFAULT. PSEUDONYMOUS BY DESIGN. +NO SERVERS. NO ACCOUNTS. NO GATEKEEPERS. +TRUST NO ONE. TALK TO ANYONE. diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index f4a8c88..b87b1f0 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -2,9 +2,14 @@ [Introduction](./introduction.md) +# Setup + +- [init](./init/overview.md) + # Concepts - [DHT and Routing](./concepts/dht-and-routing.md) +- [DHT Primitives](./concepts/dht-primitives.md) - [Keys and Identity](./concepts/keys-and-identity.md) - [Topics and Discovery](./concepts/topics-and-discovery.md) @@ -21,11 +26,17 @@ - [cp](./cp/overview.md) - [Protocol](./cp/protocol.md) - [Reliability](./cp/reliability.md) -- [deaddrop](./deaddrop/overview.md) - - [Architecture](./deaddrop/architecture.md) - - [Wire Format](./deaddrop/format.md) - - [Operations](./deaddrop/operations.md) - - [Future Direction](./deaddrop/future-direction.md) +- [dd (Dead Drop)](./dd/overview.md) + - [Architecture](./dd/architecture.md) + - [Wire Format](./dd/format.md) + - [Operations](./dd/operations.md) + - [Future Direction](./dd/future-direction.md) +- [chat](./chat/overview.md) + - [User Guide](./chat/user-guide.md) + - [Interactive TUI](./chat/interactive-tui.md) + - [Wire Format](./chat/wire-format.md) + - [Protocol](./chat/protocol.md) + - [Reference](./chat/reference.md) # Appendices diff --git 
a/docs/src/announce/architecture.md b/docs/src/announce/architecture.md index b232402..4c309ec 100644 --- a/docs/src/announce/architecture.md +++ b/docs/src/announce/architecture.md @@ -5,7 +5,7 @@ The `announce` command manages a long-running swarm session, coordinating DHT pr ## Initialization Flow 1. **Identity Generation**: A `KeyPair` is either generated randomly or derived from a seed. -2. **Swarm Setup**: A `SwarmConfig` is constructed with the identity and DHT configuration. Firewall settings are determined by the global config (Open if `public=true`, Consistent otherwise). +2. **Swarm Setup**: A `SwarmConfig` is constructed with the identity and DHT configuration. The `--public` / `network.public` setting drives bootstrap node selection (see [init/overview.md → Global CLI Flags](../init/overview.md#global-cli-flags)); it does not change firewall semantics. 3. **Joining Topic**: The node joins the topic using `JoinOpts { client: false }`. This instructs the DHT to act as a server for this topic, making the node discoverable to lookup queries. 4. **Flushing**: The node waits for the join operation to flush, ensuring at least one announcement has reached the DHT. 
diff --git a/docs/src/appendices/limits-and-performance.md b/docs/src/appendices/limits-and-performance.md index 1959739..b10ad05 100644 --- a/docs/src/appendices/limits-and-performance.md +++ b/docs/src/appendices/limits-and-performance.md @@ -11,10 +11,10 @@ This appendix documents hard limits, configurable bounds, and observed performan | `IDLE_TIMEOUT` | 30 s | Echo session idle timeout | | `ECHO_MSG_LEN` | 16 bytes | Echo probe frame size (fixed) | | `ECHO_TIMEOUT` (ping) | 5 s | Per-probe timeout in `ping --connect` mode | -| `MAX_CHUNKS` | 65 535 | Maximum chunks in a single `deaddrop` message | -| `MAX_PAYLOAD` | 1 000 bytes | Maximum payload per `deaddrop` chunk | -| `ROOT_HEADER_SIZE` | 39 bytes | `deaddrop` root chunk header size | -| `NON_ROOT_HEADER_SIZE` | 33 bytes | `deaddrop` non-root chunk header size | +| `MAX_CHUNKS` | 65 535 | Maximum chunks in a single `dd` message | +| `MAX_PAYLOAD` | 1 000 bytes | Maximum payload per `dd` chunk | +| `ROOT_HEADER_SIZE` | 39 bytes | `dd` root chunk header size | +| `NON_ROOT_HEADER_SIZE` | 33 bytes | `dd` non-root chunk header size | | `CHUNK_SIZE` (cp) | 65 536 bytes | `cp` file chunk size | | `--data` max (announce) | 1 000 bytes | Maximum `--data` payload for `announce` | | lookup `--with-data` concurrency | 16 | `buffer_unordered(16)` for mutable DHT gets | @@ -22,7 +22,7 @@ This appendix documents hard limits, configurable bounds, and observed performan ## Derived Limits -**Maximum `deaddrop` message size:** +**Maximum `dd` message size:** ``` MAX_CHUNKS × MAX_PAYLOAD = 65 535 × 1 000 = ~65.5 MB @@ -59,3 +59,74 @@ Limited by available DHT storage and client memory. Each 65 536-byte chunk is st | 0 | SIGINT/SIGTERM received | `announce` (intentional — clean shutdown is success) | Note: `announce` returns 0 on SIGINT/SIGTERM because interactive shutdown is the normal workflow. `lookup` and `ping` return 130 to allow callers to distinguish interruption from success. 
+ +## Chat + +| Parameter | Value | Description | +|---|---|---| +| Max record size | 1000 bytes | Maximum size for a single DHT record | +| Message overhead | 180 bytes | Fixed overhead (screen name + content combined ≤ 820 bytes) | +| Encryption | XSalsa20-Poly1305 | Security parameters: nonce 24 bytes, tag 16 bytes | +| Epoch length | 60 s | Time window for message bucketing | +| Buckets per epoch | 4 | Sub-divisions within an epoch for message distribution | +| DHT lookups per cycle | 8 | Checks current and previous epoch across 4 buckets | +| Discovery interval | 8 s | Cadence for looking up new peers | +| Feed expiry | 1200 s | Time before a peer feed is considered stale | +| Summary eviction trigger | 20 messages | Number of messages before clearing old history | +| Summary eviction count | 15 messages | Number of messages removed during eviction | +| Mutable put retries | 3 | Retries at 200 ms, 500 ms, and 1000 ms intervals | +| Rotation check interval | 30 s | Frequency of checking for epoch/bucket rotation | +| Dedup ring capacity | 1000 hashes | Number of message hashes stored to prevent duplicates | +| Gap timeout | 5 s | Maximum wait time for out-of-order messages | +| TUI history cap | 500 lines | Scrollback buffer limit in the interactive interface | + +### Chat Performance + +The inbox polling mechanism uses parallel lookups and mutable gets. A full inbox cycle typically completes in 2-4 seconds of wall-clock time. This is a significant improvement over earlier nested-serial designs which required 10-20 seconds for the same operation. 
+ +## Dead Drop (v2) + +| Parameter | Value | Description | +|---|---|---| +| Max chunk size | 1000 bytes | Total size including headers | +| Data payload | 998 bytes | Actual data bytes per non-root chunk | +| Root index slots | 30 | Pointers to child chunks in the root node | +| Non-root index slots | 31 | Pointers to child chunks in intermediate nodes | +| Need-list entries | 124 | 8-byte entries published in each DHT record | +| Parallel fetch cap | 64 | Maximum concurrent DHT requests | +| Soft depth cap | 4 | Maximum tree depth (~27 GB capacity) | +| Per-put timeout | 30 s | Maximum duration for a single chunk upload | +| Stall watchdog check | 5 s | Frequency of progress monitoring | +| Stall watchdog trigger | 30 s | Time with no progress before triggering a restart | +| Need-list publish | 20 s | Frequency of publishing the local need-list | +| Need-list announce | 60 s | Keepalive interval for the need-list topic | +| Refresh interval | 600 s | Default cadence for re-announcing data availability | +| Initial concurrency | 128 | Starting sender concurrency for AIMD | +| Fetch backoff | 500 ms to 15 s | Progressive delay for failed mutable or immutable gets | + +### Tree Capacity by Depth + +The implementation enforces `SOFT_DEPTH_CAP = 4`. Depths beyond that are theoretical only and are rejected at PUT time. + +| Depth | Max Data Chunks | Approx. Capacity | +|---|---|---| +| 0 | 30 | 29 KB | +| 1 | 930 | 928 KB | +| 2 | 28,830 | 28 MB | +| 3 | 893,730 | 891 MB | +| 4 | 27,705,630 | 27 GB | + +### AIMD Algorithms + +**v2 (Current):** +- Uses Exponentially Weighted Moving Average (EWMA) with alpha 0.1. +- Decision interval of 20 samples. +- Fast-trip threshold of 10. +- Shrink factor: 0.75×. +- Growth factor: +2. + +**v1 (Legacy):** +- Uses a tumbling window of 10 samples. +- Halves concurrency if degradation exceeds 30%. +- Increases concurrency by 1 if 0% degradation is detected. 
+ diff --git a/docs/src/appendices/security-model.md b/docs/src/appendices/security-model.md index 2b86a3a..3b9e475 100644 --- a/docs/src/appendices/security-model.md +++ b/docs/src/appendices/security-model.md @@ -29,14 +29,14 @@ The DHT is **untrusted infrastructure**. Any node can relay packets, and routing - Immutable DHT values (used by `cp`) are addressed by the SHA-256 hash of their content. Content is verified on retrieval. - Topic keys are not secret — anyone who knows the topic can look up its peer list. Do not treat topic confidentiality as a security property. -## `deaddrop` Threat Model +## `dd` (Dead Drop) Threat Model -`deaddrop` uses **mutable DHT storage** addressed by `(public_key, topic)`. Security properties: +`dd` uses **mutable DHT storage** addressed by `(public_key, topic)`. Security properties: - Only the holder of the private key can write to a slot (signatures enforced by the DHT). - Anyone who knows `(public_key, topic)` can read the slot — there is no access control on reads. -- Data is signed but **not encrypted** at the DHT layer. For sensitive payloads, encrypt the application data before using `deaddrop`. -- `deaddrop` is designed for asynchronous communication where sender and receiver share a topic out-of-band. +- Data is signed but **not encrypted** at the DHT layer. For sensitive payloads, encrypt the application data before using `dd`. +- `dd` is designed for asynchronous communication where sender and receiver share a topic out-of-band. ## `cp` Threat Model diff --git a/docs/src/chat/interactive-tui.md b/docs/src/chat/interactive-tui.md new file mode 100644 index 0000000..5f44c5d --- /dev/null +++ b/docs/src/chat/interactive-tui.md @@ -0,0 +1,92 @@ +# Interactive TUI + +Peeroxide chat features a terminal-based interactive interface (TUI) designed for real-time communication. + +## Mode Selection + +The TUI is automatically enabled if: +1. `stdout` is a TTY. +2. `stdin` is a TTY. +3. The `--line-mode` flag is not set. +4. 
The `PEEROXIDE_LINE_MODE` environment variable is unset, empty, or `"0"` (any other non-empty value forces line mode). + +If any of these conditions are not met, the client falls back to line mode. If TUI initialization fails on a TTY, a warning is printed and the client reverts to line mode. + +## Status Bar Layout + +The status bar sits at the bottom of the terminal and provides real-time feedback on network activity and session state. + +```text +● Sending 3 Receiving 12 inbox Feeds 2 DHT 32 general +``` + +### Components + +- **Activity Indicator (●)**: Lights up when any DHT operation (put, get, announce, lookup) is in flight. +- **Left Segments**: + - `Sending N`: Number of messages currently in the publish batching pipeline. + - `Receiving N`: Number of messages currently being fetched or ordered. + - `Ready`: Indicates the publisher queue is empty and the client is idle. + - *Note*: These slots use "sticky width"—once they grow to accommodate a larger number, they remain that size until the terminal is resized. +- **Center Segment**: + - Shows `inbox` (or `i`) when there are no unread invites. + - Shows `INBOX` (or `I`) in yellow-on-black when new invites have arrived. + - The segment is centered. It automatically shrinks or disappears if the terminal width is too narrow to avoid overlapping left or right segments. +- **Right Segments**: + - `Feeds N`: Total number of active feeds being tracked in the session. + - `DHT N`: Current number of connected peers in the DHT routing table. + - `<channel>`: The name of the current channel or the recipient's name. + +## Keyboard Controls + +| Key | Behavior | +|---|---| +| `Enter` | Send the current input buffer. | +| `Ctrl-C` | If buffer is non-empty: Clear the buffer. If buffer is empty: Arms a 2-second force-quit window. | +| `Ctrl-D` | If buffer is empty: Initiate graceful exit. If non-empty: Forward-delete character. | +| `Ctrl-L` | Full screen repaint and history replay. 
| +| `Up/Down` | Move the cursor up or down within the multi-line input area. | + +### Ctrl-C Force Quit + +When the buffer is empty, pressing `Ctrl-C` once will display a yellow-on-black notice: +`*** press Ctrl-C again within 2 seconds to force quit — press Ctrl-D for graceful exit` + +Pressing `Ctrl-C` a second time within the 2-second window will terminate the process immediately. This remains responsive even if the network publisher is blocked. Any other key disarms the window. + +## Slash Commands + +Commands can be entered directly into the input buffer starting with a `/`. + +| Command | Action | +|---|---| +| `/help`, `/?` | Display available commands. | +| `/quit`, `/exit` | Initiate a graceful shutdown. | +| `/ignore [name]` | List ignored users, or add a user to the ignore list. | +| `/unignore <name>` | Remove a user from the ignore list. | +| `/friend [name]` | List friends, or add a user to your friends list. | +| `/unfriend <name>` | Remove a user from your friends list. | +| `/inbox` | Display and drain the list of accumulated invites. Resets the INBOX status segment. | + +## Input Features + +### Multi-line Input +The input area above the status bar supports multi-line text. Use `Alt-Enter` (or your terminal's equivalent) to insert a newline without sending. + +### Bracketed Paste +The TUI supports bracketed paste mode. When you paste large blocks of text, the client treats it as a single input operation, preventing the terminal from interpreting pasted newlines as "Send" commands. + +### History Replay +The client maintains a bounded in-memory scrollback buffer of the last 500 messages (`HISTORY_CAP`). When the terminal is resized or repainted (`Ctrl-L`), the client replays the last `min(history_len, terminal_height)` entries to restore context.
+ +## Terminal Lifecycle + +The `TerminalGuard` ensures the terminal state is correctly managed: +- **Enter**: Scrolls existing terminal content into the scrollback, enables raw mode, enables bracketed paste, hides the cursor, and installs a panic hook. +- **Exit/Panic**: Resets the scroll region, restores the cursor, disables bracketed paste, restores original colors, and disables raw mode. + +## EOF and Shutdown + +When `stdin` reaches EOF (e.g., via `Ctrl-D` or piped input completion): +- **Default**: The client begins a graceful drain. It displays `*** flushing publish queue (Ctrl-C to abort)…` and waits for all pending messages to be published to the DHT. There is no fixed timeout, though `Ctrl-C` can be used to skip the wait. +- **--stay-after-eof**: Instead of exiting, the client enters read-only listener mode, allowing you to continue seeing incoming messages without being able to reply. diff --git a/docs/src/chat/overview.md b/docs/src/chat/overview.md new file mode 100644 index 0000000..5f44e90 --- /dev/null +++ b/docs/src/chat/overview.md @@ -0,0 +1,56 @@ +# Chat Subsystem Overview + +Peeroxide chat provides a serverless, end-to-end encrypted messaging environment built on the HyperDHT. It enables real-time communication without centralized accounts, phone numbers, or servers. Every identity is a public key, and every message is a cryptographically signed and encrypted record stored briefly in the distributed hash table. + +## Why Chat? + +Traditional messaging apps rely on central servers to store your messages, manage your identity, and route your traffic. Peeroxide chat removes these intermediaries. It treats the network as a shared space where peers discover each other through topics and exchange data directly. + +This design ensures: +- **Censorship Resistance**: There is no central point to shut down. +- **Privacy by Default**: All messages are encrypted. Metadata is minimized through epoch-based topic rotation. 
+- **Self-Sovereign Identity**: You own your cryptographic keys. Your identity is not tied to a service provider. + +## Identity Model + +Your identity in Peeroxide is an Ed25519 keypair. This keypair is stored in a local profile. When you send a message, it is signed with your private key, allowing anyone with your public key to verify that it came from you. + +Profiles allow you to manage multiple identities on one machine. Each profile includes: +- A permanent secret seed. +- An optional screen name. +- An optional biography. +- A friends list. + +Separately, a shared known-users name cache lives at `~/.config/peeroxide/chat/known_users`. It is process-wide (not per profile) and acts as a soft directory mapping public keys to the most-recently-seen screen name for each peer you have encountered. + +## Channels + +Peeroxide uses a topic-based discovery system. A "channel" is simply a name that maps to a DHT topic. + +### Public Channels +Public channels use a well-known derivation for their discovery topic. Anyone who knows the channel name (e.g., `general` or `rust-dev`) can join, read history, and post messages. + +### Private Channels +Private channels add a secret "group salt" to the topic derivation. Only peers who possess the salt can discover the channel topic or decrypt the messages within it. This enables private group conversations on the public DHT without revealing the participants or the content to outsiders. + +## Direct Messaging (DMs) + +Direct messaging allows private, one-to-one communication between two specific public keys. + +When you start a DM with another user, Peeroxide derives a unique `dm_channel_key` using your public key and theirs. Because the derivation is order-independent, both parties arrive at the same key. The communication is further secured using an ephemeral shared secret derived via X25519 Elliptic Curve Diffie-Hellman (ECDH). 
+ +## The Inbox Concept + +Because there is no server to hold messages while you are offline, Peeroxide uses an "Inbox" mechanism to facilitate discovery. + +Your inbox is a set of rotating DHT topics derived from your public key. When someone wants to start a DM or invite you to a private channel, they generate a one-shot invite-feed keypair, publish an encrypted `InviteRecord` at that feed via `mutable_put`, and then `announce` that feed on your current inbox topic. + +Your client periodically monitors these topics. When a new invite appears, it notifies you and provides the necessary keys to join the conversation. This "nudge" mechanism allows peers to find each other even if they aren't currently in the same channel. + +## Profiles and the Nexus + +The "Nexus" is your personal landing page on the DHT. It contains your screen name and biography. When you are active, your client publishes your Nexus record directly under your identity public key (via `mutable_put` on that key, with no extra topic derivation). + +Your friends fetch your Nexus by `mutable_get`-ing your identity public key, picking up name and bio updates. This keeps your identity consistent across different channels and sessions. + +For more details on the technical implementation, see [Wire Format](./wire-format.md) and [Protocol](./protocol.md). diff --git a/docs/src/chat/protocol.md b/docs/src/chat/protocol.md new file mode 100644 index 0000000..896d417 --- /dev/null +++ b/docs/src/chat/protocol.md @@ -0,0 +1,77 @@ +# Operational Protocol + +The Peeroxide chat protocol defines how peers discover each other, synchronize message feeds, and maintain a consistent conversation state without a central server. + +## Feed Lifecycle + +A "feed" is a sequence of messages published by a single identity under a temporary Ed25519 keypair. + +### Rotation +To enhance privacy and limit the impact of key compromise, feed keypairs are rotated periodically. +1. 
At session start, a random feed keypair and a lifetime wobble (between 0.5x and 1.5x of `--feed-lifetime`) are chosen. +2. A rotation watcher checks the feed age every 30 seconds. +3. When the lifetime is reached, the publisher generates a new feed keypair. +4. The publisher first announces the new feed. +5. It then updates the old `FeedRecord` to include the `next_feed_pubkey` pointer. +6. The old feed remains active briefly to ensure peers follow the transition before it is abandoned. + +## Message Publishing Pipeline + +The publisher uses a bounded queue to batch and write messages to the DHT. + +1. **Batching**: Messages are accumulated in a queue. A batch is processed when it reaches `--batch-size` or after `--batch-wait-ms`. +2. **Immutable Put**: Each message in the batch is stored as an immutable record on the DHT. +3. **Mutable Put**: The `FeedRecord` for the current feed is updated to include the hashes of the new messages. This operation is retried up to 3 times (at 200ms, 500ms, and 1000ms intervals) to handle DHT congestion. +4. **Announce**: The publisher announces the feed's availability on the channel's `announce_topic`. + +## Reader Discovery Loop + +The reader task starts with a one-shot cold-start scan, then settles into a steady-state discovery loop. + +### Cold-Start Historical Scan +On startup, the reader fires concurrent lookups across the **last 20 epochs × 4 buckets = 80 discovery topics** (i.e. a 20-minute backwards window, since each epoch is 60 s). This surfaces feeds that announced before the session started so the client has visible history immediately, instead of waiting up to a full epoch rotation for the steady-state loop to reach them. + +### Steady-State Loop +After the cold-start completes, the continuous loop runs: + +1. **Discovery**: Every 8 seconds, the reader performs lookups on the 8 discovery topics (current and previous epoch across 4 buckets). +2. 
**Polling**: For every discovered peer, the reader fetches and decrypts their `FeedRecord`. +3. **Fetching**: New message hashes found in the `FeedRecord` are fetched as immutable records. +4. **Ordering**: Messages are passed to the `ChainGate` for causal ordering. + +## Ordering and Deduplication + +### DedupRing +The `DedupRing` is a FIFO cache with a capacity of 1000 hashes. It ensures that the client never processes or displays the same message twice, even if it is rediscovered through different feeds or topics. + +### ChainGate +The `ChainGate` enforces strict ordering based on the `prev_msg_hash` field in each `MessageEnvelope`. +- If a message arrives and its `prev_msg_hash` matches the last seen message from that sender, it is released to the UI. +- If it doesn't match, it is buffered, and the reader triggers a refetch of the missing hash with an exponential backoff. +- If a gap remains for more than 5 seconds (`GAP_TIMEOUT`), the `ChainGate` force-releases the buffered messages, marking them as `late`. + +## History and Eviction + +The `FeedRecord` has a limited capacity (max 26 hashes). When the message count reaches `SUMMARY_EVICT_TRIGGER` (20), the publisher performs an eviction. + +1. The 15 oldest messages (`SUMMARY_EVICT_COUNT`) are moved into a new `SummaryBlock`. +2. The `SummaryBlock` is stored as an immutable record. +3. The `FeedRecord` is updated to point to the new `SummaryBlock` hash and contains only the remaining 5 newest messages. +4. On a cold start, a reader can walk back through these `SummaryBlock` pointers up to a `MAX_SUMMARY_DEPTH` of 100 blocks. + +## Inbox and Invites + +The inbox monitor handles parallel scanning for new invites. + +1. **Snapshot**: The monitor takes a snapshot of currently known feed sequences. +2. **Parallel Scan**: It fires 8 concurrent DHT lookups for the 8 inbox topics. +3. **Resolution**: Peer pubkeys found in the topics are fanned out into parallel `mutable_get` calls to retrieve `InviteRecord`s. +4. 
**Verification**: Invites are decrypted using the `invite_key` (derived via ECDH) and verified. +5. **Nudge**: In DM sessions, a "nudge" is sent at most once per epoch to signal the sender's presence. A nudge is an encrypted `InviteRecord` published via `mutable_put` on the sender's invite-feed keypair (with the lure payload truncated to 800 bytes), followed by an `announce` on the recipient's current inbox topic. This matches the regular inbox-invite write path. + +## Graceful Shutdown + +Upon exit, the client attempts a clean teardown: +1. **Publisher Drain**: It waits for the publish queue to empty. +2. **Invite Retraction**: For DM sessions, it attempts to retract the inbox invite by publishing an empty payload to the invite feed with a 1-second timeout. +3. **Terminal Reset**: The TUI is disabled and terminal settings are restored. diff --git a/docs/src/chat/reference.md b/docs/src/chat/reference.md new file mode 100644 index 0000000..b85e2e2 --- /dev/null +++ b/docs/src/chat/reference.md @@ -0,0 +1,118 @@ +# Chat Reference + +Technical reference tables for constants, flags, and filesystem layouts in the Peeroxide chat subsystem. + +## Constants + +| Constant | Value | Description | +|---|---|---| +| `MAX_RECORD_SIZE` | 1000 bytes | Maximum size of any single DHT record. | +| `MSG_FIXED_OVERHEAD`| 180 bytes | Combined size of envelope fields (excluding name/content). | +| `MAX_SCREEN_NAME_CONTENT`| 820 bytes | Max sum of screen name + content lengths. | +| `NONCE_SIZE` | 24 bytes | XSalsa20 nonce size. | +| `TAG_SIZE` | 16 bytes | Poly1305 tag size. | +| `CONTENT_TYPE_TEXT` | `0x01` | Record content type for text messages. | +| `INVITE_TYPE_DM` | `0x01` | Inbox invite type for direct messages. | +| `INVITE_TYPE_PRIVATE` | `0x02` | Inbox invite type for private channels. | +| `SUMMARY_EVICT_TRIGGER`| 20 | Messages in `FeedRecord` before summary eviction. | +| `SUMMARY_EVICT_COUNT` | 15 | Number of messages moved to summary on eviction. 
| +| `MUTABLE_PUT_RETRY_MS` | `[200, 500, 1000]`| Retry intervals for mutable DHT updates. | +| `ROTATION_CHECK_INTERVAL`| 30s | How often the publisher checks for feed rotation. | +| `MAX_SUMMARY_DEPTH` | 100 | Maximum number of summary blocks to walk back. | +| `FEED_EXPIRY_SECS` | 1200 | Time (20 min) after which a feed is considered stale. | +| `DISCOVERY_INTERVAL_SECS`| 8 | Frequency of reader discovery lookups. | +| `HISTORY_CAP` | 500 | TUI scrollback history limit (in memory). | +| `CTRL_C_ARM_WINDOW` | 2s | Double-press window for force-exit. | +| `DEDUP_RING_CAPACITY` | 1000 | Max hashes stored in the deduplication set. | +| `GAP_TIMEOUT` | 5s | Time before ChainGate force-releases out-of-order msgs. | +| `REFETCH_SCHEDULE_MS` | `[0, 500, 1500, 3000]`| Backoff intervals for missing hash refetching. | + +## CLI Flags + +### Global Flags +- `--debug`: Enable stderr debug logs. +- `--probe`: Enable stderr trace probes. +- `--line-mode`: Force line-based I/O. + +### Subcommand: join +- `--profile <name>`: Profile to use (default: `default`). +- `--group <salt>`: Private channel salt (conflicts with `--keyfile`). +- `--keyfile <path>`: Private salt from file (conflicts with `--group`). +- `--no-nexus`: Skip nexus refresh/publish. +- `--no-friends`: Skip friend refresh. +- `--read-only`: Listen only mode. +- `--stealth`: Shorthand for `--no-nexus --read-only --no-friends`. Note this does **not** suppress inbox polling; see [Stealth Mode](./user-guide.md#stealth-mode) in the user guide for the full threat-model breakdown. +- `--feed-lifetime <secs>`: Feed rotation interval (default: `60`). +- `--batch-size <n>`: Max messages per batch (default: `16`). Values below `1` are clamped to `1`. +- `--batch-wait-ms <ms>`: Batch window (default: `50`). +- `--stay-after-eof`: Enter listener mode on EOF. +- `--no-inbox`: Disable inbox monitor. +- `--inbox-poll-interval <secs>`: Inbox scan frequency (default: `15`). Values below `1` are clamped to `1`. 
+ +### Subcommand: dm +Same session-flag surface as `join`, **except** `--group` and `--keyfile` are not accepted (the DM channel key is derived deterministically from the two participants' identity public keys). DM also adds: +- `--message <text>`: Initial inbox-invite lure text. Ignored in stealth/read-only mode. + +### Subcommand: inbox +- `--profile <name>`: Profile to use (default: `default`). +- `--poll-interval <secs>`: Polling interval (default: `15`). Values below `1` are clamped to `1`. +- `--no-nexus`, `--no-friends`: Accepted for flag-surface parity with `chat join` / `chat dm` but are no-ops here (the inbox CLI does not run nexus / friend background tasks). + +### Subcommand: whoami +- `--profile <name>`: Profile to inspect (default: `default`). + +### Subcommand: profiles +- `profiles list`: no flags. +- `profiles create <name> [--screen-name <screen-name>]`: optional initial screen name; otherwise a deterministic vendor name is generated. +- `profiles delete <name>`: rejects `default`. + +### Subcommand: friends +- `friends list [--profile <name>]`: also the implicit default if no subcommand is given. +- `friends add <key-or-name> [--alias <alias>] [--profile <name>]`: alias auto-fills from the known-users cache (or vendor name) when omitted. +- `friends remove <key-or-alias> [--profile <name>]`. +- `friends refresh`: one-shot DHT refresh; does **not** accept `--profile` and operates on the `default` profile only. + +### Subcommand: nexus +- `--profile <name>`, `--set-name <name>`, `--set-bio <text>`, `--publish`, `--lookup <pubkey>`, `--daemon` (publish every 480 s, refresh **all** friends every 600 s). +- `--lookup` short-circuits to lookup mode. +- When at least one of `--set-name` / `--set-bio` is supplied and neither `--publish` nor `--daemon` is set, the setters are written to the profile and the command exits without publishing. +- When no flags (or only `--profile`) are supplied, the command still performs a single Nexus publish. 
+ +## Profile Directory Layout + +Profiles are stored under `~/.config/peeroxide/chat/profiles/` (the chat subsystem uses the XDG-style `~/.config/peeroxide/chat/` root regardless of the platform-specific config dir used by `peeroxide`'s top-level config file). + +```text +~/.config/peeroxide/chat/profiles/<name>/ +├── seed # 32-byte raw Ed25519 secret seed +├── name # Optional UTF-8 screen name +├── bio # Optional UTF-8 biography +└── friends # Friend list (TSV) +``` + +### Friends File Schema +The `friends` file is a Tab-Separated Values (TSV) file: +`<64-hex-pubkey>\t<alias>\t<cached_name>\t<cached_bio_line>` + +### Shared Known Users +Located at `~/.config/peeroxide/chat/known_users`. +- **Format**: TSV `<64-hex-pubkey>\t<screen-name>` +- **Capacity**: 1000 entries (FIFO). +- **Reloading**: 5s mtime-debounced reload. + +## Name Resolution Precedence + +`NameResolver` (`peeroxide-cli/src/cmd/chat/name_resolver.rs`) resolves a peer's identity public key in the following order: + +1. **Friend Alias**: the friend's locally assigned alias, if non-empty. +2. **Known Screen Name**: the latest screen name for that pubkey in the shared `~/.config/peeroxide/chat/known_users` cache, if non-empty. +3. **Vendor Fallback**: a deterministic auto-generated name derived from the pubkey seed. + +Note: the friends file's per-friend `cached_name` and `cached_bio_line` columns are populated by the nexus refresh task for display in the friends-list and friend nexus prints, but `NameResolver` itself does not consult them — it goes straight from friend alias to the shared known_users cache. + +The two output formats: + +- **`bar_label()`** — compact label used in the status bar: + - friend alias source → bare alias (e.g. `bob`). + - any other source → `name@shortkey` (e.g. `alice@a1b2c3d4`), where `shortkey` is the first 8 hex characters of the pubkey. +- **`formal()`** — uniform fully-qualified label: `name (shortkey)` (e.g. `alice (a1b2c3d4)`), regardless of source. 
diff --git a/docs/src/chat/user-guide.md b/docs/src/chat/user-guide.md new file mode 100644 index 0000000..568af81 --- /dev/null +++ b/docs/src/chat/user-guide.md @@ -0,0 +1,277 @@ +# Chat User Guide + +The Peeroxide chat subsystem provides a set of CLI tools for managing identities, communicating in channels, and sending direct messages. + +## Global Flags + +These flags apply to all `peeroxide chat` subcommands. + +| Flag | Description | +|---|---| +| `--debug` | Enable stderr debug event logs. | +| `--probe` | Enable internal trace probes (stdin, post, fetch_batch, etc) to stderr. | +| `--line-mode` | Force line-based I/O even when running on a TTY. | + +In addition, every chat subcommand inherits the top-level `peeroxide` global flags documented in [init](../init/overview.md#global-cli-flags): `--config `, `--no-default-config`, `--public`, `--no-public`, `--bootstrap ` (repeatable), and `-v` / `--verbose`. These control config file loading, DHT bootstrap node selection, and tracing verbosity. + +## Subcommand: join + +Join a public or private channel for real-time conversation. + +```bash +peeroxide chat join [flags] +``` + +### Flags + +| Flag | Default | Description | +|---|---|---| +| `--profile ` | `default` | Use a specific identity profile. | +| `--group ` | | Set a private channel salt. Conflicts with `--keyfile`. | +| `--keyfile ` | | Read private channel salt from a file. Conflicts with `--group`. | +| `--no-nexus` | | Skip personal nexus (profile page) refresh and publication. | +| `--no-friends` | | Skip background friend nexus refresh. | +| `--read-only` | | Listen only; do not post messages or announce feeds. | +| `--stealth` | | Shorthand for `--no-nexus --read-only --no-friends`. | +| `--feed-lifetime ` | `60` | Rotation lifetime for your feed keypair. | +| `--batch-size ` | `16` | Maximum messages per publish batch. Values below `1` are clamped to `1`. | +| `--batch-wait-ms ` | `50` | Maximum time to wait for a batch to fill before publishing. 
| +| `--stay-after-eof` | | Enter read-only mode on stdin EOF instead of exiting. | +| `--no-inbox` | | Disable background inbox monitoring. | +| `--inbox-poll-interval ` | `15` | How often to poll the inbox for new invites. Values below `1` are clamped to `1`. | + +### Examples + +Join a public channel: +```bash +peeroxide chat join general +``` + +Join a private channel with a secret group name: +```bash +peeroxide chat join development --group "super-secret-salt-2026" +``` + +## Subcommand: dm + +Start an encrypted direct message session with another user. + +```bash +peeroxide chat dm [flags] +``` + +The `recipient` can be resolved using several formats (see Recipient Resolution below). + +### Flags + +`chat dm` supports most of the session flags from `join` (`--profile`, `--no-nexus`, `--no-friends`, `--read-only`, `--stealth`, `--feed-lifetime`, `--batch-size`, `--batch-wait-ms`, `--stay-after-eof`, `--no-inbox`, `--inbox-poll-interval`), plus a DM-only flag: + +| Flag | Description | +|---|---| +| `--message ` | Initial lure text sent with the inbox invite. Ignored in stealth/read-only mode. | + +`chat dm` does **not** accept `--group` / `--keyfile`; the channel key for a DM is derived deterministically from the two participants' identity public keys via `dm_channel_key`. + +### Recipient Resolution + +The recipient argument is resolved in the following order: +1. 64-character hex public key. +2. `@shortkey` (e.g., `@a1b2c3d4`). +3. `name@shortkey` (e.g., `alice@a1b2c3d4`). +4. 8-character shortkey (e.g., `a1b2c3d4`). +5. Friend alias (defined in your friends list). +6. Screen name from the `known_users` cache. + +## Subcommand: inbox + +Monitor your inbox for new invites without entering an interactive UI. + +```bash +peeroxide chat inbox [flags] +``` + +### Flags + +| Flag | Default | Description | +|---|---|---| +| `--profile ` | `default` | Use a specific profile. | +| `--poll-interval ` | `15` | Interval between inbox scans. 
Values below `1` are clamped to `1`. | +| `--no-nexus` | | Accepted for flag-surface parity with `chat join` / `chat dm`, but has no effect on `chat inbox` (which does not run a nexus publisher). | +| `--no-friends` | | Accepted for flag-surface parity with `chat join` / `chat dm`, but has no effect on `chat inbox` (which does not run a friend refresh task). | + +## Profile Management: whoami and profiles + +### whoami + +Prints information about your current profile, including your public key, screen name, and nexus topic. + +```bash +peeroxide chat whoami [--profile <name>] +``` + +| Flag | Default | Description | +|---|---|---| +| `--profile <name>` | `default` | Profile to inspect. | + +### profiles + +Manage multiple identities. Subcommands: + +```bash +peeroxide chat profiles list +peeroxide chat profiles create <name> [--screen-name <screen-name>] +peeroxide chat profiles delete <name> +``` + +| Subcommand | Args / flags | Description | +|---|---|---| +| `list` | — | List all available profiles. | +| `create <name>` | `--screen-name <screen-name>` (optional) | Create a new profile. If `--screen-name` is omitted, a deterministic vendor name is generated and stored. | +| `delete <name>` | — | Delete a profile. The `default` profile cannot be deleted. | + +## Friend Management: friends + +Manage your list of trusted peers. + +```bash +peeroxide chat friends [subcommand] [flags] +``` + +If no subcommand is given, `friends list` runs. + +### Subcommands and flags + +| Subcommand | Flags | Description | +|---|---|---| +| `list` | `--profile <name>` (default `default`) | Show all friends in the profile. | +| `add <key-or-name>` | `--alias <alias>` (optional), `--profile <name>` (default `default`) | Add a new friend. Key resolution follows the same rules as DM recipients. If `--alias` is omitted, the alias auto-fills from the known-users cache or a vendor name. | +| `remove <key-or-alias>` | `--profile <name>` (default `default`) | Remove a friend from the profile's list. | +| `refresh` | — | One-shot DHT update for friends' profile information. 
Does **not** accept a `--profile` flag — operates on the `default` profile only. | + +## Personal Page: nexus + +Manage your public profile information (Nexus) published on the DHT. + +```bash +peeroxide chat nexus [flags] +``` + +If `--lookup` is supplied, the command short-circuits to lookup mode. Otherwise, `--set-name` and `--set-bio` are written to the profile first (both are applied in one run). After the setters, behavior is: + +- `--publish`: perform a one-shot Nexus publish and exit. +- `--daemon`: enter the background loop (publish every 480 s, refresh **all** friends every 600 s). +- No `--publish` / `--daemon`, but at least one setter was supplied: exit without publishing. +- No flags at all (or only `--profile`): perform a one-shot Nexus publish and exit. + +### Flags + +| Flag | Default | Description | +|---|---|---| +| `--profile <name>` | `default` | Profile to publish from / inspect. | +| `--set-name <name>` | | Update your screen name (writes the profile's `name` file before publishing). | +| `--set-bio <text>` | | Update your biography (writes the profile's `bio` file before publishing). | +| `--publish` | | Publish your Nexus record to the DHT once. | +| `--daemon` | | Enter a background loop: publish your Nexus every 480s and refresh **all** friends every 600s. | +| `--lookup <pubkey>` | | Lookup and print the Nexus information for a specific public key. Short-circuits the rest. | + +### Screen Name and Bio Files + +A profile's screen name and bio live as plain UTF-8 text files inside the profile directory: + +```text +~/.config/peeroxide/chat/profiles/<name>/name +~/.config/peeroxide/chat/profiles/<name>/bio +``` + +Both files are optional. If `name` is missing, a deterministic vendor name is generated from the profile's identity public key whenever a screen name is needed. If `bio` is missing or empty, the published Nexus record carries an empty bio. 
+ +You can populate them two ways: + +- **`peeroxide chat nexus --set-name `** / **`--set-bio `** — writes the file with the supplied text (after trimming leading and trailing whitespace), then optionally publishes if `--publish` / `--daemon` is also given. Both setters can be supplied in one command. +- **Edit the file directly** with any editor. Multi-line bios are supported; the entire file content (after UTF-8 decoding) becomes the bio. The first line is treated specially by friends' clients — the [friends file](./reference.md#friends-file-schema) caches only the first line of each friend's bio for the `friends list` display, but the full bio is shown when a friend explicitly looks the identity up via `chat nexus --lookup`. + +### Size Limit + +The screen name and bio are serialized together into a single `NexusRecord` published to the DHT as a `mutable_put` value. The full record (3 framing bytes + `name` UTF-8 bytes + `bio` UTF-8 bytes) must fit within 1000 bytes, which is the `MAX_RECORD_SIZE` constant for chat records. + +In practice: with a typical 10–40 byte screen name, the bio budget is roughly **950–990 UTF-8 bytes** (note: bytes, not characters — many non-ASCII characters take 2–4 bytes each). + +If the combined size is too large, the publish step fails with: + +```text +warning: nexus serialize failed: record too large: N bytes exceeds 1000 byte limit +``` + +The bio file is **still saved on disk** in this case — only the DHT publish is skipped. Shorten the bio (or screen name) and re-run with `--publish` to recover. + +## Stealth Mode + +The `--stealth` flag is supported by both `chat join` and `chat dm`. It is a shorthand for `--no-nexus --read-only --no-friends`, but the behavioral and threat-model implications are easier to reason about as a single concept. + +### What `--stealth` suppresses + +Passing `--stealth` is equivalent to enabling all three of: + +- **`--read-only`** — your publisher is disabled entirely. 
No feed keypair is created, no message records are written via `immutable_put`, no `FeedRecord` is published via `mutable_put`, and no `announce` is sent on the channel or DM rendezvous topics. You become a pure observer of the channel. +- **`--no-nexus`** — your profile's Nexus record (screen name + bio) is not published. Other peers cannot resolve your identity public key to your screen name via the DHT, and you do not consume a `mutable_put` slot at your identity public key. +- **`--no-friends`** — the background friend-Nexus refresh task does not run. Your DHT does not issue periodic `mutable_get`s on each friend's identity public key, which would otherwise be observable to DHT nodes near those keys. + +### What `--stealth` does NOT suppress + +`--stealth` stops the publishing side of the protocol. Several other observable activities continue: + +- **Channel discovery is still active.** Reading any channel requires `lookup`s on its discovery topics, followed by `mutable_get`s on each announcer's feed public key. Both operations remain visible to the DHT nodes serving them. +- **Inbox monitoring is independent of `--stealth`.** A stealth session still polls your profile's inbox topics every `--inbox-poll-interval` seconds (8 lookups per cycle by default — current + previous epoch, 4 buckets each). The wire-level lookup carries only the derived inbox topic, not your public key, so a passive DHT participant who does not already know your identity cannot recover it from these queries alone. However, an observer who **already knows your public key** can independently derive the same inbox topics and recognize the polling pattern, which lets them correlate the polling source IP with your identity. If that matches your threat model, also pass `--no-inbox`. +- **DM under stealth is receive-only.** The DM channel key is symmetric between the two parties, so you can decrypt incoming messages. 
But you never `announce` your DM feed, never publish a message, and never send the per-epoch nudge. Your DM peer has no way to know you are listening. +- **Network-level metadata is unchanged.** Every DHT operation goes out over UDP to peers who see your IP address. The Hyperswarm DHT has no traffic-mixing or onion-routing layer. If IP-level identifiability matters in your threat model — and especially if your public key is already known to an adversary — route peeroxide's traffic through a transport you trust to provide that property: typically a VPN that gives you a different egress IP, mixes your traffic with other clients, and does not retain per-flow logs. + +### When `--stealth` is enough + +It is sufficient when your only goal is to read a channel without contributing to its announce set or signaling your presence to other channel participants — for example, when you are using a fresh profile whose public key no observer has associated with you, and you want to listen first before deciding whether to post. + +### When `--stealth` is not enough + +It is **not** sufficient when your public key is already known to an adversary and IP-level correlation matters. In that case the chain `your public key → derived inbox / Nexus / announce topics → DHT lookups from your IP` is exploitable by a sufficiently positioned observer. Combine `--stealth --no-inbox` with a trustworthy anonymizing transport in front of the binary. 
+ +### Recipes + +- Lurk on a channel without joining its announce set: + + ```bash + peeroxide chat join general --stealth + ``` + +- Same, plus suppress inbox polling: + + ```bash + peeroxide chat join general --stealth --no-inbox + ``` + +- Lurk under a burner profile so the activity is not tied to your main identity: + + ```bash + peeroxide chat profiles create burner + peeroxide chat join general --stealth --profile burner + ``` + +## Interactive Usage + +When running in a TTY, `join` and `dm` enter an interactive mode with a status bar and slash commands. See [Interactive TUI](./interactive-tui.md) for details. + +In line mode (or when stdin is redirected), Peeroxide prints messages to stdout and notices to stderr. This is useful for piping chat into other tools. + +### Message Display + +Messages are formatted as: +`[TIMESTAMP] [DISPLAY_NAME]: CONTENT` + +If a message arrives significantly after its timestamp, it is prefixed with `[late]`. + +Display names are resolved with the following precedence: +1. Friend alias (e.g., `(Bob)`). +2. Friend's vendor name + screen name (e.g., `(Vendor) `). +3. Non-friend with a wire `screen_name` on the message (e.g., ``). +4. Non-friend without a wire `screen_name` but present in the shared `known_users` cache (e.g., ``). +5. Vendor fallback (e.g., ``). + +A `!` suffix on a name indicates the user is currently in a 300-second cooldown period after a name change. diff --git a/docs/src/chat/wire-format.md b/docs/src/chat/wire-format.md new file mode 100644 index 0000000..1500537 --- /dev/null +++ b/docs/src/chat/wire-format.md @@ -0,0 +1,125 @@ +# Wire Format + +Peeroxide chat uses a structured wire format for all data exchanged over the DHT. All records are encrypted within a common frame. The underlying DHT operations (`mutable_put` / `mutable_get` / `immutable_put` / `immutable_get` / `announce` / `lookup`) and their per-record size budget are documented in [DHT Primitives](../concepts/dht-primitives.md). 
+ +## Encryption Frame + +Every record (Message, Feed, Summary, Nexus, Invite) is encapsulated in an XSalsa20-Poly1305 encryption frame. + +```text +[nonce: 24 bytes] [tag: 16 bytes] [ciphertext: variable] +``` + +- **Cipher**: XSalsa20-Poly1305. +- **Nonce**: 24-byte random nonce generated per message. +- **Tag**: 16-byte authentication tag. +- **No AAD**: No additional authenticated data is used in the frame. + +## Record Types + +### MessageEnvelope + +The `MessageEnvelope` represents a single chat message. + +| Field | Size | Description | +|---|---|---| +| `id_pubkey` | 32 | Ed25519 public key of the author. | +| `prev_msg_hash` | 32 | Blake2b hash of the previous message in this feed's chain. | +| `timestamp` | 8 | Unix timestamp in seconds (u64 Little Endian). | +| `content_type` | 1 | `0x01` for text. | +| `screen_name_len`| 1 | Length of the screen name string. | +| `screen_name` | var | UTF-8 encoded screen name. | +| `content_len` | 2 | Length of the content (u16 Little Endian). | +| `content` | var | UTF-8 encoded message content. | +| `signature` | 64 | Ed25519 signature over the message body. | + +**Signature Scheme**: +The signature covers the following bytes: +`b"peeroxide-chat:msg:v1:" || prev_msg_hash || timestamp || content_type || screen_name_len || screen_name || content` + +### FeedRecord + +The `FeedRecord` is a mutable record stored at a feed's public key. It acts as an index of recent messages. + +| Field | Size | Description | +|---|---|---| +| `id_pubkey` | 32 | Author's permanent public key. | +| `ownership_proof`| 64 | Proof that `id_pubkey` owns this feed. | +| `next_feed_pubkey`| 32 | Pointer to the next feed after rotation (all zeros if none). | +| `summary_hash` | 32 | Hash of the latest `SummaryBlock` for this feed. | +| `msg_count` | 1 | Number of message hashes in this record (max 26). | +| `msg_hashes` | 32 * N | Array of message hashes, newest-first. 
| + +**Ownership Proof**: +An Ed25519 signature by the `id_pubkey` over: +`b"peeroxide-chat:ownership:v1:" || feed_pubkey || channel_key` + +### SummaryBlock + +The `SummaryBlock` is an immutable record used to store history that has been evicted from the `FeedRecord`. + +| Field | Size | Description | +|---|---|---| +| `id_pubkey` | 32 | Author's public key. | +| `prev_summary_hash`| 32 | Hash of the previous `SummaryBlock` (all zeros if none). | +| `msg_count` | 1 | Number of hashes in this block. | +| `msg_hashes` | 32 * N | Array of message hashes, oldest-first. | +| `signature` | 64 | Ed25519 signature. | + +**Signature Scheme**: +Covers: `b"peeroxide-chat:summary:v1:" || prev_summary_hash || msg_hashes...` + +### NexusRecord + +The `NexusRecord` contains profile information published to the author's personal topic. + +| Field | Size | Description | +|---|---|---| +| `name_len` | 1 | Length of the screen name. | +| `name` | var | UTF-8 encoded screen name. | +| `bio_len` | 2 | Length of the biography (u16 Little Endian). | +| `bio` | var | UTF-8 encoded biography. | + +### InviteRecord + +Used for DMs and private channel invites in the Inbox. + +| Field | Size | Description | +|---|---|---| +| `id_pubkey` | 32 | Author's public key. | +| `ownership_proof`| 64 | Ownership proof (same as FeedRecord). | +| `next_feed_pubkey`| 32 | Next feed pointer. | +| `invite_type` | 1 | `0x01` = DM, `0x02` = Private Channel. | +| `payload_len` | 2 | Length of the payload (u16 Little Endian). | +| `payload` | var | Encrypted payload (see below). | + +**DM Payload**: Opaque lure text. +**Private Invite Payload**: `[name_len: u8][name][salt_len: u16 LE][salt]`. + +## Key Derivation + +All derivation functions use keyed BLAKE2b-256. 
+ +| Key | Derivation Formula | +|---|---| +| `channel_key` (Public) | `hash([b"peeroxide-chat:channel:v1:", len4(name), name])` | +| `channel_key` (Private)| `hash([b"peeroxide-chat:channel:v1:", len4(name), name, b":salt:", len4(salt), salt])` | +| `dm_channel_key` | `hash([b"peeroxide-chat:dm:v1:", min(pk_a, pk_b), max(pk_a, pk_b)])` | +| `msg_key` | `keyed_blake2b(channel_key, b"peeroxide-chat:msgkey:v1")` | +| `dm_msg_key` | `keyed_blake2b(ecdh_secret, b"peeroxide-chat:dm-msgkey:v1:" || channel_key)` | +| `invite_key` | `keyed_blake2b(ecdh_secret, b"peeroxide-chat:invite-key:v1:" || invite_feed_pk)` | +| `announce_topic` | `keyed_blake2b(channel_key, b"peeroxide-chat:announce:v1:" || epoch_le || bucket)` | +| `inbox_topic` | `keyed_blake2b(hash(pk), b"peeroxide-chat:inbox:v1:" || epoch_le || bucket)` | + +### DM ECDH +For direct messages, Ed25519 keys are converted to X25519: +- Public Key: Edwards-to-Montgomery conversion. +- Secret Key: `SHA-512(seed)[0..32]` with standard clamping. +- Shared Secret: standard `x25519` scalar multiplication. + +## Epoch and Bucket Math + +- **Epoch**: `unix_time_secs / 60` (60-second intervals). +- **Buckets**: 4 buckets per epoch (0, 1, 2, 3). +- **Discovery**: A client scans `(current_epoch, previous_epoch) × 4 buckets`, resulting in 8 lookups per cycle. +- **Randomization**: Each session uses a random permutation of the 4 buckets to distribute load. diff --git a/docs/src/concepts/dht-and-routing.md b/docs/src/concepts/dht-and-routing.md index 29376bb..fe02599 100644 --- a/docs/src/concepts/dht-and-routing.md +++ b/docs/src/concepts/dht-and-routing.md @@ -16,9 +16,10 @@ Peeroxide relies on the [`pkarr`](https://docs.rs/pkarr) and [`mainline`](https: A DHT is a decentralized network, but new nodes need an entry point to join. These entry points are called **bootstrap nodes**. -- **Public Network**: By default, `peeroxide` uses a set of stable public bootstrap nodes to connect to the global HyperDHT network. 
-- **Configuration**: You can specify custom bootstrap nodes using the `--bootstrap` flag or the `network.bootstrap` setting in your config file. -- **Isolated Mode**: If no bootstrap nodes are provided and the `--public` flag is not set, the node runs in isolated mode. In this mode, discovery is only possible if peers connect to each other directly by address. +- **Public Network**: By default, `peeroxide` uses a set of stable public bootstrap nodes to connect to the global HyperDHT network. If neither the config file's `network.bootstrap` nor the command-line `--bootstrap` flag supplies any nodes, the runtime auto-fills the public bootstrap set so a fresh install still connects. +- **Configuration**: You can supply custom bootstrap nodes via the `--bootstrap` flag or the `network.bootstrap` setting in your config file. **Note:** CLI `--bootstrap` overrides the config file's `network.bootstrap` rather than combining with it. +- **Public Default Adjustments**: `--public` explicitly adds the default public bootstrap nodes (useful when you have custom bootstraps but also want public connectivity). `--no-public` explicitly removes them from the resolved list. +- **Isolated Mode**: Combining `--no-public` with no custom bootstraps (and no `network.bootstrap` in the config) yields an empty bootstrap list. In that state, the node has no entry point and can only be reached by peers who already know its address. ## Connectivity diff --git a/docs/src/concepts/dht-primitives.md b/docs/src/concepts/dht-primitives.md index c7f3d94..5036151 100644 --- a/docs/src/concepts/dht-primitives.md +++ b/docs/src/concepts/dht-primitives.md @@ -1,3 +1,121 @@ # DHT Primitives -*Content coming in Phase 3a.* +This page is a reference for the four core operations that `peeroxide-dht` exposes and that every higher-level subsystem (`announce`, `lookup`, `cp`, `dd`, `chat`) is built on top of. 
Once you understand [DHT and Routing](./dht-and-routing.md) at the conceptual level, this is the next layer down: the actual operations you can perform against the network.
+
+## `immutable_put` / `immutable_get` — Content-Addressed Storage
+
+Stores arbitrary bytes on DHT nodes, addressed by the BLAKE2b-256 hash of the value itself. Content-addressed: you can only retrieve a value if you already know its hash.
+
+- **`immutable_put(value: &[u8])`** — computes `target = hash(value)`, queries the K closest nodes to that target, commits the raw bytes. Returns the 32-byte hash.
+- **`immutable_get(target: [u8; 32])`** — queries nodes closest to `target`; any node that has the value returns it. The client verifies `hash(returned_value) == target`.
+
+| Property | Detail |
+|----------|--------|
+| Data stored | Raw `Vec<u8>` — arbitrary bytes, no signing, no keys, no seq |
+| Addressing | `hash(value)` — immutable; changing the value yields a different address |
+| Max payload | ~900–1000 bytes (UDP framing; no explicit code constant) |
+| Wire commands | `IMMUTABLE_PUT = 8`, `IMMUTABLE_GET = 9` |
+| Discoverability | The reader must already know the hash (given out-of-band or via a mutable pointer) |
+
+## `mutable_put` / `mutable_get` — Signed, Updateable Storage
+
+Stores arbitrary bytes signed by an Ed25519 keypair, addressed by `hash(public_key)`. The owner can update the value by incrementing a sequence number.
+
+- **`mutable_put(key_pair, value: &[u8], seq: u64)`** — computes `target = hash(public_key)`, signs `(seq, value)` with the secret key, and sends `MutablePutRequest { public_key, seq, value, signature }` to the closest nodes.
+- **`mutable_get(public_key: &[u8; 32], seq: u64)`** — queries with `target = hash(public_key)` and a requested minimum `seq`. Nodes return the stored value only if `stored.seq >= requested_seq`. The client verifies the signature.
+
+| Property | Detail |
+|----------|--------|
+| Data stored | `{ public_key: [u8;32], seq: u64, value: Vec<u8>, signature: [u8;64] }` |
+| Addressing | `hash(public_key)` — one mutable slot per keypair |
+| Max payload (value) | **~1002 bytes** (token present, `seq ≤ 252`; derived in [Size Budget for `mutable_put`](#size-budget-for-mutable_put) below) |
+| Seq semantics | Strictly monotonic. `SEQ_REUSED (16)` error if equal; `SEQ_TOO_LOW (17)` if lower |
+| Salt support | Not implemented — there is no salt field; one record per keypair |
+| Wire commands | `MUTABLE_PUT = 6`, `MUTABLE_GET = 7` |
+
+## `announce` / `lookup` — Peer Discovery and Rendezvous
+
+Originally designed as peer-discovery primitives — store structured peer records (public key + relay addresses) under a topic hash. **In this workspace they are also used as a general-purpose rendezvous mechanism**: the announcer's public key acts as a pointer to a further `mutable_put` slot containing the actual record. See [The Rendezvous Pattern](#the-rendezvous-pattern) below.
+
+- **`announce(target: [u8;32], key_pair, relay_addresses)`** — queries the closest nodes for the topic and sends a signed `AnnounceMessage` containing `HyperPeer { public_key, relay_addresses }`. Multiple peers can announce under the same topic simultaneously.
+- **`lookup(target: [u8;32])`** — queries the closest nodes; they return `LookupRawReply { peers: Vec<HyperPeer>, bump }` — all peers that have announced on that topic (up to 20 per node).
+ +| Property | Detail | +|----------|--------| +| Data stored | `HyperPeer { public_key: [u8;32], relay_addresses: Vec }` | +| Multi-writer | Yes — up to 20 announcers per topic per node | +| IP in stored record | No — the source IP is not stored in `HyperPeer`; only the pubkey + relay addresses | +| Announce with no addresses | Allowed — `relay_addresses = []` is valid | +| `MAX_RECORDS_PER_LOOKUP` | 20 per node (per-node cap; the total across all queried nodes can exceed 20) | +| `MAX_RELAY_ADDRESSES` | 3 (truncated on store) | +| Wire commands | `LOOKUP = 3`, `ANNOUNCE = 4`, `FIND_PEER = 2` | + +**Key differences from put/get:** + +- `lookup` / `announce` is multi-writer — many peers announce under one topic. +- `put` / `get` is single-writer — one value per address. +- `announce` stores a small structured record; `put` stores opaque bytes (up to ~1000 B per record). + +### The Rendezvous Pattern + +An announce "topic" is just a 32-byte address. There's no constraint that it correspond to a real peer-discoverable resource — it can be: + +- a hash of an application-meaningful key, +- a deterministic derivation from any shared input (a string, a public key, a secret, a counter, …) via BLAKE2b-256 or any other 32-byte hash, +- or even a random value, if both writer and reader can agree on it out-of-band. + +This makes `announce` / `lookup` a generic rendezvous primitive: anyone who can independently arrive at the same 32-byte topic can find every other announcer at it. And because the only structured field the protocol actually requires the announcer to publish is a 32-byte `public_key`, that pubkey can be treated as a **pointer** — typically to a `mutable_put` slot owned by that same ephemeral keypair — rather than as a literal peer identity. + +The generic three-step pattern is: + +1. **Derive a topic** — any agreed-upon function `f(...) -> [u8;32]`. Writer and reader must arrive at the same value. +2. 
**Writer** — generate an ephemeral keypair `k`, publish the actual record at `mutable_put(k, value, seq)`, and `announce` `k.public_key` on the topic. The `relay_addresses` field carries no meaning and is typically left empty. +3. **Reader** — `lookup` the topic, then `mutable_get` each returned pubkey to retrieve the actual records in parallel. + +This sidesteps `mutable_put`'s one-record-per-pubkey limitation: a single topic can host many simultaneous writers, each with its own independent `mutable_put` slot. The TTLs on the rendezvous record and on the payload record are also independent, so a writer can refresh them on different cadences. + +### Increasing Footprint with Epochs and Buckets + +The per-node cap is **20 announcers per topic per node**. Two complementary techniques extend that footprint by embedding deterministic salt into the topic derivation: + +- **Epochs** — incorporate a quantized timestamp (for example, `floor(unix_secs / 60)`). The topic rotates over time automatically; writer and reader both use the current epoch when deriving the topic, and readers typically scan a small backward window so announcers near a boundary aren't missed. Epoch rotation also bounds how long an observer can correlate a single topic. +- **Buckets** — incorporate a small integer `0..N`. Writers hash to one of N possible topics (deterministically or at random); readers scan all N in parallel to find every announcer. + +Combining the two yields `epoch_window × bucket_count` distinct topics for the same logical rendezvous — e.g. a 2-epoch window with 4 buckets gives an effective capacity of `8 × 20 = 160` announcers per node, all discoverable in 8 parallel lookups. + +### Concrete uses in this workspace + +The chat and dd v2 subsystems each lean on this pattern. 
Their exact topic-derivation rules, epoch/bucket counts, and write/read flows are documented alongside the rest of their wire formats and protocols: + +- chat — see [Wire Format](../chat/wire-format.md) and [Protocol](../chat/protocol.md). +- dead drop v2 — see [Architecture](../dd/architecture.md) and [Wire Format](../dd/format.md). + +## TTL (Time-To-Live) + +All stored values are ephemeral — they expire from node storage. + +| Storage type | TTL (default) | +|---|---| +| Announcement records (`RecordCache`) | 20 minutes (`max_record_age`) | +| Mutable / immutable LRU cache | 20 minutes (`max_lru_age`) | +| Router forward entries | 20 minutes (`DEFAULT_FORWARD_TTL`) | + +Clients must periodically re-announce / re-put to keep data alive. The 20-minute default matches the Node.js reference implementation. Both `cp` and `dd` issue periodic refreshes during long-running operations for exactly this reason. + +## Size Budget for `mutable_put` + +The most common protocol-design question is "how many bytes can I put inside one `mutable_put` value?" Starting from `libudx`'s `MAX_PAYLOAD = 1180` and subtracting the wire overhead for a `mutable_put` request with the routing token present and `seq ≤ 252`: + +```text +1180 libudx MAX_PAYLOAD + - 75 outer RPC Request fixed fields (type, flags, tid, to, token, command, target) + - 3 outer compact-encoding length prefix for put_bytes + - 32 public_key field + - 1 seq compact-encoding (1 byte for seq ≤ 252) + - 3 inner compact-encoding length prefix for value + - 64 signature +───── +1002 bytes available for the message value payload +``` + +In practice the higher-level subsystems reserve a small margin and call this `MAX_RECORD_SIZE = 1000` (see `chat::wire` and `deaddrop::v2::wire`). Subtract per-record framing — author pubkey, timestamp, content type, signature, length-prefix bytes — to derive the payload budget for your own protocol. 
The chat subsystem's [Reference](../chat/reference.md) and dead drop's [Wire Format](../dd/format.md) chapters carry the exact per-record overhead and the resulting content budgets. diff --git a/docs/src/cp/architecture.md b/docs/src/cp/architecture.md index 47c08a8..5475c76 100644 --- a/docs/src/cp/architecture.md +++ b/docs/src/cp/architecture.md @@ -55,6 +55,6 @@ While the underlying UDX protocol handles packetization, `cp` reads and writes d - **Sanitization**: Filenames provided by the sender are sanitized to prevent path traversal attacks (e.g., removing `..` or leading slashes). ### Network Configuration -The `cp` command respects global peeroxide configuration for bootstrap nodes and firewall settings. -- **Public Mode**: If `--public` is set, the swarm attempts to use public bootstrap nodes and sets the firewall to open. -- **Firewalled Mode**: If the node is detected as being behind a NAT, it will attempt hole-punching to establish the connection. +The `cp` command uses the same runtime bootstrap-resolution as every other DHT-using subcommand (via `build_dht_config(cfg)` in `peeroxide-cli/src/cmd/mod.rs`). +- **Bootstrap node selection**: CLI `--bootstrap` overrides the config file's `network.bootstrap`. `--public` adds default public bootstrap nodes; an empty list auto-fills with the defaults; `--no-public` removes them. See [init/overview.md → Global CLI Flags](../init/overview.md#global-cli-flags) for the full algorithm. +- **NAT traversal**: `cp` does not flip the node into "open firewall" mode; if the node is behind a NAT it attempts hole-punching to establish the direct connection. 
diff --git a/docs/src/cp/protocol.md b/docs/src/cp/protocol.md index 3812ca4..e92833a 100644 --- a/docs/src/cp/protocol.md +++ b/docs/src/cp/protocol.md @@ -58,4 +58,4 @@ While the underlying UDX protocol handles packetization, `cp` reads and writes d ### Network Configuration -The `cp` command respects global peeroxide configuration for bootstrap nodes and firewall settings. If `--public` is set, the swarm uses public bootstrap nodes with an open firewall. Otherwise, hole-punching is attempted for NAT traversal. +The `cp` command uses the same runtime bootstrap-resolution as every other DHT-using subcommand (`build_dht_config(cfg)` in `peeroxide-cli/src/cmd/mod.rs`). Bootstrap node selection is therefore driven by the shared rules documented in [init/overview.md → Global CLI Flags](../init/overview.md#global-cli-flags): CLI `--bootstrap` overrides the config file's `network.bootstrap`; `--public` adds default public bootstrap nodes; an empty list auto-fills with the defaults; `--no-public` removes them. The `--public` flag does **not** change the node's firewall state; NAT traversal for `cp` always relies on hole-punching via the DHT. diff --git a/docs/src/dd/architecture.md b/docs/src/dd/architecture.md new file mode 100644 index 0000000..855ca0f --- /dev/null +++ b/docs/src/dd/architecture.md @@ -0,0 +1,93 @@ +# Dead Drop Architecture + +The `dd` command implements two distinct protocol architectures for storing and retrieving data on the DHT. Both protocols are built on the DHT primitives documented in [DHT Primitives](../concepts/dht-primitives.md) (`mutable_put` / `mutable_get` / `immutable_put` / `immutable_get` / `announce`). + +## Protocol V1: Linear Chain + +The V1 protocol is a simple linked list of mutable DHT records. Each record contains a portion of the file and the public key of the next chunk in the chain. 
+ +### V1 Flow + +```mermaid +sequenceDiagram + participant S as Sender + participant DHT as DHT Nodes + participant R as Receiver + + Note over S: Chunking + Key Derivation + S->>DHT: mutable_put(root_pk) + S->>DHT: mutable_put(chunk_1_pk) + S->>DHT: ... + S->>DHT: mutable_put(chunk_N_pk) + + Note over R: Get root_pk + R->>DHT: mutable_get(root_pk) + DHT-->>R: root record + loop Sequential Fetch + R->>DHT: mutable_get(next_pk) + DHT-->>R: chunk record + end +``` + +V1 features sequential fetching with exponential retry logic (1s to 30s) per chunk, bounded by the global timeout. + +## Protocol V2: Merkle Tree + +V2 uses a hierarchical tree structure to enable massive file support and parallel fetching. + +### V2 Flow + +```mermaid +sequenceDiagram + participant S as Sender + participant DHT as DHT Nodes + participant R as Receiver + + Note over S: Canonical Tree Build + S->>DHT: immutable_put(data_chunks) + S->>DHT: mutable_put(index_chunks) + S->>DHT: mutable_put(root_pk) + + Note over R: BFS Parallel Fetch + R->>DHT: mutable_get(root_pk) + DHT-->>R: root (metadata + top slots) + + rect rgb(240, 240, 240) + Note over R: Parallel BFS Loop + R->>DHT: mutable_get(index_pk) + R->>DHT: immutable_get(data_hash) + end + + Note over R: Need-list Cycle + R->>DHT: announce(need_topic) + R->>DHT: mutable_put(need_topic, ranges) + DHT-->>S: watch(need_topic) + S->>R: Republish missing chunks +``` + +### AIMD Congestion Control + +V2 employs an Additive Increase / Multiplicative Decrease (AIMD) controller to manage concurrency: +- **EWMA-based:** Smoothes sample noise with an alpha of 0.1. +- **Decision interval:** 20 samples. +- **Fast-trip:** Shrinks immediately if 10 degraded samples occur within a window. +- **Shrink:** 0.75x current (minimum 1). +- **Grow:** +2 permits. + +### Robustness Mechanisms + +- **Stall Watchdog:** Checks every 5s. If no put resolves for 30s, it forces AIMD to a recovery floor. 
+- **Sliding-window Timeout:** `get` operations abort only if no chunk decodes for `--timeout` seconds.
+- **Graceful Shutdown:** First Ctrl-C triggers a sticky cancel signal that enqueues cleanups (like empty need-list sentinels). A second Ctrl-C force-exits.
+- **Need-list Lifecycle:** Receivers publish the encoded missing-range need-list via `mutable_put` every 20s and announce keepalive on the need topic every 60s. Senders poll the need topic every 5s and prioritize enqueuing the full path (index + data) for any newly-listed chunks.
+
+## DHT Wire Monitoring
+
+The `dd` command monitors raw network overhead by reading atomic counters from the underlying DHT handle.
+
+| Method | Return |
+|--------|--------|
+| `wire_stats()` | `(u64, u64)` (sent, received) |
+| `wire_counters()` | `WireCounters` (shared atomic handles) |
+
+These counters allow the progress UI to calculate "wire amplification" — the ratio of total bytes sent/received versus actual payload bytes delivered.
diff --git a/docs/src/dd/format.md b/docs/src/dd/format.md
new file mode 100644
index 0000000..36e9557
--- /dev/null
+++ b/docs/src/dd/format.md
@@ -0,0 +1,105 @@
+# Dead Drop Wire Format
+
+The `dd` command supports two versioned wire formats for DHT records. All multi-byte integers are encoded in **little-endian** (LE) byte order. The underlying DHT operations (`mutable_put` / `mutable_get` / `immutable_put` / `immutable_get`) are documented in [DHT Primitives](../concepts/dht-primitives.md).
+
+## Version 1 Wire Format
+
+V1 records are limited to 1000 bytes total and form a linear linked list of mutable records.
+ +### V1 Constants + +- `MAX_CHUNKS`: 65,535 +- `MAX_PAYLOAD`: 1,000 (total record limit) +- `ROOT_HEADER_SIZE`: 39 +- `NON_ROOT_HEADER_SIZE`: 33 +- `ROOT_PAYLOAD_MAX`: 961 +- `NON_ROOT_PAYLOAD_MAX`: 967 +- `VERSION`: `0x01` + +### V1 Layouts + +**Root Chunk (V1)** + +```text +[ver: 1][total_chunks: 2 LE][crc32c: 4 LE][next_pk: 32][payload: up to 961] +``` + +**Non-root Chunk (V1)** + +```text +[ver: 1][next_pk: 32][payload: up to 967] +``` + +## Version 2 Wire Format + +V2 records use a tree structure. Data chunks are stored in immutable records, while index and root chunks are stored in mutable records. + +### V2 Constants + +- `VERSION`: `0x02` +- `MAX_CHUNK_SIZE`: 1,000 +- `DATA_HEADER_SIZE`: 2 +- `DATA_PAYLOAD_MAX`: 998 +- `NON_ROOT_INDEX_HEADER_SIZE`: 1 +- `NON_ROOT_INDEX_SLOT_CAP`: 31 +- `ROOT_INDEX_HEADER_SIZE`: 13 +- `ROOT_INDEX_SLOT_CAP`: 30 +- `NEED_LIST_HEADER_SIZE`: 3 +- `NEED_ENTRY_SIZE`: 8 +- `NEED_LIST_ENTRY_CAP`: 124 + +### V2 Tree Structure + +The tree is constructed bottom-up. Leaf layers pack 31 data hashes per index chunk. Higher layers pack 31 index pubkeys per chunk. The root holds the top-layer keys directly. + +| Depth | Max Data Chunks | Capacity (approx) | +|-------|-----------------|-------------------| +| 0 | 30 | 29 KB | +| 1 | 930 | 928 KB | +| 2 | 28,830 | 28 MB | +| 3 | 893,730 | 891 MB | +| 4 | 27,705,630 | 27 GB | + +**Note:** The implementation enforces a `SOFT_DEPTH_CAP` of 4. + +### V2 Layouts + +**Data Chunk (V2)** + +Stored via `immutable_put`. The salt is reserved for randomization but currently fixed at `0x00`. + +```text +[0x02][salt: 0x00][payload: up to 998] +``` + +**Non-root Index Chunk (V2)** + +Stored via `mutable_put`. Contains 32-byte slots (either data hashes or child index pubkeys). + +```text +[0x02][slots: 31 x 32] +``` + +**Root Index Chunk (V2)** + +The entry point. Contains file metadata and top-level slots. 
+ +```text +[0x02][file_size: 8 LE][crc32c: 4 LE][slots: 30 x 32] +``` + +**Need-list Record (V2)** + +Published by the receiver on the need topic to request missing data. + +```text +[0x02][count: 2 LE][entries: count x {start: 4 LE, end: 4 LE}] +``` + +Each entry is a half-open range `[start, end)` of data-chunk indices in the canonical DFS file order (chunk 0 is the first chunk of the file, chunk 1 is the next, etc.). The sender consults the need-list and republishes every data chunk in any listed range, plus the full index-tree path required to make those data chunks reachable. + +When the receiver has no missing chunks, it publishes a "receiver done" sentinel: a raw empty byte string at the need topic. The decoder treats a zero-byte value as the sentinel (it is not the same as the encoded need-list with `count = 0`). + +### Salt Situation + +While the V2 format reserves a byte for a per-deaddrop salt to randomize data chunk addresses, the current implementation enforces `salt(...) -> 0x00`. All V2 data chunk headers are currently prefixed with `[0x02][0x00]`. diff --git a/docs/src/dd/future-direction.md b/docs/src/dd/future-direction.md new file mode 100644 index 0000000..0e99104 --- /dev/null +++ b/docs/src/dd/future-direction.md @@ -0,0 +1,3 @@ +# Future Direction + +Both `dd` v1 and v2 protocols are shipped and fully supported. There is no speculative `dd` roadmap documented at this time. diff --git a/docs/src/dd/operations.md b/docs/src/dd/operations.md new file mode 100644 index 0000000..3db48e3 --- /dev/null +++ b/docs/src/dd/operations.md @@ -0,0 +1,86 @@ +# Dead Drop Operations + +The `dd` command supports both human-readable terminal output and machine-readable JSON output for integration with other tools. 
+ +## Command Line Flags + +In addition to the dd-specific flags shown below, both `dd put` and `dd get` accept the inherited top-level global flags: `--config `, `--no-default-config`, `--public`, `--no-public`, `--bootstrap ` (repeatable), and `-v` / `--verbose`. These control config file loading, DHT bootstrap node selection, and tracing verbosity; see [init/overview.md → Global CLI Flags](../init/overview.md#global-cli-flags) for the bootstrap-resolution algorithm. + +### `dd put` Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `` | required | Input file path. Use `-` for stdin. | +| `--max-speed ` | none | Limit transfer speed. Parses `k`/`m` suffixes (base-10, case-insensitive). | +| `--refresh-interval ` | `600` | Seconds between refresh cycles (must be > 0). | +| `--ttl ` | none | Stop refreshing after N seconds (must be > 0). | +| `--max-pickups ` | none | Exit after N unique pickup acks (must be > 0). | +| `--passphrase ` | none | Deterministic root seed from `discovery_key(passphrase)`. | +| `--interactive-passphrase` | none | TTY prompt for passphrase with hidden input. | +| `--no-progress` | `false` | Suppress progress UI. | +| `--json` | `false` | Emit JSON-Lines progress on stdout. | +| `--v1` | `false` | Force legacy v1 protocol. | + +### `dd get` Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `` | required* | 64-character hex pickup key or passphrase text. | +| `--passphrase ` | none | Derive pickup key from passphrase. | +| `--interactive-passphrase` | none | TTY prompt for passphrase with hidden input. | +| `--no-progress` | `false` | Suppress progress UI. | +| `--output ` | `stdout` | Write payload to file instead of stdout. | +| `--json` | `false` | Emit JSON-Lines progress. **Requires** `--output`. | +| `--timeout ` | `1200` | Sliding no-progress timeout in seconds (must be > 0). | +| `--no-ack` | `false` | Suppress pickup acknowledgement announce. 
| + +*\*Key is required unless a passphrase flag is provided.* + +## Key Derivation and Passphrases + +- **Passphrase Derivation:** When a passphrase is used, the root seed is derived via `discovery_key(passphrase)`. +- **Interactive Fallback:** The `--interactive-passphrase` flag attempts to open `/dev/tty` for hidden input, falling back to stdin if unavailable. +- **Key vs Passphrase:** If a positional argument is exactly 64 characters of valid hex, it is treated as a raw 32-byte pickup key. Otherwise, it is treated as passphrase text and hashed via `discovery_key`. + +## Progress UX + +The mode is selected automatically: +1. `--json` -> JSON Lines on stdout. +2. `--no-progress` -> Progress disabled. +3. stderr is TTY -> Interactive bars. +4. else -> Periodic log line (every 2s). + +### Bar Layouts + +- **V1 Put:** `↑ filename D(bytes/total) [bar] pct rate ETA` +- **V2 Put:** `↑ filename I[idx/total] D(bytes/total) [bar] pct rate ETA` +- **V2 Get (4-bar multi):** + - **index:** `I[idx/total] rate` + - **data:** `D(bytes/total) [bar] pct rate ETA` + - **wire:** `W ↑ rate ↓ rate (x amplification)` + - **overall:** `filename bytes/total pct` + +*Wire amplification (`wire_total / bytes_done`) is omitted until the first payload byte is received.* + +## Machine-Readable Output (`--json`) + +The `--json` flag enables a stream of JSON Lines on **stdout**. Events use `type` as a discriminator and RFC3339 timestamps. + +### Event Schema + +| Type | Description | +|------|-------------| +| `start` | Operation initiated. Includes `version`, `filename`, `bytes_total`, `indexes_total`, `data_total`. | +| `progress` | Periodic update. Includes `bytes_done`, `rate_bytes_per_sec`, `eta_seconds`, `elapsed_seconds`. | +| `result` | Objective achieved. `put` returns `pickup_key` and `chunks`. `get` returns `crc` and `output`. | +| `ack` | Sender-only. Emitted when a recipient acknowledges receipt. Includes `peer` and `pickup_number`. | +| `done` | Operation completed. 
Includes final counters and `elapsed_seconds`. | + +**V1 Convention:** `indexes_total` and `indexes_done` are always `0` in V1 events. + +## Acknowledgement (Ack) Mechanism + +When a `get` operation completes (unless `--no-ack` is set), the receiver announces on the ack topic: +`ack_topic = discovery_key(root_pk || b"ack")` + +The sender polls this topic every 30s and counts unique announcer public keys. diff --git a/docs/src/dd/overview.md b/docs/src/dd/overview.md new file mode 100644 index 0000000..b5de40b --- /dev/null +++ b/docs/src/dd/overview.md @@ -0,0 +1,80 @@ +# Dead Drop Overview + +The `dd` command provides an anonymous, asynchronous store-and-forward mechanism using the DHT. It allows a sender to "put" data on the network that a receiver can later "get" using a unique key, without requiring both parties to be online at the same time. + +Unlike the `cp` command, which establishes a direct peer-to-peer connection, `dd` uses DHT records to store data. This makes it ideal for scenarios where the sender and receiver have intermittent connectivity or want to avoid direct IP discovery. + +## Key Features + +- **Asynchronous Delivery:** Data is stored on DHT nodes. The receiver picks it up whenever they're ready. +- **Protocol Versions:** Supports both the original v1 linked-list protocol and the high-performance v2 tree-indexed protocol. +- **Passphrase Support:** Pickup keys can be derived from human-readable passphrases. +- **Anonymity:** No direct connection is established between the sender and receiver. +- **Acknowledgements:** Optional pickup notifications (acks) let the sender know when data was retrieved. +- **Progress Control:** Use `--no-progress` for silent operation or `--json` for machine-readable event streams. + +## Protocol Selection + +The `dd` command supports two protocol versions: + +| Version | Characteristics | Selection | +|---------|-----------------|-----------| +| **V1** | Simple linked-list of mutable records. Limited to 64MB. 
Sequential fetches. | Explicit via `--v1` on `put`. Auto-detected on `get`. | +| **V2** | Merkle-tree indexed. Massive capacity. Parallel fetching with need-lists and AIMD congestion control. | Default on `put`. Auto-detected on `get`. | + +### Dispatch Rules + +- **Putting:** `dd put` defaults to v2. Use the `--v1` flag to force the legacy protocol. +- **Getting:** `dd get` automatically dispatches based on the first byte of the fetched root record (`0x01` for v1, `0x02` for v2). + +## Quick Start + +### Putting Data + +Put a message using a passphrase (v2 by default): + +```bash +echo "Hello from the void" | peeroxide dd put - --passphrase "my secret drop" +``` + +Put a file using a raw key (generated randomly): + +```bash +peeroxide dd put my-file.dat +``` + +Force v1 for compatibility with older clients: + +```bash +peeroxide dd put my-file.dat --v1 +``` + +### Getting Data + +Retrieve data using a passphrase: + +```bash +peeroxide dd get --passphrase "my secret drop" +``` + +Retrieve data using a 64-character hex pickup key: + +```bash +peeroxide dd get 7215c9...82a3 +``` + +Write to a file while suppressing progress bars: + +```bash +peeroxide dd get 7215c9...82a3 --output saved-file.dat --no-progress +``` + +## How it Differs from `cp` + +| Feature | `cp` | `dd` | +|---------|------|------| +| **Connection** | Direct P2P (UDX) | Mediated via DHT storage | +| **Online Requirement** | Both must be online | Asynchronous | +| **Discovery** | Topic-based | Key-based (Public Key) | +| **Speed** | High (Direct) | Moderate (DHT round-trips) | +| **Metadata** | Filename, size | Sequential or Tree chunks | diff --git a/docs/src/deaddrop/architecture.md b/docs/src/deaddrop/architecture.md deleted file mode 100644 index f3b1b4a..0000000 --- a/docs/src/deaddrop/architecture.md +++ /dev/null @@ -1,56 +0,0 @@ -# Deaddrop Architecture - -The deaddrop protocol enables store-and-forward data delivery using the HyperDHT's mutable storage capabilities. 
It builds a linked chain of signed chunks, where each chunk is stored on the DHT at a location derived from a deterministic key derivation scheme. - -## Data Flow - -The following diagram illustrates the interaction between the Sender, the DHT network, and the Receiver. - -```mermaid -sequenceDiagram - participant S as Sender - participant DHT as DHT Network - participant R as Receiver - - Note over S: 1. Split data into chunks - Note over S: 2. Derive keypairs for each chunk - S->>DHT: 3. mutable_put(chunk_0..N) - Note over S: 4. Print pickup key (root PK) - - Note over R: 5. Obtain pickup key - R->>DHT: 6. mutable_get(root_PK) - DHT-->>R: 7. Returns root chunk (metadata + next_PK) - loop For each chunk - R->>DHT: 8. mutable_get(next_PK) - DHT-->>R: 9. Returns next chunk - end - Note over R: 10. Reassemble & Verify CRC - R->>DHT: 11. announce(ack_topic) - DHT-->>S: 12. lookup(ack_topic) detected -``` - -## Key Components - -### Mutable DHT Storage -Unlike immutable storage (used in `cp`), deaddrop uses `mutable_put` and `mutable_get`. This allows the sender to refresh records to extend their lifespan on the DHT (which typically expires after 20 minutes). Records are signed by the sender, ensuring that DHT nodes or malicious actors cannot modify the data without invalidating the signature. - -### Chunking and Chaining -Data is split into chunks to fit within the DHT's payload limits (max 1000 bytes per chunk). -- **Root Chunk:** Contains the total chunk count, a CRC-32C checksum of the full payload, and the public key of the next chunk. -- **Continuation Chunks:** Contain the payload and the public key of the next chunk in the sequence. -- **Termination:** The final chunk in the chain has its `next_pk` field set to 32 zero bytes. - -### Key Derivation -All keypairs for the chunks are derived deterministically from a single `root_seed`. -- `root_seed`: 32 bytes (randomly generated or derived from a passphrase). -- `root_kp`: `KeyPair::from_seed(root_seed)`. 
-- `chunk_kp[i]`: Derived from `blake2b(root_seed || i_as_u16_le)`. - -The **pickup key** is the public key of the root chunk. Since the receiver only has the public key, they can read the data but cannot derive the private keys required to modify or forge chunks. - -### Acknowledgement (Ack) Mechanism -When a receiver successfully picks up a deaddrop, they "announce" their presence on a specific `ack_topic`. -- `ack_topic = discovery_key(root_public_key || b"ack")` -- The sender polls this topic using `lookup`. -- To maintain anonymity, the receiver uses an ephemeral keypair for the announcement. - diff --git a/docs/src/deaddrop/format.md b/docs/src/deaddrop/format.md deleted file mode 100644 index afb9a40..0000000 --- a/docs/src/deaddrop/format.md +++ /dev/null @@ -1,44 +0,0 @@ -# Deaddrop Wire Format - -Deaddrop uses a versioned binary format for its DHT records. Each record consists of a header followed by the payload. - -## Constants - -- `MAX_PAYLOAD`: 1000 bytes (total record size) -- `VERSION`: `0x01` -- `ROOT_HEADER_SIZE`: 39 bytes -- `NON_ROOT_HEADER_SIZE`: 33 bytes - -## Root Chunk (v1) - -The root chunk is the entry point of the deaddrop. Its public key is the "pickup key". - -| Offset | Size | Field | Description | -|--------|------|-------|-------------| -| 0 | 1 | Version | Set to `0x01` | -| 1 | 2 | Total Chunks | Number of chunks in the chain (u16 LE) | -| 3 | 4 | CRC-32C | Checksum of the full reassembled payload | -| 7 | 32 | Next PK | Public key of the next chunk (32 zeros if single chunk) | -| 39 | ... | Payload | Data bytes (up to 961 bytes) | - -## Continuation Chunk (v1) - -All subsequent chunks use a smaller header. - -| Offset | Size | Field | Description | -|--------|------|-------|-------------| -| 0 | 1 | Version | Set to `0x01` | -| 1 | 32 | Next PK | Public key of the next chunk (32 zeros if last chunk) | -| 33 | ... 
| Payload | Data bytes (up to 967 bytes) | - -## Implementation Details - -### Byte Order -All multi-byte integers (Total Chunks, CRC-32C) are encoded in **little-endian** byte order. - -### Integrity Verification -The CRC-32C checksum uses the Castagnoli polynomial. It is computed over the *entire* reassembled payload, not per-chunk. Receivers must fetch all chunks and reassemble them before verifying the checksum. - -### Chain Termination -The chain is considered terminated when a chunk (root or continuation) contains a `Next PK` field consisting of 32 null bytes (`0x00`). - diff --git a/docs/src/deaddrop/future-direction.md b/docs/src/deaddrop/future-direction.md deleted file mode 100644 index 06e1ccf..0000000 --- a/docs/src/deaddrop/future-direction.md +++ /dev/null @@ -1,49 +0,0 @@ -# Future Direction (Not Yet Implemented) - -**Note: The following features and protocol changes describe Deaddrop v2 and are not yet implemented.** - -The current Deaddrop v1 protocol uses a single linked-list of chunks. While functional, this requires sequential fetching where the receiver must download each chunk to discover the address of the next. For large files, this leads to high latency due to sequential round-trips. - -## Deaddrop v2: Two-Chain Storage Protocol - -Deaddrop v2 introduces a "two-chain" architecture to enable parallel data fetching while preserving anonymity and read-only pickup semantics. - -### Index Chain vs. Data Chain - -Instead of a single list, the protocol separates metadata and pointers from the actual data: - -- **Index Chain:** A small linked-list of records containing public keys (pointers) to data chunks. -- **Data Chain:** Independently addressable data chunks stored at random DHT coordinates. - -```text -Index chain (sequential fetch, small): - [root idx] → [idx 1] → [idx 2] → ... → [idx K] - │ │ │ - ▼ ▼ ▼ -Data chain (parallel fetch, bulk): - [d0..d29] [d30..d59] [d60..d89] ... 
-``` - -### Benefits of v2 - -| Property | v1 (Sequential) | v2 (Parallel) | -|----------|-----------------|---------------| -| **Fetch Pattern** | Entirely sequential | Index sequential + Data parallel | -| **Overhead** | ~3.4-3.9% | ~0.1% | -| **Max File Size** | ~60 MB | ~1.9 GB | -| **1MB Fetch Time** | ~1000 round-trips (15-50 min) | ~34 index + ~1000 parallel (~1 min) | - -### Key Derivation in v2 - -Keypairs are derived deterministically from the `root_seed` using domain separation: -- `index_keypair[i] = blake2b(root_seed || "idx" || i)` -- `data_keypair[i] = blake2b(root_seed || "dat" || i)` - -This ensures the sender can refresh the entire structure from a single seed while preventing address correlation between the index and data chains for third parties. - -### Frame Formats (v2) - -- **Data Chunk (0x02):** 1-byte version tag + up to 999 bytes of raw payload. -- **Root Index Chunk (0x02):** Metadata (size, CRC), `next` index pointer, and up to 29 data chunk pointers. -- **Non-Root Index Chunk (0x02):** `next` index pointer and up to 30 data chunk pointers. - diff --git a/docs/src/deaddrop/operations.md b/docs/src/deaddrop/operations.md deleted file mode 100644 index 1750ca2..0000000 --- a/docs/src/deaddrop/operations.md +++ /dev/null @@ -1,81 +0,0 @@ -# Deaddrop Output Formats - -The `deaddrop` command supports both human-readable terminal output and machine-readable JSON output for integration with other tools. - -## Human-Readable Output (Default) - -By default, `deaddrop` prints status messages to `stderr` and the resulting data (for `pickup`) or key (for `leave`) to `stdout`. - -### `leave` status output -```text -DEADDROP LEAVE 5 chunks (4500 bytes) - published chunk 1/5 - published chunk 2/5 - ... - published to DHT (best-effort) - pickup key printed to stdout - refreshing every 600s, monitoring for acks... -``` - -### `pickup` status output -```text -DEADDROP PICKUP @a1b2c3d4... - fetching chunk 1/5... - fetching chunk 2/5... - ... 
- reassembled 4500 bytes - ack sent (ephemeral identity) - done -``` - -## Machine-Readable Output (`--json`) - -Using the `--json` flag changes the output to a single-line JSON object per event or result. - -### `leave` result -When data is successfully published, the pickup key is returned: - -```json -{ - "type": "result", - "pickup_key": "a1b2c3d4...", - "chunks": 5, - "bytes": 4500 -} -``` - -### `pickup` result -When data is successfully retrieved: - -```json -{ - "type": "result", - "bytes": 4500, - "crc": "f3b2a100", - "output": "stdout" -} -``` - -### Progress Events -Intermediate progress can also be tracked via JSON: - -```json -{ - "type": "progress", - "chunk": 3, - "total": 5, - "action": "fetch" -} -``` - -### Acknowledgement Events -When the sender detects a pickup via an ack: - -```json -{ - "type": "ack", - "pickup_number": 1, - "peer": "e5f6g7h8..." -} -``` - diff --git a/docs/src/deaddrop/overview.md b/docs/src/deaddrop/overview.md deleted file mode 100644 index 9452488..0000000 --- a/docs/src/deaddrop/overview.md +++ /dev/null @@ -1,47 +0,0 @@ -# Deaddrop Overview - -The `deaddrop` tool provides an anonymous, asynchronous store-and-forward mechanism using the DHT. It allows a sender to "leave" data on the network that a receiver can later "pickup" using a unique key, without requiring both parties to be online at the same time. - -Unlike the `cp` command, which establishes a direct peer-to-peer connection between a sender and receiver, `deaddrop` uses mutable DHT values to store data. This makes it ideal for scenarios where the sender and receiver have intermittent connectivity or want to avoid direct IP discovery. - -## Key Features - -- **Asynchronous Delivery:** Data is stored on DHT nodes. The receiver picks it up whenever they're ready. -- **Mutable DHT Storage:** Uses the HyperDHT `mutable_put` and `mutable_get` operations. -- **Chunked Transfers:** Large files are automatically split into multiple chunks, linked together in a chain. 
-- **Passphrase Support:** Pickup keys can be derived from human-readable passphrases. -- **Anonymity:** No direct connection is established between the sender and receiver. -- **Acknowledgements:** Optional pickup notifications (acks) let the sender know when data was retrieved. - -## Basic Usage - -### Leaving Data - -To leave a message or file on the DHT: - -```bash -echo "Hello from the void" | peeroxide deaddrop leave - --passphrase "my secret drop" -``` - -The tool will print a 64-character hexadecimal pickup key (unless a passphrase is used). It will then continue to run, refreshing the data on the DHT to ensure it doesn't expire. - -### Picking Up Data - -To retrieve data: - -```bash -peeroxide deaddrop pickup --passphrase "my secret drop" -``` - -The receiver fetches each chunk sequentially, reassembles the original data, and verifies its integrity using a CRC-32C checksum. - -## How it Differs from `cp` - -| Feature | `cp` | `deaddrop` | -|---------|------|------------| -| **Connection** | Direct P2P (UDX) | Mediated via DHT storage | -| **Online Requirement** | Both must be online | Asynchronous | -| **Discovery** | Topic-based | Key-based (Public Key) | -| **Speed** | High (Direct) | Moderate (DHT round-trips) | -| **Metadata** | Filename, size | Sequential chunks | - diff --git a/docs/src/init/overview.md b/docs/src/init/overview.md new file mode 100644 index 0000000..79bd666 --- /dev/null +++ b/docs/src/init/overview.md @@ -0,0 +1,139 @@ +# init + +The `peeroxide init` command handles environment setup by generating configuration files or installing man pages. It provides a non-interactive way to bootstrap your local environment before running other peeroxide subcommands. + +## Command Modes + +The `init` command operates in two mutually exclusive modes. + +### Config Mode (Default) + +In its default mode, `init` writes a fresh `config.toml` file to your configuration directory. 
It includes a `[network]` table and commented examples of available fields. + +- **First run**: Creates parent directories and writes the file. +- **Rerun without flags**: Prints a message stating the config already exists and exits with code 0. +- **Rerun with `--force`**: Overwrites the existing file entirely. +- **Rerun with `--update`**: Merges new `network.public` or `network.bootstrap` values into the existing file while preserving comments and formatting. + +### Man-page Mode + +When invoked with `--man-pages`, the command skips configuration and instead generates and installs system man pages. + +## CLI Flags + +| Flag | Type | Default | Description | +|---|---|---|---| +| `--force` | `bool` | `false` | Overwrites an existing config file. Conflicts with `--update`. | +| `--update` | `bool` | `false` | Updates specific fields in an existing config. Requires `--public` or `--bootstrap`. Conflicts with `--force`. | +| `--public` | `bool` | `false` | Sets `network.public = true`. Adds default public HyperDHT bootstrap nodes. | +| `--bootstrap <ADDR>` | `Vec<String>` | `[]` | Sets `network.bootstrap`. Repeatable. In update mode, this replaces the entire bootstrap list. | +| `--man-pages [PATH]` | `PathBuf` | `/usr/local/share/man/` | Installs generated man pages. Writes to `PATH/man1/`. | + +### Flag Conflicts + +- `--man-pages` cannot be used with `--force`, `--update`, `--public`, or `--bootstrap`. +- `--force` and `--update` are mutually exclusive. +- `--update` requires at least one field to change (`--public` or `--bootstrap`). + +## Global CLI Flags + +The `peeroxide` binary defines several global flags that apply to most subcommands. `peeroxide init` itself only consumes two of them: + +- `--config <PATH>` — used as the target write path (and as the source path for `--update`). +- `-v` / `--verbose` — controls tracing verbosity. 
+ +The remaining global flags listed below are accepted by the parser but **do not affect** `init` (which has its own local `--public` and `--bootstrap` flags applied to the generated/updated config). They take effect on subcommands that do DHT work (lookup, announce, ping, cp, dd, chat, node). + +| Flag | Type | Description | +|---|---|---| +| `-v`, `--verbose` | `u8` count | Increases logging level. `-v` for info, `-vv` for debug. `RUST_LOG` overrides this. (Used by init.) | +| `--config <PATH>` | `String` | Specifies a custom path for the config file. For `init`, this is the write target. | +| `--no-default-config` | `bool` | Skips loading the default configuration file. (Not consumed by `init`.) | +| `--public` | `bool` | Includes default public HyperDHT bootstrap nodes. (Not consumed by `init`; `init` has its own local `--public` for the generated config.) | +| `--no-public` | `bool` | Excludes default public HyperDHT bootstrap nodes. Conflicts with `--public`. (Not consumed by `init`.) | +| `--bootstrap <ADDR>` | `Vec<String>` | Adds a bootstrap node address (`host:port`). Repeatable. (Not consumed by `init`; `init` has its own local `--bootstrap` for the generated config.) | + +## Config File Locations + +### Target Path Precedence (init) + +When `init` determines where to write the config file, it follows this order: + +1. Path provided via `--config <PATH>` +2. Environment variable `$PEEROXIDE_CONFIG` +3. `$XDG_CONFIG_HOME/peeroxide/config.toml` +4. `~/.config/peeroxide/config.toml` +5. Default fallback `.config/peeroxide/config.toml` + +### Runtime Load Precedence + +When running commands, peeroxide loads configuration in this order: + +1. Path provided via `--config <PATH>` +2. Environment variable `$PEEROXIDE_CONFIG` +3. `$XDG_CONFIG_HOME/peeroxide/config.toml` +4. Platform-specific config directory (e.g., `Library/Application Support` on macOS) +5. `~/.config/peeroxide/config.toml` + +## Config Schema + +The config file uses the TOML format. 
+ +### [network] + +| Field | Type | Default | Description | +|---|---|---|---| +| `public` | `bool` | `None` | If `true`, adds public bootstrap nodes. If `false`, removes them. | +| `bootstrap` | `Vec<String>` | `None` | List of `host:port` or `ip:port` bootstrap addresses. | + +### [node] + +| Field | Type | Default | Description | +|---|---|---|---| +| `port` | `u16` | `49737` | The local port to bind for DHT operations. | +| `host` | `String` | `"0.0.0.0"` | The local address to bind. | +| `stats_interval` | `u64` | `60` | Interval in seconds for logging node statistics. | +| `max_records` | `usize` | `65536` | Maximum number of DHT records to store. | +| `max_lru_size` | `usize` | `65536` | Maximum size of the LRU cache for routing. | +| `max_per_key` | `usize` | `20` | Maximum records allowed per key. | +| `max_record_age` | `u64` | `1200` | Maximum age in seconds for DHT records. | +| `max_lru_age` | `u64` | `1200` | Maximum age in seconds for LRU entries. | + +### [announce] and [cp] + +These tables are currently empty and reserved for future use. + +## Bootstrap Resolution + +Peeroxide resolves the bootstrap-node list in two stages: a base-list selection from CLI/config (CLI overrides), then a public-default adjustment. + +**Stage 1 — pick the base list (in `peeroxide-cli/src/config.rs`):** + +- If `--bootstrap <ADDR>` was supplied (one or more times) on the command line, use **only** those CLI bootstraps for the base list. The config file's `network.bootstrap` is **ignored** in this case. +- Otherwise, use the `network.bootstrap` list from the config file (if any). +- If neither source supplied bootstraps, the base list starts empty. + +**Stage 2 — apply the public-default adjustment (in `peeroxide-cli/src/cmd/mod.rs::resolve_bootstrap`):** + +1. If `public=true` (via flag or config), add the default public HyperDHT bootstrap nodes to the base list. +2. 
If the list is still empty after step 1, automatically add the default public HyperDHT bootstrap nodes (so a fresh install with no config and no flags still connects). +3. If `public=false` (via `--no-public` or config), remove all default public bootstrap nodes from the list. + +This ensures the node is never isolated unless specifically requested by combining `--no-public` with an empty bootstrap list. The `--no-public` flag replaces the legacy `--firewalled` flag behavior. + +Note: this resolution happens at runtime in subcommands that do DHT work (lookup, announce, ping, cp, dd, chat, node). `peeroxide init` uses its own local `--public` and `--bootstrap` flags to populate the generated/updated config file; the base-list selection and public-default adjustment do not run during `init`. + +## Man-page Installation + +When running `peeroxide init --man-pages`, the tool: + +1. Identifies the target directory (default `/usr/local/share/man/`). +2. Ensures the `man1/` subdirectory exists. +3. Cleans up any existing `peeroxide*.1` files in that directory. +4. Writes fresh man pages for the main command and all subcommands. + +## Exit Codes + +- `0`: Success. +- `1`: Runtime error, file system error, or TOML parsing error. +- `2`: Usage error or invalid arguments provided to the CLI. diff --git a/docs/src/introduction.md b/docs/src/introduction.md index d1607d7..d5fa1fa 100644 --- a/docs/src/introduction.md +++ b/docs/src/introduction.md @@ -1,18 +1,37 @@ # Introduction +```text +,____ _____ _____ ____ _____ _____ ___ ,______ +| _ \| ____| ____| _ \ / _ \ \/ /_ _| _ \| ____| +| |_) | _| | _| | |_) | | | \ / | || | | | _| +| __/| |___| |___| _ <| |_| / \ | || |_| | |___ +|_| |_____|_____|_| \_\\___/_/\_\___|____/|_____| + +ENCRYPTED BY DEFAULT. PSEUDONYMOUS BY DESIGN. +NO SERVERS. NO ACCOUNTS. NO GATEKEEPERS. +TRUST NO ONE. TALK TO ANYONE. +``` + `peeroxide-cli` is a command-line toolkit for interacting with the peeroxide P2P networking stack. 
It provides a set of tools for peer discovery, connectivity diagnostics, and decentralized data transfer, all while maintaining full wire-compatibility with the existing Hyperswarm and HyperDHT networks. The binary is named `peeroxide`. ## Core Tools -The toolkit consists of five primary commands: +The toolkit consists of eight primary commands: +- **[init](./init/overview.md)**: Generate configuration files and install man pages. - **[lookup](./lookup/overview.md)**: Query the DHT to find peers announcing a specific topic. - **[announce](./announce/overview.md)**: Announce your presence on a topic so others can discover you. - **[ping](./ping/overview.md)**: Diagnose reachability through bootstrap checks, NAT classification, or targeted peer pings. - **[cp](./cp/overview.md)**: Transfer files directly between peers over an encrypted swarm connection. -- **[deaddrop](./deaddrop/overview.md)**: Perform anonymous store-and-forward messaging via the DHT. +- **[dd (Dead Drop)](./dd/overview.md)**: Perform anonymous store-and-forward messaging via the DHT. The `dd` command supports both v1 and v2 protocols, with v2 auto-selected for new put operations. +- **[chat](./chat/overview.md)**: Join topic-based interactive chat rooms. +- **node**: Run a long-running DHT bootstrap / coordination node. + +## Quick Start + +It's recommended to run `peeroxide init` first to generate a default configuration and install system manual pages. ## Key Concepts diff --git a/peeroxide-cli/AGENTS.md b/peeroxide-cli/AGENTS.md index 66ba07b..b6d59af 100644 --- a/peeroxide-cli/AGENTS.md +++ b/peeroxide-cli/AGENTS.md @@ -1,25 +1,42 @@ # AGENTS.md — peeroxide-cli/ -This crate implements the `peeroxide` CLI binary with five subcommands: `lookup`, `announce`, `ping`, `cp`, `deaddrop`. +This crate implements the `peeroxide` CLI binary with eight subcommands: `init`, `node`, `lookup`, `announce`, `ping`, `cp`, `dd`, `chat`. 
## Source Layout ``` src/ -├── main.rs — CLI entry point, subcommand dispatch +├── main.rs — CLI entry point, global flag parsing, subcommand dispatch +├── config.rs — TOML config schema + load precedence +├── manpage.rs — roff man-page generation (peeroxide(1) + per-subcommand pages) ├── cmd/ -│ ├── mod.rs — Shared helpers: parse_topic, build_dht_config, to_hex, discovery_key +│ ├── mod.rs — Shared helpers: parse_topic, resolve_bootstrap, to_hex, discovery_key +│ ├── init.rs — peeroxide init (config bootstrap + man-page install) +│ ├── node.rs — node subcommand (long-running DHT bootstrap node) │ ├── lookup.rs — lookup subcommand │ ├── announce.rs — announce subcommand + echo protocol server │ ├── ping.rs — ping subcommand (bootstrap check, direct, pubkey, topic, --connect) │ ├── cp.rs — cp subcommand (send/recv file transfer over swarm) -│ └── deaddrop.rs — deaddrop subcommand (mutable DHT store-and-forward) +│ ├── deaddrop/ +│ │ ├── mod.rs — dd subcommand dispatch + shared helpers +│ │ ├── v1.rs — v1 (0x01) single linked-list format +│ │ ├── v2/ — v2 (0x02) tree-indexed protocol +│ │ │ ├── mod.rs, build.rs, fetch.rs, keys.rs, need.rs, publish.rs, +│ │ │ ├── queue.rs, stream.rs, tree.rs, wire.rs +│ │ └── progress/ — TTY-aware bar / JSON / log / off mode + state +│ └── chat/ +│ ├── mod.rs, crypto.rs, debug.rs, display.rs, dm.rs, dm_cmd.rs, +│ ├── feed.rs, inbox.rs, inbox_cmd.rs, inbox_monitor.rs, join.rs, +│ ├── known_users.rs, name_resolver.rs, names.rs, nexus.rs, +│ ├── ordering.rs, post.rs, probe.rs, profile.rs, publisher.rs, +│ ├── reader.rs, session.rs, wire.rs +│ └── tui/{mod,commands,input,interactive,line,status,terminal}.rs ``` ## Key Shared Helpers (cmd/mod.rs) - `parse_topic(s)`: 64-char hex → raw 32-byte key; anything else → `discovery_key(s.as_bytes())` (BLAKE2b-256). -- `build_dht_config(args)`: Constructs `DhtConfig` from CLI flags. +- `resolve_bootstrap(...)`: bootstrap-list resolution. 
CLI `--bootstrap` overrides the config file's `network.bootstrap` (it does not combine). After the base list is selected, `--public` adds the default public HyperDHT bootstrap nodes; an empty list auto-fills with the defaults; `--no-public` removes the defaults. - `to_hex(bytes)`: Lowercase hex encoding. - `discovery_key(data)`: BLAKE2b-256 hash, returns `[u8; 32]`. @@ -34,11 +51,33 @@ src/ | `IDLE_TIMEOUT` | 30s | announce.rs | | `ECHO_MSG_LEN` | 16 | announce.rs | | `ECHO_TIMEOUT` | 5s | ping.rs | -| `MAX_CHUNKS` | 65535 | deaddrop.rs | -| `MAX_PAYLOAD` | 1000 | deaddrop.rs | -| `ROOT_HEADER_SIZE` | 39 | deaddrop.rs | -| `NON_ROOT_HEADER_SIZE` | 33 | deaddrop.rs | +| `MAX_PAYLOAD` | 1000 | deaddrop/mod.rs | +| `MAX_CHUNKS` (v1) | 65535 | deaddrop/v1.rs | +| `ROOT_HEADER_SIZE` (v1) | 39 | deaddrop/v1.rs | +| `NON_ROOT_HEADER_SIZE` (v1) | 33 | deaddrop/v1.rs | +| `VERSION` (v1) | 0x01 | deaddrop/v1.rs | +| `VERSION` (v2) | 0x02 | deaddrop/v2/wire.rs | +| `MAX_CHUNK_SIZE` (v2) | 1000 | deaddrop/v2/wire.rs | +| `DATA_HEADER_SIZE` (v2) | 2 | deaddrop/v2/wire.rs | +| `DATA_PAYLOAD_MAX` (v2) | 998 | deaddrop/v2/wire.rs | +| `NON_ROOT_INDEX_HEADER_SIZE` (v2) | 1 | deaddrop/v2/wire.rs | +| `NON_ROOT_INDEX_SLOT_CAP` (v2) | 31 | deaddrop/v2/wire.rs | +| `ROOT_INDEX_HEADER_SIZE` (v2) | 13 | deaddrop/v2/wire.rs | +| `ROOT_INDEX_SLOT_CAP` (v2) | 30 | deaddrop/v2/wire.rs | +| `NEED_LIST_HEADER_SIZE` (v2) | 3 | deaddrop/v2/wire.rs | +| `NEED_ENTRY_SIZE` (v2) | 8 | deaddrop/v2/wire.rs | +| `NEED_LIST_ENTRY_CAP` (v2) | 124 | deaddrop/v2/wire.rs | +| `HASH_LEN` (v2) | 32 | deaddrop/v2/wire.rs | +| `SOFT_DEPTH_CAP` (v2) | 4 | deaddrop/v2/mod.rs | +| `PARALLEL_FETCH_CAP` (v2) | 64 | deaddrop/v2/mod.rs | +| `PUT_TIMEOUT` (v2) | 30s | deaddrop/v2/publish.rs | | `CHUNK_SIZE` | 65536 | cp.rs | +| `MAX_RECORD_SIZE` (chat) | 1000 | chat/wire.rs | +| `MAX_SCREEN_NAME_CONTENT` (chat) | 820 | chat/wire.rs | +| `FEED_EXPIRY_SECS` (chat) | 1200 | chat/feed.rs | +| `DEDUP_RING_CAPACITY` (chat) | 
1000 | chat/ordering.rs | +| `GAP_TIMEOUT` (chat) | 5s | chat/ordering.rs | +| `HISTORY_CAP` (chat TUI) | 500 | chat/tui/interactive.rs | ## Known Issues @@ -54,4 +93,4 @@ Full CLI documentation lives in `../docs/`. Build with `mdbook build docs/` from cargo test -p peeroxide-cli ``` -Integration tests are in `tests/`. They require network access (bootstrap nodes) for DHT-dependent tests. +Integration tests are in `tests/`. They require network access (bootstrap nodes) for DHT-dependent tests. The `live_commands.rs` suite is gated behind `#[ignore]` — run with `cargo test -p peeroxide-cli --test live_commands -- --ignored`. diff --git a/peeroxide-cli/CHANGELOG.md b/peeroxide-cli/CHANGELOG.md index de19f09..9013422 100644 --- a/peeroxide-cli/CHANGELOG.md +++ b/peeroxide-cli/CHANGELOG.md @@ -7,6 +7,53 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.2.0] - 2026-05-13 + +### Added + +- `peeroxide chat` — pseudonymous end-to-end-encrypted P2P chat over the DHT. Subcommands: `join`, `dm`, `inbox`, `whoami`, `profiles {list, create, delete}`, `friends {list, add, remove, refresh}`, `nexus`. Public channels by name, private channels via `--group ` or `--keyfile`; DMs derived from both participants' identity pubkeys plus an ECDH-augmented message key. Interactive TUI with a pinned status bar, multi-line input, slash commands, and a background inbox monitor; line mode is selected automatically when either stdio side is piped. Full reference and protocol spec: `docs/src/chat/`. +- `peeroxide init` — config bootstrap (default mode) and man-page installation (`--man-pages [PATH]`). New flags: `--force`, `--update`, `--public`, `--bootstrap ` (repeatable), `--man-pages [PATH]`. +- Tree-indexed `dd` protocol v2 shipped under wire byte `0x02`. Receiver fetches the index tree breadth-first in parallel. Soft depth cap of 4 supports up to ~27 GB at the default 998-byte chunk size. 
+- `dd put` and `dd get` now display a progress bar by default when stderr is a TTY (indicatif-driven). New flags: + - `--no-progress` — suppress the progress bar + - `--json` — emit structured `start`/`progress`/`result`/`ack`/`done` events as JSON Lines on stdout (schema documented in `docs/src/dd/operations.md`) + + `dd get --json` requires `--output FILE`; without it, flag parsing fails with a clear error (stdout would otherwise conflict with the JSON event stream). +- `dd` progress display includes cumulative DHT wire bytes (sent / received) via the new `peeroxide-dht` 1.3.0 `HyperDhtHandle::wire_stats()` / `wire_counters()` API (additive — see `peeroxide-dht/CHANGELOG.md` for the full new symbol set). Shown in the bar, periodic log, and JSON events. +- New global `-v` / `--verbose` count flag (warn / info / debug; `RUST_LOG` overrides). +- New global `--no-public` flag that excludes the default public HyperDHT bootstrap nodes. +- Per-`mutable_put` timeout of 30 seconds in the `dd` v2 sender. Stall watchdog kicks AIMD concurrency off the floor if no put resolves for 30 seconds. +- `peeroxide-init(1)` and `peeroxide-chat(1)` man pages. +- New mdBook chapters: `docs/src/chat/` (overview, user-guide, interactive-tui, wire-format, protocol, reference), `docs/src/init/overview.md`, `docs/src/concepts/dht-primitives.md` (covers `immutable_put`/`mutable_put`/`announce`/`lookup`, rendezvous pattern, TTL, and 1002-byte size budget). +- `docs/ascii_art.txt` banner asset embedded into `peeroxide --version` via clap `long_version`, into the crate README, and into the mdBook introduction. `-V` continues to print the bare semver for scripts. +- Prebuilt `peeroxide` binaries distributed via the [`rightbracket/peeroxide` Homebrew tap](https://github.com/Rightbracket/homebrew-peeroxide) for macOS (universal Apple Silicon + Intel), Linux x86_64 (glibc), and Linux aarch64 (glibc). No Rust toolchain required; `brew install rightbracket/peeroxide/peeroxide` auto-taps and installs. 
+ +### Changed + +- Renamed `deaddrop` command to `dd` (short for "Dead Drop"). +- Renamed `deaddrop leave` subcommand to `dd put`. +- Renamed `deaddrop pickup` subcommand to `dd get`. +- `dd put` defaults to v2 protocol; pass `--v1` to force the legacy single-chain protocol. +- `dd get` auto-dispatches between v1 (`0x01`) and v2 (`0x02`) based on the root record's first byte. +- Bootstrap resolution: CLI `--bootstrap` overrides the config file's `network.bootstrap` (not additive). After base-list selection, `--public` adds defaults, an empty list auto-fills with defaults, and `--no-public` removes defaults. +- The legacy per-chunk status output emitted to stderr during the initial publish/fetch phase (`published chunk N/M`, `fetched data N/M`, `reassembled X bytes`, etc.) is replaced by the new progress UI (bar, periodic log, or JSON events). Scripts that parsed this output should migrate to `--json` mode. + **Preserved:** Refresh, ack (`[ack] pickup #N detected`), "ack sent", "done", "written to PATH", and other lifecycle messages on stderr are not affected and continue to print as before. +- In `--json` mode, all structured events (including the pickup key for `dd put`) go to stdout (per `docs/AGENTS.md` convention). The pickup key is delivered as `{"type":"result","pickup_key":"..."}` rather than a bare stdout line. JSON consumers should parse `{"type":"result"}` events. +- Consolidated `peeroxide chat` man pages into a single `peeroxide-chat(1)` covering every subcommand and group. Total man-page count is 9 (one per top-level command). +- All man pages have refreshed long-about prose, examples, exit status, and see-also entries. +- Rewritten `docs/src/dd/` chapters covering both v1 and v2. + +### Fixed + +- Shared sticky `Shutdown` primitive across `dd put`. First SIGINT/SIGTERM cancels gracefully; second exits with code 130. +- `dd` v2 need-list watcher now publishes only attempted-and-failed chunk ranges, not all missing positions. 
+ +### Removed + +- `peeroxide config init` — replaced by `peeroxide init`. +- The legacy `--generate-man ` flag — replaced by `peeroxide init --man-pages [PATH]`. +- The legacy `--firewalled` global flag — replaced by `--no-public`. + ## [0.1.0] - 2026-04-29 ### Added diff --git a/peeroxide-cli/Cargo.toml b/peeroxide-cli/Cargo.toml index 28a3f9e..975c479 100644 --- a/peeroxide-cli/Cargo.toml +++ b/peeroxide-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "peeroxide-cli" -version = "0.1.0" +version = "0.2.0" edition.workspace = true license.workspace = true rust-version.workspace = true @@ -20,13 +20,15 @@ path = "src/main.rs" [dependencies] peeroxide = { path = "../peeroxide", version = "1.2.0" } -peeroxide-dht = { path = "../peeroxide-dht", version = "1.2.0" } +peeroxide-dht = { path = "../peeroxide-dht", version = "1.3.0" } libudx = { path = "../libudx", version = "1.2.0" } clap = { version = "4", features = ["derive"] } clap_mangen = "0.2" tokio = { version = "1", features = ["full", "signal"] } toml = "0.8" +toml_edit = "0.22" dirs = "6" +fs2 = "0.4" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } hex = "0.4" @@ -37,9 +39,19 @@ futures = "0.3" crc32c = "0.6" rand = "0.9" indexmap = "2" +blake2 = "0.10" +ed25519-dalek = { version = "2", features = ["rand_core"] } +curve25519-dalek = "4" +sha2 = "0.10" +xsalsa20poly1305 = "0.9" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +memmap2 = "0.9" +crossterm = { version = "0.28", features = ["event-stream", "bracketed-paste"] } +arc-swap = "1" [dev-dependencies] tokio = { version = "1", features = ["full", "test-util"] } tempfile = "3" assert_cmd = "2" predicates = "3" +rand = "0.9" diff --git a/peeroxide-cli/DEADDROP_V2.md b/peeroxide-cli/DEADDROP_V2.md deleted file mode 100644 index 5463a1f..0000000 --- a/peeroxide-cli/DEADDROP_V2.md +++ /dev/null @@ -1,146 +0,0 @@ -# Deaddrop v2: Two-Chain Storage Protocol - -Future revision of the deaddrop frame 
format. Supersedes the v1 single linked-list design with a two-chain architecture that enables parallel data fetch while preserving read-only pickup semantics. - -## Motivation - -v1 uses a single linked-list of chunks. The receiver must fetch sequentially (each chunk contains the `next` pointer). For a 100KB file (~107 chunks), this means ~107 round-trips taking 2-5 minutes. The format also wastes bytes on redundant fields (next pointer in every frame). - -v2 separates concerns: a small **index chain** (linked-list of pointer records) and a large **data chain** (independently addressable chunks fetched in parallel). - -## Architecture - -``` -Index chain (sequential fetch, small): - [root idx] → [idx 1] → [idx 2] → ... → [idx K] (next=zeros) - │ │ │ - ▼ ▼ ▼ -Data chain (parallel fetch, bulk): - [d0..d29] [d30..d59] [d60..d89] ... -``` - -- **Index chain:** Linked-list of records containing data chunk public keys (pointers). Sequential fetch — but each record holds ~30 pointers, so the index is ~30× shorter than the data. -- **Data chain:** Independent records at random DHT coordinates. Once the receiver knows all pubkeys (from the index), it fetches all data chunks in parallel. - -## Frame Formats - -### Data chunk (version 0x02) - -``` -Offset Size Field -0 1 Version (0x02) -1 ... Payload (raw file bytes, up to 999 bytes) -``` - -Header overhead: **1 byte.** Max payload: **999 bytes.** - -Data chunks have no pointers, no metadata, no index. Just a version tag and raw bytes. Their ordering is defined by their position in the index chain. - -### Root index chunk (version 0x02) - -``` -Offset Size Field -0 1 Version (0x02) -1 4 Total file size in bytes (u32 LE) -5 4 CRC-32C of fully assembled payload (Castagnoli) -9 32 Next index chunk public key (32 zeros if single index chunk) -41 ... Data chunk public keys (32 bytes each, up to 29 per root) -``` - -Header overhead: **41 bytes.** Remaining: 959 bytes → **29 data chunk pointers** per root index. 
- -### Non-root index chunk (version 0x02) - -``` -Offset Size Field -0 1 Version (0x02) -1 32 Next index chunk public key (32 zeros if final index chunk) -33 ... Data chunk public keys (32 bytes each, up to 30 per chunk) -``` - -Header overhead: **33 bytes.** Remaining: 967 bytes → **30 data chunk pointers** per non-root index. - -## Key Derivation - -Sender derives all keypairs deterministically from `root_seed` (enables refresh after restart): - -``` -root_keypair = KeyPair::from_seed(root_seed) // chunk 0 of index chain -index_keypair[i] = KeyPair::from_seed(blake2b(root_seed || "idx" || i_as_u16_le)) -data_keypair[i] = KeyPair::from_seed(blake2b(root_seed || "dat" || i_as_u16_le)) -``` - -- `root_seed`: 32 bytes (random or BLAKE2b of passphrase) -- Index chunk 0 uses `root_keypair` directly -- `"idx"` and `"dat"` are literal ASCII byte prefixes (domain separation) - -**Pickup key = root public key** (derived from root_seed). The receiver never learns root_seed and cannot derive any private keys. Read-only capability preserved. - -## Fetch Protocol (Receiver) - -1. Has pickup key (root public key) -2. `mutable_get(root_pubkey, 0)` → parse root index → learn file size, CRC, first batch of data pubkeys, next index pointer -3. Walk index chain sequentially: fetch each `next` index chunk, accumulate data pubkeys -4. Once all data pubkeys collected: fire all `mutable_get` calls in parallel (batch, capped at e.g. 64 concurrent) -5. Reassemble data in index order (first pointer = first chunk of file) -6. Verify CRC-32C of assembled payload against root's stored value -7. Write output - -## Write Protocol (Sender) - -1. Read file, compute CRC-32C -2. Split into data chunks of ≤ 999 bytes -3. Derive all keypairs -4. Write data chunks (any order, parallel OK) -5. Build index chain with data chunk public keys (in file order) -6. Write index chain in reverse (last index chunk first, root last) — root-last = "ready" signal -7. 
Print root public key to stdout - -Refresh: re-put all data chunks and all index chunks with `seq = current Unix timestamp`. - -## Comparison to v1 - -| Property | v1 (single linked-list) | v2 (two-chain) | -|----------|------------------------|----------------| -| Data payload per chunk | 961 (root) / 967 (non-root) | **999** | -| Fetch pattern | All sequential | Index sequential + data **parallel** | -| Read/write separation | ✓ (pickup key = root pubkey) | ✓ (same) | -| Forgery protection | ✓ (each chunk signed by unique key) | ✓ (same) | -| Format max file size | ~60 MB (u16 chunk count) | **~1.9 GB** (65535 idx × 30 ptrs × 999 B) | -| 100KB fetch time | ~107 sequential queries (2-5 min) | ~4 index + ~107 parallel (seconds) | -| 1MB fetch time | ~1000 sequential queries (15-50 min) | ~34 index + ~1000 parallel (~1 min) | -| Overhead per data byte | 3.4-3.9% | **0.1%** | -| Complexity | Simple | Moderate (two derivation domains, two frame types) | - -## Practical Limits (v2) - -- **Data chunk payload:** 999 bytes -- **Pointers per index chunk:** 29 (root) / 30 (non-root) -- **Format maximum:** 65535 index chunks × ~30 pointers × 999 bytes ≈ 1.9 GB -- **Refresh is concurrent:** All puts (data + index) fire in parallel per cycle. Bottleneck is outbound bandwidth: each put commits ~1.1 KB to ~20 nodes ≈ 22 KB outbound per chunk. Refresh interval = 10 minutes (DHT record TTL = 20 min). -- **Practical ceiling:** Limited by upload bandwidth. At 1 MB/s upload with 10-min refresh: ~27,200 chunks (~27 MB). At 5 MB/s: full format max is achievable. -- **1MB example:** ~1000 data chunks + 34 index chunks = 1034 puts × 22 KB = ~22 MB outbound per cycle. Refreshes in ~22 seconds at 1 MB/s upload — trivial. 
- -## Security Properties (unchanged from v1) - -- Pickup key = root public key (read-only capability) -- Each chunk (data and index) signed by a unique keypair derived from root_seed -- Receiver cannot derive private keys → cannot forge records -- DHT nodes can read plaintext (same as v1 — encrypt before dropping for confidentiality) -- Malicious DHT nodes cannot forge chunks (signature verification) -- Data chunk locations are opaque to anyone who hasn't walked the index chain - -## Migration Notes - -- Version byte 0x02 distinguishes v2 frames from v1 (0x01) -- A v2-aware `pickup` client can detect the version from the root chunk and handle both formats -- `leave` would default to v2 but could support `--format v1` for compatibility during transition -- The pickup key format is unchanged (64-char hex root public key) -- Passphrase mode works identically (passphrase → blake2b → root_seed → root_keypair → root_pubkey) - -## Open Questions for Implementation - -- **Parallel fetch concurrency cap:** 64? 128? Depends on UDP socket limits and network conditions. -- **Index chain refresh order:** Any order is safe (data chunks are already written). Could refresh data and index in parallel. -- **Partial index walk + streaming fetch:** Could the receiver start fetching data chunks as soon as the first index record is parsed, while continuing to walk the index? This would pipeline index walking with data fetching for faster perceived latency. -- **Error handling for partial data fetch:** If 95/100 data chunks succeed but 5 timeout, should the receiver retry those 5 before aborting? How many retries? 
diff --git a/peeroxide-cli/README.md b/peeroxide-cli/README.md index e1e0221..8a2180c 100644 --- a/peeroxide-cli/README.md +++ b/peeroxide-cli/README.md @@ -1,5 +1,17 @@ # peeroxide-cli +```text +,____ _____ _____ ____ _____ _____ ___ ,______ +| _ \| ____| ____| _ \ / _ \ \/ /_ _| _ \| ____| +| |_) | _| | _| | |_) | | | \ / | || | | | _| +| __/| |___| |___| _ <| |_| / \ | || |_| | |___ +|_| |_____|_____|_| \_\\___/_/\_\___|____/|_____| + +ENCRYPTED BY DEFAULT. PSEUDONYMOUS BY DESIGN. +NO SERVERS. NO ACCOUNTS. NO GATEKEEPERS. +TRUST NO ONE. TALK TO ANYONE. +``` + Command-line interface for the peeroxide P2P networking stack. Wire-compatible with the existing Hyperswarm/HyperDHT network. ## Install @@ -24,11 +36,11 @@ The binary is named `peeroxide`. ## Quick Start ```sh -# 1. Generate a config file (optional but recommended) -peeroxide config init --output ~/.config/peeroxide/config.toml +# 1. Initialize a config file (optional but recommended) +peeroxide init # 2. Install man pages -peeroxide --generate-man ~/.local/share/man/man1/ +peeroxide init --man-pages ~/.local/share/man/ # 3. Verify network connectivity and discover your public address peeroxide --public ping @@ -38,13 +50,14 @@ peeroxide --public ping | Command | Description | |---------|-------------| +| `init` | Initialize config file or install man pages | | `node` | Run a long-running DHT coordination (bootstrap) node | | `lookup` | Query the DHT for peers announcing a topic | | `announce` | Announce presence on a topic | | `ping` | Diagnose reachability; bootstrap check, NAT classification, or targeted ping | | `cp` | Copy files between peers over the swarm | -| `deaddrop` | Anonymous store-and-forward via the DHT | -| `config` | Configuration management (`config init`) | +| `dd` | Dead Drop: anonymous store-and-forward via the DHT (v1 + v2 protocols) | +| `chat` | End-to-end-encrypted P2P chat: channels, DMs, inbox, and TUI | Run `peeroxide --help` for detailed usage of each command. 
@@ -53,7 +66,7 @@ Run `peeroxide --help` for detailed usage of each command. Generate and install man pages: ```sh -peeroxide --generate-man ~/.local/share/man/man1/ +peeroxide init --man-pages ~/.local/share/man/ ``` If `~/.local/share/man` is not in your `MANPATH`, add it: @@ -62,17 +75,18 @@ If `~/.local/share/man` is not in your `MANPATH`, add it: export MANPATH="$HOME/.local/share/man:$MANPATH" ``` -This produces 8 pages: +This produces 9 pages: ``` peeroxide(1) — main command and global options +peeroxide-init(1) — config initialization and man page installation peeroxide-node(1) — bootstrap node operation peeroxide-lookup(1) — DHT topic lookup peeroxide-announce(1) — DHT topic announcement peeroxide-ping(1) — connectivity diagnostics peeroxide-cp(1) — file transfer (send + recv) -peeroxide-config(1) — configuration management -peeroxide-deaddrop(1) — anonymous messaging (leave + pickup) +peeroxide-dd(1) — dead drop messaging (put + get, v1 + v2) +peeroxide-chat(1) — interactive chat (join, dm, inbox, profiles, friends, nexus, whoami) ``` ## Configuration @@ -80,11 +94,20 @@ peeroxide-deaddrop(1) — anonymous messaging (leave + pickup) ### Generating a config file ```sh -# Print to stdout (inspect before saving) -peeroxide config init +# Create config at default location (~/.config/peeroxide/config.toml) +peeroxide init + +# Create config with public mode enabled +peeroxide init --public + +# Create config with custom bootstrap nodes +peeroxide init --bootstrap node1.example.com:49737 + +# Overwrite existing config +peeroxide init --force -# Write to default location -peeroxide config init --output ~/.config/peeroxide/config.toml +# Update specific fields in existing config +peeroxide init --update --public ``` ### Config file location @@ -93,7 +116,9 @@ peeroxide looks for configuration at (in order): 1. Path given by `--config ` 2. `$PEEROXIDE_CONFIG` environment variable -3. `~/.config/peeroxide/config.toml` +3. `$XDG_CONFIG_HOME/peeroxide/config.toml` +4. 
Platform-specific config directory (`dirs::config_dir()`, e.g. `~/Library/Application Support/peeroxide/config.toml` on macOS) +5. `~/.config/peeroxide/config.toml` Use `--no-default-config` to skip config file loading entirely. @@ -117,9 +142,9 @@ These flags apply to all subcommands: | `--config ` | Use a specific config file | | `--no-default-config` | Ignore the default config entirely | | `--bootstrap ` | Add bootstrap nodes (repeatable) | -| `--public` | Mark this node as publicly reachable | -| `--no-public` | Force NAT mode (override config) | -| `--firewalled` | Force firewalled status for testing | +| `--public` | Use the public HyperDHT bootstrap network | +| `--no-public` | Do not use the public HyperDHT bootstrap network | +| `-v`, `--verbose` | Increase output verbosity (-v info, -vv debug) | ## Examples @@ -154,11 +179,11 @@ peeroxide cp recv my-transfer-topic ./downloads/ # Stream from stdin cat data.bin | peeroxide cp send - my-transfer-topic -# Leave a dead drop message -echo 'secret' | peeroxide deaddrop leave - --passphrase s3cret +# Put a message at a dead drop +echo 'secret' | peeroxide dd put - --passphrase s3cret -# Pick up a dead drop message -peeroxide deaddrop pickup --passphrase s3cret +# Get a message from a dead drop +peeroxide dd get --passphrase s3cret # Run a public bootstrap node peeroxide node --public --port 49737 diff --git a/peeroxide-cli/src/cmd/announce.rs b/peeroxide-cli/src/cmd/announce.rs index 4b818f6..bdf10d4 100644 --- a/peeroxide-cli/src/cmd/announce.rs +++ b/peeroxide-cli/src/cmd/announce.rs @@ -69,11 +69,6 @@ pub async fn run(args: AnnounceArgs, cfg: &ResolvedConfig) -> i32 { let mut swarm_config = SwarmConfig::default(); swarm_config.key_pair = Some(key_pair.clone()); swarm_config.dht = dht_config; - if cfg.public { - swarm_config.firewall = super::FIREWALL_OPEN; - } else if cfg.firewalled { - swarm_config.firewall = super::FIREWALL_CONSISTENT; - } let (task, handle, mut conn_rx) = match spawn(swarm_config).await { 
Ok(v) => v, diff --git a/peeroxide-cli/src/cmd/chat/crypto.rs b/peeroxide-cli/src/cmd/chat/crypto.rs new file mode 100644 index 0000000..caf8284 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/crypto.rs @@ -0,0 +1,465 @@ +//! Cryptographic primitives for the peeroxide chat protocol. +//! +//! All key-derivation functions use keyed BLAKE2b-256 MACs so that the same +//! raw key material can produce independent outputs for different purposes. + +use blake2::digest::consts::U32; +use blake2::digest::{KeyInit, Mac}; +use blake2::Blake2bMac; +use curve25519_dalek::edwards::CompressedEdwardsY; +use curve25519_dalek::montgomery::MontgomeryPoint; +use peeroxide_dht::crypto::{hash, hash_batch, sign_detached, verify_detached}; +use sha2::{Digest, Sha512}; +use std::time::{SystemTime, UNIX_EPOCH}; + +type Blake2bMac256 = Blake2bMac; + +/// Keyed BLAKE2b-256 MAC. +/// +/// Used by all KDF functions in this module. The `key` is always 32 bytes +/// (a channel key, ECDH secret, etc.) and `msg` is the domain-separated +/// input. +fn keyed_blake2b(key: &[u8; 32], msg: &[u8]) -> [u8; 32] { + let mut mac: Blake2bMac256 = KeyInit::new_from_slice(key.as_slice()) + .expect("32-byte key is always valid for BLAKE2b"); + mac.update(msg); + let output = mac.finalize().into_bytes(); + let mut result = [0u8; 32]; + result.copy_from_slice(&output); + result +} + +/// Returns `(x.len() as u32).to_le_bytes()` — a 4-byte little-endian length +/// prefix suitable for inclusion in hash pre-images. +pub fn len4(x: &[u8]) -> [u8; 4] { + (x.len() as u32).to_le_bytes() +} + +/// Derive a channel key for a public or password-protected channel. 
+/// +/// * Public channel: +/// `hash_batch([b"peeroxide-chat:channel:v1:", len4(name), name])` +/// * Private channel (with salt): +/// `hash_batch([b"peeroxide-chat:channel:v1:", len4(name), name, b":salt:", len4(salt), salt])` +pub fn channel_key(name: &[u8], salt: Option<&[u8]>) -> [u8; 32] { + match salt { + None => hash_batch(&[b"peeroxide-chat:channel:v1:", &len4(name), name]), + Some(s) => hash_batch(&[ + b"peeroxide-chat:channel:v1:", + &len4(name), + name, + b":salt:", + &len4(s), + s, + ]), + } +} + +/// Derive a symmetric DM channel key from two peer identity public keys. +/// +/// The key is order-independent: `dm_channel_key(a, b) == dm_channel_key(b, a)`. +/// +/// `hash_batch([b"peeroxide-chat:dm:v1:", lex_min(id_a, id_b), lex_max(id_a, id_b)])` +pub fn dm_channel_key(id_a: &[u8; 32], id_b: &[u8; 32]) -> [u8; 32] { + let (lo, hi) = if id_a <= id_b { + (id_a.as_ref(), id_b.as_ref()) + } else { + (id_b.as_ref(), id_a.as_ref()) + }; + hash_batch(&[b"peeroxide-chat:dm:v1:", lo, hi]) +} + +/// Derive the DHT announce topic for a given channel, epoch, and bucket. +/// +/// `keyed_blake2b(key=channel_key, msg=b"peeroxide-chat:announce:v1:" || epoch_u64_le || bucket_u8)` +pub fn announce_topic(channel_key: &[u8; 32], epoch: u64, bucket: u8) -> [u8; 32] { + let mut msg = Vec::with_capacity(27 + 8 + 1); + msg.extend_from_slice(b"peeroxide-chat:announce:v1:"); + msg.extend_from_slice(&epoch.to_le_bytes()); + msg.push(bucket); + keyed_blake2b(channel_key, &msg) +} + +/// Derive the DHT inbox topic for a given recipient, epoch, and bucket. 
+/// +/// `keyed_blake2b(key=hash(recipient_id_pubkey), msg=b"peeroxide-chat:inbox:v1:" || epoch_u64_le || bucket_u8)` +pub fn inbox_topic(recipient_id_pubkey: &[u8; 32], epoch: u64, bucket: u8) -> [u8; 32] { + let key = hash(recipient_id_pubkey); + let mut msg = Vec::with_capacity(24 + 8 + 1); + msg.extend_from_slice(b"peeroxide-chat:inbox:v1:"); + msg.extend_from_slice(&epoch.to_le_bytes()); + msg.push(bucket); + keyed_blake2b(&key, &msg) +} + +/// Derive the symmetric message encryption key for a public/private channel. +/// +/// `keyed_blake2b(key=channel_key, msg=b"peeroxide-chat:msgkey:v1")` +pub fn msg_key(channel_key: &[u8; 32]) -> [u8; 32] { + keyed_blake2b(channel_key, b"peeroxide-chat:msgkey:v1") +} + +/// Derive the symmetric message encryption key for a DM conversation. +/// +/// `keyed_blake2b(key=ecdh_secret, msg=b"peeroxide-chat:dm-msgkey:v1:" || channel_key)` +pub fn dm_msg_key(ecdh_secret: &[u8; 32], channel_key: &[u8; 32]) -> [u8; 32] { + let mut msg = Vec::with_capacity(28 + 32); + msg.extend_from_slice(b"peeroxide-chat:dm-msgkey:v1:"); + msg.extend_from_slice(channel_key); + keyed_blake2b(ecdh_secret, &msg) +} + +/// Derive the invite encryption key from an ECDH secret and an invite feed pubkey. +/// +/// `keyed_blake2b(key=ecdh_secret, msg=b"peeroxide-chat:invite-key:v1:" || invite_feed_pubkey)` +pub fn invite_key(ecdh_secret: &[u8; 32], invite_feed_pubkey: &[u8; 32]) -> [u8; 32] { + let mut msg = Vec::with_capacity(29 + 32); + msg.extend_from_slice(b"peeroxide-chat:invite-key:v1:"); + msg.extend_from_slice(invite_feed_pubkey); + keyed_blake2b(ecdh_secret, &msg) +} + +/// Convert an Ed25519 public key to its X25519 (Montgomery) representation. +/// +/// Uses the birational map from Edwards to Montgomery form defined in +/// RFC 7748. Returns `None` if the input is not a valid compressed Edwards +/// point. 
+pub fn ed25519_pubkey_to_x25519(ed_pubkey: &[u8; 32]) -> Option<[u8; 32]> { + let compressed = CompressedEdwardsY::from_slice(ed_pubkey).ok()?; + let point = compressed.decompress()?; + let montgomery = point.to_montgomery(); + Some(montgomery.to_bytes()) +} + +/// Convert an Ed25519 secret key (libsodium 64-byte layout: seed ‖ pubkey) to +/// an X25519 private scalar. +/// +/// The X25519 scalar is derived as `SHA-512(seed)[0..32]` with the standard +/// X25519 clamping applied. +pub fn ed25519_secret_to_x25519(ed_secret: &[u8; 64]) -> [u8; 32] { + // secret_key layout: seed(32) || pubkey(32) + let seed = &ed_secret[..32]; + let h = Sha512::digest(seed); + let mut x25519_priv = [0u8; 32]; + x25519_priv.copy_from_slice(&h[..32]); + // Clamp per RFC 7748 §5 + x25519_priv[0] &= 248; + x25519_priv[31] &= 127; + x25519_priv[31] |= 64; + x25519_priv +} + +/// Perform an X25519 Diffie–Hellman key exchange. +/// +/// `my_priv` should be a clamped X25519 scalar (e.g. from +/// [`ed25519_secret_to_x25519`]). `their_pub` is the remote party's X25519 +/// public key. Returns the 32-byte shared secret. +pub fn x25519_ecdh(my_priv: &[u8; 32], their_pub: &[u8; 32]) -> [u8; 32] { + let point = MontgomeryPoint(*their_pub); + // `mul_clamped` performs the full clamped scalar multiplication defined + // by RFC 7748 §5, accepting a raw `[u8; 32]` scalar. + point.mul_clamped(*my_priv).to_bytes() +} + +/// Produce an Ed25519 ownership proof binding a feed public key to a channel. +/// +/// `sign(id_sk, b"peeroxide-chat:ownership:v1:" || feed_pubkey || channel_key)` +pub fn ownership_proof( + id_secret_key: &[u8; 64], + feed_pubkey: &[u8; 32], + channel_key: &[u8; 32], +) -> [u8; 64] { + let mut msg = Vec::with_capacity(28 + 32 + 32); + msg.extend_from_slice(b"peeroxide-chat:ownership:v1:"); + msg.extend_from_slice(feed_pubkey); + msg.extend_from_slice(channel_key); + sign_detached(&msg, id_secret_key) +} + +/// Verify an ownership proof. 
+/// +/// Returns `true` iff the proof is a valid Ed25519 signature by `id_pubkey` +/// over `b"peeroxide-chat:ownership:v1:" || feed_pubkey || channel_key`. +pub fn verify_ownership_proof( + id_pubkey: &[u8; 32], + feed_pubkey: &[u8; 32], + channel_key: &[u8; 32], + proof: &[u8; 64], +) -> bool { + let mut msg = Vec::with_capacity(28 + 32 + 32); + msg.extend_from_slice(b"peeroxide-chat:ownership:v1:"); + msg.extend_from_slice(feed_pubkey); + msg.extend_from_slice(channel_key); + verify_detached(proof, &msg, id_pubkey) +} + +/// Return the current epoch: `unix_timestamp_secs / 60`. +/// +/// Each epoch is one minute long. Announce topics are keyed by epoch and a +/// small bucket index so that peers can overlap their presence across +/// consecutive epochs without exact time synchronisation. +pub fn current_epoch() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system clock is before UNIX epoch") + .as_secs() + / 60 +} + +#[cfg(test)] +mod tests { + use super::*; + use peeroxide_dht::hyperdht::KeyPair; + + #[test] + fn test_len4() { + assert_eq!(len4(b""), [0, 0, 0, 0]); + assert_eq!(len4(b"hi"), [2, 0, 0, 0]); + assert_eq!(len4(&[0u8; 256]), [0, 1, 0, 0]); + } + + #[test] + fn test_channel_key_deterministic() { + let k1 = channel_key(b"general", None); + let k2 = channel_key(b"general", None); + assert_eq!(k1, k2, "channel_key is deterministic"); + + let k3 = channel_key(b"other", None); + assert_ne!(k1, k3, "different names produce different keys"); + } + + #[test] + fn test_channel_key_salt_differs_from_unsalted() { + let unsalted = channel_key(b"general", None); + let salted = channel_key(b"general", Some(b"s3cret")); + assert_ne!(unsalted, salted, "salt changes the key"); + + let salted2 = channel_key(b"general", Some(b"s3cret")); + assert_eq!(salted, salted2, "same salt → same key"); + } + + #[test] + fn test_dm_channel_key_symmetric() { + let a = [1u8; 32]; + let b = [2u8; 32]; + assert_eq!( + dm_channel_key(&a, &b), + 
dm_channel_key(&b, &a), + "dm_channel_key must be order-independent" + ); + } + + #[test] + fn test_dm_channel_key_differs_from_channel_key() { + let a = [1u8; 32]; + let b = [2u8; 32]; + let dm = dm_channel_key(&a, &b); + let ch = channel_key(&a, None); + assert_ne!(dm, ch); + } + + #[test] + fn test_announce_topic_varies_by_epoch_and_bucket() { + let ck = channel_key(b"general", None); + let t0 = announce_topic(&ck, 1000, 0); + let t1 = announce_topic(&ck, 1001, 0); + let t2 = announce_topic(&ck, 1000, 1); + assert_ne!(t0, t1, "different epochs → different topic"); + assert_ne!(t0, t2, "different buckets → different topic"); + assert_eq!( + announce_topic(&ck, 1000, 0), + t0, + "announce_topic is deterministic" + ); + } + + #[test] + fn test_msg_key_deterministic() { + let ck = channel_key(b"test", None); + assert_eq!(msg_key(&ck), msg_key(&ck)); + } + + #[test] + fn test_dm_msg_key_deterministic() { + let secret = [42u8; 32]; + let ck = channel_key(b"test", None); + assert_eq!(dm_msg_key(&secret, &ck), dm_msg_key(&secret, &ck)); + } + + #[test] + fn test_inbox_topic_varies() { + let pk = [3u8; 32]; + let t0 = inbox_topic(&pk, 500, 0); + let t1 = inbox_topic(&pk, 501, 0); + let t2 = inbox_topic(&pk, 500, 1); + assert_ne!(t0, t1); + assert_ne!(t0, t2); + } + + #[test] + fn test_invite_key_deterministic() { + let secret = [7u8; 32]; + let feed_pk = [8u8; 32]; + assert_eq!(invite_key(&secret, &feed_pk), invite_key(&secret, &feed_pk)); + } + + #[test] + fn test_ed25519_pubkey_to_x25519_valid() { + let kp = KeyPair::generate(); + let x25519_pub = ed25519_pubkey_to_x25519(&kp.public_key); + assert!( + x25519_pub.is_some(), + "valid Ed25519 pubkey should convert successfully" + ); + } + + #[test] + fn test_ed25519_pubkey_to_x25519_invalid() { + let bad = [0xFFu8; 32]; + let _ = ed25519_pubkey_to_x25519(&bad); + } + + #[test] + fn test_ecdh_shared_secret_matches() { + let kp_a = KeyPair::generate(); + let kp_b = KeyPair::generate(); + + let x_priv_a = 
ed25519_secret_to_x25519(&kp_a.secret_key); + let x_priv_b = ed25519_secret_to_x25519(&kp_b.secret_key); + + let x_pub_a = ed25519_pubkey_to_x25519(&kp_a.public_key) + .expect("keypair A pubkey must convert"); + let x_pub_b = ed25519_pubkey_to_x25519(&kp_b.public_key) + .expect("keypair B pubkey must convert"); + + let shared_ab = x25519_ecdh(&x_priv_a, &x_pub_b); + let shared_ba = x25519_ecdh(&x_priv_b, &x_pub_a); + + assert_eq!( + shared_ab, shared_ba, + "ECDH shared secret must be symmetric" + ); + } + + #[test] + fn test_ed25519_to_x25519_roundtrip() { + let kp_a = KeyPair::generate(); + let kp_b = KeyPair::generate(); + + let x_priv_a = ed25519_secret_to_x25519(&kp_a.secret_key); + let x_pub_b = ed25519_pubkey_to_x25519(&kp_b.public_key) + .expect("keypair B pubkey must convert"); + + let shared = x25519_ecdh(&x_priv_a, &x_pub_b); + assert_ne!(shared, [0u8; 32], "shared secret must not be the zero point"); + } + + #[test] + fn test_ownership_proof_verify() { + let id_kp = KeyPair::generate(); + let feed_pk = [0xABu8; 32]; + let ck = channel_key(b"myroom", None); + + let proof = ownership_proof(&id_kp.secret_key, &feed_pk, &ck); + assert!( + verify_ownership_proof(&id_kp.public_key, &feed_pk, &ck, &proof), + "ownership proof must verify with the correct key" + ); + } + + #[test] + fn test_ownership_proof_wrong_key_fails() { + let id_kp = KeyPair::generate(); + let other_kp = KeyPair::generate(); + let feed_pk = [0xABu8; 32]; + let ck = channel_key(b"myroom", None); + + let proof = ownership_proof(&id_kp.secret_key, &feed_pk, &ck); + assert!( + !verify_ownership_proof(&other_kp.public_key, &feed_pk, &ck, &proof), + "ownership proof must NOT verify with the wrong key" + ); + } + + #[test] + fn test_current_epoch_is_reasonable() { + let epoch = current_epoch(); + assert!(epoch > 28_000_000, "epoch should reflect a plausible current time"); + } + + #[test] + fn test_channel_key_fixed_vector() { + let key = channel_key(b"general", None); + let hex_key = 
hex::encode(key); + let key2 = channel_key(b"general", None); + assert_eq!(key, key2, "channel_key must be deterministic"); + assert_eq!(hex_key.len(), 64); + assert_ne!(key, [0u8; 32]); + } + + #[test] + fn test_channel_key_salted_fixed_vector() { + let key = channel_key(b"general", Some(b"mysalt")); + let key2 = channel_key(b"general", Some(b"mysalt")); + assert_eq!(key, key2, "salted channel_key must be deterministic"); + let unsalted = channel_key(b"general", None); + assert_ne!(key, unsalted); + } + + #[test] + fn test_msg_key_fixed_vector() { + let ck = channel_key(b"general", None); + let mk = msg_key(&ck); + let mk2 = msg_key(&ck); + assert_eq!(mk, mk2, "msg_key must be deterministic"); + assert_ne!(mk, ck, "msg_key must differ from channel_key"); + } + + #[test] + fn test_announce_topic_fixed_vector() { + let ck = channel_key(b"general", None); + let topic = announce_topic(&ck, 28000000, 2); + let topic2 = announce_topic(&ck, 28000000, 2); + assert_eq!(topic, topic2, "announce_topic must be deterministic"); + } + + #[test] + fn test_dm_channel_key_fixed_vector() { + let a = [0x01u8; 32]; + let b = [0x02u8; 32]; + let dk = dm_channel_key(&a, &b); + let dk2 = dm_channel_key(&a, &b); + assert_eq!(dk, dk2, "dm_channel_key must be deterministic"); + let dk_rev = dm_channel_key(&b, &a); + assert_eq!(dk, dk_rev, "dm_channel_key must be symmetric"); + } + + #[test] + fn test_invite_key_fixed_vector() { + let ecdh = [0x42u8; 32]; + let feed_pk = [0xABu8; 32]; + let ik = invite_key(&ecdh, &feed_pk); + let ik2 = invite_key(&ecdh, &feed_pk); + assert_eq!(ik, ik2, "invite_key must be deterministic"); + assert_ne!(ik, ecdh, "invite_key must differ from ecdh input"); + } + + #[test] + fn test_ecdh_deterministic_from_seed() { + let seed_a = [0x11u8; 32]; + let seed_b = [0x22u8; 32]; + let kp_a = KeyPair::from_seed(seed_a); + let kp_b = KeyPair::from_seed(seed_b); + + let x_priv_a = ed25519_secret_to_x25519(&kp_a.secret_key); + let x_pub_b = 
ed25519_pubkey_to_x25519(&kp_b.public_key).unwrap(); + let shared1 = x25519_ecdh(&x_priv_a, &x_pub_b); + + let x_priv_a2 = ed25519_secret_to_x25519(&kp_a.secret_key); + let x_pub_b2 = ed25519_pubkey_to_x25519(&kp_b.public_key).unwrap(); + let shared2 = x25519_ecdh(&x_priv_a2, &x_pub_b2); + + assert_eq!(shared1, shared2, "ECDH must be deterministic from same seeds"); + assert_ne!(shared1, [0u8; 32]); + } +} diff --git a/peeroxide-cli/src/cmd/chat/debug.rs b/peeroxide-cli/src/cmd/chat/debug.rs new file mode 100644 index 0000000..14d7c73 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/debug.rs @@ -0,0 +1,40 @@ +//! Debug logging for the chat subsystem. +//! +//! When enabled via `--debug`, prints timestamped event lines to stderr +//! for high-value network events useful for tracing and diagnostics. + +use std::sync::atomic::{AtomicBool, Ordering}; + +use chrono::Local; + +static DEBUG_ENABLED: AtomicBool = AtomicBool::new(false); + +pub fn enable() { + DEBUG_ENABLED.store(true, Ordering::Relaxed); +} + +pub fn is_enabled() -> bool { + DEBUG_ENABLED.load(Ordering::Relaxed) +} + +/// Format: `[YYYY-MM-DD HH:MM:SS] [DEBUG] {event}: [{op}] {details}` +pub fn log_event(event: &str, op: &str, details: &str) { + if !is_enabled() { + return; + } + let ts = Local::now().format("%Y-%m-%d %H:%M:%S"); + eprintln!("[{ts}] [DEBUG] {event}: [{op}] {details}"); +} + +/// Truncates to `first6...last6` when longer than 16 chars. 
+pub fn short_hex(hex: &str) -> String { + if hex.len() <= 16 { + hex.to_string() + } else { + format!("{}...{}", &hex[..6], &hex[hex.len() - 6..]) + } +} + +pub fn short_key(key: &[u8; 32]) -> String { + short_hex(&hex::encode(key)) +} diff --git a/peeroxide-cli/src/cmd/chat/display.rs b/peeroxide-cli/src/cmd/chat/display.rs new file mode 100644 index 0000000..d8c6276 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/display.rs @@ -0,0 +1,458 @@ +use std::collections::HashMap; +use std::time::{SystemTime, UNIX_EPOCH}; + +use crate::cmd::chat::known_users::SharedKnownUsers; +use super::names; +use crate::cmd::chat::profile::Friend; + +pub struct DisplayMessage { + pub id_pubkey: [u8; 32], + pub screen_name: String, + pub content: String, + pub timestamp: u64, + pub is_self: bool, + pub late: bool, +} + +pub struct DisplayState { + friends: HashMap<[u8; 32], Friend>, + last_identity_shown: HashMap<[u8; 32], u64>, + known_names: HashMap<[u8; 32], String>, + name_change_at: HashMap<[u8; 32], u64>, + known_users: SharedKnownUsers, +} + +/// Output of [`DisplayState::render_to`] — a formatted message line plus any +/// associated system notices (identity reveal, name change). The caller is +/// responsible for actually printing these to the user; this separation lets +/// the line-mode UI route them to stdout/stderr and the interactive TUI route +/// them through the renderer. +#[derive(Debug, Clone)] +pub struct RenderedOutput { + /// The formatted message line, e.g. `[12:34:56] [alice]: hello`. May be + /// prefixed with `[late] ` if the message was delivered out-of-order. + pub message_line: String, + /// Zero or more system notices (each rendered as a separate line) that + /// accompany this message: `*** vendor@short is fullkey`, name-change + /// announcements, etc. + pub system_notices: Vec, +} + +/// Render a [`DisplayMessage`] without any [`DisplayState`] context. +/// +/// Used by `LineUi` when it doesn't have access to the live `DisplayState` +/// (e.g. 
when called outside the main join loop). Produces a best-effort +/// formatted line — no friend aliases, no identity notices, no cooldown +/// marker. The main loop should always go through [`DisplayState::render_to`] +/// for full formatting; this helper exists for fall-back paths. +pub fn render_message_line(msg: &DisplayMessage) -> RenderedOutput { + let timestamp_str = format_timestamp(msg.timestamp); + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + let vendor_name = names::generate_name_from_seed(&msg.id_pubkey); + let display_name = if !msg.screen_name.is_empty() { + format!("<{}@{}>", msg.screen_name, shortkey) + } else { + format!("<{vendor_name}@{shortkey}>") + }; + let late_marker = if msg.late { "[late] " } else { "" }; + RenderedOutput { + message_line: format!( + "{late_marker}[{timestamp_str}] [{display_name}]: {}", + msg.content + ), + system_notices: Vec::new(), + } +} + +impl DisplayState { + pub fn new(friends: Vec<Friend>, known_users: SharedKnownUsers) -> Self { + let friends_map: HashMap<[u8; 32], Friend> = + friends.into_iter().map(|f| (f.pubkey, f)).collect(); + Self { + friends: friends_map, + last_identity_shown: HashMap::new(), + known_names: HashMap::new(), + name_change_at: HashMap::new(), + known_users, + } + } + + /// Reload the friends map from the given list. + /// Called periodically to pick up alias edits and nexus name refreshes. + pub fn reload_friends(&mut self, friends: Vec<Friend>) { + self.friends = friends.into_iter().map(|f| (f.pubkey, f)).collect(); + } + + /// Render `msg` and print directly to stdout/stderr. Convenience wrapper + /// around [`render_to`]; preserved for callers that don't yet route + /// through a `ChatUi`. New callers should prefer [`render_to`] so the + /// output can be directed appropriately (e.g. into the TUI scroll region). 
+ pub fn render(&mut self, msg: &DisplayMessage) { + let out = self.render_to(msg); + for notice in &out.system_notices { + eprintln!("{notice}"); + } + println!("{}", out.message_line); + } + + /// Render `msg` and return the formatted output, mutating internal state + /// (last-identity-shown, known-names, name-change-cooldown) along the way. + /// The caller is responsible for emitting the resulting strings. + pub fn render_to(&mut self, msg: &DisplayMessage) -> RenderedOutput { + let now_secs = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let timestamp_str = format_timestamp(msg.timestamp); + let display_name = self.format_display_name(msg, now_secs); + + let mut notices = Vec::new(); + + if self.should_show_identity(msg, now_secs) { + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + let fullkey = hex::encode(msg.id_pubkey); + let vendor_name = names::generate_name_from_seed(&msg.id_pubkey); + notices.push(format!("*** {vendor_name}@{shortkey} is {fullkey}")); + self.last_identity_shown.insert(msg.id_pubkey, now_secs); + } + + let late_marker = if msg.late { "[late] " } else { "" }; + let message_line = format!( + "{late_marker}[{timestamp_str}] [{display_name}]: {}", + msg.content + ); + + if !msg.screen_name.is_empty() { + let prev = self.known_names.get(&msg.id_pubkey); + if let Some(old_name) = prev { + if old_name.as_str() != msg.screen_name { + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + notices.push(format!( + "*** {}@{} changed screen name: \"{}\" → \"{}\"", + old_name, shortkey, old_name, msg.screen_name + )); + self.name_change_at.insert(msg.id_pubkey, now_secs); + } + } + self.known_names + .insert(msg.id_pubkey, msg.screen_name.clone()); + } + + RenderedOutput { + message_line, + system_notices: notices, + } + } + + fn format_display_name(&mut self, msg: &DisplayMessage, now_secs: u64) -> String { + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + let vendor_name = 
names::generate_name_from_seed(&msg.id_pubkey); + + let name_cooldown_active = self + .name_change_at + .get(&msg.id_pubkey) + .map(|&t| now_secs.saturating_sub(t) < 300) + .unwrap_or(false); + let bang = if name_cooldown_active { "!" } else { "" }; + + // Note: deliberately doesn't go through `NameResolver` — the + // message-rendering ladder here differs from the general resolver + // semantics in subtle ways. Specifically, the "friend without + // alias" case uses the VENDOR name (not the cached known name) in + // the parenthesised stable-identifier slot, while the general + // resolver gives the cached known name priority. The two + // semantics are different on purpose: this path wants a + // pubkey-derived stable identifier for the friendship marker, + // independent of whatever screen name the friend is currently + // using. Friends-aware bar / slash output should compose + // `NameResolver` results directly. + if let Some(friend) = self.friends.get(&msg.id_pubkey) { + if let Some(ref alias) = friend.alias { + if msg.screen_name.is_empty() || *alias == msg.screen_name { + format!("({alias}){bang}") + } else { + format!("({alias}) <{}>{bang}", msg.screen_name) + } + } else if !msg.screen_name.is_empty() { + format!("({vendor_name}) <{}@{}>{bang}", msg.screen_name, shortkey) + } else { + format!("({vendor_name}){bang}") + } + } else if !msg.screen_name.is_empty() { + format!("<{}@{}>{bang}", msg.screen_name, shortkey) + } else if let Some(cached_name) = self.known_users.get(&msg.id_pubkey) { + format!("<{}@{}>{bang}", cached_name, shortkey) + } else { + format!("<{vendor_name}@{shortkey}>{bang}") + } + } + + fn should_show_identity(&mut self, msg: &DisplayMessage, now_secs: u64) -> bool { + if msg.is_self { + return false; + } + if let Some(friend) = self.friends.get(&msg.id_pubkey) { + if friend.alias.is_some() { + return false; + } + } + if self.known_users.get(&msg.id_pubkey).is_some() { + return false; + } + match 
self.last_identity_shown.get(&msg.id_pubkey) { + Some(&last) => now_secs.saturating_sub(last) > 600, + None => true, + } + } +} + +fn format_timestamp(unix_secs: u64) -> String { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let secs = unix_secs; + let s = secs % 60; + let m = (secs / 60) % 60; + let h = (secs / 3600) % 24; + + let today_start = now - (now % 86400); + if secs >= today_start { + format!("{h:02}:{m:02}:{s:02}") + } else { + let days = secs / 86400; + let y = 1970 + (days / 365); + let d = days % 365; + let mo = d / 30 + 1; + let day = d % 30 + 1; + format!("{y}-{mo:02}-{day:02} {h:02}:{m:02}:{s:02}") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + #[test] + fn format_display_name_friend_with_alias() { + let friend = Friend { + pubkey: [1u8; 32], + alias: Some("alice".to_string()), + cached_name: None, + cached_bio_line: None, + }; + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + let mut state = DisplayState::new(vec![friend], ku); + let msg = DisplayMessage { + id_pubkey: [1u8; 32], + screen_name: "alice".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name = state.format_display_name(&msg, 0); + assert_eq!(name, "(alice)"); + } + + #[test] + fn format_display_name_non_friend() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + let mut state = DisplayState::new(vec![], ku); + let msg = DisplayMessage { + id_pubkey: [0xab; 32], + screen_name: "bob".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name = state.format_display_name(&msg, 0); + assert!(name.starts_with("<bob@")); + assert!(name.ends_with('>')); + } + + #[test] + fn format_display_name_non_friend_vendor_fallback() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + let 
mut state = DisplayState::new(vec![], ku); + let msg = DisplayMessage { + id_pubkey: [0x11; 32], + screen_name: "".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let vendor = names::generate_name_from_seed(&msg.id_pubkey); + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + let name = state.format_display_name(&msg, 0); + assert_eq!(name, format!("<{vendor}@{shortkey}>")); + } + + #[test] + fn format_display_name_with_name_change_cooldown() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + let mut state = DisplayState::new(vec![], ku); + state.known_names.insert([0xab; 32], "old_name".to_string()); + state.name_change_at.insert([0xab; 32], 1000); + + let msg = DisplayMessage { + id_pubkey: [0xab; 32], + screen_name: "new_name".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name_during_cooldown = state.format_display_name(&msg, 1100); + assert!(name_during_cooldown.ends_with('!'), "should show ! during 300s cooldown"); + + let name_after_cooldown = state.format_display_name(&msg, 1400); + assert!(!name_after_cooldown.ends_with('!'), "should NOT show ! 
after 300s"); + } + + #[test] + fn format_display_name_known_users_fallback() { + let dir = TempDir::new().unwrap(); + let mut ku = SharedKnownUsers::new(dir.path().join("known_users")); + ku.update(&[0xabu8; 32], "bob").unwrap(); + + let mut state = DisplayState::new(vec![], ku); + let msg = DisplayMessage { + id_pubkey: [0xabu8; 32], + screen_name: "".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name = state.format_display_name(&msg, 0); + let shortkey = &hex::encode([0xabu8; 32])[..8]; + assert_eq!(name, format!("<bob@{shortkey}>")); + } + + #[test] + fn format_display_name_friend_no_alias_no_wire_uses_vendor_name() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + + let friend = Friend { + pubkey: [2u8; 32], + alias: None, + cached_name: None, + cached_bio_line: None, + }; + let mut state = DisplayState::new(vec![friend], ku); + let msg = DisplayMessage { + id_pubkey: [2u8; 32], + screen_name: "".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let vendor = names::generate_name_from_seed(&msg.id_pubkey); + let name = state.format_display_name(&msg, 0); + assert_eq!(name, format!("({vendor})")); + } + + #[test] + fn format_display_name_friend_no_alias_with_wire_uses_vendor_anchor() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + + let friend = Friend { + pubkey: [3u8; 32], + alias: None, + cached_name: None, + cached_bio_line: None, + }; + let mut state = DisplayState::new(vec![friend], ku); + let msg = DisplayMessage { + id_pubkey: [3u8; 32], + screen_name: "wire_name".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let vendor = names::generate_name_from_seed(&msg.id_pubkey); + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + let name = state.format_display_name(&msg, 0); + assert_eq!(name, 
format!("({vendor}) <wire_name@{shortkey}>")); + } + + #[test] + fn format_display_name_wire_precedence() { + let dir = TempDir::new().unwrap(); + let mut ku = SharedKnownUsers::new(dir.path().join("known_users")); + ku.update(&[0xabu8; 32], "old_bob").unwrap(); + + let mut state = DisplayState::new(vec![], ku); + let msg = DisplayMessage { + id_pubkey: [0xabu8; 32], + screen_name: "new_bob".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name = state.format_display_name(&msg, 0); + let shortkey = &hex::encode([0xabu8; 32])[..8]; + assert_eq!(name, format!("<new_bob@{shortkey}>")); + } + + #[test] + fn format_display_name_friend_priority_over_known_users() { + let dir = TempDir::new().unwrap(); + let mut ku = SharedKnownUsers::new(dir.path().join("known_users")); + ku.update(&[1u8; 32], "bob_cache").unwrap(); + + let friend = Friend { + pubkey: [1u8; 32], + alias: Some("bestie".to_string()), + cached_name: None, + cached_bio_line: None, + }; + let mut state = DisplayState::new(vec![friend], ku); + let msg = DisplayMessage { + id_pubkey: [1u8; 32], + screen_name: "bob_wire".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + let name = state.format_display_name(&msg, 0); + assert!(name.starts_with("(bestie)"), "friend alias should take priority: {}", name); + } + + #[test] + fn render_identity_notice_includes_vendor_name() { + let dir = TempDir::new().unwrap(); + let ku = SharedKnownUsers::new(dir.path().join("known_users")); + let mut state = DisplayState::new(vec![], ku); + let msg = DisplayMessage { + id_pubkey: [0x11; 32], + screen_name: "".to_string(), + content: "hi".to_string(), + timestamp: 0, + is_self: false, + late: false, + }; + + let vendor = names::generate_name_from_seed(&msg.id_pubkey); + let shortkey = &hex::encode(msg.id_pubkey)[..8]; + assert!(state.should_show_identity(&msg, 0)); + let expected = format!("*** {vendor}@{shortkey} is {}", hex::encode(msg.id_pubkey)); + 
assert!(expected.contains(&format!("{vendor}@{shortkey}"))); + } +} diff --git a/peeroxide-cli/src/cmd/chat/dm.rs b/peeroxide-cli/src/cmd/chat/dm.rs new file mode 100644 index 0000000..60c2814 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/dm.rs @@ -0,0 +1,15 @@ +use crate::cmd::chat::crypto; + +pub fn dm_channel_key(my_pubkey: &[u8; 32], their_pubkey: &[u8; 32]) -> [u8; 32] { + crypto::dm_channel_key(my_pubkey, their_pubkey) +} + +pub fn dm_msg_key(my_secret: &[u8; 64], their_pubkey: &[u8; 32], channel_key: &[u8; 32]) -> [u8; 32] { + let my_x25519 = crypto::ed25519_secret_to_x25519(my_secret); + let their_x25519 = match crypto::ed25519_pubkey_to_x25519(their_pubkey) { + Some(pk) => pk, + None => return [0u8; 32], + }; + let ecdh_secret = crypto::x25519_ecdh(&my_x25519, &their_x25519); + crypto::dm_msg_key(&ecdh_secret, channel_key) +} diff --git a/peeroxide-cli/src/cmd/chat/dm_cmd.rs b/peeroxide-cli/src/cmd/chat/dm_cmd.rs new file mode 100644 index 0000000..01f33f9 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/dm_cmd.rs @@ -0,0 +1,145 @@ +use clap::Parser; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::known_users; +use crate::cmd::chat::name_resolver::NameResolver; +use crate::cmd::chat::profile; +use crate::cmd::chat::session::{self, DmExtras, SessionConfig}; +use crate::config::ResolvedConfig; + +use peeroxide_dht::hyperdht::KeyPair; + +#[derive(Parser)] +pub struct DmArgs { + /// Recipient: alias, pubkey hex (64 chars), @shortkey, name@shortkey, or screen name + pub recipient: String, + + /// Identity profile to use + #[arg(long, default_value = "default")] + pub profile: String, + + /// Do not publish personal nexus + #[arg(long)] + pub no_nexus: bool, + + /// Do not refresh friend nexus data + #[arg(long)] + pub no_friends: bool, + + /// Listen only + #[arg(long)] + pub read_only: bool, + + /// Equivalent to --no-nexus --read-only --no-friends + #[arg(long)] + pub stealth: bool, + + /// Message to include in the startup inbox nudge + #[arg(long)] + 
pub message: Option<String>, + + /// Max feed keypair lifetime before rotation (minutes) + #[arg(long, default_value = "60")] + pub feed_lifetime: u64, + + /// Max messages to publish in a single batch. + #[arg(long, default_value = "16")] + pub batch_size: usize, + + /// Idle time (ms) the publisher waits to accumulate additional + /// messages into the current batch before flushing. + #[arg(long, default_value = "50")] + pub batch_wait_ms: u64, + + /// After stdin closes (EOF), remain joined to the channel in + /// read-only mode instead of exiting. Default is to exit cleanly + /// once stdin is exhausted. + #[arg(long)] + pub stay_after_eof: bool, + + /// Disable the background inbox monitor + INBOX status bar segment + /// + /inbox slash command. Default is enabled. + #[arg(long)] + pub no_inbox: bool, + + /// Inbox polling interval in seconds. + #[arg(long, default_value = "15")] + pub inbox_poll_interval: u64, +} + +pub async fn run(args: DmArgs, cfg: &ResolvedConfig, line_mode: bool) -> i32 { + let read_only = args.read_only || args.stealth; + let no_nexus = args.no_nexus || args.stealth; + let no_friends = args.no_friends || args.stealth; + + let recipient_pubkey = match super::resolve_recipient(&args.profile, &args.recipient) { + Ok(pk) => pk, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + let prof = match profile::load_or_create_profile(&args.profile) { + Ok(p) => p, + Err(e) => { + eprintln!("error: failed to load profile '{}': {e}", args.profile); + return 1; + } + }; + + let id_keypair = KeyPair::from_seed(prof.seed); + + // Channel + message keys for a DM are derived deterministically from + // the two identity pubkeys (channel) plus the X25519-ECDH shared + // secret (message). The session is then a normal "private channel" + // with these specialized keys; everything downstream + // (announce topics, feed records, encryption, ordering) treats them + // identically to a non-DM private channel. 
+ let channel_key = crypto::dm_channel_key(&id_keypair.public_key, &recipient_pubkey); + let ecdh_secret = { + let my_x25519 = crypto::ed25519_secret_to_x25519(&id_keypair.secret_key); + let Some(their_x25519) = crypto::ed25519_pubkey_to_x25519(&recipient_pubkey) else { + eprintln!("error: invalid recipient public key (cannot convert to X25519)"); + return 1; + }; + crypto::x25519_ecdh(&my_x25519, &their_x25519) + }; + let message_key = crypto::dm_msg_key(&ecdh_secret, &channel_key); + + // Resolve the recipient's display name for the bar / greeting via + // the canonical name resolver (friend alias > known screen name > + // vendor name fallback). + let friends = profile::load_friends(&args.profile).unwrap_or_default(); + let known = known_users::load_shared_users().unwrap_or_default(); + let resolved = NameResolver::new(&friends, &known).resolve(&recipient_pubkey); + + let bar_name = format!("DM:{}", resolved.bar_label()); + let greeting = format!("*** DM with {}", resolved.formal()); + + let config = SessionConfig { + bar_name, + greeting, + channel_key, + message_key, + profile: args.profile, + prof, + id_keypair, + read_only, + no_nexus, + no_friends, + no_inbox: args.no_inbox, + feed_lifetime: args.feed_lifetime, + batch_size: args.batch_size, + batch_wait_ms: args.batch_wait_ms, + inbox_poll_interval: args.inbox_poll_interval, + stay_after_eof: args.stay_after_eof, + line_mode, + dm: Some(DmExtras { + recipient_pubkey, + initial_message: args.message, + }), + }; + + session::run(config, cfg).await +} diff --git a/peeroxide-cli/src/cmd/chat/feed.rs b/peeroxide-cli/src/cmd/chat/feed.rs new file mode 100644 index 0000000..eb48847 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/feed.rs @@ -0,0 +1,254 @@ +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; +use rand::Rng; +use tokio::sync::watch; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::wire::FeedRecord; + +pub struct FeedState { + pub feed_keypair: KeyPair, + pub 
id_keypair: KeyPair, + pub channel_key: [u8; 32], + pub ownership_proof: [u8; 64], + pub msg_hashes: Vec<[u8; 32]>, + pub msg_count: u8, + pub summary_hash: [u8; 32], + pub next_feed_pubkey: [u8; 32], + pub seq: u64, + pub prev_msg_hash: [u8; 32], + pub bucket_permutation: [u8; 4], + pub bucket_index: usize, + pub feed_lifetime_minutes: u64, + pub feed_lifetime_secs: u64, + pub created_at: std::time::Instant, +} + +impl FeedState { + pub fn new( + feed_keypair: KeyPair, + id_keypair: KeyPair, + channel_key: [u8; 32], + ownership_proof: [u8; 64], + feed_lifetime_minutes: u64, + ) -> Self { + let mut rng = rand::rng(); + let mut bucket_permutation: [u8; 4] = [0, 1, 2, 3]; + for i in (1..4).rev() { + let j = rng.random_range(0..=i); + bucket_permutation.swap(i, j); + } + + let wobble: f64 = rng.random_range(0.5..1.5); + let feed_lifetime_secs = (feed_lifetime_minutes as f64 * 60.0 * wobble) as u64; + + Self { + feed_keypair, + id_keypair, + channel_key, + ownership_proof, + msg_hashes: Vec::new(), + msg_count: 0, + summary_hash: [0u8; 32], + next_feed_pubkey: [0u8; 32], + seq: 0, + prev_msg_hash: [0u8; 32], + bucket_permutation, + bucket_index: 0, + feed_lifetime_minutes, + feed_lifetime_secs, + created_at: std::time::Instant::now(), + } + } + + pub fn next_bucket(&mut self) -> u8 { + let b = self.bucket_permutation[self.bucket_index % 4]; + self.bucket_index += 1; + b + } + + pub fn serialize_feed_record(&self) -> Vec<u8> { + let record = FeedRecord { + id_pubkey: self.id_keypair.public_key, + ownership_proof: self.ownership_proof, + next_feed_pubkey: self.next_feed_pubkey, + summary_hash: self.summary_hash, + msg_count: self.msg_count, + msg_hashes: self.msg_hashes.clone(), + }; + record.serialize().unwrap_or_default() + } + + pub fn needs_rotation(&self) -> bool { + self.created_at.elapsed().as_secs() >= self.feed_lifetime_secs + } + + /// Rotate to a new feed keypair. 
Sets `next_feed_pubkey` on the current + /// state (so the old feed points to the new one), then returns a fresh + /// `FeedState` for the new keypair. + pub fn rotate(&mut self) -> FeedState { + let new_keypair = KeyPair::generate(); + self.next_feed_pubkey = new_keypair.public_key; + + let new_ownership = crypto::ownership_proof( + &self.id_keypair.secret_key, + &new_keypair.public_key, + &self.channel_key, + ); + + FeedState::new( + new_keypair, + self.id_keypair.clone(), + self.channel_key, + new_ownership, + self.feed_lifetime_minutes, + ) + } +} + +pub async fn run_feed_refresh( + handle: HyperDhtHandle, + feed_keypair: KeyPair, + mut state_rx: watch::Receiver<(Vec<u8>, u64)>, +) { + let refresh_interval = tokio::time::Duration::from_secs(480); + let mut interval = tokio::time::interval(refresh_interval); + interval.tick().await; + + loop { + interval.tick().await; + let (record_data, seq) = state_rx.borrow_and_update().clone(); + match handle.mutable_put(&feed_keypair, &record_data, seq).await { + Ok(_) => { + debug::log_event( + "Feed refresh", + "mutable_put", + &format!( + "feed_pubkey={}, seq={seq}", + debug::short_key(&feed_keypair.public_key), + ), + ); + } + Err(e) => { + tracing::warn!("feed refresh failed: {e}"); + } + } + } +} + +pub async fn run_rotation_overlap_refresh( + handle: HyperDhtHandle, + feed_keypair: KeyPair, + record_data: Vec<u8>, + seq: u64, +) { + tokio::time::sleep(tokio::time::Duration::from_secs(480)).await; + match handle.mutable_put(&feed_keypair, &record_data, seq).await { + Ok(_) => { + debug::log_event( + "Rotation overlap refresh", + "mutable_put", + &format!( + "feed_pubkey={}, seq={seq}", + debug::short_key(&feed_keypair.public_key), + ), + ); + } + Err(e) => { + tracing::warn!("rotation overlap refresh failed: {e}"); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn make_feed_state(lifetime_minutes: u64) -> FeedState { + let feed_kp = KeyPair::generate(); + let id_kp = KeyPair::generate(); + let channel_key = 
[0x42u8; 32]; + let ownership = crypto::ownership_proof(&id_kp.secret_key, &feed_kp.public_key, &channel_key); + FeedState::new(feed_kp, id_kp, channel_key, ownership, lifetime_minutes) + } + + #[test] + fn new_feed_state_starts_empty() { + let fs = make_feed_state(60); + assert_eq!(fs.msg_hashes.len(), 0); + assert_eq!(fs.msg_count, 0); + assert_eq!(fs.seq, 0); + assert_eq!(fs.next_feed_pubkey, [0u8; 32]); + assert_eq!(fs.summary_hash, [0u8; 32]); + } + + #[test] + fn needs_rotation_false_when_fresh() { + let fs = make_feed_state(60); + assert!(!fs.needs_rotation()); + } + + #[test] + fn rotate_sets_next_feed_pubkey() { + let mut fs = make_feed_state(60); + let old_pk = fs.feed_keypair.public_key; + let new_fs = fs.rotate(); + assert_ne!(fs.next_feed_pubkey, [0u8; 32]); + assert_eq!(fs.next_feed_pubkey, new_fs.feed_keypair.public_key); + assert_ne!(new_fs.feed_keypair.public_key, old_pk); + } + + #[test] + fn rotate_preserves_identity() { + let mut fs = make_feed_state(60); + let id_pk = fs.id_keypair.public_key; + let new_fs = fs.rotate(); + assert_eq!(new_fs.id_keypair.public_key, id_pk); + assert_eq!(new_fs.channel_key, fs.channel_key); + } + + #[test] + fn rotate_new_feed_starts_clean() { + let mut fs = make_feed_state(60); + fs.msg_hashes.push([1u8; 32]); + fs.msg_count = 1; + fs.seq = 5; + let new_fs = fs.rotate(); + assert_eq!(new_fs.msg_hashes.len(), 0); + assert_eq!(new_fs.msg_count, 0); + assert_eq!(new_fs.seq, 0); + } + + #[test] + fn next_bucket_cycles_through_permutation() { + let mut fs = make_feed_state(60); + let mut seen = Vec::new(); + for _ in 0..4 { + seen.push(fs.next_bucket()); + } + seen.sort(); + assert_eq!(seen, vec![0, 1, 2, 3]); + } + + #[test] + fn serialize_feed_record_not_empty() { + let fs = make_feed_state(60); + let data = fs.serialize_feed_record(); + assert!(!data.is_empty()); + } + + #[test] + fn feed_lifetime_has_wobble() { + let fs1 = make_feed_state(60); + let fs2 = make_feed_state(60); + let fs3 = make_feed_state(60); + let 
lifetimes = [fs1.feed_lifetime_secs, fs2.feed_lifetime_secs, fs3.feed_lifetime_secs]; + let all_same = lifetimes[0] == lifetimes[1] && lifetimes[1] == lifetimes[2]; + let min = 60 * 60 / 2; + let max = 60 * 60 * 3 / 2; + for l in &lifetimes { + assert!(*l >= min && *l <= max, "lifetime {l} not in expected range [{min}, {max}]"); + } + assert!(!all_same || lifetimes[0] != 3600, "extremely unlikely: 3 feeds with identical non-60min lifetime"); + } +} diff --git a/peeroxide-cli/src/cmd/chat/inbox.rs b/peeroxide-cli/src/cmd/chat/inbox.rs new file mode 100644 index 0000000..001c9c6 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/inbox.rs @@ -0,0 +1,294 @@ +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; +use rand::Rng; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::known_users::KnownUser; +use crate::cmd::chat::wire::{self, InviteRecord, INVITE_TYPE_DM}; + +pub async fn send_dm_invite( + handle: &HyperDhtHandle, + invite_feed_keypair: &KeyPair, + id_keypair: &KeyPair, + recipient_pubkey: &[u8; 32], + channel_key: &[u8; 32], + real_feed_pubkey: &[u8; 32], + message: &str, +) -> Result<(), String> { + let ownership = crypto::ownership_proof( + &id_keypair.secret_key, + &invite_feed_keypair.public_key, + channel_key, + ); + + let invite = InviteRecord { + id_pubkey: id_keypair.public_key, + ownership_proof: ownership, + next_feed_pubkey: *real_feed_pubkey, + invite_type: INVITE_TYPE_DM, + payload: message.as_bytes().to_vec(), + }; + + let plaintext = invite.serialize().map_err(|e| format!("invite serialize: {e}"))?; + + let invite_x25519_priv = crypto::ed25519_secret_to_x25519(&invite_feed_keypair.secret_key); + let recipient_x25519 = crypto::ed25519_pubkey_to_x25519(recipient_pubkey) + .ok_or_else(|| "invalid recipient pubkey".to_string())?; + let ecdh_secret = crypto::x25519_ecdh(&invite_x25519_priv, &recipient_x25519); + let inv_key = crypto::invite_key(&ecdh_secret, &invite_feed_keypair.public_key); + + let encrypted = 
wire::encrypt_invite(&inv_key, &plaintext) + .map_err(|e| format!("invite encrypt: {e}"))?; + + handle + .mutable_put(invite_feed_keypair, &encrypted, 0) + .await + .map_err(|e| format!("invite mutable_put: {e}"))?; + + debug::log_event( + "Invite sent", + "mutable_put", + &format!( + "invite_feed_pk={}, sender={}, recipient={}, invite_type=0x{:02x}, payload_len={}", + debug::short_key(&invite_feed_keypair.public_key), + debug::short_key(&id_keypair.public_key), + debug::short_key(recipient_pubkey), + INVITE_TYPE_DM, + message.len(), + ), + ); + + let epoch = crypto::current_epoch(); + let bucket = rand::rng().random_range(0..4u8); + let topic = crypto::inbox_topic(recipient_pubkey, epoch, bucket); + let _ = handle.announce(topic, invite_feed_keypair, &[]).await; + + debug::log_event( + "Inbox announce", + "announce", + &format!( + "invite_feed_pk={}, recipient={}, epoch={epoch}, bucket={bucket}", + debug::short_key(&invite_feed_keypair.public_key), + debug::short_key(recipient_pubkey), + ), + ); + + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn send_dm_nudge( + handle: &HyperDhtHandle, + invite_feed_keypair: &KeyPair, + id_keypair: &KeyPair, + recipient_pubkey: &[u8; 32], + channel_key: &[u8; 32], + real_feed_pubkey: &[u8; 32], + message_text: &str, + seq: u64, +) -> Result<(), String> { + let ownership = crypto::ownership_proof( + &id_keypair.secret_key, + &invite_feed_keypair.public_key, + channel_key, + ); + + let payload = if message_text.len() > 800 { + message_text.as_bytes()[..800].to_vec() + } else { + message_text.as_bytes().to_vec() + }; + + let invite = InviteRecord { + id_pubkey: id_keypair.public_key, + ownership_proof: ownership, + next_feed_pubkey: *real_feed_pubkey, + invite_type: INVITE_TYPE_DM, + payload, + }; + + let plaintext = invite.serialize().map_err(|e| format!("nudge serialize: {e}"))?; + + let invite_x25519_priv = crypto::ed25519_secret_to_x25519(&invite_feed_keypair.secret_key); + let recipient_x25519 = 
crypto::ed25519_pubkey_to_x25519(recipient_pubkey) + .ok_or_else(|| "invalid recipient pubkey".to_string())?; + let ecdh_secret = crypto::x25519_ecdh(&invite_x25519_priv, &recipient_x25519); + let inv_key = crypto::invite_key(&ecdh_secret, &invite_feed_keypair.public_key); + + let encrypted = wire::encrypt_invite(&inv_key, &plaintext) + .map_err(|e| format!("nudge encrypt: {e}"))?; + + handle + .mutable_put(invite_feed_keypair, &encrypted, seq + 1) + .await + .map_err(|e| format!("nudge mutable_put: {e}"))?; + + debug::log_event( + "Inbox nudge sent", + "mutable_put", + &format!( + "invite_feed_pk={}, sender={}, recipient={}, seq={}", + debug::short_key(&invite_feed_keypair.public_key), + debug::short_key(&id_keypair.public_key), + debug::short_key(recipient_pubkey), + seq + 1, + ), + ); + + let epoch = crypto::current_epoch(); + let bucket = rand::rng().random_range(0..4u8); + let topic = crypto::inbox_topic(recipient_pubkey, epoch, bucket); + let _ = handle.announce(topic, invite_feed_keypair, &[]).await; + + debug::log_event( + "Inbox announce", + "announce", + &format!( + "invite_feed_pk={}, recipient={}, epoch={epoch}, bucket={bucket}", + debug::short_key(&invite_feed_keypair.public_key), + debug::short_key(recipient_pubkey), + ), + ); + + Ok(()) +} + +pub struct DecodedInvite { + pub sender_pubkey: [u8; 32], + pub next_feed_pubkey: [u8; 32], + pub invite_type: u8, + pub payload: Vec<u8>, +} + +impl std::fmt::Debug for DecodedInvite { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DecodedInvite") + .field("sender_pubkey", &hex::encode(self.sender_pubkey)) + .field("invite_type", &format_args!("0x{:02x}", self.invite_type)) + .field("payload_len", &self.payload.len()) + .finish() + } +} + +impl Clone for DecodedInvite { + fn clone(&self) -> Self { + Self { + sender_pubkey: self.sender_pubkey, + next_feed_pubkey: self.next_feed_pubkey, + invite_type: self.invite_type, + payload: self.payload.clone(), + } + } +} + +pub fn 
decrypt_and_verify_invite( + encrypted_data: &[u8], + invite_feed_pubkey: &[u8; 32], + my_keypair: &KeyPair, +) -> Result<DecodedInvite, String> { + let invite_x25519_pub = crypto::ed25519_pubkey_to_x25519(invite_feed_pubkey) + .ok_or_else(|| "invalid invite feed pubkey".to_string())?; + let my_x25519_priv = crypto::ed25519_secret_to_x25519(&my_keypair.secret_key); + let ecdh_secret = crypto::x25519_ecdh(&my_x25519_priv, &invite_x25519_pub); + let inv_key = crypto::invite_key(&ecdh_secret, invite_feed_pubkey); + + let plaintext = + wire::decrypt_invite(&inv_key, encrypted_data).map_err(|e| format!("decrypt: {e}"))?; + + let record = + InviteRecord::deserialize(&plaintext).map_err(|e| format!("parse invite: {e}"))?; + + let candidate_dm_key = + crypto::dm_channel_key(&record.id_pubkey, &my_keypair.public_key); + if crypto::verify_ownership_proof( + &record.id_pubkey, + invite_feed_pubkey, + &candidate_dm_key, + &record.ownership_proof, + ) { + return Ok(DecodedInvite { + sender_pubkey: record.id_pubkey, + next_feed_pubkey: record.next_feed_pubkey, + invite_type: record.invite_type, + payload: record.payload, + }); + } + + if record.invite_type == wire::INVITE_TYPE_PRIVATE && record.payload.len() >= 3 { + let name_len = record.payload[0] as usize; + if record.payload.len() >= 1 + name_len + 2 { + let name = &record.payload[1..1 + name_len]; + let salt_len = + u16::from_le_bytes([record.payload[1 + name_len], record.payload[2 + name_len]]) + as usize; + if record.payload.len() >= 3 + name_len + salt_len { + let salt = &record.payload[3 + name_len..3 + name_len + salt_len]; + let candidate_key = crypto::channel_key(name, Some(salt)); + if crypto::verify_ownership_proof( + &record.id_pubkey, + invite_feed_pubkey, + &candidate_key, + &record.ownership_proof, + ) { + return Ok(DecodedInvite { + sender_pubkey: record.id_pubkey, + next_feed_pubkey: record.next_feed_pubkey, + invite_type: record.invite_type, + payload: record.payload, + }); + } + } + } + } + + Err("ownership proof verification 
failed".to_string()) +} + +pub fn display_invite( + number: u32, + invite: &DecodedInvite, + _my_pubkey: &[u8; 32], + profile_name: &str, + known_users: &[KnownUser], +) { + let sender_hex = hex::encode(invite.sender_pubkey); + let short = &sender_hex[..8]; + + let sender_name = known_users + .iter() + .find(|u| u.pubkey == invite.sender_pubkey) + .map(|u| u.screen_name.as_str()) + .unwrap_or(short); + + if invite.invite_type == INVITE_TYPE_DM { + let lure = String::from_utf8_lossy(&invite.payload); + println!("[INVITE #{number}] DM from {sender_name} ({short})"); + if !lure.is_empty() { + println!(" \"{lure}\""); + } + println!(" → peeroxide chat dm {sender_hex} --profile {profile_name}"); + } else { + if invite.payload.len() >= 3 { + let name_len = invite.payload[0] as usize; + if invite.payload.len() >= 1 + name_len + 2 { + let name = String::from_utf8_lossy(&invite.payload[1..1 + name_len]); + let salt_len = u16::from_le_bytes([ + invite.payload[1 + name_len], + invite.payload[2 + name_len], + ]) as usize; + if invite.payload.len() >= 3 + name_len + salt_len { + let salt = + String::from_utf8_lossy(&invite.payload[3 + name_len..3 + name_len + salt_len]); + println!( + "[INVITE #{number}] Channel \"{name}\" from {sender_name} ({short})" + ); + println!( + " → peeroxide chat join \"{name}\" --group \"{salt}\" --profile {profile_name}" + ); + return; + } + } + } + println!("[INVITE #{number}] Channel invite from {sender_name} ({short})"); + } +} diff --git a/peeroxide-cli/src/cmd/chat/inbox_cmd.rs b/peeroxide-cli/src/cmd/chat/inbox_cmd.rs new file mode 100644 index 0000000..56a401d --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/inbox_cmd.rs @@ -0,0 +1,103 @@ +use clap::Parser; + +use crate::cmd::chat::inbox_monitor::{format_invite_lines, InboxMonitor}; +use crate::cmd::chat::known_users; +use crate::cmd::chat::profile; +use crate::cmd::{build_dht_config, sigterm_recv}; +use crate::config::ResolvedConfig; + +use libudx::UdxRuntime; +use 
peeroxide_dht::hyperdht::{self, KeyPair}; + +#[derive(Parser)] +pub struct InboxArgs { + /// Identity profile to use + #[arg(long, default_value = "default")] + pub profile: String, + + /// Inbox polling interval in seconds + #[arg(long, default_value = "15")] + pub poll_interval: u64, + + /// Do not publish personal nexus + #[arg(long)] + pub no_nexus: bool, + + /// Do not refresh friend nexus data + #[arg(long)] + pub no_friends: bool, +} + +pub async fn run(args: InboxArgs, cfg: &ResolvedConfig) -> i32 { + let prof = match profile::load_or_create_profile(&args.profile) { + Ok(p) => p, + Err(e) => { + eprintln!("error: failed to load profile '{}': {e}", args.profile); + return 1; + } + }; + + let id_keypair = KeyPair::from_seed(prof.seed); + + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: failed to create UDP runtime: {e}"); + return 1; + } + }; + + let (task, handle, _server_rx) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: failed to start DHT: {e}"); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + return 1; + } + + let table_size = handle.table_size().await.unwrap_or(0); + eprintln!("*** connection established with DHT ({table_size} peers in routing table)"); + let poll_interval_secs = args.poll_interval.max(1); + eprintln!("*** monitoring inbox (polling every {poll_interval_secs}s)"); + + let cached_users = known_users::load_shared_users().unwrap_or_default(); + let monitor = InboxMonitor::new(cached_users); + + let poll_interval = tokio::time::Duration::from_secs(poll_interval_secs); + let mut interval = tokio::time::interval(poll_interval); + + loop { + tokio::select! 
{ + _ = interval.tick() => { + let new_invites = monitor.poll_once(&handle, &id_keypair).await; + // Live-print and drain at once so the unread buffer doesn't + // grow unboundedly; the CLI's whole purpose is to surface + // new invites as they arrive. + let _ = monitor.take_unread(); + for inv in &new_invites { + for line in format_invite_lines(inv, &args.profile, monitor.known_users()) { + println!("{line}"); + } + } + } + _ = tokio::signal::ctrl_c() => { + eprintln!("\n*** shutting down"); + break; + } + _ = sigterm_recv() => { + eprintln!("\n*** shutting down (SIGTERM)"); + break; + } + } + } + + let _ = handle.destroy().await; + let _ = task.await; + 0 +} diff --git a/peeroxide-cli/src/cmd/chat/inbox_monitor.rs b/peeroxide-cli/src/cmd/chat/inbox_monitor.rs new file mode 100644 index 0000000..0eb598f --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/inbox_monitor.rs @@ -0,0 +1,475 @@ +//! Generic inbox polling logic shared by the `chat inbox` CLI command and +//! the `chat join` inbox monitor. +//! +//! Per the chat protocol (see `docs/src/chat/wire-format.md` and `docs/src/chat/protocol.md`): the recipient's inbox topic is keyed_blake2b'd over +//! `(id_pubkey, epoch_u64_le, bucket_u8)` with a 1-minute epoch and 4 +//! buckets per epoch. Senders announce on a random bucket of the current +//! epoch; readers scan **current + previous epoch × 4 buckets = 8 lookups** +//! per polling cycle. For each unique invite-feed pubkey discovered, the +//! reader `mutable_get`s its record, decrypts using ECDH with its identity +//! key, verifies the ownership proof, and surfaces the resulting invite. +//! +//! ## Concurrency +//! +//! [`InboxMonitor`] is designed to be shared as `Arc` between +//! a polling task and `/inbox` slash-command handlers. All mutable state +//! sits behind a single internal `std::sync::Mutex`; the lock is held only +//! for brief CPU-bound merges of poll results and never across a DHT +//! `.await`. Concretely [`InboxMonitor::poll_once`]: +//! +//! 
1. Briefly locks to snapshot the `(feed_pubkey -> seen seq)` watermark. +//! 2. Releases the lock, then does all DHT lookups + `mutable_get`s + +//! decrypt/verify with the snapshot as the dedup reference. +//! 3. Briefly relocks to merge the candidates into `seen` + the unread +//! buffer, assigning sequential numbers under the lock so multiple +//! overlapping pollers (unusual but legal) don't collide. +//! +//! This means `/inbox` calls always acquire the lock quickly even mid-poll, +//! so user-facing slash commands never block on a multi-second DHT scan. + +use std::collections::HashMap; +use std::sync::Mutex; + +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::inbox::{self, DecodedInvite}; +use crate::cmd::chat::known_users::KnownUser; +use crate::cmd::chat::name_resolver::NameResolver; +use crate::cmd::chat::wire::INVITE_TYPE_DM; + +/// A decoded invite with its stable session-scope sequence number ("#N" in +/// the display format). +#[derive(Debug, Clone)] +pub struct NumberedInvite { + pub number: u32, + pub invite: DecodedInvite, +} + +/// Inner mutable state — sits behind a `std::sync::Mutex` so all access is +/// brief and lock-free across `.await`. +struct InboxMonitorInner { + /// `feed_pubkey -> last seen seq`. Subsequent observations at the same + /// (or lower) seq are ignored — they're rebroadcasts of an invite we've + /// already surfaced. + seen_invite_feeds: HashMap<[u8; 32], u64>, + /// Running counter of invites surfaced this session. Increments by 1 + /// per new surfacing; used as the `[INVITE #N]` number. + all_time_count: u32, + /// Invites the user hasn't viewed yet. Pushed to by `poll_once`, + /// drained by `take_unread`. + unread: Vec, +} + +/// Owns the polling watermark + unread buffer behind a single internal +/// lock. Cheap to clone the `Arc` and share between a +/// polling task and `/inbox` handlers. 
+pub struct InboxMonitor { + inner: Mutex, + /// Read-only at construction. Holds the cached known-users list for + /// the display (vendor name fallback). + cached_users: Vec, +} + +impl InboxMonitor { + pub fn new(cached_users: Vec) -> Self { + Self { + inner: Mutex::new(InboxMonitorInner { + seen_invite_feeds: HashMap::new(), + all_time_count: 0, + unread: Vec::new(), + }), + cached_users, + } + } + + /// One polling round: scan the current + previous epoch across all four + /// buckets, decoding any new invites. Returns the just-surfaced + /// invites in arrival order; also appends each to the unread buffer. + /// + /// The lock is held only briefly at the start (to snapshot the seen + /// watermark) and at the end (to merge results); the DHT lookups and + /// `mutable_get`s in between happen with NO locks held, so other + /// callers (notably `/inbox` slash-command handlers) can always + /// acquire the lock quickly. + pub async fn poll_once( + &self, + handle: &HyperDhtHandle, + id_keypair: &KeyPair, + ) -> Vec { + // 1. Snapshot the seen map under brief lock. The snapshot is used as + // the dedup reference during the lock-free DHT phase. Stale + // reads are safe — at worst we re-process an invite the merge + // phase will then dedup against the (now-up-to-date) map. + let seen_snapshot: HashMap<[u8; 32], u64> = { + let inner = self.inner.lock().expect("inbox monitor mutex poisoned"); + inner.seen_invite_feeds.clone() + }; + + // 2. Lock-free DHT phase: scan + decrypt + verify. Each pass + // collects (feed_pk, seq, DecodedInvite) candidates. + let candidates = perform_dht_scan(handle, id_keypair, &seen_snapshot).await; + + // 3. Brief-lock merge: re-check seq against the (possibly newer) + // seen map, assign #N, append to unread. 
+ let mut surfaced: Vec = Vec::new(); + if !candidates.is_empty() { + let mut inner = self.inner.lock().expect("inbox monitor mutex poisoned"); + for (feed_pk, seq, invite) in candidates { + // Re-check under lock (defensive against concurrent pollers). + if matches!(inner.seen_invite_feeds.get(&feed_pk).copied(), Some(s) if seq <= s) { + continue; + } + inner.seen_invite_feeds.insert(feed_pk, seq); + inner.all_time_count = inner.all_time_count.saturating_add(1); + let numbered = NumberedInvite { + number: inner.all_time_count, + invite, + }; + inner.unread.push(numbered.clone()); + surfaced.push(numbered); + } + } + surfaced + } + + /// Drain the unread buffer; subsequent `unread_count` returns 0 until + /// new invites arrive. + pub fn take_unread(&self) -> Vec { + let mut inner = self.inner.lock().expect("inbox monitor mutex poisoned"); + std::mem::take(&mut inner.unread) + } + + /// Number of unread invites currently buffered (cheap; for the bar). + pub fn unread_count(&self) -> usize { + let inner = self.inner.lock().expect("inbox monitor mutex poisoned"); + inner.unread.len() + } + + /// Total invites surfaced this session (cumulative, never decrements). + pub fn all_time_count(&self) -> u32 { + let inner = self.inner.lock().expect("inbox monitor mutex poisoned"); + inner.all_time_count + } + + /// Borrow the cached known-users for use by `format_invite_lines`. + /// Immutable after construction; no lock needed. + pub fn known_users(&self) -> &[KnownUser] { + &self.cached_users + } +} + +/// Lock-free DHT scan: fan out all 8 (epoch, bucket) lookups in parallel, +/// then for each lookup result fan out the per-peer `mutable_get`s in +/// parallel, then post-process (decrypt + verify) all candidates. Does +/// NOT touch any `InboxMonitor` state — purely an async I/O helper. +/// +/// Errors from individual lookups / gets are swallowed silently — best- +/// effort, network-flaky operations. Per-event debug logs go through +/// `debug::log_event`. 
+/// +/// Parallelism note: an earlier version of this scanned epochs and +/// buckets serially, which made the whole poll cycle take 10-20 s on a +/// typical public DHT — too slow for the background monitor's 15 s +/// cadence. Fanning out via `join_all` cuts the wall-clock to roughly +/// the slowest single round-trip plus the slowest mutable_get fan-out +/// per lookup. +async fn perform_dht_scan( + handle: &HyperDhtHandle, + id_keypair: &KeyPair, + seen_snapshot: &HashMap<[u8; 32], u64>, +) -> Vec<([u8; 32], u64, DecodedInvite)> { + let current_epoch = crypto::current_epoch(); + + // ── phase 1: 8 lookups in parallel ──────────────────────────────── + let lookup_futures = [current_epoch, current_epoch.saturating_sub(1)] + .into_iter() + .flat_map(|epoch| (0..4u8).map(move |bucket| (epoch, bucket))) + .map(|(epoch, bucket)| async move { + let topic = crypto::inbox_topic(&id_keypair.public_key, epoch, bucket); + let res = handle.lookup(topic).await; + (epoch, bucket, res) + }); + let lookup_results = futures::future::join_all(lookup_futures).await; + + // Collect a unique set of feed_pubkeys to fetch. The same peer may + // appear on multiple buckets / both epochs; dedup so we don't fire + // multiple `mutable_get`s for the same target. 
+ let mut to_fetch: HashMap<[u8; 32], ()> = HashMap::new(); + for (epoch, bucket, res) in &lookup_results { + let Ok(results) = res else { continue }; + let peer_count: usize = results.iter().map(|r| r.peers.len()).sum(); + debug::log_event( + "Inbox check", + "lookup", + &format!("epoch={epoch}, bucket={bucket}, results={peer_count}"), + ); + for result in results { + for peer in &result.peers { + to_fetch.entry(peer.public_key).or_insert(()); + } + } + } + + // ── phase 2: fan out all mutable_gets in parallel ───────────────── + let get_futures = to_fetch.keys().copied().map(|feed_pk| async move { + let res = handle.mutable_get(&feed_pk, 0).await; + (feed_pk, res) + }); + let get_results = futures::future::join_all(get_futures).await; + + // ── phase 3: decrypt + verify; collect candidates ───────────────── + let mut out: Vec<([u8; 32], u64, DecodedInvite)> = Vec::new(); + for (feed_pk, res) in get_results { + let Ok(Some(mget)) = res else { continue }; + let prev_seq = seen_snapshot.get(&feed_pk).copied(); + if matches!(prev_seq, Some(s) if mget.seq <= s) { + continue; + } + let Ok(invite) = inbox::decrypt_and_verify_invite( + &mget.value, + &feed_pk, + id_keypair, + ) else { + continue; + }; + debug::log_event( + "Invite received", + "mutable_get", + &format!( + "invite_feed_pk={}, sender={}, invite_type=0x{:02x}, payload_len={}", + debug::short_key(&feed_pk), + debug::short_key(&invite.sender_pubkey), + invite.invite_type, + invite.payload.len(), + ), + ); + out.push((feed_pk, mget.seq, invite)); + } + out +} + +/// Render a numbered invite as the same multi-line string format the +/// `chat inbox` CLI command produces on stdout. Returns one element per +/// output line so the caller can route each line through either +/// `println!` (CLI) or `ChatUi::render_system` (TUI `/inbox`). 
+/// 
+/// Format (matches the original `inbox::display_invite` output verbatim):
+/// ```text
+/// [INVITE #N] DM from <sender_name> (<shortkey>)
+///     "<lure>"    (only if non-empty for DM)
+///     → peeroxide chat dm <sender_hex> --profile <profile>
+/// ```
+/// Or for a private/group channel invite:
+/// ```text
+/// [INVITE #N] Channel "<name>" from <sender_name> (<shortkey>)
+///     → peeroxide chat join "<name>" --group "<salt>" --profile <profile>
+/// ``` +pub fn format_invite_lines( + numbered: &NumberedInvite, + profile_name: &str, + known_users: &[KnownUser], +) -> Vec { + let invite = &numbered.invite; + let number = numbered.number; + let sender_hex = hex::encode(invite.sender_pubkey); + // Resolve via the canonical name resolver (no friends list in this + // context — invite display only had access to known_users historically). + let resolved = NameResolver::from_known_users(known_users).resolve(&invite.sender_pubkey); + let sender_name = &resolved.name; + let short = &resolved.shortkey; + + let mut out = Vec::with_capacity(3); + if invite.invite_type == INVITE_TYPE_DM { + let lure = String::from_utf8_lossy(&invite.payload); + out.push(format!("[INVITE #{number}] DM from {sender_name} ({short})")); + if !lure.is_empty() { + out.push(format!(" \"{lure}\"")); + } + out.push(format!( + " → peeroxide chat dm {sender_hex} --profile {profile_name}" + )); + return out; + } + + // Channel invite: payload is [name_len(1) | name(N) | salt_len(2 LE) | salt(M)]. 
+ if invite.payload.len() >= 3 { + let name_len = invite.payload[0] as usize; + if invite.payload.len() >= 1 + name_len + 2 { + let name = String::from_utf8_lossy(&invite.payload[1..1 + name_len]); + let salt_len = u16::from_le_bytes([ + invite.payload[1 + name_len], + invite.payload[2 + name_len], + ]) as usize; + if invite.payload.len() >= 3 + name_len + salt_len { + let salt = + String::from_utf8_lossy(&invite.payload[3 + name_len..3 + name_len + salt_len]); + out.push(format!( + "[INVITE #{number}] Channel \"{name}\" from {sender_name} ({short})" + )); + out.push(format!( + " → peeroxide chat join \"{name}\" --group \"{salt}\" --profile {profile_name}" + )); + return out; + } + } + } + out.push(format!( + "[INVITE #{number}] Channel invite from {sender_name} ({short})" + )); + out +} + +#[cfg(test)] +mod tests { + use super::*; + + fn invite_dm(sender_byte: u8, lure: &str) -> DecodedInvite { + DecodedInvite { + sender_pubkey: [sender_byte; 32], + next_feed_pubkey: [0; 32], + invite_type: INVITE_TYPE_DM, + payload: lure.as_bytes().to_vec(), + } + } + + fn invite_channel(name: &str, salt: &str) -> DecodedInvite { + let mut p: Vec = Vec::new(); + p.push(name.len() as u8); + p.extend_from_slice(name.as_bytes()); + p.extend_from_slice(&(salt.len() as u16).to_le_bytes()); + p.extend_from_slice(salt.as_bytes()); + DecodedInvite { + sender_pubkey: [0xab; 32], + next_feed_pubkey: [0; 32], + invite_type: crate::cmd::chat::wire::INVITE_TYPE_PRIVATE, + payload: p, + } + } + + /// Push an invite directly into the unread buffer (bypassing DHT) for + /// testing the take_unread / unread_count surface. 
+ fn push_for_test(m: &InboxMonitor, invite: DecodedInvite) { + let mut inner = m.inner.lock().unwrap(); + inner.all_time_count = inner.all_time_count.saturating_add(1); + let n = NumberedInvite { + number: inner.all_time_count, + invite, + }; + inner.unread.push(n); + } + + #[test] + fn new_monitor_is_empty() { + let m = InboxMonitor::new(vec![]); + assert_eq!(m.unread_count(), 0); + assert_eq!(m.all_time_count(), 0); + } + + #[test] + fn take_unread_drains_and_resets_count() { + let m = InboxMonitor::new(vec![]); + push_for_test(&m, invite_dm(1, "hi")); + push_for_test(&m, invite_dm(2, "yo")); + assert_eq!(m.unread_count(), 2); + assert_eq!(m.all_time_count(), 2); + let drained = m.take_unread(); + assert_eq!(drained.len(), 2); + assert_eq!(m.unread_count(), 0); + // All-time count is unaffected by drain. + assert_eq!(m.all_time_count(), 2); + } + + #[test] + fn take_unread_assigns_sequential_numbers() { + let m = InboxMonitor::new(vec![]); + push_for_test(&m, invite_dm(1, "a")); + push_for_test(&m, invite_dm(2, "b")); + push_for_test(&m, invite_dm(3, "c")); + let drained = m.take_unread(); + assert_eq!(drained.iter().map(|n| n.number).collect::>(), vec![1, 2, 3]); + } + + #[test] + fn format_invite_lines_dm_with_lure() { + let inv = NumberedInvite { + number: 7, + invite: invite_dm(0x42, "wanna chat?"), + }; + let lines = format_invite_lines(&inv, "default", &[]); + assert_eq!(lines.len(), 3); + // Unknown sender resolves to a vendor name; the shortkey appears + // in parentheses regardless. 
+ assert!( + lines[0].starts_with("[INVITE #7] DM from "), + "got: {}", + lines[0] + ); + assert!(lines[0].ends_with("(42424242)"), "got: {}", lines[0]); + assert_eq!(lines[1], " \"wanna chat?\""); + assert!(lines[2].contains("peeroxide chat dm ")); + assert!(lines[2].contains("--profile default")); + } + + #[test] + fn format_invite_lines_dm_without_lure() { + let inv = NumberedInvite { + number: 3, + invite: invite_dm(0x11, ""), + }; + let lines = format_invite_lines(&inv, "alice", &[]); + assert_eq!(lines.len(), 2); + assert!(lines[0].starts_with("[INVITE #3] DM from ")); + assert!(lines[0].ends_with("(11111111)"), "got: {}", lines[0]); + assert!(lines[1].contains("--profile alice")); + } + + #[test] + fn format_invite_lines_dm_uses_known_user_name() { + let users = vec![KnownUser { + pubkey: [0x42; 32], + screen_name: "Alice".to_string(), + }]; + let inv = NumberedInvite { + number: 1, + invite: invite_dm(0x42, ""), + }; + let lines = format_invite_lines(&inv, "default", &users); + assert!(lines[0].contains("DM from Alice (42424242)"), "got: {}", lines[0]); + } + + #[test] + fn format_invite_lines_channel_with_salt() { + let inv = NumberedInvite { + number: 4, + invite: invite_channel("secret-room", "salty"), + }; + let lines = format_invite_lines(&inv, "default", &[]); + assert_eq!(lines.len(), 2); + assert!( + lines[0].starts_with("[INVITE #4] Channel \"secret-room\" from"), + "got: {}", + lines[0] + ); + assert!(lines[1].contains("--group \"salty\"")); + assert!(lines[1].contains("--profile default")); + } + + /// Quick concurrency sanity check: while a long-running fake "poll" + /// task holds the lock briefly to merge results, another caller can + /// always acquire the lock without contention. This is more of a + /// design-doc test than a true stress test — it just verifies that + /// the API surface uses &self (not &mut self) so multiple callers + /// can share an `Arc`. 
+ #[test] + fn monitor_methods_take_shared_ref() { + let m = std::sync::Arc::new(InboxMonitor::new(vec![])); + let m2 = m.clone(); + // Two shared-ref methods can be called from different references. + let _ = m.unread_count(); + let _ = m2.all_time_count(); + let _ = m.take_unread(); + let _ = m2.known_users(); + } +} diff --git a/peeroxide-cli/src/cmd/chat/join.rs b/peeroxide-cli/src/cmd/chat/join.rs new file mode 100644 index 0000000..6ab4cee --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/join.rs @@ -0,0 +1,141 @@ +use clap::Parser; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::profile; +use crate::cmd::chat::session::{self, SessionConfig}; +use crate::config::ResolvedConfig; + +use peeroxide_dht::hyperdht::KeyPair; + +#[derive(Parser)] +pub struct JoinArgs { + /// Channel name + pub channel: String, + + /// Private channel with group name as salt + #[arg(long, conflicts_with = "keyfile")] + pub group: Option, + + /// Private channel with keyfile as salt + #[arg(long, conflicts_with = "group")] + pub keyfile: Option, + + /// Identity profile to use + #[arg(long, default_value = "default")] + pub profile: String, + + /// Do not publish personal nexus + #[arg(long)] + pub no_nexus: bool, + + /// Do not refresh friend nexus data + #[arg(long)] + pub no_friends: bool, + + /// Listen only; no posting, no feed, no announce + #[arg(long)] + pub read_only: bool, + + /// Equivalent to --no-nexus --read-only --no-friends + #[arg(long)] + pub stealth: bool, + + /// Max feed keypair lifetime before rotation (minutes) + #[arg(long, default_value = "60")] + pub feed_lifetime: u64, + + /// Max messages to publish in a single batch. + /// Each batch performs one mutable_put + one announce regardless of + /// message count, so larger batches amortize DHT round-trips when + /// piping a file. Capped well below the 26-hash FeedRecord window. 
+ #[arg(long, default_value = "16")] + pub batch_size: usize, + + /// Idle time (ms) the publisher waits to accumulate additional + /// messages into the current batch before flushing. Interactive + /// single messages flush after this delay; piped streams typically + /// fill the batch sooner. + #[arg(long, default_value = "50")] + pub batch_wait_ms: u64, + + /// After stdin closes (EOF), remain joined to the channel in read-only + /// mode instead of exiting. Useful when a script pipes a burst of + /// messages and the operator wants to keep watching the channel + /// afterward. Default is to exit cleanly once stdin is exhausted, which + /// matches the natural shell-pipe lifecycle (`file | peeroxide chat join` + /// finishes when the file does). + #[arg(long)] + pub stay_after_eof: bool, + + /// Disable the background inbox monitor. By default `chat join` polls + /// the same inbox topics as `chat inbox` so an INBOX indicator can + /// surface on the status bar; `/inbox` then dumps the unread invites + /// to the chat region. When disabled, the inbox segment is omitted + /// from the bar entirely and `/inbox` is a no-op. + #[arg(long)] + pub no_inbox: bool, + + /// Inbox polling interval in seconds. Matches the chat inbox CLI + /// default; the chat protocol docs (`docs/src/chat/protocol.md`) suggest 15-30 s. 
+ #[arg(long, default_value = "15")] + pub inbox_poll_interval: u64, +} + +pub async fn run(args: JoinArgs, cfg: &ResolvedConfig, line_mode: bool) -> i32 { + let read_only = args.read_only || args.stealth; + let no_nexus = args.no_nexus || args.stealth; + let no_friends = args.no_friends || args.stealth; + + let prof = match profile::load_or_create_profile(&args.profile) { + Ok(p) => p, + Err(e) => { + eprintln!("error: failed to load profile '{}': {e}", args.profile); + return 1; + } + }; + + let id_keypair = KeyPair::from_seed(prof.seed); + + let salt = if let Some(ref group) = args.group { + Some(group.as_bytes().to_vec()) + } else if let Some(ref keyfile_path) = args.keyfile { + match std::fs::read(keyfile_path) { + Ok(data) => Some(data), + Err(e) => { + eprintln!("error: failed to read keyfile '{keyfile_path}': {e}"); + return 1; + } + } + } else { + None + }; + + let channel_key = crypto::channel_key(args.channel.as_bytes(), salt.as_deref()); + let message_key = crypto::msg_key(&channel_key); + + let bar_name = args.channel.clone(); + let greeting = format!("*** joining channel '{}'", args.channel); + + let config = SessionConfig { + bar_name, + greeting, + channel_key, + message_key, + profile: args.profile, + prof, + id_keypair, + read_only, + no_nexus, + no_friends, + no_inbox: args.no_inbox, + feed_lifetime: args.feed_lifetime, + batch_size: args.batch_size, + batch_wait_ms: args.batch_wait_ms, + inbox_poll_interval: args.inbox_poll_interval, + stay_after_eof: args.stay_after_eof, + line_mode, + dm: None, + }; + + session::run(config, cfg).await +} diff --git a/peeroxide-cli/src/cmd/chat/known_users.rs b/peeroxide-cli/src/cmd/chat/known_users.rs new file mode 100644 index 0000000..321b53a --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/known_users.rs @@ -0,0 +1,491 @@ +//! Shared known-users file with atomic I/O and mtime-based cache invalidation. +//! +//! File format: one entry per line, tab-separated: +//! 
`<64-hex-pubkey>\t` + +use std::collections::HashMap; +use std::fs; +use std::io; +use std::path::PathBuf; +use std::time::{Duration, Instant, SystemTime}; + +use fs2::FileExt; + +/// A single entry in the known-users file. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct KnownUser { + pub pubkey: [u8; 32], + pub screen_name: String, +} + +/// Returns `~/.config/peeroxide/chat/known_users`. +pub fn shared_known_users_path() -> PathBuf { + let home = dirs::home_dir().expect("home directory not found"); + home.join(".config") + .join("peeroxide") + .join("chat") + .join("known_users") +} + +/// In-memory view of the shared known-users file. +/// +/// Caches entries in memory with mtime-based invalidation (5-second debounce). +/// Writer coordination uses `fs2` advisory exclusive locks. No Arc/Mutex — +/// this struct is single-owner. +pub struct SharedKnownUsers { + path: PathBuf, + entries: Vec, + index: HashMap<[u8; 32], usize>, + last_mtime: Option, + last_checked: Instant, +} + +impl SharedKnownUsers { + /// Creates a new instance from the given path and immediately loads it. + /// + /// A missing file is treated as empty (no error). + pub fn new(path: PathBuf) -> Self { + let mut s = Self { + path, + entries: Vec::new(), + index: HashMap::new(), + last_mtime: None, + last_checked: Instant::now(), + }; + s.load(); + s + } + + /// Convenience constructor using [`shared_known_users_path()`]. + pub fn load_from_shared() -> Self { + Self::new(shared_known_users_path()) + } + + /// Reads and parses the file, updating `entries`, `index`, and `last_mtime`. + /// + /// A missing file silently results in an empty list. Any unreadable file + /// is also silently ignored to avoid crashing long-running callers. 
+ pub fn load(&mut self) { + self.entries.clear(); + self.index.clear(); + self.last_mtime = None; + + let content = match fs::read_to_string(&self.path) { + Ok(c) => c, + Err(e) if e.kind() == io::ErrorKind::NotFound => return, + Err(_) => return, + }; + + // Record mtime *after* a successful read. + if let Ok(meta) = fs::metadata(&self.path) { + self.last_mtime = meta.modified().ok(); + } + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + let mut parts = line.splitn(2, '\t'); + let hex_key = parts.next().unwrap_or("").trim(); + let screen_name = parts.next().unwrap_or("").trim().to_owned(); + + let pubkey = match decode_pubkey(hex_key) { + Ok(k) => k, + Err(_) => continue, + }; + + if let Some(&idx) = self.index.get(&pubkey) { + // Last-wins: update the existing entry in place. + self.entries[idx].screen_name = screen_name; + } else { + let idx = self.entries.len(); + self.entries.push(KnownUser { pubkey, screen_name }); + self.index.insert(pubkey, idx); + } + } + } + + /// Reloads the file if the mtime changed and at least 5 seconds have + /// elapsed since the last check. Skips entirely when called too soon. + pub fn maybe_reload(&mut self) { + const CHECK_INTERVAL: Duration = Duration::from_secs(5); + if self.last_checked.elapsed() < CHECK_INTERVAL { + return; + } + self.last_checked = Instant::now(); + + let current_mtime = fs::metadata(&self.path).ok().and_then(|m| m.modified().ok()); + if current_mtime != self.last_mtime { + self.load(); + } + } + + /// Returns the screen name for `pubkey`, calling `maybe_reload` first. + pub fn get(&mut self, pubkey: &[u8; 32]) -> Option<&str> { + self.maybe_reload(); + self.index + .get(pubkey) + .map(|&idx| self.entries[idx].screen_name.as_str()) + } + + /// Returns all entries as a slice, calling `maybe_reload` first. 
+ pub fn all_users(&mut self) -> &[KnownUser] { + self.maybe_reload(); + &self.entries + } + + /// Resolves a hex prefix to a pubkey. + /// + /// Returns `Ok(None)` when nothing matches, `Ok(Some(key))` for a unique + /// match, and an `InvalidInput` error when the prefix is ambiguous. + pub fn resolve_shortkey(&mut self, prefix: &str) -> io::Result> { + if prefix.len() > 64 { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "shortkey must not exceed 64 hex characters", + )); + } + self.maybe_reload(); + let lower = prefix.to_lowercase(); + let matches: Vec<[u8; 32]> = self + .entries + .iter() + .filter(|u| hex::encode(u.pubkey).starts_with(&lower)) + .map(|u| u.pubkey) + .collect(); + + match matches.len() { + 0 => Ok(None), + 1 => Ok(Some(matches[0])), + n => Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!( + "shortkey '{}' is ambiguous: {} matches found", + prefix, n + ), + )), + } + } + + /// Inserts or updates `pubkey → screen_name`, writing atomically on change. + /// + /// Skips silently when `screen_name` is empty or sanitises to empty. + /// Skips silently when the stored name is already `screen_name`. + /// Enforces a 1 000-entry FIFO cap; oldest entry is evicted on overflow. + pub fn update(&mut self, pubkey: &[u8; 32], screen_name: &str) -> io::Result<()> { + if screen_name.is_empty() { + return Ok(()); + } + let sanitized = sanitize_screen_name(screen_name); + if sanitized.is_empty() { + return Ok(()); + } + + if let Some(&idx) = self.index.get(pubkey) { + if self.entries[idx].screen_name == sanitized { + return Ok(()); // skip-if-unchanged + } + self.entries[idx].screen_name = sanitized; + } else { + let idx = self.entries.len(); + self.entries.push(KnownUser { + pubkey: *pubkey, + screen_name: sanitized, + }); + self.index.insert(*pubkey, idx); + } + + // FIFO eviction when over cap. + if self.entries.len() > 1000 { + self.entries.remove(0); + // Rebuild index after the removal shifted all slots. 
+ self.index.clear(); + for (i, entry) in self.entries.iter().enumerate() { + self.index.insert(entry.pubkey, i); + } + } + + self.write_atomic()?; + + // Track the mtime of our own write to avoid re-reading it. + if let Ok(meta) = fs::metadata(&self.path) { + self.last_mtime = meta.modified().ok(); + } + self.last_checked = Instant::now(); + + Ok(()) + } + + /// Writes all entries atomically via a tmp-file + rename. + /// + /// Uses an `fs2` advisory exclusive lock on `.known_users.lock` to + /// coordinate concurrent writers. + pub fn write_atomic(&self) -> io::Result<()> { + let parent = self.path.parent().ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + "known_users path has no parent directory", + ) + })?; + fs::create_dir_all(parent)?; + + let lock_path = parent.join(".known_users.lock"); + let lock_file = fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(&lock_path)?; + lock_file.lock_exclusive()?; + + let tmp_path = parent.join(".known_users.tmp"); + let mut content = String::new(); + for entry in &self.entries { + content.push_str(&hex::encode(entry.pubkey)); + content.push('\t'); + content.push_str(&entry.screen_name); + content.push('\n'); + } + fs::write(&tmp_path, content.as_bytes())?; + fs::rename(&tmp_path, &self.path)?; + + drop(lock_file); + Ok(()) + } + + #[cfg(test)] + pub(crate) fn force_stale(&mut self) { + self.last_checked = Instant::now() - Duration::from_secs(10); + } +} + +/// Sanitises a screen name for storage: strips `\r`/`\n`, replaces `\t` with +/// space, trims surrounding whitespace. Emoji and other Unicode are preserved. +pub fn sanitize_screen_name(s: &str) -> String { + let cleaned: String = s + .chars() + .filter(|&c| c != '\r' && c != '\n') + .map(|c| if c == '\t' { ' ' } else { c }) + .collect(); + cleaned.trim().to_owned() +} + +/// Write-only helper for call sites that do not keep a long-lived cache. 
+///
+/// Loads the shared file fresh, applies the update (with skip-if-unchanged
+/// guard), and writes back atomically.
+pub fn update_shared(pubkey: &[u8; 32], screen_name: &str) -> io::Result<()> {
+    let mut cache = SharedKnownUsers::load_from_shared();
+    cache.update(pubkey, screen_name)
+}
+
+/// One-shot load for short-lived CLI commands.
+///
+/// Returns a cloned snapshot of all entries.
+pub fn load_shared_users() -> io::Result<Vec<KnownUser>> {
+    let cache = SharedKnownUsers::load_from_shared();
+    Ok(cache.entries.clone())
+}
+
+fn decode_pubkey(s: &str) -> Result<[u8; 32], hex::FromHexError> {
+    let bytes = hex::decode(s)?;
+    if bytes.len() != 32 {
+        return Err(hex::FromHexError::InvalidStringLength);
+    }
+    let mut key = [0u8; 32];
+    key.copy_from_slice(&bytes);
+    Ok(key)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tempfile::TempDir;
+
+    fn make_pubkey(b: u8) -> [u8; 32] {
+        [b; 32]
+    }
+
+    #[test]
+    fn test_load_empty() {
+        let dir = TempDir::new().unwrap();
+        let path = dir.path().join("known_users");
+        let mut cache = SharedKnownUsers::new(path);
+        assert!(cache.entries.is_empty());
+        assert!(cache.get(&make_pubkey(1)).is_none());
+    }
+
+    #[test]
+    fn test_write_and_read_back() {
+        let dir = TempDir::new().unwrap();
+        let path = dir.path().join("known_users");
+
+        let pk1 = make_pubkey(1);
+        let pk2 = make_pubkey(2);
+        let pk3 = make_pubkey(3);
+
+        {
+            let mut cache = SharedKnownUsers::new(path.clone());
+            cache.update(&pk1, "alice").unwrap();
+            cache.update(&pk2, "bob").unwrap();
+            cache.update(&pk3, "carol").unwrap();
+        }
+
+        let mut cache2 = SharedKnownUsers::new(path);
+        assert_eq!(cache2.get(&pk1), Some("alice"));
+        assert_eq!(cache2.get(&pk2), Some("bob"));
+        assert_eq!(cache2.get(&pk3), Some("carol"));
+    }
+
+    #[test]
+    fn test_dedup_last_wins() {
+        let dir = TempDir::new().unwrap();
+        let path = dir.path().join("known_users");
+        let pk = make_pubkey(10);
+
+        let mut cache = SharedKnownUsers::new(path);
+        cache.update(&pk, "alice").unwrap();
+
cache.update(&pk, "alice2").unwrap(); + + assert_eq!(cache.get(&pk), Some("alice2")); + assert_eq!(cache.entries.len(), 1); + } + + #[test] + fn test_skip_if_unchanged() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let pk = make_pubkey(11); + let mut cache = SharedKnownUsers::new(path.clone()); + cache.update(&pk, "alice").unwrap(); + + let mtime_before = fs::metadata(&path).unwrap().modified().unwrap(); + std::thread::sleep(Duration::from_millis(10)); + + cache.update(&pk, "alice").unwrap(); + + let mtime_after = fs::metadata(&path).unwrap().modified().unwrap(); + assert_eq!(mtime_before, mtime_after); + } + + #[test] + fn test_skip_empty_name() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let pk = make_pubkey(12); + let mut cache = SharedKnownUsers::new(path.clone()); + cache.update(&pk, "").unwrap(); + + assert!(cache.get(&pk).is_none()); + assert!(!path.exists(), "file should not be created for empty name"); + } + + #[test] + fn test_fifo_eviction() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let mut cache = SharedKnownUsers::new(path); + + for i in 0u32..=1000 { + let mut pk = [0u8; 32]; + pk[0..4].copy_from_slice(&i.to_le_bytes()); + cache.update(&pk, &format!("user{}", i)).unwrap(); + } + + let mut pk0 = [0u8; 32]; + pk0[0..4].copy_from_slice(&0u32.to_le_bytes()); + assert!(cache.get(&pk0).is_none(), "pk0 should be evicted"); + + let mut pk1 = [0u8; 32]; + pk1[0..4].copy_from_slice(&1u32.to_le_bytes()); + assert!(cache.get(&pk1).is_some(), "pk1 should be present"); + + let mut pk1000 = [0u8; 32]; + pk1000[0..4].copy_from_slice(&1000u32.to_le_bytes()); + assert!(cache.get(&pk1000).is_some(), "pk1000 should be present"); + + assert_eq!(cache.entries.len(), 1000); + } + + #[test] + fn test_sanitize() { + assert_eq!(sanitize_screen_name("hello\tworld\r\n"), "hello world"); + assert_eq!(sanitize_screen_name("alice 🎉"), "alice 🎉"); + 
assert_eq!(sanitize_screen_name(" spaces "), "spaces"); + assert_eq!(sanitize_screen_name("\r\n"), ""); + } + + #[test] + fn test_resolve_shortkey_found() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let pk = [0xabu8; 32]; + let mut cache = SharedKnownUsers::new(path); + cache.update(&pk, "testuser").unwrap(); + + let prefix = &hex::encode(pk)[..8]; + let result = cache.resolve_shortkey(prefix).unwrap(); + assert_eq!(result, Some(pk)); + } + + #[test] + fn test_resolve_shortkey_ambiguous() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let mut pk1 = [0u8; 32]; + pk1[0] = 0xab; + pk1[1] = 0x01; + let mut pk2 = [0u8; 32]; + pk2[0] = 0xab; + pk2[1] = 0x02; + + let mut cache = SharedKnownUsers::new(path); + cache.update(&pk1, "user1").unwrap(); + cache.update(&pk2, "user2").unwrap(); + + let result = cache.resolve_shortkey("ab"); + assert!(result.is_err(), "should be ambiguous"); + let err = result.unwrap_err(); + assert!( + err.to_string().contains("ambiguous"), + "error message should mention ambiguous" + ); + } + + #[test] + fn test_resolve_shortkey_not_found() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + let mut cache = SharedKnownUsers::new(path); + let result = cache.resolve_shortkey("deadbeef").unwrap(); + assert!(result.is_none()); + } + + #[test] + fn test_mtime_reload() { + let dir = TempDir::new().unwrap(); + let path = dir.path().join("known_users"); + + let pk1 = make_pubkey(1); + let pk2 = make_pubkey(2); + + let mut cache1 = SharedKnownUsers::new(path.clone()); + cache1.update(&pk1, "alice").unwrap(); + + let mut cache2 = SharedKnownUsers::new(path.clone()); + cache2.update(&pk2, "bob").unwrap(); + + cache1.force_stale(); + + assert_eq!( + cache1.get(&pk2), + Some("bob"), + "cache1 should reload and find pk2 written by cache2" + ); + } +} diff --git a/peeroxide-cli/src/cmd/chat/mod.rs b/peeroxide-cli/src/cmd/chat/mod.rs new file 
mode 100644 index 0000000..0a86527 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/mod.rs @@ -0,0 +1,819 @@ +#![allow(dead_code)] + +pub mod crypto; +pub mod debug; +pub mod display; +pub mod dm; +pub mod dm_cmd; +pub mod feed; +pub mod inbox; +pub mod inbox_cmd; +pub mod inbox_monitor; +pub mod join; +pub mod known_users; +pub mod name_resolver; +pub mod names; +pub mod nexus; +pub mod ordering; +pub mod post; +pub mod probe; +pub mod profile; +pub mod publisher; +pub mod reader; +pub mod session; +pub mod tui; +pub mod wire; + +use clap::{Parser, Subcommand}; + +use crate::config::ResolvedConfig; + +#[derive(Parser)] +pub struct ChatArgs { + #[command(subcommand)] + pub command: ChatCommands, + + /// Enable debug event logging to stderr + #[arg(long, global = true)] + pub debug: bool, + + /// Enable message-flow probes (stdin/post/fetch_batch/release) to stderr. + /// Diagnostic only; useful for tracing ordering and duplication bugs. + #[arg(long, global = true)] + pub probe: bool, + + /// Disable the interactive TTY mode and use line-oriented stdin/stdout + /// even when stdout is a terminal. Auto-enabled when stdout is not a TTY + /// (e.g. piped or redirected). The env var PEEROXIDE_LINE_MODE=1 has the + /// same effect. 
+    #[arg(long, global = true)]
+    pub line_mode: bool,
+}
+
+#[derive(Subcommand)]
+pub enum ChatCommands {
+    /// Join a channel and participate interactively
+    Join(join::JoinArgs),
+    /// Start or resume a DM conversation
+    Dm(dm_cmd::DmArgs),
+    /// Monitor the invite inbox
+    Inbox(inbox_cmd::InboxArgs),
+    /// Display the current profile's identity
+    Whoami(WhoamiArgs),
+    /// Manage profiles
+    Profiles {
+        #[command(subcommand)]
+        command: ProfilesCommands,
+    },
+    /// Manage friends list
+    Friends {
+        #[command(subcommand)]
+        command: Option<FriendsCommands>,
+    },
+    /// Manage the personal nexus record
+    Nexus(nexus::NexusArgs),
+}
+
+#[derive(Parser)]
+pub struct WhoamiArgs {
+    /// Profile to display
+    #[arg(long, default_value = "default")]
+    pub profile: String,
+}
+
+#[derive(Subcommand)]
+pub enum ProfilesCommands {
+    /// List all profiles
+    List,
+    /// Create a new profile
+    Create {
+        /// Profile name
+        name: String,
+        /// Optional screen name
+        #[arg(long)]
+        screen_name: Option<String>,
+    },
+    /// Delete a profile
+    Delete {
+        /// Profile name to delete
+        name: String,
+    },
+}
+
+#[derive(Subcommand)]
+pub enum FriendsCommands {
+    /// List all friends
+    List {
+        /// Identity profile to use
+        #[arg(long, default_value = "default")]
+        profile: String,
+    },
+    /// Add a friend
+    Add {
+        /// Recipient: alias, pubkey hex (64 chars), @shortkey, name@shortkey, or screen name
+        key: String,
+        /// Local alias for this friend
+        #[arg(long)]
+        alias: Option<String>,
+        /// Identity profile to use
+        #[arg(long, default_value = "default")]
+        profile: String,
+    },
+    /// Remove a friend
+    Remove {
+        /// Recipient: alias, pubkey hex (64 chars), @shortkey, name@shortkey, or screen name
+        key: String,
+        /// Identity profile to use
+        #[arg(long, default_value = "default")]
+        profile: String,
+    },
+    /// One-shot refresh all friend nexus records
+    Refresh,
+}
+
+pub async fn run(args: ChatArgs, cfg: &ResolvedConfig) -> i32 {
+    if args.debug {
+        debug::enable();
+    }
+    if args.probe {
+        probe::enable();
+    }
+    let
line_mode = args.line_mode + || std::env::var("PEEROXIDE_LINE_MODE") + .map(|v| !v.is_empty() && v != "0") + .unwrap_or(false); + match args.command { + ChatCommands::Join(join_args) => join::run(join_args, cfg, line_mode).await, + ChatCommands::Dm(dm_args) => dm_cmd::run(dm_args, cfg, line_mode).await, + ChatCommands::Inbox(inbox_args) => inbox_cmd::run(inbox_args, cfg).await, + ChatCommands::Whoami(args) => run_whoami(args), + ChatCommands::Profiles { command } => run_profiles(command), + ChatCommands::Friends { command } => { + let command = command.unwrap_or(FriendsCommands::List { + profile: "default".to_string(), + }); + match command { + FriendsCommands::Refresh => run_friends_refresh(cfg).await, + other => run_friends_sync(other), + } + } + ChatCommands::Nexus(nexus_args) => nexus::run(nexus_args, cfg).await, + } +} + +fn run_whoami(args: WhoamiArgs) -> i32 { + let prof = match profile::load_or_create_profile(&args.profile) { + Ok(p) => p, + Err(e) => { + eprintln!("error: failed to load profile '{}': {e}", args.profile); + return 1; + } + }; + + let kp = peeroxide_dht::hyperdht::KeyPair::from_seed(prof.seed); + let pubkey_hex = hex::encode(kp.public_key); + let nexus_topic = hex::encode(peeroxide_dht::crypto::hash(&kp.public_key)); + + println!("Profile: {}", prof.name); + println!("Public key: {pubkey_hex}"); + if let Some(ref name) = prof.screen_name { + println!("Screen name: {name}"); + } else { + println!("Screen name: (not set)"); + } + println!("Nexus topic: {nexus_topic}"); + 0 +} + +fn run_profiles(command: ProfilesCommands) -> i32 { + match command { + ProfilesCommands::List => { + let profiles = match profile::list_profiles() { + Ok(p) => p, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + if profiles.is_empty() { + println!("No profiles found. 
Create one with: peeroxide chat profiles create <name>");
+                return 0;
+            }
+            for name in profiles {
+                match profile::load_profile(&name) {
+                    Ok(prof) => {
+                        let kp = peeroxide_dht::hyperdht::KeyPair::from_seed(prof.seed);
+                        let short = &hex::encode(kp.public_key)[..8];
+                        let screen = prof
+                            .screen_name
+                            .as_deref()
+                            .map(|s| format!("({s})"))
+                            .unwrap_or_else(|| "(no screen name)".to_string());
+                        println!(" {name:16} {short}... {screen}");
+                    }
+                    Err(e) => {
+                        println!(" {name:16} (error: {e})");
+                    }
+                }
+            }
+            0
+        }
+        ProfilesCommands::Create { name, screen_name } => {
+            match profile::create_profile(&name, screen_name.as_deref()) {
+                Ok(prof) => {
+                    let kp = peeroxide_dht::hyperdht::KeyPair::from_seed(prof.seed);
+                    let pubkey_hex = hex::encode(kp.public_key);
+                    println!("Created profile '{name}'");
+                    println!("Name: {name}");
+                    println!("Public key: {pubkey_hex}");
+                    0
+                }
+                Err(e) => {
+                    eprintln!("error: {e}");
+                    1
+                }
+            }
+        }
+        ProfilesCommands::Delete { name } => {
+            if name == "default" {
+                eprintln!("error: cannot delete the default profile");
+                return 1;
+            }
+            match profile::delete_profile(&name) {
+                Ok(()) => {
+                    println!("Deleted profile '{name}'");
+                    0
+                }
+                Err(e) => {
+                    eprintln!("error: {e}");
+                    1
+                }
+            }
+        }
+    }
+}
+
+fn run_friends_sync(command: FriendsCommands) -> i32 {
+    match command {
+        FriendsCommands::List { profile } => {
+            let friends = match profile::load_friends(&profile) {
+                Ok(f) => f,
+                Err(e) => {
+                    eprintln!("error: {e}");
+                    return 1;
+                }
+            };
+            if friends.is_empty() {
+                println!("No friends. 
Add one with: peeroxide chat friends add <key>");
+                return 0;
+            }
+            for f in &friends {
+                let pk_hex = hex::encode(f.pubkey);
+                let short = &pk_hex[..8];
+                let alias_str = f.alias.as_deref().unwrap_or("");
+                let name_str = f
+                    .cached_name
+                    .clone()
+                    .unwrap_or_else(|| names::generate_name_from_seed(&f.pubkey));
+                if alias_str.is_empty() {
+                    println!(" {short} {name_str}");
+                } else {
+                    println!(" {short} {alias_str} ({name_str})");
+                }
+            }
+            0
+        }
+        FriendsCommands::Add { key, alias, profile } => {
+            // Resolve key: could be full 64-char hex, 8-char shortkey, or name@shortkey
+            let pubkey = match resolve_recipient(&profile, &key) {
+                Ok(pk) => pk,
+                Err(e) => {
+                    eprintln!("error: {e}");
+                    return 1;
+                }
+            };
+            if let Err(e) = std::fs::create_dir_all(profile::profile_dir(&profile)) {
+                eprintln!("error: {e}");
+                return 1;
+            }
+            let alias = match alias {
+                Some(alias) => Some(alias),
+                None => known_users::load_shared_users()
+                    .ok()
+                    .and_then(|users| {
+                        users
+                            .into_iter()
+                            .find(|user| user.pubkey == pubkey)
+                            .map(|user| user.screen_name)
+                            .filter(|name| !name.is_empty())
+                    })
+                    .or_else(|| Some(names::generate_name_from_seed(&pubkey))),
+            };
+            let friend = profile::Friend {
+                pubkey,
+                alias,
+                cached_name: None,
+                cached_bio_line: None,
+            };
+            if let Err(e) = profile::save_friend(&profile, &friend) {
+                eprintln!("error: {e}");
+                return 1;
+            }
+            println!("Added friend {}", hex::encode(pubkey));
+            0
+        }
+        FriendsCommands::Remove { key, profile } => {
+            let pubkey = match resolve_recipient(&profile, &key) {
+                Ok(pk) => pk,
+                Err(e) => {
+                    eprintln!("error: {e}");
+                    return 1;
+                }
+            };
+            if let Err(e) = profile::remove_friend(&profile, &pubkey) {
+                eprintln!("error: {e}");
+                return 1;
+            }
+            println!("Removed friend {}", &hex::encode(pubkey)[..8]);
+            0
+        }
+        FriendsCommands::Refresh => unreachable!(),
+    }
+}
+
+async fn run_friends_refresh(cfg: &ResolvedConfig) -> i32 {
+    use libudx::UdxRuntime;
+    use peeroxide_dht::hyperdht;
+
+    let dht_config =
crate::cmd::build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + let (task, handle, _) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: failed to start DHT: {e}"); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + return 1; + } + + eprintln!("*** refreshing friend nexus records..."); + nexus::refresh_friends(&handle, "default").await; + eprintln!("*** done"); + + let _ = handle.destroy().await; + let _ = task.await; + 0 +} + +/// Resolve a recipient identifier to a 32-byte Ed25519 public key. +pub fn resolve_recipient(profile_name: &str, input: &str) -> Result<[u8; 32], String> { + let resolved = if input.len() == 64 { + match hex::decode(input) { + Ok(bytes) if bytes.len() == 32 => { + let mut pk = [0u8; 32]; + pk.copy_from_slice(&bytes); + Ok(pk) + } + _ => Err(format!("invalid 64-char hex pubkey: '{input}'")), + } + } else if let Some(shortkey) = input.strip_prefix('@') { + resolve_shortkey_input(shortkey) + } else if let Some(pos) = input.rfind('@') { + let name_part = &input[..pos]; + let shortkey_part = &input[pos + 1..]; + let pk = resolve_shortkey_input(shortkey_part)?; + + let users = known_users::load_shared_users() + .map_err(|e| format!("failed to load known users: {e}"))?; + if let Some(user) = users.iter().find(|u| u.pubkey == pk) { + if user.screen_name == name_part { + Ok(pk) + } else { + Err("name mismatch".to_string()) + } + } else { + Ok(pk) + } + } else if input.len() == 8 && input.chars().all(|c| c.is_ascii_hexdigit()) { + resolve_shortkey_input(input) + } else { + let friends = profile::load_friends(profile_name).unwrap_or_default(); + let mut matched_pubkeys: Vec<[u8; 32]> = Vec::new(); + for f in &friends { + if f.alias.as_deref() == Some(input) { + matched_pubkeys.push(f.pubkey); + } + } + + if matched_pubkeys.is_empty() { + let 
users = known_users::load_shared_users().unwrap_or_default(); + for u in &users { + if u.screen_name == input { + matched_pubkeys.push(u.pubkey); + } + } + } + + matched_pubkeys.sort(); + matched_pubkeys.dedup(); + match matched_pubkeys.len() { + 1 => Ok(matched_pubkeys[0]), + 0 => Err(format!("recipient '{input}' not found")), + n => Err(format!("recipient '{input}' is ambiguous ({n} matches)")), + } + }; + + let resolved = resolved?; + if let Ok(own_prof) = profile::load_profile(profile_name) { + let own_kp = peeroxide_dht::hyperdht::KeyPair::from_seed(own_prof.seed); + if resolved == own_kp.public_key { + return Err("cannot send a DM to yourself".to_string()); + } + } + Ok(resolved) +} + +fn resolve_shortkey_input(shortkey: &str) -> Result<[u8; 32], String> { + let mut cache = known_users::SharedKnownUsers::load_from_shared(); + match cache.resolve_shortkey(shortkey) { + Ok(Some(pk)) => Ok(pk), + Ok(None) => Err(format!("shortkey '{shortkey}' not found in known users")), + Err(e) => Err(format!("failed to search known users: {e}")), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use std::io::{self, Write}; + use std::path::Path; + use std::process::Command; + use tempfile::TempDir; + + fn pk(byte: u8) -> [u8; 32] { + [byte; 32] + } + + fn profile_root(home: &Path) -> std::path::PathBuf { + home.join(".config/peeroxide/chat/profiles") + } + + struct HomeGuard(Option); + + impl HomeGuard { + fn set(home: &Path) -> Self { + let prev = std::env::var_os("HOME"); + unsafe { std::env::set_var("HOME", home) }; + Self(prev) + } + } + + impl Drop for HomeGuard { + fn drop(&mut self) { + match self.0.take() { + Some(prev) => unsafe { std::env::set_var("HOME", prev) }, + None => unsafe { std::env::remove_var("HOME") }, + } + } + } + + fn write_profile(home: &Path, name: &str, seed: [u8; 32]) -> io::Result<()> { + let dir = profile_root(home).join(name); + fs::create_dir_all(&dir)?; + fs::write(dir.join("seed"), seed) + } + + fn write_known_users(home: 
&Path, rows: &[([u8; 32], &str)]) -> io::Result<()> { + let dir = home.join(".config").join("peeroxide").join("chat"); + fs::create_dir_all(&dir)?; + let mut file = fs::OpenOptions::new() + .create(true) + .append(true) + .open(dir.join("known_users"))?; + for (pubkey, name) in rows { + writeln!(file, "{}\t{}", hex::encode(pubkey), name)?; + } + Ok(()) + } + + fn write_friends(home: &Path, profile_name: &str, rows: &[([u8; 32], Option<&str>)]) -> io::Result<()> { + let dir = profile_root(home).join(profile_name); + fs::create_dir_all(&dir)?; + let mut file = fs::OpenOptions::new() + .create(true) + .append(true) + .open(dir.join("friends"))?; + for (pubkey, alias) in rows { + writeln!(file, "{}\t{}\t\t", hex::encode(pubkey), alias.unwrap_or(""))?; + } + Ok(()) + } + + fn prepare_profile(home: &Path, profile_name: &str) -> io::Result<()> { + fs::create_dir_all(profile_root(home).join(profile_name)) + } + + fn friend_output(friend: &profile::Friend) -> String { + let pk_hex = hex::encode(friend.pubkey); + let short = &pk_hex[..8]; + let alias_str = friend.alias.as_deref().unwrap_or(""); + let name_str = friend + .cached_name + .clone() + .unwrap_or_else(|| names::generate_name_from_seed(&friend.pubkey)); + if alias_str.is_empty() { + format!(" {short} {name_str}") + } else { + format!(" {short} {alias_str} ({name_str})") + } + } + + fn current_test_binary() -> std::path::PathBuf { + std::env::current_exe().unwrap() + } + + fn run_child_case(home: &Path, case: &str, profile_name: &str, input: &str) { + let output = Command::new(current_test_binary()) + .args(["--exact", "resolve_recipient_sandbox", "--nocapture"]) + .env("HOME", home) + .env("RESOLVE_CASE", case) + .env("RESOLVE_PROFILE", profile_name) + .env("RESOLVE_INPUT", input) + .output() + .unwrap(); + assert!( + output.status.success(), + "stdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + } + + fn run_friends_child_case(home: &Path, case: &str) 
{ + let output = Command::new(current_test_binary()) + .args(["--exact", "friends_sandbox", "--nocapture"]) + .env("HOME", home) + .env("FRIENDS_CASE", case) + .output() + .unwrap(); + assert!( + output.status.success(), + "stdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); + } + + #[test] + fn test_resolve_64char_valid_hex() { + let tmp = TempDir::new().unwrap(); + let input = hex::encode([0x11u8; 32]); + run_child_case(tmp.path(), "valid_hex", "default", &input); + } + + #[test] + fn test_resolve_64char_invalid_hex() { + let tmp = TempDir::new().unwrap(); + let input = "g".repeat(64); + run_child_case(tmp.path(), "invalid_hex", "default", &input); + } + + #[test] + fn test_resolve_at_shortkey() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(1), "Alice")]).unwrap(); + let shortkey = &hex::encode(pk(1))[..8]; + run_child_case(tmp.path(), "at_shortkey", "default", &format!("@{shortkey}")); + } + + #[test] + fn test_resolve_name_at_shortkey() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(2), "alice")]).unwrap(); + let shortkey = &hex::encode(pk(2))[..8]; + run_child_case(tmp.path(), "name_at_shortkey", "default", &format!("alice@{shortkey}")); + } + + #[test] + fn test_resolve_bare_shortkey() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(3), "Bob")]).unwrap(); + let shortkey = &hex::encode(pk(3))[..8]; + run_child_case(tmp.path(), "bare_shortkey", "default", shortkey); + } + + #[test] + fn test_resolve_friend_alias() { + let tmp = TempDir::new().unwrap(); + write_friends(tmp.path(), "default", &[(pk(4), Some("carol"))]).unwrap(); + run_child_case(tmp.path(), "friend_alias", "default", "carol"); + } + + #[test] + fn test_resolve_known_user_screen_name() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(5), "dave")]).unwrap(); + run_child_case(tmp.path(), "known_user", "default", "dave"); 
+ } + + #[test] + fn test_resolve_friend_alias_priority() { + let tmp = TempDir::new().unwrap(); + write_friends(tmp.path(), "default", &[(pk(6), Some("erin"))]).unwrap(); + write_known_users(tmp.path(), &[(pk(7), "erin")]).unwrap(); + run_child_case(tmp.path(), "friend_priority", "default", "erin"); + } + + #[test] + fn test_resolve_ambiguous() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(8), "frank"), (pk(9), "frank")]).unwrap(); + run_child_case(tmp.path(), "ambiguous", "default", "frank"); + } + + #[test] + fn test_resolve_not_found() { + let tmp = TempDir::new().unwrap(); + run_child_case(tmp.path(), "not_found", "default", "missing"); + } + + #[test] + fn test_resolve_name_mismatch() { + let tmp = TempDir::new().unwrap(); + write_known_users(tmp.path(), &[(pk(10), "grace")]).unwrap(); + let shortkey = &hex::encode(pk(10))[..8]; + run_child_case(tmp.path(), "name_mismatch", "default", &format!("wrong@{shortkey}")); + } + + #[test] + fn test_friends_add_auto_alias_vendor() { + let _guard = profile::test_home_lock().lock().unwrap(); + let tmp = TempDir::new().unwrap(); + run_friends_child_case(tmp.path(), "vendor"); + } + + #[test] + fn test_friends_add_auto_alias_explicit_preserved() { + let _guard = profile::test_home_lock().lock().unwrap(); + let tmp = TempDir::new().unwrap(); + run_friends_child_case(tmp.path(), "explicit"); + } + + #[test] + fn test_friends_list_vendor_fallback() { + let _guard = profile::test_home_lock().lock().unwrap(); + let tmp = TempDir::new().unwrap(); + run_friends_child_case(tmp.path(), "vendor_fallback"); + } + + #[test] + fn test_friends_list_cached_name_preserved() { + let tmp = TempDir::new().unwrap(); + let _home = HomeGuard::set(tmp.path()); + prepare_profile(tmp.path(), "default").unwrap(); + let friend = profile::Friend { + pubkey: pk(14), + alias: Some("pal".to_string()), + cached_name: Some("Alice".to_string()), + cached_bio_line: None, + }; + let line = friend_output(&friend); + 
assert!(line.contains("Alice")); + assert!(!line.contains("(unknown)")); + } + + #[test] + fn test_resolve_self_guard() { + let tmp = TempDir::new().unwrap(); + let seed = [0x42u8; 32]; + write_profile(tmp.path(), "default", seed).unwrap(); + let own_pk = peeroxide_dht::hyperdht::KeyPair::from_seed(seed).public_key; + run_child_case(tmp.path(), "self_guard", "default", &hex::encode(own_pk)); + } + + #[test] + fn resolve_recipient_sandbox() { + let case = match std::env::var("RESOLVE_CASE") { + Ok(v) => v, + Err(_) => return, + }; + let profile_name = std::env::var("RESOLVE_PROFILE").unwrap(); + let input = std::env::var("RESOLVE_INPUT").unwrap(); + match case.as_str() { + "valid_hex" => { + let pk = resolve_recipient(&profile_name, &input).unwrap(); + assert_eq!(pk, [0x11u8; 32]); + } + "invalid_hex" => { + let err = resolve_recipient(&profile_name, &input).unwrap_err(); + assert_eq!(err, format!("invalid 64-char hex pubkey: '{input}'")); + } + "at_shortkey" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(1)); + } + "name_at_shortkey" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(2)); + } + "bare_shortkey" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(3)); + } + "friend_alias" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(4)); + } + "known_user" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(5)); + } + "friend_priority" => { + assert_eq!(resolve_recipient(&profile_name, &input).unwrap(), pk(6)); + } + "ambiguous" => { + let err = resolve_recipient(&profile_name, &input).unwrap_err(); + assert!(err.contains("ambiguous")); + } + "not_found" => { + let err = resolve_recipient(&profile_name, &input).unwrap_err(); + assert!(err.contains("not found")); + } + "name_mismatch" => { + let err = resolve_recipient(&profile_name, &input).unwrap_err(); + assert_eq!(err, "name mismatch"); + } + "self_guard" => { + let err = 
resolve_recipient(&profile_name, &input).unwrap_err(); + assert_eq!(err, "cannot send a DM to yourself"); + } + other => panic!("unknown case: {other}"), + } + } + + #[test] + fn friends_sandbox() { + let case = match std::env::var("FRIENDS_CASE") { + Ok(v) => v, + Err(_) => return, + }; + let home = std::path::PathBuf::from(std::env::var_os("HOME").unwrap()); + match case.as_str() { + "vendor" => { + let _home = HomeGuard::set(&home); + prepare_profile(&home, "default").unwrap(); + let pubkey = pk(11); + let expected = names::generate_name_from_seed(&pubkey); + let friend = profile::Friend { + pubkey, + alias: Some(expected.clone()), + cached_name: None, + cached_bio_line: None, + }; + profile::save_friend("default", &friend).unwrap(); + let loaded = profile::load_friends("default").unwrap(); + assert_eq!(loaded.len(), 1); + assert_eq!(loaded[0].alias.as_deref(), Some(expected.as_str())); + } + "explicit" => { + let _home = HomeGuard::set(&home); + prepare_profile(&home, "default").unwrap(); + let friend = profile::Friend { + pubkey: pk(12), + alias: Some("buddy".to_string()), + cached_name: None, + cached_bio_line: None, + }; + profile::save_friend("default", &friend).unwrap(); + let loaded = profile::load_friends("default").unwrap(); + assert_eq!(loaded.len(), 1); + assert_eq!(loaded[0].alias.as_deref(), Some("buddy")); + } + "vendor_fallback" => { + let _home = HomeGuard::set(&home); + prepare_profile(&home, "default").unwrap(); + let pubkey = pk(13); + let friend = profile::Friend { + pubkey, + alias: None, + cached_name: None, + cached_bio_line: None, + }; + profile::save_friend("default", &friend).unwrap(); + let loaded = profile::load_friends("default").unwrap(); + let line = friend_output(&loaded[0]); + let expected = names::generate_name_from_seed(&pubkey); + assert!(line.contains(&expected)); + assert!(!line.contains("(unknown)")); + } + other => panic!("unknown case: {other}"), + } + } +} diff --git a/peeroxide-cli/src/cmd/chat/name_resolver.rs 
b/peeroxide-cli/src/cmd/chat/name_resolver.rs new file mode 100644 index 0000000..88d979c --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/name_resolver.rs @@ -0,0 +1,324 @@ +//! Single source of truth for resolving a pubkey to a human-readable name. +//! +//! Before this module the same precedence ladder (friend alias → in-flight +//! screen_name → cached known-users screen_name → deterministic vendor +//! name → 8-char shortkey) was duplicated across several call sites: +//! `display::DisplayState::format_display_name`, `inbox_monitor:: +//! format_invite_lines`, the various slash-command output paths, and the +//! DM bar name. Each had slightly different framing rules and small +//! inconsistencies. +//! +//! [`NameResolver`] centralises the lookup; callers compose the framing +//! they need from [`ResolvedName`]'s components or its format helpers. +//! +//! The resolver is purely message-agnostic — it takes only a pubkey. The +//! chat-message rendering path layers msg-specific behaviour (the +//! `msg.screen_name` override, the cooldown `!` bang, the `(alias) ` +//! framing) on top of this base resolver. + +use crate::cmd::chat::known_users::KnownUser; +use crate::cmd::chat::names; +use crate::cmd::chat::profile::Friend; + +/// Which source produced the resolved name. Callers use this to pick +/// suitable framing — e.g. friend aliases show bare in compact contexts, +/// while screen / vendor names get the `@shortkey` suffix to disambiguate. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NameSource { + /// Matched a `Friend.alias` in the user's profile-local friends file. + FriendAlias, + /// Matched a `screen_name` in the shared `known_users` cache (i.e. + /// the pubkey has authored at least one message we've seen). + KnownScreenName, + /// Fell through to the deterministic vendor name derived from the + /// pubkey. Always available. + VendorName, +} + +/// Outcome of a name resolution. 
Carries the components separately so
+/// callers can compose them into a label of their choice; the
+/// [`Self::bar_label`] / [`Self::formal`] helpers cover the common cases.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ResolvedName {
+    /// The primary name segment (no decoration). Examples:
+    /// FriendAlias → "alice"
+    /// KnownScreenName → "alice"
+    /// VendorName → "tyrannical_elbakyan"
+    pub name: String,
+    /// 8-char hex shortkey suffix (`hex::encode(pubkey)[..8]`). Always
+    /// populated regardless of source.
+    pub shortkey: String,
+    pub source: NameSource,
+}
+
+impl ResolvedName {
+    /// Compact label suitable for narrow contexts (status bar, single-line
+    /// summaries). Friend aliases show bare; everything else gets the
+    /// `@shortkey` suffix for disambiguation.
+    ///
+    /// - `FriendAlias` → `"alice"`
+    /// - `KnownScreenName` → `"alice@abc12345"`
+    /// - `VendorName` → `"tyrannical_elbakyan@abc12345"`
+    pub fn bar_label(&self) -> String {
+        match self.source {
+            NameSource::FriendAlias => self.name.clone(),
+            NameSource::KnownScreenName | NameSource::VendorName => {
+                format!("{}@{}", self.name, self.shortkey)
+            }
+        }
+    }
+
+    /// "Formal" label suitable for system notices / verbose output where
+    /// disambiguation matters: always `"<name> (<shortkey>)"`. Friend
+    /// aliases still benefit from the parenthesised shortkey here so the
+    /// user can verify they're acting on the expected identity.
+    ///
+    /// - any source → `"alice (abc12345)"`
+    pub fn formal(&self) -> String {
+        format!("{} ({})", self.name, self.shortkey)
+    }
+
+    /// True when the name came from a friend alias — useful for callers
+    /// that want to skip a `*** vendor@short is <fullkey>` identity notice
+    /// when the user has already aliased the sender.
+    pub fn is_friend(&self) -> bool {
+        matches!(self.source, NameSource::FriendAlias)
+    }
+
+    /// True when the resolver only had the deterministic vendor name to
+    /// fall back on — i.e. no friend alias and no cached screen name.
+ pub fn is_vendor_fallback(&self) -> bool { + matches!(self.source, NameSource::VendorName) + } +} + +/// Resolver bound to a particular friends list + known-users snapshot. +/// Cheap to construct from borrowed slices; doesn't allocate until +/// `resolve` is called. +pub struct NameResolver<'a> { + friends: &'a [Friend], + known_users: &'a [KnownUser], +} + +impl<'a> NameResolver<'a> { + pub fn new(friends: &'a [Friend], known_users: &'a [KnownUser]) -> Self { + Self { + friends, + known_users, + } + } + + /// Resolver with no friends list (caller has nothing loaded). Falls + /// through to known-users / vendor name resolution. + pub fn from_known_users(known_users: &'a [KnownUser]) -> Self { + Self { + friends: &[], + known_users, + } + } + + /// Apply the precedence ladder to one pubkey: + /// 1. Friend with non-empty alias → `FriendAlias`. + /// 2. Known user with non-empty screen_name → `KnownScreenName`. + /// 3. Deterministic vendor name from `names::generate_name_from_seed` + /// → `VendorName`. 
+ pub fn resolve(&self, pubkey: &[u8; 32]) -> ResolvedName { + let shortkey = hex::encode(pubkey)[..8].to_string(); + + if let Some(friend) = self.friends.iter().find(|f| f.pubkey == *pubkey) { + if let Some(alias) = friend.alias.as_ref() { + if !alias.is_empty() { + return ResolvedName { + name: alias.clone(), + shortkey, + source: NameSource::FriendAlias, + }; + } + } + } + + if let Some(user) = self.known_users.iter().find(|u| u.pubkey == *pubkey) { + if !user.screen_name.is_empty() { + return ResolvedName { + name: user.screen_name.clone(), + shortkey, + source: NameSource::KnownScreenName, + }; + } + } + + ResolvedName { + name: names::generate_name_from_seed(pubkey), + shortkey, + source: NameSource::VendorName, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn friend(pubkey_byte: u8, alias: Option<&str>) -> Friend { + Friend { + pubkey: [pubkey_byte; 32], + alias: alias.map(|s| s.to_string()), + cached_name: None, + cached_bio_line: None, + } + } + + fn known(pubkey_byte: u8, screen_name: &str) -> KnownUser { + KnownUser { + pubkey: [pubkey_byte; 32], + screen_name: screen_name.to_string(), + } + } + + #[test] + fn friend_alias_wins_over_known_screen_name() { + let friends = vec![friend(0x42, Some("Alice"))]; + let knowns = vec![known(0x42, "alice_v2")]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.name, "Alice"); + assert_eq!(resolved.source, NameSource::FriendAlias); + } + + #[test] + fn known_screen_name_used_when_no_friend_alias() { + let friends: Vec<Friend> = vec![]; + let knowns = vec![known(0x42, "alice")]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.name, "alice"); + assert_eq!(resolved.source, NameSource::KnownScreenName); + } + + #[test] + fn friend_without_alias_falls_through_to_known() { + // Friend exists but has no alias — known cache should still resolve. 
+ let friends = vec![friend(0x42, None)]; + let knowns = vec![known(0x42, "alice")]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.name, "alice"); + assert_eq!(resolved.source, NameSource::KnownScreenName); + } + + #[test] + fn friend_with_empty_alias_falls_through() { + let friends = vec![friend(0x42, Some(""))]; + let knowns = vec![known(0x42, "alice")]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.source, NameSource::KnownScreenName); + } + + #[test] + fn vendor_name_fallback_when_unknown() { + let friends: Vec<Friend> = vec![]; + let knowns: Vec<KnownUser> = vec![]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0xab; 32]); + assert_eq!(resolved.source, NameSource::VendorName); + assert!(!resolved.name.is_empty(), "vendor name should be non-empty"); + assert_eq!(resolved.shortkey, "abababab"); + } + + #[test] + fn known_with_empty_screen_name_falls_through_to_vendor() { + let friends: Vec<Friend> = vec![]; + let knowns = vec![known(0x42, "")]; + let r = NameResolver::new(&friends, &knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.source, NameSource::VendorName); + } + + #[test] + fn shortkey_always_populated() { + let r = NameResolver::from_known_users(&[]); + let resolved = r.resolve(&[0x01; 32]); + assert_eq!(resolved.shortkey.len(), 8); + assert_eq!(resolved.shortkey, "01010101"); + } + + #[test] + fn from_known_users_constructor_implies_no_friends() { + let knowns = vec![known(0x42, "alice")]; + let r = NameResolver::from_known_users(&knowns); + let resolved = r.resolve(&[0x42; 32]); + assert_eq!(resolved.source, NameSource::KnownScreenName); + } + + #[test] + fn bar_label_friend_is_bare() { + let resolved = ResolvedName { + name: "alice".to_string(), + shortkey: "abc12345".to_string(), + source: NameSource::FriendAlias, + }; + assert_eq!(resolved.bar_label(), "alice"); + } + + #[test] + fn 
bar_label_known_includes_short() { + let resolved = ResolvedName { + name: "alice".to_string(), + shortkey: "abc12345".to_string(), + source: NameSource::KnownScreenName, + }; + assert_eq!(resolved.bar_label(), "alice@abc12345"); + } + + #[test] + fn bar_label_vendor_includes_short() { + let resolved = ResolvedName { + name: "tyrannical_elbakyan".to_string(), + shortkey: "abc12345".to_string(), + source: NameSource::VendorName, + }; + assert_eq!(resolved.bar_label(), "tyrannical_elbakyan@abc12345"); + } + + #[test] + fn formal_always_paren_short() { + for source in [ + NameSource::FriendAlias, + NameSource::KnownScreenName, + NameSource::VendorName, + ] { + let resolved = ResolvedName { + name: "alice".to_string(), + shortkey: "abc12345".to_string(), + source, + }; + assert_eq!(resolved.formal(), "alice (abc12345)"); + } + } + + #[test] + fn source_predicates() { + let friend = ResolvedName { + name: "a".into(), + shortkey: "00000000".into(), + source: NameSource::FriendAlias, + }; + let vendor = ResolvedName { + name: "a".into(), + shortkey: "00000000".into(), + source: NameSource::VendorName, + }; + let known = ResolvedName { + name: "a".into(), + shortkey: "00000000".into(), + source: NameSource::KnownScreenName, + }; + assert!(friend.is_friend()); + assert!(!vendor.is_friend()); + assert!(!known.is_friend()); + assert!(vendor.is_vendor_fallback()); + assert!(!friend.is_vendor_fallback()); + assert!(!known.is_vendor_fallback()); + } +} diff --git a/peeroxide-cli/src/cmd/chat/names.rs b/peeroxide-cli/src/cmd/chat/names.rs new file mode 100644 index 0000000..530ab4e --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/names.rs @@ -0,0 +1,566 @@ +use rand::{rngs::StdRng, Rng, SeedableRng}; + +const ADJECTIVES: &[&str] = &[ + "abhorrent", + "abominable", + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "apocalyptic", + "atrocious", + "awesome", + "baleful", + "barbaric", + "beautiful", + "berserk", + "bestial", + "blasphemous", + 
"blissful", + "bloodthirsty", + "bold", + "boring", + "brave", + "brutal", + "busy", + "calculating", + "callous", + "charming", + "clever", + "compassionate", + "competent", + "condescending", + "confident", + "cool", + "corrupt", + "cranky", + "crazy", + "cruel", + "cursed", + "damnable", + "dazzling", + "debased", + "degenerate", + "depraved", + "deranged", + "despicable", + "determined", + "diabolic", + "diabolical", + "disgusting", + "distracted", + "domineering", + "dreamy", + "dystopian", + "eager", + "ecstatic", + "egregious", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "erratic", + "exciting", + "execrable", + "fanatical", + "ferocious", + "fervent", + "festive", + "fiendish", + "filthy", + "flagitious", + "flamboyant", + "focused", + "friendly", + "frosty", + "funny", + "gallant", + "genocidal", + "ghoulish", + "gifted", + "goofy", + "gracious", + "great", + "grim", + "grotesque", + "happy", + "hardcore", + "hateful", + "heartless", + "heinous", + "heuristic", + "hopeful", + "hungry", + "ignoble", + "implacable", + "infallible", + "infernal", + "iniquitous", + "insane", + "inspiring", + "intelligent", + "interesting", + "ironfisted", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "lewd", + "loathsome", + "loving", + "lucid", + "macabre", + "magical", + "malevolent", + "malicious", + "malignant", + "maniacal", + "merciless", + "miscreant", + "modest", + "monstrous", + "murderous", + "musing", + "mystifying", + "naughty", + "nefarious", + "nervous", + "nice", + "nifty", + "nihilistic", + "nostalgic", + "noxious", + "objective", + "obscene", + "odious", + "ominous", + "optimistic", + "paranoid", + "peaceful", + "pedantic", + "pensive", + "perverted", + "pestilent", + "practical", + "priceless", + "profane", + "psychotic", + "putrid", + "quirky", + "quizzical", + "rabid", + "rancorous", + "raunchy", + "recursing", + "relaxed", + "repulsive", + "reverent", + "revolting", + "romantic", + "ruthless", + "sad", + "sadistic", + 
"satanic", + "scurrilous", + "serene", + "sharp", + "silly", + "sinister", + "sleepy", + "sociopathic", + "stoic", + "strange", + "stupefied", + "suspicious", + "sweet", + "tender", + "thirsty", + "totalitarian", + "treacherous", + "truculent", + "trusting", + "twisted", + "tyrannical", + "unhinged", + "unholy", + "unruffled", + "upbeat", + "vengeful", + "venomous", + "vibrant", + "vicious", + "vigilant", + "vigorous", + "vile", + "villainous", + "vindictive", + "violent", + "volatile", + "whorish", + "wicked", + "wizardly", + "wonderful", + "wrathful", + "xenodochial", + "youthful", + "zealous", + "zen", +]; + +const SURNAMES: &[&str] = &[ + "agnesi", + "albattani", + "allen", + "almeida", + "amin", + "antonelli", + "archimedes", + "ardinghelli", + "aryabhata", + "assad", + "attila", + "austin", + "babbage", + "babydoc", + "baghdadi", + "banach", + "banzai", + "barbie", + "bardeen", + "bartik", + "bassi", + "beaver", + "bell", + "benz", + "beria", + "bhabha", + "bhaskara", + "binladen", + "black", + "blackburn", + "blackwell", + "bohr", + "bokassa", + "booth", + "borg", + "bose", + "bouman", + "boyd", + "brahmagupta", + "brattain", + "brown", + "buck", + "bundy", + "burnell", + "caligula", + "cannon", + "carson", + "cartwright", + "carver", + "ceausescu", + "cerf", + "chandrasekhar", + "chaplygin", + "chatelet", + "chatterjee", + "chaum", + "chebyshev", + "clarke", + "cohen", + "colden", + "commodus", + "cori", + "cray", + "curie", + "curran", + "dahmer", + "darwin", + "davinci", + "dewdney", + "dhawan", + "diffie", + "dijkstra", + "dirac", + "dracula", + "driscoll", + "dubinsky", + "duvalier", + "dzerzhinsky", + "easley", + "edison", + "eichmann", + "einstein", + "elbakyan", + "elgamal", + "elion", + "ellis", + "engelbart", + "euclid", + "euler", + "faraday", + "feistel", + "fermat", + "fermi", + "feynman", + "franco", + "franklin", + "gacy", + "gagarin", + "galileo", + "galois", + "ganguly", + "gauss", + "genghis", + "germain", + "goebbels", + "goering", + 
"goldberg", + "goldstine", + "goldwasser", + "golick", + "goodall", + "gould", + "greider", + "grothendieck", + "habre", + "haibt", + "hamilton", + "haslett", + "hawking", + "heisenberg", + "hellman", + "hermann", + "herschel", + "hertz", + "hess", + "heydrich", + "heyrovsky", + "himmler", + "hitler", + "hodgkin", + "hofstadter", + "hoover", + "hopper", + "hoxha", + "hugle", + "hussein", + "hypatia", + "ishii", + "ishizaka", + "jackson", + "jang", + "jemison", + "jennings", + "jepsen", + "johnson", + "joliot", + "jones", + "kalam", + "kapitsa", + "karadzic", + "kare", + "keldysh", + "keller", + "kepler", + "khayyam", + "khorana", + "kilby", + "kimilsung", + "kimjongil", + "kirch", + "knuth", + "koch", + "kony", + "koresh", + "kowalevski", + "lalande", + "lamarr", + "lamport", + "leakey", + "leavitt", + "lederberg", + "lehmann", + "lewin", + "lichterman", + "liskov", + "lovelace", + "lumiere", + "mahavira", + "mao", + "margulis", + "matsumoto", + "maxwell", + "mayer", + "mccarthy", + "mcclintock", + "mclaren", + "mclean", + "mcnulty", + "meitner", + "mendel", + "mendeleev", + "mengele", + "mengistu", + "meninsky", + "merkle", + "mestorf", + "milosevic", + "mirzakhani", + "mobutu", + "molotov", + "montalcini", + "moore", + "morse", + "moser", + "murdock", + "mussolini", + "napier", + "nash", + "nero", + "neumann", + "newton", + "nightingale", + "niyazov", + "nobel", + "noether", + "northcutt", + "noyce", + "panini", + "pare", + "pascal", + "pasteur", + "pavelic", + "payne", + "perlman", + "pike", + "pinochet", + "poincare", + "poitras", + "polpot", + "proskuriakova", + "ptolemy", + "qusay", + "raman", + "ramanujan", + "rhodes", + "ride", + "riosmontt", + "ritchie", + "robinson", + "roentgen", + "rosalind", + "rosenberg", + "rubin", + "saha", + "sammet", + "sanderson", + "satoshi", + "shamir", + "shannon", + "shaw", + "shirley", + "shockley", + "shtern", + "sinoussi", + "snyder", + "solomon", + "speer", + "spence", + "stalin", + "stonebraker", + "streicher", + 
"stroessner", + "sutherland", + "swanson", + "swartz", + "swirles", + "taussig", + "taylor", + "tesla", + "tharp", + "thompson", + "tojo", + "torvalds", + "trujillo", + "tu", + "turing", + "uday", + "varahamihira", + "vaughan", + "videla", + "villani", + "visvesvaraya", + "volhard", + "vyshinsky", + "wescoff", + "wilbur", + "wiles", + "williams", + "williamson", + "wilson", + "wing", + "wozniak", + "wright", + "wu", + "yagoda", + "yalow", + "yezhov", + "yonath", + "zawahiri", + "zhukovsky", +]; + +pub fn generate_name_from_seed(seed: &[u8; 32]) -> String { + let mut rng = StdRng::from_seed(*seed); + let adj = ADJECTIVES[rng.random_range(0..ADJECTIVES.len())]; + let surname = SURNAMES[rng.random_range(0..SURNAMES.len())]; + let num: u16 = rng.random_range(0..10000); + format!("{adj}_{surname}_{num:04}") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn generate_name_is_deterministic() { + let seed = [42u8; 32]; + let name1 = generate_name_from_seed(&seed); + let name2 = generate_name_from_seed(&seed); + assert_eq!(name1, name2); + } + + #[test] + fn generate_name_known_seed() { + let seed = [0u8; 32]; + let name = generate_name_from_seed(&seed); + let parts: Vec<&str> = name.splitn(3, '_').collect(); + assert_eq!(parts.len(), 3, "name must have exactly two underscores: {name}"); + assert_eq!(parts[2].len(), 4, "suffix must be 4 digits: {name}"); + assert!(parts[2].chars().all(|c| c.is_ascii_digit()), "suffix must be digits: {name}"); + } + + #[test] + fn generate_name_format_regex_like() { + let seed = [1u8; 32]; + let name = generate_name_from_seed(&seed); + assert!( + name.chars().all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_'), + "unexpected chars in: {name}" + ); + assert_eq!(name.chars().filter(|&c| c == '_').count(), 2, "expected 2 underscores in: {name}"); + } + + #[test] + fn generate_name_different_seeds_differ() { + let seed_a = [0u8; 32]; + let seed_b = [1u8; 32]; + let name_a = generate_name_from_seed(&seed_a); + let 
name_b = generate_name_from_seed(&seed_b); + assert_ne!(name_a, name_b, "different seeds should (very likely) produce different names"); + } +} diff --git a/peeroxide-cli/src/cmd/chat/nexus.rs b/peeroxide-cli/src/cmd/chat/nexus.rs new file mode 100644 index 0000000..95a6e3c --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/nexus.rs @@ -0,0 +1,403 @@ +use clap::Parser; + +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; + +use crate::cmd::chat::debug; +use crate::cmd::chat::known_users; +use crate::cmd::chat::profile; +use crate::cmd::chat::tui::NoticeSink; +use crate::cmd::chat::wire::NexusRecord; +use crate::cmd::{build_dht_config, sigterm_recv}; +use crate::config::ResolvedConfig; + +use libudx::UdxRuntime; +use peeroxide_dht::hyperdht; + +#[derive(Parser)] +pub struct NexusArgs { + /// Profile to manage + #[arg(long, default_value = "default")] + pub profile: String, + + /// Update screen name + #[arg(long)] + pub set_name: Option<String>, + + /// Update bio + #[arg(long)] + pub set_bio: Option<String>, + + /// Publish nexus to DHT (one-shot) + #[arg(long)] + pub publish: bool, + + /// Look up another user's nexus + #[arg(long)] + pub lookup: Option<String>, + + /// Run continuously: publish own + refresh friends + #[arg(long)] + pub daemon: bool, +} + +pub async fn run(args: NexusArgs, cfg: &ResolvedConfig) -> i32 { + if let Some(ref pubkey_hex) = args.lookup { + return run_lookup(pubkey_hex, cfg).await; + } + + let _ = profile::load_or_create_profile(&args.profile); + + let mut setters_applied = false; + if let Some(ref name) = args.set_name { + let dir = profile::profile_dir(&args.profile); + if let Err(e) = std::fs::write(dir.join("name"), name.trim()) { + eprintln!("error: failed to write name: {e}"); + return 1; + } + println!("Screen name updated to: {}", name.trim()); + setters_applied = true; + } + + if let Some(ref bio) = args.set_bio { + let dir = profile::profile_dir(&args.profile); + if let Err(e) = std::fs::write(dir.join("bio"), bio.trim()) { + eprintln!("error: 
failed to write bio: {e}"); + return 1; + } + println!("Bio updated."); + setters_applied = true; + } + + if setters_applied && !args.publish && !args.daemon { + return 0; + } + + let prof = match profile::load_profile(&args.profile) { + Ok(p) => p, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + let id_keypair = KeyPair::from_seed(prof.seed); + + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + let (task, handle, _) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: failed to start DHT: {e}"); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + return 1; + } + + if args.publish { + publish_nexus_once(&handle, &id_keypair, &args.profile, None).await; + let _ = handle.destroy().await; + let _ = task.await; + return 0; + } + + if args.daemon { + eprintln!("*** nexus daemon started (publish + friend refresh)"); + let profile_name = args.profile.clone(); + let publish_interval = tokio::time::Duration::from_secs(480); + let friend_interval = tokio::time::Duration::from_secs(600); + let mut publish_timer = tokio::time::interval(publish_interval); + let mut friend_timer = tokio::time::interval(friend_interval); + loop { + tokio::select! 
{ + _ = publish_timer.tick() => { + publish_nexus_once(&handle, &id_keypair, &profile_name, None).await; + } + _ = friend_timer.tick() => { + refresh_friends(&handle, &profile_name).await; + } + _ = tokio::signal::ctrl_c() => { + eprintln!("\n*** shutting down"); + break; + } + _ = sigterm_recv() => { + break; + } + } + } + let _ = handle.destroy().await; + let _ = task.await; + return 0; + } + + publish_nexus_once(&handle, &id_keypair, &args.profile, None).await; + let _ = handle.destroy().await; + let _ = task.await; + 0 +} + +async fn publish_nexus_once( + handle: &HyperDhtHandle, + id_keypair: &KeyPair, + profile_name: &str, + notices: Option<&NoticeSink>, +) { + let prof = match profile::load_profile(profile_name) { + Ok(p) => p, + Err(e) => { + emit_notice(notices, format!("warning: failed to load profile for nexus: {e}")); + return; + } + }; + + let record = NexusRecord { + name: prof.screen_name.unwrap_or_default(), + bio: prof.bio.unwrap_or_default(), + }; + + let data = match record.serialize() { + Ok(d) => d, + Err(e) => { + emit_notice(notices, format!("warning: nexus serialize failed: {e}")); + return; + } + }; + + let seq = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + match handle.mutable_put(id_keypair, &data, seq).await { + Ok(_) => { + emit_notice(notices, format!(" nexus published (seq={seq})")); + debug::log_event( + "Nexus publish", + "mutable_put", + &format!( + "id_pubkey={}, seq={seq}, name_len={}, bio_len={}", + debug::short_key(&id_keypair.public_key), + record.name.len(), + record.bio.len(), + ), + ); + } + Err(e) => { + emit_notice(notices, format!("warning: nexus publish failed: {e}")); + } + } +} + +/// Send `line` through the notice sink if one is provided, otherwise fall +/// back to `eprintln!`. The fallback covers callers like the standalone +/// `nexus daemon` subcommand that don't have an interactive UI in play. 
+fn emit_notice(notices: Option<&NoticeSink>, line: String) { + match notices { + Some(s) => s.send(line), + None => eprintln!("{line}"), + } +} + +pub async fn run_nexus_refresh( + handle: HyperDhtHandle, + id_keypair: KeyPair, + profile_name: String, + notices: NoticeSink, +) { + let refresh_interval = tokio::time::Duration::from_secs(480); + let mut interval = tokio::time::interval(refresh_interval); + + loop { + interval.tick().await; + publish_nexus_once(&handle, &id_keypair, &profile_name, Some(¬ices)).await; + } +} + +async fn run_lookup(pubkey_hex: &str, cfg: &ResolvedConfig) -> i32 { + let pk_bytes = match hex::decode(pubkey_hex) { + Ok(b) if b.len() == 32 => { + let mut pk = [0u8; 32]; + pk.copy_from_slice(&b); + pk + } + _ => { + eprintln!("error: invalid pubkey (expected 64-char hex)"); + return 1; + } + }; + + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + let (task, handle, _) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + return 1; + } + + match handle.mutable_get(&pk_bytes, 0).await { + Ok(Some(result)) => match NexusRecord::deserialize(&result.value) { + Ok(nexus) => { + println!("Pubkey: {pubkey_hex}"); + if nexus.name.is_empty() { + println!("Name: (not set)"); + } else { + println!("Name: {}", nexus.name); + } + if !nexus.bio.is_empty() { + println!("Bio: {}", nexus.bio); + } + println!("Seq: {}", result.seq); + } + Err(e) => { + eprintln!("error: failed to parse nexus record: {e}"); + } + }, + Ok(None) => { + println!("No nexus record found for {pubkey_hex}"); + } + Err(e) => { + eprintln!("error: mutable_get failed: {e}"); + } + } + + let _ = handle.destroy().await; + let _ = task.await; + 0 +} + +pub async fn run_friend_refresh( + handle: 
HyperDhtHandle, + profile_name: String, + _notices: NoticeSink, +) { + // `_notices` is reserved for future friend-refresh notifications. Today + // `refresh_one_friend` is silent on success and only logs via + // `debug::log_event` (which respects --debug). If we later want to + // surface, e.g. "*** alice changed name", the sink is ready. + let refresh_interval = tokio::time::Duration::from_secs(600); + let mut interval = tokio::time::interval(refresh_interval); + let mut friend_index: usize = 0; + + loop { + interval.tick().await; + refresh_one_friend(&handle, &profile_name, &mut friend_index).await; + } +} + +async fn refresh_one_friend(handle: &HyperDhtHandle, profile_name: &str, index: &mut usize) { + let friends = match profile::load_friends(profile_name) { + Ok(f) => f, + Err(_) => return, + }; + + if friends.is_empty() { + return; + } + + *index %= friends.len(); + let friend = &friends[*index]; + *index += 1; + + if let Ok(Some(result)) = handle.mutable_get(&friend.pubkey, 0).await { + if let Ok(nexus) = NexusRecord::deserialize(&result.value) { + let mut updated = friend.clone(); + let mut changed = false; + let name = nexus.name.clone(); + let name_len = nexus.name.len(); + let bio_len = nexus.bio.len(); + if !name.is_empty() && updated.cached_name.as_deref() != Some(&name) { + updated.cached_name = Some(name.clone()); + let _ = known_users::update_shared(&friend.pubkey, &name); + changed = true; + } + if !nexus.bio.is_empty() { + let first_line = nexus.bio.lines().next().unwrap_or("").to_owned(); + if updated.cached_bio_line.as_deref() != Some(&first_line) { + updated.cached_bio_line = Some(first_line); + changed = true; + } + } + if changed { + debug::log_event( + "Friend nexus update", + "mutable_get", + &format!( + "friend_pubkey={}, seq={}, name_len={name_len}, bio_len={bio_len}", + debug::short_key(&friend.pubkey), + result.seq, + ), + ); + let _ = profile::remove_friend(profile_name, &friend.pubkey); + let _ = profile::save_friend(profile_name, 
&updated); + } + } + } +} + +pub async fn refresh_friends(handle: &HyperDhtHandle, profile_name: &str) { + let friends = match profile::load_friends(profile_name) { + Ok(f) => f, + Err(_) => return, + }; + + for friend in &friends { + if let Ok(Some(result)) = handle.mutable_get(&friend.pubkey, 0).await { + if let Ok(nexus) = NexusRecord::deserialize(&result.value) { + let mut updated = friend.clone(); + let mut changed = false; + let name = nexus.name.clone(); + let name_len = nexus.name.len(); + let bio_len = nexus.bio.len(); + if !name.is_empty() && updated.cached_name.as_deref() != Some(&name) { + updated.cached_name = Some(name.clone()); + let _ = known_users::update_shared(&updated.pubkey, &name); + changed = true; + } + if !nexus.bio.is_empty() { + let first_line = nexus.bio.lines().next().unwrap_or("").to_owned(); + if updated.cached_bio_line.as_deref() != Some(&first_line) { + updated.cached_bio_line = Some(first_line); + changed = true; + } + } + if changed { + debug::log_event( + "Friend nexus update", + "mutable_get", + &format!( + "friend_pubkey={}, seq={}, name_len={name_len}, bio_len={bio_len}", + debug::short_key(&friend.pubkey), + result.seq, + ), + ); + let _ = profile::remove_friend(profile_name, &friend.pubkey); + let _ = profile::save_friend(profile_name, &updated); + } + } + } + } +} diff --git a/peeroxide-cli/src/cmd/chat/ordering.rs b/peeroxide-cli/src/cmd/chat/ordering.rs new file mode 100644 index 0000000..efabcf5 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/ordering.rs @@ -0,0 +1,723 @@ +use std::collections::{HashMap, HashSet, VecDeque}; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use tokio::sync::mpsc; +use tokio::time::{Duration, Instant}; + +use crate::cmd::chat::display::DisplayMessage; +use crate::cmd::chat::probe; + +/// Default capacity for the shared receiver-side message-hash dedup ring. +pub const DEDUP_RING_CAPACITY: usize = 1000; + +/// Bounded FIFO set of message hashes seen by the receiver. 
+/// +/// One shared instance is threaded through fetch-side filtering and the +/// `ChainGate` so a hash that has ever been admitted is never processed +/// again, regardless of which code path re-encounters it. When the ring +/// reaches capacity the oldest hash is evicted; for chat traffic 1000 +/// entries comfortably covers a session-length window. +pub struct DedupRing { + capacity: usize, + set: HashSet<[u8; 32]>, + queue: VecDeque<[u8; 32]>, +} + +impl DedupRing { + pub fn new(capacity: usize) -> Self { + assert!(capacity > 0, "DedupRing capacity must be positive"); + Self { + capacity, + set: HashSet::with_capacity(capacity), + queue: VecDeque::with_capacity(capacity), + } + } + + pub fn with_default_capacity() -> Self { + Self::new(DEDUP_RING_CAPACITY) + } + + pub fn contains(&self, h: &[u8; 32]) -> bool { + self.set.contains(h) + } + + /// Insert `h` into the ring. Returns `true` if newly added, `false` if + /// already present. When capacity is exceeded the oldest hash is evicted. + pub fn insert(&mut self, h: [u8; 32]) -> bool { + if !self.set.insert(h) { + return false; + } + self.queue.push_back(h); + if self.queue.len() > self.capacity { + if let Some(old) = self.queue.pop_front() { + self.set.remove(&old); + } + } + true + } + + pub fn len(&self) -> usize { + self.queue.len() + } + + pub fn capacity(&self) -> usize { + self.capacity + } +} + +impl Default for DedupRing { + fn default() -> Self { + Self::with_default_capacity() + } +} + +static RELEASE_COUNTER: AtomicU64 = AtomicU64::new(0); + +fn short_hex(b: &[u8; 32]) -> String { + let mut s = String::with_capacity(8); + for byte in &b[..4] { + s.push_str(&format!("{byte:02x}")); + } + s +} + +pub struct PendingMessage { + pub display: DisplayMessage, + pub msg_hash: [u8; 32], + pub prev_msg_hash: [u8; 32], + /// Per-feed chain identifier. 
A single `id_pubkey` may publish across + /// multiple feeds (each CLI run generates a fresh `feed_keypair`, and + /// in-process rotation also creates new ones). Chains are scoped per + /// feed: messages from the same identity but different feeds are + /// independent streams, ordered against themselves but not against + /// each other. + pub feed_pubkey: [u8; 32], +} + +/// Per-(id, feed) chain identity. Two messages from the same `id_pubkey` +/// but different `feed_pubkey`s are independent chains — they may overlap +/// in time, share no causal link, and must not block on each other. +type ChainKey = ([u8; 32], [u8; 32]); + +fn key_of(msg: &PendingMessage) -> ChainKey { + (msg.display.id_pubkey, msg.feed_pubkey) +} + +type BufferedByPrev = HashMap<[u8; 32], (PendingMessage, Instant)>; + +/// Tracks per-(id, feed) chain state and enforces strict `prev_msg_hash` +/// ordering within each chain. +/// +/// Callers submit messages oldest-first. The first message seen for a +/// given `(id_pubkey, feed_pubkey)` anchors that chain; subsequent +/// messages must link to its last released hash, or they are buffered +/// until their predecessor arrives. A second feed from the same identity +/// gets its own independent anchor and chain. +pub struct ChainGate { + last_released: HashMap<ChainKey, [u8; 32]>, + pending: HashMap<ChainKey, BufferedByPrev>, +} + +#[derive(Debug)] +pub enum SubmitOutcome { + Released, + Buffered { missing_predecessor: [u8; 32] }, + Duplicate, +} + +impl ChainGate { + pub fn new() -> Self { + Self { + last_released: HashMap::new(), + pending: HashMap::new(), + } + } + + /// Submit one message. If its predecessor has been released for this + /// chain (or this is the first message we've seen for `(id, feed)`), + /// release immediately and drain any chain-linked buffered + /// descendants. Otherwise buffer and return the predecessor hash so + /// the caller can kick off a refetch. + /// + /// `dedup` is the shared receiver-wide message-hash ring. 
Any hash + /// already present is rejected as `Duplicate` before chain logic + /// runs, so a hash is never released twice even if upstream code + /// paths submit it more than once. + pub fn submit( + &mut self, + msg: PendingMessage, + dedup: &mut DedupRing, + tx: &mpsc::UnboundedSender<DisplayMessage>, + ) -> SubmitOutcome { + let key = key_of(&msg); + let prev = msg.prev_msg_hash; + let own = msg.msg_hash; + + if dedup.contains(&own) || self.last_released.get(&key) == Some(&own) { + return SubmitOutcome::Duplicate; + } + + let anchor = !self.last_released.contains_key(&key); + let chains = self.last_released.get(&key) == Some(&prev); + + if anchor || chains { + self.release(msg, dedup, tx); + self.drain(&key, dedup, tx); + return SubmitOutcome::Released; + } + + self.pending + .entry(key) + .or_default() + .insert(prev, (msg, Instant::now())); + SubmitOutcome::Buffered { + missing_predecessor: prev, + } + } + + fn release( + &mut self, + msg: PendingMessage, + dedup: &mut DedupRing, + tx: &mpsc::UnboundedSender<DisplayMessage>, + ) { + let key = key_of(&msg); + let hash = msg.msg_hash; + // Mark this hash as seen in the shared ring so no other code path can + // re-release it. `insert` is a no-op if it was already present. 
+ dedup.insert(hash); + if probe::is_enabled() { + let n = RELEASE_COUNTER.fetch_add(1, AtomicOrdering::Relaxed) + 1; + let preview: String = msg.display.content.chars().take(40).collect(); + eprintln!( + "[probe] release#{n} msg_hash={} late={} content={:?}", + short_hex(&hash), + msg.display.late, + preview, + ); + } + let _ = tx.send(msg.display); + self.last_released.insert(key, hash); + } + + fn drain( + &mut self, + key: &ChainKey, + dedup: &mut DedupRing, + tx: &mpsc::UnboundedSender<DisplayMessage>, + ) { + loop { + let cursor = match self.last_released.get(key) { + Some(h) => *h, + None => return, + }; + let next = self + .pending + .get_mut(key) + .and_then(|per_chain| per_chain.remove(&cursor)); + let Some((msg, _)) = next else { + return; + }; + self.release(msg, dedup, tx); + } + } + + /// Force-release any buffered messages older than `timeout`. Each released + /// message is tagged `late = true` and `last_released` is reset so future + /// in-order messages chain forward from the late release. Returns the list + /// of predecessor hashes whose buffered descendants were force-released — + /// the caller should stop refetching them. 
+    pub fn expire(
+        &mut self,
+        now: Instant,
+        timeout: Duration,
+        dedup: &mut DedupRing,
+        tx: &mpsc::UnboundedSender<DisplayMessage>,
+    ) -> Vec<[u8; 32]> {
+        let mut abandoned_predecessors: Vec<[u8; 32]> = Vec::new();
+
+        let keys: Vec<ChainKey> = self.pending.keys().copied().collect();
+        for key in keys {
+            let expired_prevs: Vec<[u8; 32]> = {
+                let per_chain = match self.pending.get(&key) {
+                    Some(p) => p,
+                    None => continue,
+                };
+                per_chain
+                    .iter()
+                    .filter(|(_, (_, t))| now.duration_since(*t) >= timeout)
+                    .map(|(k, _)| *k)
+                    .collect()
+            };
+
+            if expired_prevs.is_empty() {
+                continue;
+            }
+
+            let mut expired_msgs: Vec<PendingMessage> = Vec::new();
+            if let Some(per_chain) = self.pending.get_mut(&key) {
+                for prev in &expired_prevs {
+                    if let Some((mut m, _)) = per_chain.remove(prev) {
+                        m.display.late = true;
+                        expired_msgs.push(m);
+                    }
+                }
+            }
+
+            expired_msgs.sort_by_key(|m| m.display.timestamp);
+
+            for m in expired_msgs {
+                let prev = m.prev_msg_hash;
+                self.release(m, dedup, tx);
+                self.drain(&key, dedup, tx);
+                abandoned_predecessors.push(prev);
+            }
+        }
+
+        abandoned_predecessors
+    }
+
+    pub fn buffered_predecessors(&self) -> Vec<[u8; 32]> {
+        let mut out = Vec::new();
+        for per_chain in self.pending.values() {
+            for prev in per_chain.keys() {
+                out.push(*prev);
+            }
+        }
+        out
+    }
+
+    /// Total number of messages currently buffered awaiting a missing
+    /// predecessor, across all per-(id, feed) chains. Used by the status bar
+    /// as the `Receiving... (N)` count — non-zero indicates the receiver is
+    /// holding back messages until the chain completes.
+    pub fn pending_count(&self) -> usize {
+        self.pending.values().map(|per_chain| per_chain.len()).sum()
+    }
+}
+
+/// Sort a batch of messages so each `(id_pubkey, feed_pubkey)` chain plays
+/// oldest-first.
+///
+/// Within each chain, walks the `prev_msg_hash` link starting from the
+/// message whose `prev_msg_hash` is not the `msg_hash` of any other
+/// message in the batch (i.e. the chain root from the batch's
+/// perspective). Messages not reachable from any root are appended at
+/// the end in arrival order.
+pub fn chain_sort(messages: Vec<PendingMessage>) -> Vec<PendingMessage> {
+    let mut by_chain: HashMap<ChainKey, Vec<PendingMessage>> = HashMap::new();
+    for m in messages {
+        by_chain.entry(key_of(&m)).or_default().push(m);
+    }
+
+    let mut out: Vec<PendingMessage> = Vec::new();
+    for (_chain, batch) in by_chain {
+        let mut by_prev: HashMap<[u8; 32], PendingMessage> = HashMap::new();
+        let mut own_hashes: std::collections::HashSet<[u8; 32]> =
+            std::collections::HashSet::new();
+        for m in batch {
+            own_hashes.insert(m.msg_hash);
+            by_prev.insert(m.prev_msg_hash, m);
+        }
+
+        let roots: Vec<[u8; 32]> = by_prev
+            .iter()
+            .filter(|(prev, _)| !own_hashes.contains(*prev))
+            .map(|(prev, _)| *prev)
+            .collect();
+
+        for root in roots {
+            let mut cursor = root;
+            while let Some(m) = by_prev.remove(&cursor) {
+                cursor = m.msg_hash;
+                out.push(m);
+            }
+        }
+
+        // Anything left has a cycle (shouldn't happen) — flush in arrival order.
+        for (_, m) in by_prev {
+            out.push(m);
+        }
+    }
+
+    out
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tokio::sync::mpsc::unbounded_channel;
+
+    fn h(b: u8) -> [u8; 32] {
+        [b; 32]
+    }
+
+    /// Default test feed_pubkey. The single-feed legacy tests all use one
+    /// implicit feed; cross-feed behavior is exercised by `msg_on_feed`.
+    const DEFAULT_FEED: [u8; 32] = [0xFE; 32];
+
+    fn msg(id: u8, own: u8, prev: u8, ts: u64) -> PendingMessage {
+        msg_on_feed(id, DEFAULT_FEED, own, prev, ts)
+    }
+
+    fn msg_on_feed(
+        id: u8,
+        feed_pubkey: [u8; 32],
+        own: u8,
+        prev: u8,
+        ts: u64,
+    ) -> PendingMessage {
+        PendingMessage {
+            display: DisplayMessage {
+                id_pubkey: h(id),
+                screen_name: String::new(),
+                content: format!("msg-{own}"),
+                timestamp: ts,
+                is_self: false,
+                late: false,
+            },
+            msg_hash: h(own),
+            prev_msg_hash: h(prev),
+            feed_pubkey,
+        }
+    }
+
+    fn collect(rx: &mut mpsc::UnboundedReceiver<DisplayMessage>) -> Vec<String> {
+        let mut out = Vec::new();
+        while let Ok(m) = rx.try_recv() {
+            out.push(m.content);
+        }
+        out
+    }
+
+    fn collect_with_late(
+        rx: &mut mpsc::UnboundedReceiver<DisplayMessage>,
+    ) -> Vec<(String, bool)> {
+        let mut out = Vec::new();
+        while let Ok(m) = rx.try_recv() {
+            out.push((m.content, m.late));
+        }
+        out
+    }
+
+    #[test]
+    fn in_order_release() {
+        let (tx, mut rx) = unbounded_channel();
+        let mut g = ChainGate::new();
+        let mut d = DedupRing::new(1000);
+        assert!(matches!(
+            g.submit(msg(1, 1, 0, 1), &mut d, &tx),
+            SubmitOutcome::Released
+        ));
+        assert!(matches!(
+            g.submit(msg(1, 2, 1, 2), &mut d, &tx),
+            SubmitOutcome::Released
+        ));
+        assert!(matches!(
+            g.submit(msg(1, 3, 2, 3), &mut d, &tx),
+            SubmitOutcome::Released
+        ));
+        assert_eq!(collect(&mut rx), vec!["msg-1", "msg-2", "msg-3"]);
+    }
+
+    #[test]
+    fn reverse_arrival_buffers_then_drains() {
+        let (tx, mut rx) = unbounded_channel();
+        let mut g = ChainGate::new();
+        let mut d = DedupRing::new(1000);
+        // First message anchors the chain.
+        let r1 = g.submit(msg(1, 1, 0, 1), &mut d, &tx);
+        assert!(matches!(r1, SubmitOutcome::Released));
+        // msg 3 arrives before msg 2 — must buffer.
+        let r3 = g.submit(msg(1, 3, 2, 3), &mut d, &tx);
+        assert!(matches!(
+            r3,
+            SubmitOutcome::Buffered { missing_predecessor } if missing_predecessor == h(2)
+        ));
+        // msg 2 arrives — releases 2 then drains 3.
+ let r2 = g.submit(msg(1, 2, 1, 2), &mut d, &tx); + assert!(matches!(r2, SubmitOutcome::Released)); + assert_eq!(collect(&mut rx), vec!["msg-1", "msg-2", "msg-3"]); + } + + #[test] + fn gap_timeout_releases_late() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + // Skip msg 2; submit msg 3 — buffered. + let _ = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + // Drain msg 1. + let _ = collect(&mut rx); + + let later = Instant::now() + Duration::from_secs(10); + let abandoned = g.expire(later, Duration::from_secs(5), &mut d, &tx); + assert_eq!(abandoned, vec![h(2)]); + let got = collect_with_late(&mut rx); + assert_eq!(got, vec![("msg-3".to_string(), true)]); + } + + #[test] + fn gap_timeout_then_chain_resumes() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + let _ = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + let _ = collect(&mut rx); + + let later = Instant::now() + Duration::from_secs(10); + let _ = g.expire(later, Duration::from_secs(5), &mut d, &tx); + let _ = collect(&mut rx); + + // After timeout, last_released should be msg 3's hash. msg 4 chains forward. + let r4 = g.submit(msg(1, 4, 3, 4), &mut d, &tx); + assert!(matches!(r4, SubmitOutcome::Released)); + assert_eq!(collect(&mut rx), vec!["msg-4"]); + } + + #[test] + fn two_sender_interleave_preserves_per_sender_chain() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + // A1, B1, A2, B2 arriving interleaved + let _ = g.submit(msg(1, 10, 0, 1), &mut d, &tx); + let _ = g.submit(msg(2, 20, 0, 1), &mut d, &tx); + let _ = g.submit(msg(1, 11, 10, 2), &mut d, &tx); + let _ = g.submit(msg(2, 21, 20, 2), &mut d, &tx); + let got = collect(&mut rx); + // Cross-sender order is arrival-order, not enforced; but per-sender chain is. 
+ assert_eq!(got, vec!["msg-10", "msg-20", "msg-11", "msg-21"]); + } + + #[test] + fn anchor_on_mid_stream_join() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + // We join when sender has already published; the first thing we receive + // is msg 5 (no predecessor available locally). It should anchor. + let r = g.submit(msg(1, 5, 4, 5), &mut d, &tx); + assert!(matches!(r, SubmitOutcome::Released)); + // msg 6 chains forward. + let r6 = g.submit(msg(1, 6, 5, 6), &mut d, &tx); + assert!(matches!(r6, SubmitOutcome::Released)); + assert_eq!(collect(&mut rx), vec!["msg-5", "msg-6"]); + } + + #[test] + fn duplicate_submit_ignored() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + let r = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + assert!(matches!(r, SubmitOutcome::Duplicate)); + assert_eq!(collect(&mut rx), vec!["msg-1"]); + } + + #[test] + fn dedup_ring_blocks_re_release_after_chain_moves_on() { + // Reproduces the test2.out symptom: a hash is released, the chain + // advances past it, then the same hash is re-submitted later (e.g. + // via a refetch path or a duplicate FeedRecord entry). Without the + // shared dedup ring the per-sender `last_released` no longer matches + // and the gate would re-release. With the ring, it is rejected. + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + let _ = g.submit(msg(1, 2, 1, 2), &mut d, &tx); + let _ = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + assert_eq!(collect(&mut rx), vec!["msg-1", "msg-2", "msg-3"]); + + // Same hash arrives again from a different code path — must be a no-op. 
+ let r = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + assert!(matches!(r, SubmitOutcome::Duplicate)); + let r = g.submit(msg(1, 2, 1, 2), &mut d, &tx); + assert!(matches!(r, SubmitOutcome::Duplicate)); + assert!(collect(&mut rx).is_empty()); + } + + #[test] + fn dedup_ring_blocks_re_release_after_expire() { + // A buffered message is force-released as late; submitting it again + // afterwards (e.g. a slow refetch finally returns) must not re-emit. + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + let _ = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + let _ = collect(&mut rx); + + let later = Instant::now() + Duration::from_secs(10); + let _ = g.expire(later, Duration::from_secs(5), &mut d, &tx); + let _ = collect(&mut rx); + + // msg 3 is now in the ring. Re-submitting it must be a Duplicate. + let r = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + assert!(matches!(r, SubmitOutcome::Duplicate)); + assert!(collect(&mut rx).is_empty()); + } + + #[test] + fn dedup_ring_bounded_evicts_oldest() { + let mut d = DedupRing::new(3); + assert!(d.insert(h(1))); + assert!(d.insert(h(2))); + assert!(d.insert(h(3))); + assert!(d.contains(&h(1))); + // Fourth insert evicts the first. + assert!(d.insert(h(4))); + assert!(!d.contains(&h(1))); + assert!(d.contains(&h(2))); + assert!(d.contains(&h(3))); + assert!(d.contains(&h(4))); + // Duplicate insert is a no-op and does not advance eviction. + assert!(!d.insert(h(4))); + assert_eq!(d.len(), 3); + } + + #[test] + fn chain_sort_orders_oldest_first() { + // Submit newest-first; chain_sort should reverse into chain order. 
+ let input = vec![msg(1, 3, 2, 3), msg(1, 2, 1, 2), msg(1, 1, 0, 1)]; + let sorted = chain_sort(input); + let contents: Vec<_> = sorted.iter().map(|m| m.display.content.clone()).collect(); + assert_eq!(contents, vec!["msg-1", "msg-2", "msg-3"]); + } + + #[test] + fn chain_sort_two_senders_independent() { + let input = vec![ + msg(1, 3, 2, 3), + msg(2, 30, 20, 3), + msg(1, 2, 1, 2), + msg(2, 20, 10, 2), + msg(1, 1, 0, 1), + msg(2, 10, 0, 1), + ]; + let sorted = chain_sort(input); + // Within each sender, order is chain-correct; cross-sender is unspecified. + let by_sender: HashMap<[u8; 32], Vec> = + sorted.iter().fold(HashMap::new(), |mut acc, m| { + acc.entry(m.display.id_pubkey) + .or_default() + .push(m.display.content.clone()); + acc + }); + assert_eq!(by_sender[&h(1)], vec!["msg-1", "msg-2", "msg-3"]); + assert_eq!(by_sender[&h(2)], vec!["msg-10", "msg-20", "msg-30"]); + } + + #[test] + fn two_feeds_same_id_independent_chains() { + // Same id_pubkey publishes via two different feeds (e.g. CLI run A + // and CLI run B with the same profile). Each feed has its own + // independent chain rooted at prev=0. Neither should buffer or be + // marked late just because the other anchor is set. + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let feed_a = [0xA1; 32]; + let feed_b = [0xB2; 32]; + + // Feed A chain: anchor + one more + assert!(matches!( + g.submit(msg_on_feed(1, feed_a, 1, 0, 1), &mut d, &tx), + SubmitOutcome::Released + )); + assert!(matches!( + g.submit(msg_on_feed(1, feed_a, 2, 1, 2), &mut d, &tx), + SubmitOutcome::Released + )); + + // Feed B (same id) starts a NEW chain rooted at prev=0. Old behavior + // (single-anchor-per-id) would buffer this because last_released[id] + // would already be set to feed_a's tail. New behavior anchors per + // (id, feed_b) independently. 
+ assert!(matches!( + g.submit(msg_on_feed(1, feed_b, 10, 0, 1), &mut d, &tx), + SubmitOutcome::Released + )); + assert!(matches!( + g.submit(msg_on_feed(1, feed_b, 11, 10, 2), &mut d, &tx), + SubmitOutcome::Released + )); + + let got = collect(&mut rx); + assert_eq!(got, vec!["msg-1", "msg-2", "msg-10", "msg-11"]); + } + + #[test] + fn pending_count_reflects_buffered_messages() { + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + assert_eq!(g.pending_count(), 0); + + // anchor + immediate release → no buffer growth + let _ = g.submit(msg(1, 1, 0, 1), &mut d, &tx); + assert_eq!(g.pending_count(), 0); + + // submit out-of-order msg → buffers + let _ = g.submit(msg(1, 3, 2, 3), &mut d, &tx); + assert_eq!(g.pending_count(), 1); + let _ = g.submit(msg(1, 4, 3, 4), &mut d, &tx); + assert_eq!(g.pending_count(), 2); + + // submit the missing predecessor → drains both + let _ = g.submit(msg(1, 2, 1, 2), &mut d, &tx); + assert_eq!(g.pending_count(), 0); + // sanity: order is correct + let got = collect(&mut rx); + assert_eq!(got, vec!["msg-1", "msg-2", "msg-3", "msg-4"]); + } + + #[test] + fn two_feeds_same_id_no_cross_buffer_under_gap() { + // Feed A has a gap (msg 2 missing). Feed B from the same id is + // entirely intact. The gap on feed A must not cause feed B's + // messages to buffer or be marked late. + let (tx, mut rx) = unbounded_channel(); + let mut g = ChainGate::new(); + let mut d = DedupRing::new(1000); + let feed_a = [0xA1; 32]; + let feed_b = [0xB2; 32]; + + // Feed A: anchor msg 1, then msg 3 (gap on msg 2) + let _ = g.submit(msg_on_feed(1, feed_a, 1, 0, 1), &mut d, &tx); + let _ = g.submit(msg_on_feed(1, feed_a, 3, 2, 3), &mut d, &tx); + + // Feed B: complete chain, must not be impacted by feed A's gap. 
+ assert!(matches!( + g.submit(msg_on_feed(1, feed_b, 10, 0, 1), &mut d, &tx), + SubmitOutcome::Released + )); + assert!(matches!( + g.submit(msg_on_feed(1, feed_b, 11, 10, 2), &mut d, &tx), + SubmitOutcome::Released + )); + + let got = collect_with_late(&mut rx); + // msg-1 from feed_a anchors (no late), msg-3 buffered (not in output + // yet), msg-10 + msg-11 from feed_b release cleanly without late tag. + assert_eq!( + got, + vec![ + ("msg-1".to_string(), false), + ("msg-10".to_string(), false), + ("msg-11".to_string(), false), + ] + ); + + // Now expire — feed_a's msg-3 should release as late on its own + // chain only, leaving feed_b untouched. + let later = Instant::now() + Duration::from_secs(10); + let abandoned = g.expire(later, Duration::from_secs(5), &mut d, &tx); + assert_eq!(abandoned, vec![h(2)]); + let got = collect_with_late(&mut rx); + assert_eq!(got, vec![("msg-3".to_string(), true)]); + } +} diff --git a/peeroxide-cli/src/cmd/chat/post.rs b/peeroxide-cli/src/cmd/chat/post.rs new file mode 100644 index 0000000..61eb2e8 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/post.rs @@ -0,0 +1,251 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + +use peeroxide_dht::crypto::hash; +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::feed::FeedState; +use crate::cmd::chat::probe; +use crate::cmd::chat::wire::{self, MessageEnvelope, SummaryBlock}; + +static POST_COUNTER: AtomicU64 = AtomicU64::new(0); + +/// Trigger threshold (in `msg_hashes` length) at which a summary block is +/// extracted to keep the active FeedRecord bounded. +pub const SUMMARY_EVICT_TRIGGER: usize = 20; +/// Number of oldest hashes folded into the summary block when eviction fires. +pub const SUMMARY_EVICT_COUNT: usize = 15; + +/// The synchronous "prepare a single message" step shared by the legacy +/// per-message publisher (`post_message`) and the batched publisher. 
+/// +/// Builds a signed envelope linking to `feed_state.prev_msg_hash`, encrypts +/// it, computes the hash, and mutates `feed_state` to advance the chain +/// (prepends to `msg_hashes`, updates `prev_msg_hash`). Performs summary +/// block eviction when `msg_hashes` reaches `SUMMARY_EVICT_TRIGGER` *before* +/// the new hash is added; the summary's serialized bytes are returned to +/// the caller so the network put can happen alongside the message put. +/// +/// `seq` is **not** bumped here — the caller controls when to advance seq +/// (one bump per network publish: per-message for the legacy path, per-batch +/// for the batched publisher). +pub fn prepare_one( + feed_state: &mut FeedState, + id_keypair: &KeyPair, + message_key: &[u8; 32], + screen_name: &str, + content: &str, +) -> Result { + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let post_n = POST_COUNTER.fetch_add(1, Ordering::Relaxed) + 1; + if probe::is_enabled() { + let preview: String = content.chars().take(40).collect(); + crate::cmd::chat::tui::emit_notice(format!("[probe] post#{post_n} content={preview:?}")); + } + + let envelope = MessageEnvelope::sign( + &id_keypair.secret_key, + id_keypair.public_key, + feed_state.prev_msg_hash, + timestamp, + wire::CONTENT_TYPE_TEXT, + screen_name, + content, + ); + + let plaintext = envelope.serialize(); + let encrypted = wire::encrypt_message(message_key, &plaintext) + .map_err(|e| format!("encryption failed: {e}"))?; + + if encrypted.len() > wire::MAX_RECORD_SIZE { + return Err(format!( + "message too large: {} bytes (max {})", + encrypted.len(), + wire::MAX_RECORD_SIZE + )); + } + + let msg_hash = hash(&encrypted); + let prev_msg_hash = feed_state.prev_msg_hash; + if probe::is_enabled() { + crate::cmd::chat::tui::emit_notice(format!( + "[probe] post#{post_n} msg_hash={} prev={}", + debug::short_key(&msg_hash), + debug::short_key(&prev_msg_hash), + )); + } + + 
debug::log_event( + "Message posted", + "immutable_put", + &format!( + "msg_hash={}, author={}, prev_hash={}, ts={timestamp}, content_type=0x{:02x}", + debug::short_key(&msg_hash), + debug::short_key(&id_keypair.public_key), + debug::short_key(&prev_msg_hash), + envelope.content_type, + ), + ); + + // Eviction must happen before we add the new hash so the summary frames + // the oldest already-published 15 (not 14 + the brand-new entry). + let summary_data = if feed_state.msg_hashes.len() >= SUMMARY_EVICT_TRIGGER { + let total = feed_state.msg_hashes.len(); + let keep = total - SUMMARY_EVICT_COUNT; + let evicted: Vec<[u8; 32]> = feed_state.msg_hashes[keep..].to_vec(); + let evicted_oldest_first: Vec<[u8; 32]> = evicted.into_iter().rev().collect(); + + let summary = SummaryBlock::sign( + &id_keypair.secret_key, + id_keypair.public_key, + feed_state.summary_hash, + evicted_oldest_first, + ); + + let data = summary + .serialize() + .map_err(|e| format!("summary serialize: {e}"))?; + let summary_hash = hash(&data); + + debug::log_event( + "Summary block", + "immutable_put", + &format!( + "summary_hash={}, id_pubkey={}, msg_count={}, prev_summary={}", + debug::short_key(&summary_hash), + debug::short_key(&id_keypair.public_key), + SUMMARY_EVICT_COUNT, + debug::short_key(&feed_state.summary_hash), + ), + ); + + feed_state.summary_hash = summary_hash; + feed_state.msg_hashes.truncate(keep); + feed_state.msg_count = feed_state.msg_hashes.len() as u8; + Some(data) + } else { + None + }; + + // Update feed state synchronously — hash is deterministic. + feed_state.msg_hashes.insert(0, msg_hash); + feed_state.msg_count = feed_state.msg_hashes.len() as u8; + feed_state.prev_msg_hash = msg_hash; + + Ok(Prepared { + encrypted, + msg_hash, + summary_data, + }) +} + +/// Result of `prepare_one`: ciphertext to put + hash + (optionally) a +/// summary block whose bytes need to be `immutable_put` alongside. 
+pub struct Prepared {
+    pub encrypted: Vec<u8>,
+    pub msg_hash: [u8; 32],
+    pub summary_data: Option<Vec<u8>>,
+}
+
+/// Prepares a message for posting: encrypts, computes hash, updates feed state,
+/// then spawns all network operations (immutable_put, mutable_put, announce) in
+/// the background. Returns immediately after state mutation so the input loop
+/// is never blocked by network latency.
+///
+/// Used by the DM command path. The channel-join path uses the batched
+/// publisher in `publisher.rs` instead.
+pub fn post_message(
+    handle: &HyperDhtHandle,
+    feed_state: &mut FeedState,
+    id_keypair: &KeyPair,
+    message_key: &[u8; 32],
+    channel_key: &[u8; 32],
+    screen_name: &str,
+    content: &str,
+) -> Result<(), String> {
+    let prepared = prepare_one(feed_state, id_keypair, message_key, screen_name, content)?;
+
+    feed_state.seq += 1;
+
+    let feed_record_data = feed_state.serialize_feed_record();
+    let epoch = crypto::current_epoch();
+    let bucket = feed_state.next_bucket();
+    let topic = crypto::announce_topic(channel_key, epoch, bucket);
+    let feed_kp = feed_state.feed_keypair.clone();
+    let seq = feed_state.seq;
+    let msg_count = feed_state.msg_count;
+    let Prepared {
+        encrypted,
+        summary_data,
+        ..
+ } = prepared; + + // Spawn all network operations as a background task chain + let h = handle.clone(); + tokio::spawn(async move { + // immutable_put for message (and summary if needed) + let (msg_put, _) = tokio::join!( + h.immutable_put(&encrypted), + async { + if let Some(data) = summary_data { + if let Err(e) = h.immutable_put(&data).await { + crate::cmd::chat::tui::emit_notice(format!( + "warning: summary immutable_put failed: {e}" + )); + } + } + } + ); + + if let Err(e) = msg_put { + crate::cmd::chat::tui::emit_notice(format!( + "warning: message immutable_put failed: {e}" + )); + return; + } + + // mutable_put + announce fire concurrently + let h2 = h.clone(); + let (put_res, _) = tokio::join!( + async { + let r = h.mutable_put(&feed_kp, &feed_record_data, seq).await; + if r.is_ok() { + debug::log_event( + "Feed record update", + "mutable_put", + &format!( + "feed_pubkey={}, seq={seq}, msg_count={msg_count}", + debug::short_key(&feed_kp.public_key), + ), + ); + } + r + }, + async { + let _ = h2.announce(topic, &feed_kp, &[]).await; + debug::log_event( + "Channel announce", + "announce", + &format!( + "feed_pubkey={}, epoch={epoch}, bucket={bucket}, topic={}", + debug::short_key(&feed_kp.public_key), + debug::short_key(&topic), + ), + ); + } + ); + + if let Err(e) = put_res { + crate::cmd::chat::tui::emit_notice(format!( + "warning: feed mutable_put failed: {e}" + )); + } + }); + + Ok(()) +} diff --git a/peeroxide-cli/src/cmd/chat/probe.rs b/peeroxide-cli/src/cmd/chat/probe.rs new file mode 100644 index 0000000..1ead9cd --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/probe.rs @@ -0,0 +1,26 @@ +//! Receiver-side message-flow probes for chat. +//! +//! When enabled via `--probe`, emits structured one-line records to stderr +//! at the key transitions in the publish/receive pipeline: +//! +//! * `stdin#N read=...` — every line read from stdin by the publisher +//! * `post#N content=...` — every call to `post_message` +//! * `post#N msg_hash=... 
prev=...` — the hash chain link recorded for each post +//! * `fetch_batch msg_hashes_total=X unseen=Y` — every receiver fetch batch +//! * `release#N msg_hash=... late=... content=...` — every gate release +//! +//! Useful for diagnosing publisher↔receiver ordering bugs and duplicate +//! releases without recompiling. Counter IDs are global to the process so +//! turning the flag on mid-session may produce non-zero starting indices. + +use std::sync::atomic::{AtomicBool, Ordering}; + +static PROBE_ENABLED: AtomicBool = AtomicBool::new(false); + +pub fn enable() { + PROBE_ENABLED.store(true, Ordering::Relaxed); +} + +pub fn is_enabled() -> bool { + PROBE_ENABLED.load(Ordering::Relaxed) +} diff --git a/peeroxide-cli/src/cmd/chat/profile.rs b/peeroxide-cli/src/cmd/chat/profile.rs new file mode 100644 index 0000000..b8e188c --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/profile.rs @@ -0,0 +1,562 @@ +//! Profile directory management for the peeroxide chat system. +//! +//! ## Directory Layout +//! +//! ```text +//! ~/.config/peeroxide/chat/profiles// +//! ├── seed # 32 raw bytes (Ed25519 seed) +//! ├── name # UTF-8 screen name (optional) +//! ├── bio # UTF-8 bio text (optional) +//! └── friends # tab-separated: pubkey\talias\tcached_name\tcached_bio_line +//! ``` +//! +//! The shared known-users cache lives one level up at +//! `~/.config/peeroxide/chat/known_users` and is process-wide, not per +//! profile — see `known_users::shared_known_users_path`. + +use std::collections::HashMap; +use std::fs; +use std::io::{self, Write}; +use std::path::PathBuf; +#[cfg(test)] +use std::sync::{Mutex, OnceLock}; + +use peeroxide_dht::hyperdht::KeyPair; + +use super::names; + +/// A local chat identity stored on disk. +#[derive(Debug, Clone)] +pub struct Profile { + /// Directory name used to identify this profile on disk. + pub name: String, + /// Raw Ed25519 seed (32 bytes). + pub seed: [u8; 32], + /// Optional human-readable screen name. 
+    pub screen_name: Option<String>,
+    /// Optional biography text.
+    pub bio: Option<String>,
+}
+
+/// A trusted contact stored in the `friends` file.
+#[derive(Debug, Clone)]
+pub struct Friend {
+    /// The friend's Ed25519 public key (32 bytes).
+    pub pubkey: [u8; 32],
+    /// Local alias chosen by the profile owner.
+    pub alias: Option<String>,
+    /// Most recently cached screen name announced by the friend.
+    pub cached_name: Option<String>,
+    /// Most recently cached first line of bio announced by the friend.
+    pub cached_bio_line: Option<String>,
+}
+
+/// Returns `~/.config/peeroxide/chat/profiles/`.
+pub fn profiles_dir() -> PathBuf {
+    dirs::home_dir()
+        .unwrap_or_else(|| PathBuf::from("."))
+        .join(".config")
+        .join("peeroxide")
+        .join("chat")
+        .join("profiles")
+}
+
+/// Returns the directory for a specific named profile.
+pub fn profile_dir(name: &str) -> PathBuf {
+    profiles_dir().join(name)
+}
+
+/// Creates a new profile on disk.
+///
+/// Generates a fresh random 32-byte seed, creates the profile directory, and
+/// writes the seed (and optional screen name) to disk. Fails if the profile
+/// already exists.
+pub fn create_profile(name: &str, screen_name: Option<&str>) -> io::Result<Profile> {
+    let dir = profile_dir(name);
+    if dir.exists() {
+        return Err(io::Error::new(
+            io::ErrorKind::AlreadyExists,
+            format!("profile '{}' already exists at {}", name, dir.display()),
+        ));
+    }
+    fs::create_dir_all(&dir)?;
+
+    let mut seed = [0u8; 32];
+    {
+        use rand::RngCore;
+        rand::rng().fill_bytes(&mut seed);
+    }
+
+    fs::write(dir.join("seed"), seed)?;
+
+    let effective_screen_name = match screen_name {
+        Some(sn) => sn.to_owned(),
+        None => names::generate_name_from_seed(&KeyPair::from_seed(seed).public_key),
+    };
+    fs::write(dir.join("name"), &effective_screen_name)?;
+
+    Ok(Profile {
+        name: name.to_owned(),
+        seed,
+        screen_name: Some(effective_screen_name),
+        bio: None,
+    })
+}
+
+/// Loads an existing profile from disk.
+pub fn load_profile(name: &str) -> io::Result { + let dir = profile_dir(name); + + let seed_bytes = fs::read(dir.join("seed"))?; + if seed_bytes.len() != 32 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "profile '{}': seed file must be exactly 32 bytes, got {}", + name, + seed_bytes.len() + ), + )); + } + let mut seed = [0u8; 32]; + seed.copy_from_slice(&seed_bytes); + + let screen_name = match read_optional_text(&dir.join("name"))? { + Some(name) => Some(name), + None => Some(names::generate_name_from_seed(&KeyPair::from_seed(seed).public_key)), + }; + let bio = read_optional_text(&dir.join("bio"))?; + + Ok(Profile { + name: name.to_owned(), + seed, + screen_name, + bio, + }) +} + +pub fn load_or_create_profile(name: &str) -> io::Result { + match load_profile(name) { + Ok(p) => Ok(p), + Err(e) if e.kind() == io::ErrorKind::NotFound => { + eprintln!("*** creating new profile '{name}'"); + create_profile(name, None) + } + Err(e) => Err(e), + } +} + +/// Deletes a profile and all its files from disk. +pub fn delete_profile(name: &str) -> io::Result<()> { + let dir = profile_dir(name); + if !dir.exists() { + return Err(io::Error::new( + io::ErrorKind::NotFound, + format!("profile '{}' does not exist", name), + )); + } + fs::remove_dir_all(dir) +} + +/// Lists all profile names (subdirectory names inside `profiles_dir()`). +pub fn list_profiles() -> io::Result> { + let dir = profiles_dir(); + if !dir.exists() { + return Ok(Vec::new()); + } + let mut names = Vec::new(); + for entry in fs::read_dir(&dir)? { + let entry = entry?; + if entry.file_type()?.is_dir() { + if let Some(n) = entry.file_name().to_str() { + names.push(n.to_owned()); + } + } + } + names.sort(); + Ok(names) +} + +/// Loads the `friends` file for the given profile. +/// +/// Lines are tab-separated: `<64-hex-pubkey>\t\t\t`. +/// Lines starting with `#` are comments and are skipped. When the same +/// public key appears more than once, the **last** entry wins. 
+pub fn load_friends(profile_name: &str) -> io::Result> { + let path = profile_dir(profile_name).join("friends"); + if !path.exists() { + return Ok(Vec::new()); + } + let content = fs::read_to_string(&path)?; + + let mut map: HashMap<[u8; 32], (usize, Friend)> = HashMap::new(); + let mut order: Vec<[u8; 32]> = Vec::new(); + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + let parts: Vec<&str> = line.splitn(4, '\t').collect(); + let pubkey = match decode_pubkey(parts[0]) { + Ok(k) => k, + Err(_) => continue, + }; + let alias = optional_field(parts.get(1).copied()); + let cached_name = optional_field(parts.get(2).copied()); + let cached_bio_line = optional_field(parts.get(3).copied()); + + let friend = Friend { + pubkey, + alias, + cached_name, + cached_bio_line, + }; + + if let Some(existing) = map.get_mut(&pubkey) { + existing.1 = friend; + } else { + let idx = order.len(); + order.push(pubkey); + map.insert(pubkey, (idx, friend)); + } + } + + let mut result: Vec<(usize, Friend)> = map.into_values().collect(); + result.sort_by_key(|(idx, _)| *idx); + Ok(result.into_iter().map(|(_, f)| f).collect()) +} + +/// Appends or updates a friend entry in the `friends` file. +/// +/// The entry is always appended; deduplication happens at read time (latest +/// entry wins). +pub fn save_friend(profile_name: &str, friend: &Friend) -> io::Result<()> { + let path = profile_dir(profile_name).join("friends"); + let mut file = fs::OpenOptions::new() + .create(true) + .append(true) + .open(&path)?; + + let line = format!( + "{}\t{}\t{}\t{}\n", + hex::encode(friend.pubkey), + friend.alias.as_deref().unwrap_or(""), + friend.cached_name.as_deref().unwrap_or(""), + friend.cached_bio_line.as_deref().unwrap_or(""), + ); + file.write_all(line.as_bytes()) +} + +/// Removes a friend from the `friends` file by rewriting the file without +/// any entries for the given public key. 
+pub fn remove_friend(profile_name: &str, pubkey: &[u8; 32]) -> io::Result<()> { + let path = profile_dir(profile_name).join("friends"); + if !path.exists() { + return Ok(()); + } + let content = fs::read_to_string(&path)?; + let target_hex = hex::encode(pubkey); + + let filtered: String = content + .lines() + .filter(|line| { + let l = line.trim(); + if l.is_empty() || l.starts_with('#') { + return true; + } + let first_field = l.split('\t').next().unwrap_or(""); + first_field != target_hex + }) + .map(|l| format!("{}\n", l)) + .collect(); + + fs::write(&path, filtered) +} + +#[cfg(test)] +pub(crate) fn test_home_lock() -> &'static Mutex<()> { + static LOCK: OnceLock> = OnceLock::new(); + LOCK.get_or_init(|| Mutex::new(())) +} + +fn read_optional_text(path: &std::path::Path) -> io::Result> { + match fs::read_to_string(path) { + Ok(s) => { + let trimmed = s.trim().to_owned(); + if trimmed.is_empty() { + Ok(None) + } else { + Ok(Some(trimmed)) + } + } + Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(e), + } +} + +fn decode_pubkey(s: &str) -> Result<[u8; 32], hex::FromHexError> { + let bytes = hex::decode(s)?; + if bytes.len() != 32 { + // `hex::FromHexError` has no wrong-length variant; `InvalidStringLength` + // is the closest available error for a well-formed but wrong-sized decode. 
+ return Err(hex::FromHexError::InvalidStringLength); + } + let mut key = [0u8; 32]; + key.copy_from_slice(&bytes); + Ok(key) +} + +fn optional_field(s: Option<&str>) -> Option { + match s { + Some(v) if !v.is_empty() => Some(v.to_owned()), + _ => None, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + fn do_create_profile( + profiles_root: &std::path::Path, + name: &str, + screen_name: Option<&str>, + ) -> io::Result { + let dir = profiles_root.join(name); + if dir.exists() { + return Err(io::Error::new(io::ErrorKind::AlreadyExists, "already exists")); + } + fs::create_dir_all(&dir)?; + + let mut seed = [0u8; 32]; + { + use rand::RngCore; + rand::rng().fill_bytes(&mut seed); + } + fs::write(dir.join("seed"), seed)?; + let effective_screen_name = match screen_name { + Some(sn) => sn.to_owned(), + None => crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(seed).public_key), + }; + fs::write(dir.join("name"), &effective_screen_name)?; + Ok(Profile { + name: name.to_owned(), + seed, + screen_name: Some(effective_screen_name), + bio: None, + }) + } + + fn do_load_profile(profiles_root: &std::path::Path, name: &str) -> io::Result { + let dir = profiles_root.join(name); + let seed_bytes = fs::read(dir.join("seed"))?; + if seed_bytes.len() != 32 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "seed must be 32 bytes", + )); + } + let mut seed = [0u8; 32]; + seed.copy_from_slice(&seed_bytes); + let screen_name = match read_optional_text(&dir.join("name"))? 
{ + Some(name) => Some(name), + None => Some(crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(seed).public_key)), + }; + let bio = read_optional_text(&dir.join("bio"))?; + Ok(Profile { + name: name.to_owned(), + seed, + screen_name, + bio, + }) + } + + #[test] + fn profile_create_load_roundtrip() { + let tmp = TempDir::new().unwrap(); + let root = tmp.path(); + + let created = do_create_profile(root, "alice", Some("Alice Liddell")).unwrap(); + assert_eq!(created.name, "alice"); + assert_eq!(created.screen_name.as_deref(), Some("Alice Liddell")); + assert!(created.bio.is_none()); + + let loaded = do_load_profile(root, "alice").unwrap(); + assert_eq!(loaded.name, "alice"); + assert_eq!(loaded.seed, created.seed); + assert_eq!(loaded.screen_name, created.screen_name); + } + + #[test] + fn profile_create_no_screen_name() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "bob", None).unwrap(); + let expected = crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(created.seed).public_key); + assert_eq!(created.screen_name.as_deref(), Some(expected.as_str())); + let loaded = do_load_profile(tmp.path(), "bob").unwrap(); + assert_eq!(loaded.screen_name.as_deref(), Some(expected.as_str())); + } + + #[test] + fn profile_seed_is_32_bytes() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "carol", None).unwrap(); + let raw = fs::read(tmp.path().join("carol").join("seed")).unwrap(); + assert_eq!(raw.len(), 32); + assert_eq!(raw.as_slice(), created.seed.as_slice()); + } + + fn write_friends_file(dir: &std::path::Path, content: &str) -> io::Result<()> { + fs::create_dir_all(dir)?; + fs::write(dir.join("friends"), content) + } + + fn parse_friends_from_dir(dir: &std::path::Path) -> io::Result> { + let path = dir.join("friends"); + if !path.exists() { + return Ok(Vec::new()); + } + let content = fs::read_to_string(&path)?; + let mut map: HashMap<[u8; 32], (usize, Friend)> = 
HashMap::new(); + let mut order: Vec<[u8; 32]> = Vec::new(); + + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + let parts: Vec<&str> = line.splitn(4, '\t').collect(); + let pubkey = match decode_pubkey(parts[0]) { + Ok(k) => k, + Err(_) => continue, + }; + let alias = optional_field(parts.get(1).copied()); + let cached_name = optional_field(parts.get(2).copied()); + let cached_bio_line = optional_field(parts.get(3).copied()); + let friend = Friend { pubkey, alias, cached_name, cached_bio_line }; + if let Some(existing) = map.get_mut(&pubkey) { + existing.1 = friend; + } else { + let idx = order.len(); + order.push(pubkey); + map.insert(pubkey, (idx, friend)); + } + } + let mut result: Vec<(usize, Friend)> = map.into_values().collect(); + result.sort_by_key(|(idx, _)| *idx); + Ok(result.into_iter().map(|(_, f)| f).collect()) + } + + fn pubkey_from_u8(n: u8) -> [u8; 32] { + let mut k = [0u8; 32]; + k[0] = n; + k + } + + #[test] + fn friends_parse_basic() { + let tmp = TempDir::new().unwrap(); + let key_a = pubkey_from_u8(1); + let key_b = pubkey_from_u8(2); + let content = format!( + "# comment\n{}\talias_a\tCached A\tBio A\n{}\t\t\t\n", + hex::encode(key_a), + hex::encode(key_b), + ); + write_friends_file(tmp.path(), &content).unwrap(); + + let friends = parse_friends_from_dir(tmp.path()).unwrap(); + assert_eq!(friends.len(), 2); + + assert_eq!(friends[0].pubkey, key_a); + assert_eq!(friends[0].alias.as_deref(), Some("alias_a")); + assert_eq!(friends[0].cached_name.as_deref(), Some("Cached A")); + assert_eq!(friends[0].cached_bio_line.as_deref(), Some("Bio A")); + + assert_eq!(friends[1].pubkey, key_b); + assert!(friends[1].alias.is_none()); + assert!(friends[1].cached_name.is_none()); + assert!(friends[1].cached_bio_line.is_none()); + } + + #[test] + fn friends_dedup_last_wins() { + let tmp = TempDir::new().unwrap(); + let key = pubkey_from_u8(42); + let content = format!( + 
"{}\told_alias\told_name\told_bio\n{}\tnew_alias\tnew_name\tnew_bio\n", + hex::encode(key), + hex::encode(key), + ); + write_friends_file(tmp.path(), &content).unwrap(); + + let friends = parse_friends_from_dir(tmp.path()).unwrap(); + assert_eq!(friends.len(), 1); + assert_eq!(friends[0].alias.as_deref(), Some("new_alias")); + assert_eq!(friends[0].cached_name.as_deref(), Some("new_name")); + } + + #[test] + fn friends_skips_malformed_lines() { + let tmp = TempDir::new().unwrap(); + let key = pubkey_from_u8(5); + let content = format!( + "not-hex\talias\tname\tbio\n{}\tvalid\t\t\n", + hex::encode(key), + ); + write_friends_file(tmp.path(), &content).unwrap(); + let friends = parse_friends_from_dir(tmp.path()).unwrap(); + assert_eq!(friends.len(), 1); + assert_eq!(friends[0].pubkey, key); + } + + #[test] + fn create_profile_without_name_gets_generated_name() { + let seed = [99u8; 32]; + let name = crate::cmd::chat::names::generate_name_from_seed(&seed); + assert!(name.contains('_'), "generated name must contain underscore: {name}"); + let parts: Vec<&str> = name.splitn(3, '_').collect(); + assert_eq!(parts.len(), 3); + assert!(parts[2].chars().all(|c| c.is_ascii_digit())); + } + + #[test] + fn create_profile_user_name_preserved() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "named", Some("MyCustomName")).unwrap(); + assert_eq!(created.screen_name.as_deref(), Some("MyCustomName")); + let loaded = do_load_profile(tmp.path(), "named").unwrap(); + assert_eq!(loaded.screen_name.as_deref(), Some("MyCustomName")); + } + + #[test] + fn load_profile_derives_name_when_file_missing() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "missing-name", Some("Custom")).unwrap(); + fs::remove_file(tmp.path().join("missing-name").join("name")).unwrap(); + + let loaded = do_load_profile(tmp.path(), "missing-name").unwrap(); + let expected = 
crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(created.seed).public_key); + assert_eq!(loaded.screen_name.as_deref(), Some(expected.as_str())); + } + + #[test] + fn profile_create_no_screen_name_uses_pubkey() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "pubkey-create", None).unwrap(); + let expected = crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(created.seed).public_key); + assert_eq!(created.screen_name.as_deref(), Some(expected.as_str())); + } + + #[test] + fn profile_load_missing_name_uses_pubkey() { + let tmp = TempDir::new().unwrap(); + let created = do_create_profile(tmp.path(), "pubkey-load", Some("Shown")).unwrap(); + fs::remove_file(tmp.path().join("pubkey-load").join("name")).unwrap(); + + let loaded = do_load_profile(tmp.path(), "pubkey-load").unwrap(); + let expected = crate::cmd::chat::names::generate_name_from_seed(&KeyPair::from_seed(created.seed).public_key); + assert_eq!(loaded.screen_name.as_deref(), Some(expected.as_str())); + } +} diff --git a/peeroxide-cli/src/cmd/chat/publisher.rs b/peeroxide-cli/src/cmd/chat/publisher.rs new file mode 100644 index 0000000..8c9abc7 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/publisher.rs @@ -0,0 +1,418 @@ +//! Batched serial publisher for chat-channel messages. +//! +//! Owns `FeedState`, the feed-refresh task, and the rotation tick. Drains +//! a bounded mpsc of message jobs, accumulates each into a short window +//! (`batch_wait_ms`) up to `batch_size`, and publishes the whole batch +//! with a single chained set of network operations: +//! +//! 1. join_all immutable_put(message bytes) for every message in batch +//! (plus the summary block ciphertext, if eviction fired mid-batch) +//! 2. mutable_put(FeedRecord, final seq) with up to 3 retries +//! 3. announce on the next per-batch bucket +//! +//! This eliminates the per-message `tokio::spawn` race that allowed the +//! 
old code to advertise a FeedRecord whose referenced immutable_puts +//! had not yet propagated, which manifested at the receiver as `[late]` +//! gap-timeout releases when the immutable_get of a missing predecessor +//! could not be satisfied within the 5s window. + +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; +use std::time::Duration; + +use futures::future::join_all; +use peeroxide_dht::hyperdht::{HyperDhtHandle, KeyPair}; +use tokio::sync::{mpsc, watch}; +use tokio::task::JoinHandle; +use tokio::time::Instant; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::feed::{self, FeedState}; +use crate::cmd::chat::post::{prepare_one, Prepared}; +use crate::cmd::chat::probe; +use crate::cmd::chat::tui::{NoticeSink, StatusState}; + +/// Jobs the publisher accepts on its inbound queue. +pub enum PubJob { + /// A single text message to publish. + Message(String), +} + +static BATCH_COUNTER: AtomicU64 = AtomicU64::new(0); + +/// Retry schedule for the per-batch `mutable_put`. The publisher first +/// fires the FeedRecord update; on failure it waits each successive delay +/// and retries. If all attempts fail the batch's messages are still in +/// the DHT (immutables succeeded) and the next successful batch will +/// re-advertise them via the chain, so this is loss-tolerant. +const MUTABLE_PUT_RETRY_MS: [u64; 3] = [200, 500, 1000]; + +/// Rotation check interval — mirrors the cadence of the old in-line tick +/// that lived in `join.rs`. +const ROTATION_CHECK_INTERVAL: Duration = Duration::from_secs(30); + +/// Run the publisher worker to completion. +/// +/// On entry, performs the initial `mutable_put` of the empty FeedRecord +/// and spawns the periodic feed-refresh task. The worker exits cleanly +/// when `rx` is closed (i.e. all senders dropped), at which point the +/// feed-refresh task is aborted and the function returns. 
+#[allow(clippy::too_many_arguments)] +pub async fn run_publisher( + handle: HyperDhtHandle, + mut feed_state: FeedState, + id_keypair: KeyPair, + message_key: [u8; 32], + channel_key: [u8; 32], + screen_name: String, + mut rx: mpsc::Receiver, + batch_size: usize, + batch_wait_ms: u64, + status: Arc, + notices: NoticeSink, +) { + // Sanitize to non-pathological values. + let batch_size = batch_size.max(1); + let batch_wait = Duration::from_millis(batch_wait_ms); + + // --- Initial publish --- + let initial_data = feed_state.serialize_feed_record(); + if let Err(e) = handle + .mutable_put(&feed_state.feed_keypair, &initial_data, feed_state.seq) + .await + { + notices.send(format!("warning: initial feed publish failed: {e}")); + } + + let (refresh_tx, refresh_rx) = + watch::channel((initial_data.clone(), feed_state.seq)); + let mut refresh_handle: JoinHandle<()> = { + let h = handle.clone(); + let kp = feed_state.feed_keypair.clone(); + tokio::spawn(async move { + feed::run_feed_refresh(h, kp, refresh_rx).await; + }) + }; + let mut refresh_tx = refresh_tx; + + let mut rotation_check = tokio::time::interval(ROTATION_CHECK_INTERVAL); + rotation_check.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + // Burn the immediate first tick. + rotation_check.tick().await; + + loop { + tokio::select! { + biased; + // Rotation only fires when no inbound jobs are queued, so a + // rotation never splits the await chain of a batch. + _ = rotation_check.tick() => { + if feed_state.needs_rotation() { + rotate_feed( + &handle, + &mut feed_state, + &mut refresh_tx, + &mut refresh_handle, + ¬ices, + ) + .await; + } + } + maybe_first = rx.recv() => { + let Some(first) = maybe_first else { + // All senders dropped — stdin closed, caller shutting down. + refresh_handle.abort(); + return; + }; + let mut texts: Vec = Vec::with_capacity(batch_size); + push_text(&mut texts, first); + + // Accumulate up to batch_size or batch_wait timeout. 
+ let deadline = Instant::now() + batch_wait; + while texts.len() < batch_size { + let remaining = deadline.saturating_duration_since(Instant::now()); + if remaining.is_zero() { + break; + } + match tokio::time::timeout(remaining, rx.recv()).await { + Ok(Some(job)) => push_text(&mut texts, job), + Ok(None) => break, // senders dropped + Err(_) => break, // timeout + } + } + + publish_batch( + &handle, + &mut feed_state, + &id_keypair, + &message_key, + &channel_key, + &screen_name, + &refresh_tx, + texts, + &status, + ¬ices, + ) + .await; + } + } + } +} + +fn push_text(texts: &mut Vec, job: PubJob) { + match job { + PubJob::Message(text) => texts.push(text), + } +} + +/// Build, sign, encrypt every message in `texts` (chain-linked) then run +/// the immutable_put → mutable_put → announce pipeline serially. +#[allow(clippy::too_many_arguments)] +async fn publish_batch( + handle: &HyperDhtHandle, + feed_state: &mut FeedState, + id_keypair: &KeyPair, + message_key: &[u8; 32], + channel_key: &[u8; 32], + screen_name: &str, + refresh_tx: &watch::Sender<(Vec, u64)>, + texts: Vec, + status: &StatusState, + notices: &NoticeSink, +) { + // We will decrement `send_pending` by `text_count` at the tail of this + // function regardless of partial network failure — the user enqueued + // `text_count` messages and the batch is "done" once we've returned, + // even if some immutable_puts failed. The next batch will re-advertise + // via the FeedRecord chain (handled by the existing retry logic below). + let text_count = texts.len(); + let batch_n = BATCH_COUNTER.fetch_add(1, AtomicOrdering::Relaxed) + 1; + + // --- Phase 1: synchronous chain construction --- + let mut encrypted_blobs: Vec> = Vec::with_capacity(texts.len()); + let mut summary_blobs: Vec> = Vec::new(); + for text in &texts { + match prepare_one(feed_state, id_keypair, message_key, screen_name, text) { + Ok(Prepared { + encrypted, + summary_data, + .. 
+ }) => { + encrypted_blobs.push(encrypted); + if let Some(s) = summary_data { + summary_blobs.push(s); + } + } + Err(e) => { + notices.send(format!("error: failed to prepare message: {e}")); + } + } + } + + if encrypted_blobs.is_empty() { + // Nothing to publish (every prepare_one errored) — still acknowledge + // the texts so send_pending doesn't pin forever. + for _ in 0..text_count { + status.dec_send_pending(); + } + return; + } + + feed_state.seq += 1; + let feed_record_data = feed_state.serialize_feed_record(); + let seq = feed_state.seq; + let msg_count = feed_state.msg_count; + let feed_kp = feed_state.feed_keypair.clone(); + let epoch = crypto::current_epoch(); + let bucket = feed_state.next_bucket(); + let topic = crypto::announce_topic(channel_key, epoch, bucket); + + if probe::is_enabled() { + notices.send(format!( + "[probe] batch#{batch_n} messages={} summary_blocks={} seq={seq}", + encrypted_blobs.len(), + summary_blobs.len(), + )); + } + + // --- Phase 2: all immutable_puts in parallel; await all --- + let put_start = Instant::now(); + let mut put_futures = Vec::with_capacity(encrypted_blobs.len() + summary_blobs.len()); + for blob in encrypted_blobs.iter().chain(summary_blobs.iter()) { + let h = handle.clone(); + let bytes = blob.clone(); + put_futures.push(tokio::spawn(async move { h.immutable_put(&bytes).await })); + } + + let put_results = join_all(put_futures).await; + let mut put_failed = 0usize; + for r in &put_results { + match r { + Ok(Ok(_)) => {} + Ok(Err(e)) => { + notices.send(format!("warning: immutable_put failed: {e}")); + put_failed += 1; + } + Err(e) => { + notices.send(format!("warning: immutable_put task panicked: {e}")); + put_failed += 1; + } + } + } + if probe::is_enabled() { + notices.send(format!( + "[probe] batch#{batch_n} immutable_put_done elapsed_ms={} failed={}", + put_start.elapsed().as_millis(), + put_failed, + )); + } + + // --- Phase 3: mutable_put with retry; only advertise after immutables --- + let mut 
mput_attempts = 0usize; + let mput_start = Instant::now(); + let mput_ok = loop { + mput_attempts += 1; + match handle.mutable_put(&feed_kp, &feed_record_data, seq).await { + Ok(_) => { + debug::log_event( + "Feed record update", + "mutable_put", + &format!( + "feed_pubkey={}, seq={seq}, msg_count={msg_count}", + debug::short_key(&feed_kp.public_key), + ), + ); + break true; + } + Err(e) => { + if let Some(delay_ms) = MUTABLE_PUT_RETRY_MS.get(mput_attempts - 1) { + notices.send(format!( + "warning: mutable_put failed (attempt {mput_attempts}/{}): {e}; retrying in {delay_ms}ms", + MUTABLE_PUT_RETRY_MS.len() + 1, + )); + tokio::time::sleep(Duration::from_millis(*delay_ms)).await; + } else { + notices.send(format!( + "warning: mutable_put failed after {mput_attempts} attempts: {e}; batch's FeedRecord left unadvertised, next batch will re-advertise via chain" + )); + break false; + } + } + } + }; + if probe::is_enabled() { + notices.send(format!( + "[probe] batch#{batch_n} mutable_put_done elapsed_ms={} attempts={mput_attempts} ok={mput_ok}", + mput_start.elapsed().as_millis(), + )); + } + + // Tell the feed-refresh task the new (data, seq) pair regardless of put + // success — refresh will retry on its own cadence and a future success + // is what users care about. + let _ = refresh_tx.send((feed_record_data, seq)); + + // --- Phase 4: announce --- + let ann_start = Instant::now(); + let _ = handle.announce(topic, &feed_kp, &[]).await; + debug::log_event( + "Channel announce", + "announce", + &format!( + "feed_pubkey={}, epoch={epoch}, bucket={bucket}, topic={}", + debug::short_key(&feed_kp.public_key), + debug::short_key(&topic), + ), + ); + if probe::is_enabled() { + notices.send(format!( + "[probe] batch#{batch_n} announce_done elapsed_ms={}", + ann_start.elapsed().as_millis(), + )); + } + + // Acknowledge all enqueued messages — they're now either on the DHT or + // their FeedRecord update is pending retry on the next batch. 
+ for _ in 0..text_count { + status.dec_send_pending(); + } +} + +/// Rotate the feed keypair, publishing the new feed first and then +/// updating the old feed with `next_feed_pubkey` so readers can follow. +async fn rotate_feed( + handle: &HyperDhtHandle, + feed_state: &mut FeedState, + refresh_tx: &mut watch::Sender<(Vec, u64)>, + refresh_handle: &mut JoinHandle<()>, + notices: &NoticeSink, +) { + let mut new_fs = feed_state.rotate(); + + let new_data = new_fs.serialize_feed_record(); + let new_kp = new_fs.feed_keypair.clone(); + let new_seq = new_fs.seq; + + if let Err(e) = handle.mutable_put(&new_kp, &new_data, new_seq).await { + notices.send(format!( + "warning: feed rotation failed (new feed publish), will retry: {e}" + )); + // Roll back the pointer set during rotate() so we retry cleanly next tick. + feed_state.next_feed_pubkey = [0u8; 32]; + return; + } + debug::log_event( + "Feed rotation (new)", + "mutable_put", + &format!( + "new_feed_pubkey={}, old_feed_pubkey={}", + debug::short_key(&new_kp.public_key), + debug::short_key(&feed_state.feed_keypair.public_key), + ), + ); + + // Publish the old feed one last time so readers can discover the pointer. + let old_record = feed_state.serialize_feed_record(); + feed_state.seq += 1; + let old_seq = feed_state.seq; + let old_kp = feed_state.feed_keypair.clone(); + if let Err(e) = handle.mutable_put(&old_kp, &old_record, old_seq).await { + tracing::warn!("rotation: old feed update failed (non-fatal): {e}"); + } else { + debug::log_event( + "Feed rotation (old ptr)", + "mutable_put", + &format!( + "old_feed_pubkey={}, seq={old_seq}, next_feed={}", + debug::short_key(&old_kp.public_key), + debug::short_key(&new_kp.public_key), + ), + ); + } + + // Spawn the overlap refresh so the old feed stays alive long enough + // for in-flight readers to follow the pointer. 
+ let overlap_h = handle.clone(); + let overlap_kp = old_kp.clone(); + let overlap_data = old_record.clone(); + let overlap_seq = old_seq; + tokio::spawn(async move { + feed::run_rotation_overlap_refresh(overlap_h, overlap_kp, overlap_data, overlap_seq).await; + }); + + // Tear down the old refresh task and start a new one for the new feed. + refresh_handle.abort(); + let (new_tx, new_rx) = watch::channel((new_data.clone(), new_seq)); + *refresh_tx = new_tx; + *refresh_handle = { + let h = handle.clone(); + let kp = new_kp.clone(); + tokio::spawn(async move { + feed::run_feed_refresh(h, kp, new_rx).await; + }) + }; + + // Swap in the new state. + std::mem::swap(feed_state, &mut new_fs); + notices.send("*** feed keypair rotated"); +} diff --git a/peeroxide-cli/src/cmd/chat/reader.rs b/peeroxide-cli/src/cmd/chat/reader.rs new file mode 100644 index 0000000..6f8f5f0 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/reader.rs @@ -0,0 +1,907 @@ +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +use futures::future::join_all; +use peeroxide_dht::crypto::hash; +use peeroxide_dht::hyperdht::HyperDhtHandle; +use tokio::sync::mpsc; +use tokio::time::{Duration, Instant}; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::debug; +use crate::cmd::chat::display::DisplayMessage; +use crate::cmd::chat::known_users; +use crate::cmd::chat::ordering::{chain_sort, ChainGate, DedupRing, PendingMessage, SubmitOutcome}; +use crate::cmd::chat::probe; +use crate::cmd::chat::tui::{DhtActivityGuard, RecvFetchGuard, StatusState}; +use crate::cmd::chat::wire::{self, FeedRecord, MessageEnvelope, SummaryBlock}; + +/// Wrap a `HyperDhtHandle::immutable_get` (a message-blob or summary-block +/// fetch). 
Holds **both** guards: +/// +/// - `DhtActivityGuard` lights the activity dot +/// - `RecvFetchGuard` increments the user-facing `Receiving (N)` counter +/// +/// These are downloads of actual content the reader has *committed* to +/// fetch — either listed in a FeedRecord's `msg_hashes`, walked from a +/// summary chain, or refetched as a missing predecessor. +async fn tracked_immutable_get( + handle: &HyperDhtHandle, + hash: [u8; 32], + status: &Arc, +) -> Result>, peeroxide_dht::hyperdht::HyperDhtError> { + let _dht = DhtActivityGuard::new(status.clone()); + let _recv = RecvFetchGuard::new(status.clone()); + handle.immutable_get(hash).await +} + +/// Wrap a `HyperDhtHandle::mutable_get` (FeedRecord fetch). Holds only the +/// activity-dot guard — this is a background "check for new content" scan, +/// not yet a confirmed inbound message, so it does **not** light the +/// "Receiving" indicator. If the fetched FeedRecord exposes new +/// `msg_hashes`, the subsequent `tracked_immutable_get`s will surface as +/// "Receiving". +async fn tracked_mutable_get( + handle: &HyperDhtHandle, + pubkey: &[u8; 32], + seq: u64, + status: &Arc, +) -> Result, peeroxide_dht::hyperdht::HyperDhtError> +{ + let _dht = DhtActivityGuard::new(status.clone()); + handle.mutable_get(pubkey, seq).await +} + +/// Wrap a `HyperDhtHandle::lookup` (announce-topic scan for peers). Only +/// bumps the activity dot; lookups are pure discovery, not content fetches. 
+async fn tracked_lookup( + handle: &HyperDhtHandle, + topic: [u8; 32], + status: &Arc, +) -> Result, peeroxide_dht::hyperdht::HyperDhtError> { + let _dht = DhtActivityGuard::new(status.clone()); + handle.lookup(topic).await +} + +struct KnownFeed { + id_pubkey: [u8; 32], + last_seq: u64, + last_msg_hash: [u8; 32], + last_summary_hash_seen: [u8; 32], + last_active: Instant, + last_message_time: Instant, + next_poll: Instant, +} + +impl KnownFeed { + fn new() -> Self { + let now = Instant::now(); + Self { + id_pubkey: [0u8; 32], + last_seq: 0, + last_msg_hash: [0u8; 32], + last_summary_hash_seen: [0u8; 32], + last_active: now, + last_message_time: now, + next_poll: now, + } + } + + fn poll_interval(&self) -> Duration { + let since_msg = self.last_message_time.elapsed().as_secs(); + match since_msg { + 0..=59 => Duration::from_secs(1), + 60..=119 => Duration::from_secs(2), + 120..=179 => Duration::from_secs(3), + 180..=300 => Duration::from_secs(5), + _ => Duration::from_secs(10), + } + } + + fn schedule_next_poll(&mut self) { + self.next_poll = Instant::now() + self.poll_interval(); + } +} + +const MAX_SUMMARY_DEPTH: usize = 100; +const FEED_EXPIRY_SECS: u64 = 20 * 60; +const DISCOVERY_INTERVAL_SECS: u64 = 8; +const GAP_TIMEOUT: Duration = Duration::from_secs(5); +const REFETCH_SCHEDULE_MS: [u64; 4] = [0, 500, 1500, 3000]; + +struct RefetchResult { + hash: [u8; 32], + owner: [u8; 32], + feed_pubkey: [u8; 32], + data: Option>, +} + +fn spawn_refetch( + handle: HyperDhtHandle, + hash: [u8; 32], + owner: [u8; 32], + feed_pubkey: [u8; 32], + tx: mpsc::UnboundedSender, + status: Arc, +) { + tokio::spawn(async move { + for delay_ms in REFETCH_SCHEDULE_MS { + if delay_ms > 0 { + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + } + if let Ok(Some(data)) = tracked_immutable_get(&handle, hash, &status).await { + let _ = tx.send(RefetchResult { + hash, + owner, + feed_pubkey, + data: Some(data), + }); + return; + } + } + let _ = tx.send(RefetchResult { + hash, + 
owner, + feed_pubkey, + data: None, + }); + }); +} + +fn decode_envelope( + message_key: &[u8; 32], + data: &[u8], + owner_pubkey: &[u8; 32], +) -> Option { + let plaintext = wire::decrypt_message(message_key, data).ok()?; + let env = MessageEnvelope::deserialize(&plaintext).ok()?; + if !env.verify() || env.id_pubkey != *owner_pubkey { + return None; + } + Some(env) +} + +fn envelope_to_pending( + env: MessageEnvelope, + msg_hash: [u8; 32], + feed_pubkey: [u8; 32], + self_id_pubkey: &[u8; 32], +) -> PendingMessage { + let prev_msg_hash = env.prev_msg_hash; + let id_pubkey = env.id_pubkey; + let is_self = id_pubkey == *self_id_pubkey; + PendingMessage { + display: DisplayMessage { + id_pubkey, + screen_name: env.screen_name, + content: env.content, + timestamp: env.timestamp, + is_self, + late: false, + }, + msg_hash, + prev_msg_hash, + feed_pubkey, + } +} + +#[allow(clippy::too_many_arguments)] +fn submit_to_gate( + gate: &mut ChainGate, + msg: PendingMessage, + dedup: &mut DedupRing, + msg_tx: &mpsc::UnboundedSender, + pending_refetches: &mut HashSet<[u8; 32]>, + refetch_tx: &mpsc::UnboundedSender, + handle: &HyperDhtHandle, + status: &Arc, +) { + let id = msg.display.id_pubkey; + let feed_pubkey = msg.feed_pubkey; + if let SubmitOutcome::Buffered { + missing_predecessor, + } = gate.submit(msg, dedup, msg_tx) + { + if pending_refetches.insert(missing_predecessor) { + spawn_refetch( + handle.clone(), + missing_predecessor, + id, + feed_pubkey, + refetch_tx.clone(), + status.clone(), + ); + } + } +} + +#[allow(clippy::too_many_arguments)] +pub async fn run_reader( + handle: HyperDhtHandle, + channel_key: [u8; 32], + message_key: [u8; 32], + msg_tx: mpsc::UnboundedSender, + profile_name: String, + self_feed_pubkey: Option<[u8; 32]>, + self_id_pubkey: [u8; 32], + status: Arc, +) { + let mut known_feeds: HashMap<[u8; 32], KnownFeed> = HashMap::new(); + let mut dedup = DedupRing::with_default_capacity(); + let mut backlog: Vec = Vec::new(); + let mut gate = 
ChainGate::new(); + let mut pending_refetches: HashSet<[u8; 32]> = HashSet::new(); + let (refetch_tx, mut refetch_rx) = mpsc::unbounded_channel::(); + + if let Some(pk) = self_feed_pubkey { + known_feeds.insert(pk, KnownFeed::new()); + } + + // --- Cold-start: concurrent discovery across all epochs/buckets --- + let current_epoch = crypto::current_epoch(); + let scan_start = current_epoch.saturating_sub(19); + + let lookup_futures: Vec<_> = (scan_start..=current_epoch) + .flat_map(|epoch| (0..4u8).map(move |bucket| (epoch, bucket))) + .map(|(epoch, bucket)| { + let h = handle.clone(); + let topic = crypto::announce_topic(&channel_key, epoch, bucket); + let status = status.clone(); + async move { (epoch, bucket, tracked_lookup(&h, topic, &status).await) } + }) + .collect(); + + for (epoch, bucket, result) in join_all(lookup_futures).await { + if let Ok(results) = result { + let peer_count: usize = results.iter().map(|r| r.peers.len()).sum(); + if debug::is_enabled() && peer_count > 0 { + debug::log_event( + "Channel scan", + "lookup", + &format!("epoch={epoch}, bucket={bucket}, results={peer_count}"), + ); + } + for result in &results { + for peer in &result.peers { + known_feeds.entry(peer.public_key).or_insert_with(KnownFeed::new); + } + } + } + } + + // --- Cold-start: fetch all feed records concurrently --- + let feed_pks: Vec<[u8; 32]> = known_feeds.keys().copied().collect(); + let mget_futures: Vec<_> = feed_pks + .iter() + .map(|pk| { + let h = handle.clone(); + let pk = *pk; + let status = status.clone(); + async move { (pk, tracked_mutable_get(&h, &pk, 0, &status).await) } + }) + .collect(); + + for (feed_pk, result) in join_all(mget_futures).await { + if let Ok(Some(mget)) = result { + if let Ok(record) = FeedRecord::deserialize(&mget.value) { + if !crypto::verify_ownership_proof( + &record.id_pubkey, + &feed_pk, + &channel_key, + &record.ownership_proof, + ) { + continue; + } + + debug::log_event( + "Feed record discovered", + "mutable_get", + &format!( + 
"feed_pubkey={}, id_pubkey={}, msg_count={}, next_feed={}", + debug::short_key(&feed_pk), + debug::short_key(&record.id_pubkey), + record.msg_count, + debug::short_key(&record.next_feed_pubkey), + ), + ); + + if let Some(feed_info) = known_feeds.get_mut(&feed_pk) { + feed_info.id_pubkey = record.id_pubkey; + feed_info.last_seq = mget.seq; + } + + let msgs = fetch_and_validate_messages( + &handle, + &message_key, + &record.msg_hashes, + &record.id_pubkey, + feed_pk, + &mut dedup, + &profile_name, + &self_id_pubkey, + &status, + ) + .await; + + if let Some(newest_hash) = record.msg_hashes.first() { + if let Some(feed_info) = known_feeds.get_mut(&feed_pk) { + feed_info.last_msg_hash = *newest_hash; + } + } + + backlog.extend(msgs); + + fetch_summary_history( + &handle, + &message_key, + record.summary_hash, + &record.id_pubkey, + feed_pk, + &mut dedup, + &mut backlog, + &profile_name, + &self_id_pubkey, + &status, + ) + .await; + if let Some(feed_info) = known_feeds.get_mut(&feed_pk) { + feed_info.last_summary_hash_seen = record.summary_hash; + } + } + } + } + + for msg in chain_sort(backlog) { + submit_to_gate( + &mut gate, + msg, + &mut dedup, + &msg_tx, + &mut pending_refetches, + &refetch_tx, + &handle, + &status, + ); + } + + // Initial status snapshot now that cold-start has populated state. + // `recv_pending` is the live in-flight `immutable_get` count managed by + // `tracked_immutable_get`/`RecvFetchGuard`; we don't touch it here. 
+ status.set_feed_count(known_feeds.len()); + + let _ = msg_tx.send(DisplayMessage { + id_pubkey: [0u8; 32], + screen_name: String::new(), + content: String::new(), + timestamp: 0, + is_self: false, + late: false, + }); + + // --- Steady-state: discovery and feed polling run independently --- + + // Discovery task: runs on its own timer, sends newly-found feed pubkeys + let (disc_tx, mut disc_rx) = mpsc::unbounded_channel::<[u8; 32]>(); + { + let handle = handle.clone(); + let status = status.clone(); + tokio::spawn(async move { + run_discovery(handle, channel_key, disc_tx, status).await; + }); + } + + let mut expiry_tick = tokio::time::interval(Duration::from_secs(1)); + expiry_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + + // Feed polling loop: wakes on its own adaptive schedule, receives new feeds from discovery + loop { + let now = Instant::now(); + let earliest_feed_poll = known_feeds.values().map(|f| f.next_poll).min(); + let wake_at = earliest_feed_poll.unwrap_or(now + Duration::from_secs(1)); + + tokio::select! { + _ = tokio::time::sleep_until(wake_at) => {} + _ = expiry_tick.tick() => { + let abandoned = gate.expire(Instant::now(), GAP_TIMEOUT, &mut dedup, &msg_tx); + for hash in abandoned { + pending_refetches.remove(&hash); + } + continue; + } + Some(result) = refetch_rx.recv() => { + pending_refetches.remove(&result.hash); + if let Some(data) = result.data { + if hash(&data) == result.hash { + if let Some(env) = + decode_envelope(&message_key, &data, &result.owner) + { + // Do not pre-insert into the dedup ring here — the + // gate will insert on release. Pre-inserting would + // make submit_to_gate reject this very message as + // duplicate. 
+ let pm = envelope_to_pending( + env, + result.hash, + result.feed_pubkey, + &self_id_pubkey, + ); + submit_to_gate( + &mut gate, + pm, + &mut dedup, + &msg_tx, + &mut pending_refetches, + &refetch_tx, + &handle, + &status, + ); + } + } + } + continue; + } + pk = disc_rx.recv() => { + if let Some(pk) = pk { + known_feeds + .entry(pk) + .and_modify(|f| f.last_active = Instant::now()) + .or_insert_with(KnownFeed::new); + } + // Drain any additional queued discoveries without blocking + while let Ok(pk) = disc_rx.try_recv() { + known_feeds + .entry(pk) + .and_modify(|f| f.last_active = Instant::now()) + .or_insert_with(KnownFeed::new); + } + status.set_feed_count(known_feeds.len()); + continue; + } + } + + // Drain any discoveries that arrived while we were sleeping + while let Ok(pk) = disc_rx.try_recv() { + known_feeds + .entry(pk) + .and_modify(|f| f.last_active = Instant::now()) + .or_insert_with(KnownFeed::new); + } + + // Expire feeds inactive for longer than DHT TTL + let now = Instant::now(); + known_feeds + .retain(|_pk, f| now.duration_since(f.last_active).as_secs() < FEED_EXPIRY_SECS); + + // --- Feed polling: fetch all due feeds concurrently --- + let due_feeds: Vec<([u8; 32], u64)> = known_feeds + .iter() + .filter(|(_pk, f)| f.next_poll <= now) + .map(|(pk, f)| (*pk, f.last_seq)) + .collect(); + + if due_feeds.is_empty() { + continue; + } + + if debug::is_enabled() { + debug::log_event( + "Feed poll batch", + "mutable_get", + &format!("feeds_due={}, total_known={}", due_feeds.len(), known_feeds.len()), + ); + } + + let poll_start = Instant::now(); + let poll_futures: Vec<_> = due_feeds + .iter() + .map(|(pk, cached_seq)| { + let h = handle.clone(); + let pk = *pk; + let seq = *cached_seq; + let status = status.clone(); + async move { (pk, tracked_mutable_get(&h, &pk, seq, &status).await) } + }) + .collect(); + + let poll_results = join_all(poll_futures).await; + + if debug::is_enabled() { + let elapsed_ms = poll_start.elapsed().as_millis(); + let 
updated: usize = poll_results + .iter() + .filter(|(_, r)| matches!(r, Ok(Some(_)))) + .count(); + debug::log_event( + "Feed poll complete", + "mutable_get", + &format!( + "elapsed={}ms, polled={}, updated={}", + elapsed_ms, + due_feeds.len(), + updated + ), + ); + } + + for (feed_pk, result) in poll_results { + let feed_info = match known_feeds.get_mut(&feed_pk) { + Some(f) => f, + None => continue, + }; + + match result { + Ok(Some(mget)) => { + if mget.seq <= feed_info.last_seq { + feed_info.schedule_next_poll(); + continue; + } + feed_info.last_seq = mget.seq; + feed_info.last_active = Instant::now(); + feed_info.last_message_time = Instant::now(); + feed_info.schedule_next_poll(); + + if let Ok(record) = FeedRecord::deserialize(&mget.value) { + if !crypto::verify_ownership_proof( + &record.id_pubkey, + &feed_pk, + &channel_key, + &record.ownership_proof, + ) { + continue; + } + + let first_discovery = feed_info.id_pubkey == [0u8; 32]; + if first_discovery { + feed_info.id_pubkey = record.id_pubkey; + } else if record.id_pubkey != feed_info.id_pubkey { + continue; + } + + debug::log_event( + "Feed record discovered", + "mutable_get", + &format!( + "feed_pubkey={}, id_pubkey={}, msg_count={}, next_feed={}", + debug::short_key(&feed_pk), + debug::short_key(&record.id_pubkey), + record.msg_count, + debug::short_key(&record.next_feed_pubkey), + ), + ); + + let owner_pubkey = feed_info.id_pubkey; + let next_feed = record.next_feed_pubkey; + + if next_feed != [0u8; 32] { + known_feeds.entry(next_feed).or_insert_with(KnownFeed::new); + } + + let msgs = fetch_and_validate_messages( + &handle, + &message_key, + &record.msg_hashes, + &owner_pubkey, + feed_pk, + &mut dedup, + &profile_name, + &self_id_pubkey, + &status, + ) + .await; + + if let Some(newest_hash) = record.msg_hashes.first() { + if let Some(fi) = known_feeds.get_mut(&feed_pk) { + fi.last_msg_hash = *newest_hash; + } + } + + for msg in msgs.into_iter().rev() { + submit_to_gate( + &mut gate, + msg, + &mut 
dedup, + &msg_tx, + &mut pending_refetches, + &refetch_tx, + &handle, + &status, + ); + } + + let prior_summary_hash = known_feeds + .get(&feed_pk) + .map(|f| f.last_summary_hash_seen) + .unwrap_or([0u8; 32]); + let summary_changed = record.summary_hash != prior_summary_hash + && record.summary_hash != [0u8; 32]; + + if first_discovery || summary_changed { + let mut history = Vec::new(); + fetch_summary_history( + &handle, + &message_key, + record.summary_hash, + &owner_pubkey, + feed_pk, + &mut dedup, + &mut history, + &profile_name, + &self_id_pubkey, + &status, + ) + .await; + for msg in chain_sort(history) { + submit_to_gate( + &mut gate, + msg, + &mut dedup, + &msg_tx, + &mut pending_refetches, + &refetch_tx, + &handle, + &status, + ); + } + if let Some(fi) = known_feeds.get_mut(&feed_pk) { + fi.last_summary_hash_seen = record.summary_hash; + } + } + } + } + _ => { + feed_info.schedule_next_poll(); + } + } + } + + // End-of-iteration: refresh the feed count for the bar. + // `recv_pending` is tracked live by `tracked_immutable_get` / + // `RecvFetchGuard` around each DHT round-trip. + status.set_feed_count(known_feeds.len()); + } +} + +/// Independent discovery task: scans channel topic buckets on a timer, +/// sends newly-found feed pubkeys to the polling loop. 
+async fn run_discovery( + handle: HyperDhtHandle, + channel_key: [u8; 32], + disc_tx: mpsc::UnboundedSender<[u8; 32]>, + status: Arc, +) { + let mut interval = tokio::time::interval(Duration::from_secs(DISCOVERY_INTERVAL_SECS)); + + loop { + interval.tick().await; + + let current_epoch = crypto::current_epoch(); + let epochs = [current_epoch, current_epoch.saturating_sub(1)]; + let disc_start = Instant::now(); + + let lookup_futures: Vec<_> = epochs + .iter() + .flat_map(|&epoch| (0..4u8).map(move |bucket| (epoch, bucket))) + .map(|(epoch, bucket)| { + let h = handle.clone(); + let topic = crypto::announce_topic(&channel_key, epoch, bucket); + let status = status.clone(); + async move { (epoch, bucket, tracked_lookup(&h, topic, &status).await) } + }) + .collect(); + + let mut new_feeds = 0u32; + for (epoch, bucket, result) in join_all(lookup_futures).await { + if let Ok(results) = result { + let peer_count: usize = results.iter().map(|r| r.peers.len()).sum(); + if debug::is_enabled() && peer_count > 0 { + debug::log_event( + "Channel scan", + "lookup", + &format!("epoch={epoch}, bucket={bucket}, results={peer_count}"), + ); + } + for result in &results { + for peer in &result.peers { + if disc_tx.send(peer.public_key).is_err() { + return; // polling loop dropped, shut down + } + new_feeds += 1; + } + } + } + } + + if debug::is_enabled() { + debug::log_event( + "Discovery scan complete", + "lookup", + &format!( + "elapsed={}ms, feeds_sent={}", + disc_start.elapsed().as_millis(), + new_feeds + ), + ); + } + } +} + +/// Validates and fetches messages from a newest-first hash list. +/// Chain validation: each message's prev_msg_hash must equal the hash of the +/// next-older message in the list (msg_hashes[i+1]). 
+#[allow(clippy::too_many_arguments)] +async fn fetch_and_validate_messages( + handle: &HyperDhtHandle, + message_key: &[u8; 32], + msg_hashes: &[[u8; 32]], + owner_pubkey: &[u8; 32], + feed_pubkey: [u8; 32], + dedup: &mut DedupRing, + profile_name: &str, + self_id_pubkey: &[u8; 32], + status: &Arc, +) -> Vec { + let _ = profile_name; + let mut messages = Vec::new(); + + // Fetch all unseen messages concurrently + let unseen: Vec<(usize, [u8; 32])> = msg_hashes + .iter() + .enumerate() + .filter(|(_, h)| !dedup.contains(h)) + .map(|(i, h)| (i, *h)) + .collect(); + + if probe::is_enabled() { + crate::cmd::chat::tui::emit_notice(format!( + "[probe] fetch_batch msg_hashes_total={} unseen={}", + msg_hashes.len(), + unseen.len(), + )); + } + + if unseen.is_empty() { + return messages; + } + + let fetch_futures: Vec<_> = unseen + .iter() + .map(|(i, hash)| { + let h = handle.clone(); + let hash = *hash; + let idx = *i; + let status = status.clone(); + async move { + let result = tracked_immutable_get(&h, hash, &status).await; + (idx, hash, result) + } + }) + .collect(); + + let mut fetched: HashMap, [u8; 32])> = HashMap::new(); + for (idx, hash, result) in join_all(fetch_futures).await { + if let Ok(Some(data)) = result { + fetched.insert(idx, (data, hash)); + } + } + + // Validate in order (chain validation requires sequential check) + let mut expected_next_hash: Option<[u8; 32]> = None; + for (i, msg_hash) in msg_hashes.iter().enumerate() { + if dedup.contains(msg_hash) { + expected_next_hash = None; + continue; + } + let (data, _) = match fetched.get(&i) { + Some(d) => d, + None => continue, + }; + if let Ok(plaintext) = wire::decrypt_message(message_key, data) { + if let Ok(env) = MessageEnvelope::deserialize(&plaintext) { + if !env.verify() { + continue; + } + if env.id_pubkey != *owner_pubkey { + continue; + } + if let Some(expected) = expected_next_hash { + if *msg_hash != expected { + expected_next_hash = None; + continue; + } + } + + let expected_prev = if i + 1 
< msg_hashes.len() { + msg_hashes[i + 1] + } else { + [0u8; 32] + }; + if env.prev_msg_hash != expected_prev && expected_prev != [0u8; 32] { + continue; + } + + expected_next_hash = Some(env.prev_msg_hash); + + // NB: do not insert into `dedup` here. The shared ring is + // populated by `ChainGate::release` so the gate's duplicate + // check operates on hashes that have actually been emitted + // to display. Inserting here would mask future late/replay + // arrivals from the gate's chain logic. + debug::log_event( + "Message received", + "immutable_get", + &format!( + "msg_hash={}, author={}, prev_hash={}, ts={}, content_type=0x{:02x}", + debug::short_key(msg_hash), + debug::short_key(&env.id_pubkey), + debug::short_key(&env.prev_msg_hash), + env.timestamp, + env.content_type, + ), + ); + let _ = known_users::update_shared(&env.id_pubkey, &env.screen_name); + let prev_msg_hash = env.prev_msg_hash; + messages.push(PendingMessage { + display: DisplayMessage { + id_pubkey: env.id_pubkey, + screen_name: env.screen_name, + content: env.content, + timestamp: env.timestamp, + is_self: env.id_pubkey == *self_id_pubkey, + late: false, + }, + msg_hash: *msg_hash, + prev_msg_hash, + feed_pubkey, + }); + } + } + } + messages +} + +#[allow(clippy::too_many_arguments)] +async fn fetch_summary_history( + handle: &HyperDhtHandle, + message_key: &[u8; 32], + mut summary_hash: [u8; 32], + owner_pubkey: &[u8; 32], + feed_pubkey: [u8; 32], + dedup: &mut DedupRing, + backlog: &mut Vec, + profile_name: &str, + self_id_pubkey: &[u8; 32], + status: &Arc, +) { + let mut depth = 0; + while summary_hash != [0u8; 32] && depth < MAX_SUMMARY_DEPTH { + depth += 1; + let data = match tracked_immutable_get(handle, summary_hash, status).await { + Ok(Some(d)) => d, + _ => break, + }; + let block = match SummaryBlock::deserialize(&data) { + Ok(b) => b, + _ => break, + }; + if !block.verify() || block.id_pubkey != *owner_pubkey { + break; + } + + let reversed: Vec<[u8; 32]> = 
block.msg_hashes.iter().rev().copied().collect(); + let msgs = fetch_and_validate_messages( + handle, + message_key, + &reversed, + owner_pubkey, + feed_pubkey, + dedup, + profile_name, + self_id_pubkey, + status, + ) + .await; + backlog.extend(msgs); + + summary_hash = block.prev_summary_hash; + } +} diff --git a/peeroxide-cli/src/cmd/chat/session.rs b/peeroxide-cli/src/cmd/chat/session.rs new file mode 100644 index 0000000..12333e4 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/session.rs @@ -0,0 +1,683 @@ +//! Generic chat session orchestration shared by `chat join` and `chat dm`. +//! +//! A "chat session" is the long-running attached state for a single +//! channel: the spawned reader / publisher / nexus / friend-refresh / +//! inbox-monitor / dht-status tasks, the main `select!` loop that turns +//! incoming messages, system notices, and UI events into actions, and +//! the orderly shutdown sequence. +//! +//! Both `chat join` and `chat dm` build a [`SessionConfig`] and call +//! [`run`]. The DM-specific behaviour (initial inbox invite, per-post +//! nudge, optional invite retraction on shutdown) is gated behind +//! `config.dm.is_some()` and runs in a small dedicated `dm_nudge` task +//! so the rest of the orchestration stays channel-agnostic. 
+ +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::mpsc; +use tokio::task::JoinHandle; + +use crate::cmd::chat::crypto; +use crate::cmd::chat::display; +use crate::cmd::chat::feed; +use crate::cmd::chat::inbox; +use crate::cmd::chat::known_users::SharedKnownUsers; +use crate::cmd::chat::profile::{self, Profile}; +use crate::cmd::chat::publisher::{self, PubJob}; +use crate::cmd::chat::reader; +use crate::cmd::chat::tui::{ + self, ChatUi, IgnoreSet, NoticeSink, SlashCommand, StatusState, UiInput, UiOptions, commands, +}; +use crate::cmd::{build_dht_config, sigterm_recv}; +use crate::config::ResolvedConfig; + +use libudx::UdxRuntime; +use peeroxide_dht::hyperdht::{self, KeyPair}; + +/// All inputs needed to run one chat-session lifecycle. `chat join` +/// builds this from a channel name + optional salt; `chat dm` builds it +/// from a recipient pubkey + DM-specific extras. +pub struct SessionConfig { + /// Compact human-readable label for the status bar's channel-name + /// field (e.g. `"#room"` or `"DM:alice@abc12345"`). Distinct from + /// any topic / key value. + pub bar_name: String, + /// One-line system notice shown after the DHT is bootstrapped to + /// announce the user has joined this session. e.g. + /// `"*** joining channel '#room'"` or `"*** DM with alice (abc12345)"`. + pub greeting: String, + /// 32-byte channel key (`channel_key(name, salt)` or + /// `dm_channel_key(me, them)`). Drives the announce topic schedule + /// and (for non-DM channels) the message encryption key. + pub channel_key: [u8; 32], + /// 32-byte symmetric message-envelope encryption key + /// (`msg_key(channel_key)` for plain channels, `dm_msg_key(ecdh, + /// channel_key)` for DMs). + pub message_key: [u8; 32], + /// Profile name used for friends file, slash command resolution, + /// nexus refresh. + pub profile: String, + /// Already-loaded profile (avoids the session having to reload). + pub prof: Profile, + /// Identity keypair derived from `prof.seed`. 
+ pub id_keypair: KeyPair, + + pub read_only: bool, + pub no_nexus: bool, + pub no_friends: bool, + pub no_inbox: bool, + pub feed_lifetime: u64, + pub batch_size: usize, + pub batch_wait_ms: u64, + pub inbox_poll_interval: u64, + pub stay_after_eof: bool, + pub line_mode: bool, + + /// DM-specific extras. `Some(_)` activates the inbox-invite send, + /// per-post nudge, and best-effort invite retraction on shutdown. + pub dm: Option, +} + +/// DM-specific session config, carried inside [`SessionConfig::dm`]. +pub struct DmExtras { + /// Recipient identity public key. + pub recipient_pubkey: [u8; 32], + /// Optional initial-message lure included in the first inbox invite + /// sent on session startup. None = silent invite. + pub initial_message: Option, +} + +/// Run one chat session to completion. Returns the process exit code +/// (typically 0 on a clean shutdown, non-zero on a fatal startup error). +pub async fn run(config: SessionConfig, cfg: &ResolvedConfig) -> i32 { + let SessionConfig { + bar_name, + greeting, + channel_key, + message_key, + profile: profile_name, + prof, + id_keypair, + read_only, + no_nexus, + no_friends, + no_inbox, + feed_lifetime, + batch_size, + batch_wait_ms, + inbox_poll_interval, + stay_after_eof, + line_mode, + dm, + } = config; + + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: failed to create UDP runtime: {e}"); + return 1; + } + }; + + // --- ChatUi construction --- + // + // Constructed BEFORE the DHT handshake so all subsequent startup + // notices flow through the UI in proper layout instead of landing + // wherever the cursor happens to be. 
+ let ui_opts = UiOptions { + force_line_mode: line_mode, + channel_name: bar_name.clone(), + profile_name: profile_name.clone(), + }; + let mut ui: Box = tui::make_ui(ui_opts); + let status: Arc = ui.status(); + let ignore: IgnoreSet = ui.ignore_set(); + + // Process-wide notice channel for background helpers. + let (notice_tx, mut notice_rx) = NoticeSink::new(); + tui::install_global_notice_sink(notice_tx.clone()); + + let (task, handle, _server_rx) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + ui.render_system(&format!("error: failed to start DHT: {e}")); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + ui.render_system(&format!("error: bootstrap failed: {e}")); + return 1; + } + + let table_size = handle.table_size().await.unwrap_or(0); + ui.render_system(&format!( + "*** connection established with DHT ({table_size} peers in routing table)" + )); + + let feed_keypair = if !read_only { + Some(KeyPair::generate()) + } else { + None + }; + + let ownership_proof = feed_keypair.as_ref().map(|fkp| { + crypto::ownership_proof(&id_keypair.secret_key, &fkp.public_key, &channel_key) + }); + + let feed_state = feed_keypair.as_ref().map(|fkp| { + feed::FeedState::new( + fkp.clone(), + id_keypair.clone(), + channel_key, + ownership_proof.unwrap(), + feed_lifetime, + ) + }); + + status.set_dht_peers(table_size); + + let (msg_tx, mut msg_rx) = mpsc::unbounded_channel::(); + + let friends = profile::load_friends(&profile_name).unwrap_or_default(); + let mut display_state = + display::DisplayState::new(friends, SharedKnownUsers::load_from_shared()); + + ui.render_system(&greeting); + + // --- DM-specific: invite-feed-keypair + initial invite --- + // + // For DM sessions we generate an ephemeral invite-feed-keypair (used + // for the inbox invite + per-epoch nudges) here, after the DHT is up. 
+ // If an initial message was provided we send the first invite now + // before the rest of the session machinery starts so the recipient's + // inbox monitor has the earliest possible opportunity to discover us. + let invite_feed_keypair = if dm.is_some() && !read_only { + Some(KeyPair::generate()) + } else { + None + }; + if let (Some(dm_extras), Some(inv_kp), Some(fs)) = + (dm.as_ref(), invite_feed_keypair.as_ref(), feed_state.as_ref()) + { + if let Some(msg_text) = dm_extras.initial_message.as_ref() { + if let Err(e) = inbox::send_dm_invite( + &handle, + inv_kp, + &id_keypair, + &dm_extras.recipient_pubkey, + &channel_key, + &fs.feed_keypair.public_key, + msg_text, + ) + .await + { + ui.render_system(&format!("warning: invite send failed: {e}")); + } + } + } + + // --- Reader task --- + let self_id = id_keypair.public_key; + let reader_handle = { + let handle = handle.clone(); + let msg_tx = msg_tx.clone(); + let profile_name = profile_name.clone(); + let self_feed_pubkey = feed_keypair.as_ref().map(|fkp| fkp.public_key); + let status = status.clone(); + tokio::spawn(async move { + reader::run_reader( + handle, + channel_key, + message_key, + msg_tx, + profile_name, + self_feed_pubkey, + self_id, + status, + ) + .await; + }) + }; + + // --- Publisher worker --- + let mut pub_tx: Option> = None; + let mut publisher_handle: Option> = None; + if let Some(fs) = feed_state { + let (tx, rx) = mpsc::channel::(64); + pub_tx = Some(tx); + + let screen_name = prof.screen_name.clone().unwrap_or_default(); + let handle_pub = handle.clone(); + let id_kp = id_keypair.clone(); + let status_pub = status.clone(); + let notices_pub = notice_tx.clone(); + publisher_handle = Some(tokio::spawn(async move { + publisher::run_publisher( + handle_pub, + fs, + id_kp, + message_key, + channel_key, + screen_name, + rx, + batch_size, + batch_wait_ms, + status_pub, + notices_pub, + ) + .await; + })); + } + + // --- DM nudge task --- + // + // For DM sessions, after each user-typed message 
we forward the text + // into a small dedicated task that checks if the current epoch is + // later than the last nudged one and, if so, fires a `send_dm_nudge` + // (a fresh mutable_put on the invite_feed_keypair + an inbox-topic + // announce). Once-per-epoch throttling matches the original + // dm_cmd.rs behavior; the publisher knows nothing about DM. + let mut nudge_tx: Option> = None; + let mut nudge_handle: Option> = None; + if let (Some(dm_extras), Some(inv_kp), Some(real_feed_pk)) = ( + dm.as_ref(), + invite_feed_keypair.as_ref(), + feed_keypair.as_ref().map(|f| f.public_key), + ) { + let (tx, mut rx) = mpsc::unbounded_channel::(); + nudge_tx = Some(tx); + let handle = handle.clone(); + let inv_kp = inv_kp.clone(); + let id_kp = id_keypair.clone(); + let recipient = dm_extras.recipient_pubkey; + nudge_handle = Some(tokio::spawn(async move { + let mut last_nudged_epoch = 0u64; + let mut nudge_seq = 0u64; + while let Some(text) = rx.recv().await { + let current = crypto::current_epoch(); + if current == last_nudged_epoch { + continue; + } + let _ = inbox::send_dm_nudge( + &handle, + &inv_kp, + &id_kp, + &recipient, + &channel_key, + &real_feed_pk, + &text, + nudge_seq, + ) + .await; + nudge_seq += 1; + last_nudged_epoch = current; + } + })); + } + + // --- Nexus refresh --- + let nexus_handle: Option> = if !no_nexus { + let handle = handle.clone(); + let id_kp = id_keypair.clone(); + let profile_name = profile_name.clone(); + let notices = notice_tx.clone(); + Some(tokio::spawn(async move { + crate::cmd::chat::nexus::run_nexus_refresh(handle, id_kp, profile_name, notices).await; + })) + } else { + None + }; + + // --- Friend refresh --- + let friend_refresh_handle: Option> = if !no_friends { + let handle = handle.clone(); + let profile_name = profile_name.clone(); + let notices = notice_tx.clone(); + Some(tokio::spawn(async move { + crate::cmd::chat::nexus::run_friend_refresh(handle, profile_name, notices).await; + })) + } else { + None + }; + + // --- DHT 
peer-count poller --- + let dht_status_handle: JoinHandle<()> = { + let handle = handle.clone(); + let status = status.clone(); + tokio::spawn(async move { + let mut tick = tokio::time::interval(Duration::from_secs(5)); + tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + tick.tick().await; + loop { + tick.tick().await; + let n = handle.table_size().await.unwrap_or(0); + status.set_dht_peers(n); + } + }) + }; + + // --- Inbox monitor --- + let inbox_state: Option> = if !no_inbox { + let cached_users = crate::cmd::chat::known_users::load_shared_users().unwrap_or_default(); + Some(Arc::new( + crate::cmd::chat::inbox_monitor::InboxMonitor::new(cached_users), + )) + } else { + None + }; + status.set_inbox_enabled(inbox_state.is_some()); + let inbox_handle: Option> = inbox_state.as_ref().map(|m| { + let handle = handle.clone(); + let id_kp = id_keypair.clone(); + let status = status.clone(); + let monitor = m.clone(); + let interval_secs = inbox_poll_interval.max(1); + tokio::spawn(async move { + let mut tick = tokio::time::interval(Duration::from_secs(interval_secs)); + tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + tick.tick().await; + loop { + tick.tick().await; + let _ = monitor.poll_once(&handle, &id_kp).await; + status.set_inbox_unread(monitor.unread_count()); + } + }) + }); + + // --- Main loop --- + let mut backlog_done = false; + let friends_reload_interval = Duration::from_secs(30); + let mut friends_reload_tick = tokio::time::interval(friends_reload_interval); + let mut eof_handled = false; + let mut graceful_eof_exit = false; + let mut want_exit = false; + + loop { + tokio::select! 
{ + Some(line) = notice_rx.recv() => { + ui.render_system(&line); + } + Some(msg) = msg_rx.recv() => { + if !backlog_done && msg.content.is_empty() && msg.id_pubkey == [0u8; 32] && msg.timestamp == 0 { + backlog_done = true; + ui.render_system("*** — live —"); + continue; + } + let ignored = { + let g = ignore.read().await; + !g.is_empty() && g.contains(&msg.id_pubkey) + }; + if ignored { + continue; + } + let out = display_state.render_to(&msg); + for notice in &out.system_notices { + ui.render_system(notice); + } + ui.render_message(&msg); + let _ = out; + } + _ = friends_reload_tick.tick() => { + if let Ok(updated_friends) = profile::load_friends(&profile_name) { + display_state.reload_friends(updated_friends); + } + } + input = ui.next_input() => { + match input { + Some(UiInput::Message(text)) => { + if let Some(tx) = pub_tx.as_ref() { + status.inc_send_pending(); + // Forward text to the DM nudge task too (if + // active). Throttling to one nudge per epoch + // happens inside the task. Ignore send errors — + // the task is gone, the user is mid-shutdown. + if let Some(ntx) = nudge_tx.as_ref() { + let _ = ntx.send(text.clone()); + } + // Bounded mpsc(64); on backpressure, watch for + // ctrl_c so the outer select! can react. + tokio::select! 
{ + biased; + _ = tokio::signal::ctrl_c() => { + status.dec_send_pending(); + ui.render_system("*** shutting down"); + break; + } + send_res = tx.send(PubJob::Message(text)) => { + if send_res.is_err() { + status.dec_send_pending(); + } + } + } + } else { + ui.render_system("*** read-only mode; message not sent"); + } + } + Some(UiInput::Command(cmd)) => { + if dispatch_slash( + cmd, + &profile_name, + ui.as_ref(), + &ignore, + &status, + inbox_state.as_ref(), + ).await { + ui.render_system("*** shutting down"); + break; + } + } + Some(UiInput::Eof) => { + if !eof_handled { + eof_handled = true; + if stay_after_eof { + ui.render_system("*** stdin closed, entering read-only mode"); + } else { + graceful_eof_exit = true; + want_exit = true; + } + } + } + Some(UiInput::Interrupt) => { + ui.render_system("*** shutting down"); + break; + } + None => { + break; + } + } + if want_exit { + break; + } + } + _ = tokio::signal::ctrl_c() => { + ui.render_system("*** shutting down"); + break; + } + _ = sigterm_recv() => { + ui.render_system("*** shutting down (SIGTERM)"); + break; + } + } + } + + // --- Shutdown --- + drop(pub_tx); + + if let Some(h) = publisher_handle { + if graceful_eof_exit { + ui.render_system("*** flushing publish queue (Ctrl-C to abort)…"); + tokio::select! { + _ = h => { + ui.render_system("*** publish queue flushed"); + } + _ = tokio::signal::ctrl_c() => { + ui.render_system("*** abort: outgoing messages may not have reached the network"); + } + } + } else { + let _ = tokio::time::timeout(Duration::from_secs(2), h).await; + } + } + reader_handle.abort(); + if let Some(h) = nexus_handle { + h.abort(); + } + if let Some(h) = friend_refresh_handle { + h.abort(); + } + if let Some(h) = inbox_handle { + h.abort(); + } + // Drop nudge_tx so the nudge task's rx.recv() returns None and it + // exits cleanly, then await briefly. Skips when DM mode wasn't + // active. 
+ drop(nudge_tx); + if let Some(h) = nudge_handle { + let _ = tokio::time::timeout(Duration::from_secs(1), h).await; + } + dht_status_handle.abort(); + + // Best-effort: for DM sessions, retract the invite-feed by writing an + // empty payload at the next seq. Bounded to 1 s so a stuck DHT can't + // hang shutdown. Failure is silent — TTL on the DHT will eventually + // expire the announce regardless. + if let (Some(_dm_extras), Some(inv_kp)) = (dm.as_ref(), invite_feed_keypair.as_ref()) { + let _ = tokio::time::timeout( + Duration::from_secs(1), + handle.mutable_put(inv_kp, b"", u64::MAX / 2), + ) + .await; + } + + ui.shutdown().await; + + let _ = handle.destroy().await; + let _ = task.await; + 0 +} + +/// Apply a slash command. Returns `true` if the session should exit. +async fn dispatch_slash( + cmd: SlashCommand, + profile_name: &str, + ui: &dyn ChatUi, + ignore: &IgnoreSet, + status: &StatusState, + inbox_state: Option<&Arc>, +) -> bool { + use crate::cmd::chat::resolve_recipient as resolve_pubkey; + match cmd { + SlashCommand::Quit => return true, + SlashCommand::Help => { + ui.render_system(commands::help_text()); + } + SlashCommand::IgnoreList => { + let g = ignore.read().await; + if g.is_empty() { + ui.render_system("*** ignore list is empty"); + } else { + let mut lines = vec!["*** ignoring:".to_string()]; + for pk in g.iter() { + let short = &hex::encode(pk)[..8]; + lines.push(format!(" {short}")); + } + ui.render_system(&lines.join("\n")); + } + } + SlashCommand::Ignore(arg) => match resolve_pubkey(profile_name, &arg) { + Ok(pk) => { + ignore.write().await.insert(pk); + ui.render_system(&format!("*** ignoring {}", &hex::encode(pk)[..8])); + } + Err(e) => ui.render_system(&format!("*** /ignore: {e}")), + }, + SlashCommand::Unignore(arg) => match resolve_pubkey(profile_name, &arg) { + Ok(pk) => { + let removed = ignore.write().await.remove(&pk); + if removed { + ui.render_system(&format!("*** unignored {}", &hex::encode(pk)[..8])); + } else { + 
ui.render_system("*** not in ignore list"); + } + } + Err(e) => ui.render_system(&format!("*** /unignore: {e}")), + }, + SlashCommand::FriendList => match profile::load_friends(profile_name) { + Ok(friends) if friends.is_empty() => ui.render_system("*** no friends"), + Ok(friends) => { + let mut lines = vec!["*** friends:".to_string()]; + for f in &friends { + let short = &hex::encode(f.pubkey)[..8]; + let alias = f.alias.as_deref().unwrap_or(""); + if alias.is_empty() { + lines.push(format!(" {short}")); + } else { + lines.push(format!(" {short} {alias}")); + } + } + ui.render_system(&lines.join("\n")); + } + Err(e) => ui.render_system(&format!("*** /friend: {e}")), + }, + SlashCommand::Friend(arg) => match resolve_pubkey(profile_name, &arg) { + Ok(pk) => { + let friend = profile::Friend { + pubkey: pk, + alias: None, + cached_name: None, + cached_bio_line: None, + }; + match profile::save_friend(profile_name, &friend) { + Ok(()) => { + ui.render_system(&format!("*** added friend {}", &hex::encode(pk)[..8])) + } + Err(e) => ui.render_system(&format!("*** /friend: {e}")), + } + } + Err(e) => ui.render_system(&format!("*** /friend: {e}")), + }, + SlashCommand::Unfriend(arg) => match resolve_pubkey(profile_name, &arg) { + Ok(pk) => match profile::remove_friend(profile_name, &pk) { + Ok(()) => ui.render_system(&format!("*** removed friend {}", &hex::encode(pk)[..8])), + Err(e) => ui.render_system(&format!("*** /unfriend: {e}")), + }, + Err(e) => ui.render_system(&format!("*** /unfriend: {e}")), + }, + SlashCommand::Inbox => match inbox_state { + None => ui.render_system( + "*** inbox monitoring disabled (started with --no-inbox); restart without that flag to enable", + ), + Some(monitor) => { + let drained = monitor.take_unread(); + let known = monitor.known_users().to_vec(); + status.set_inbox_unread(0); + if drained.is_empty() { + ui.render_system("*** inbox: no new invites"); + } else { + let n = drained.len(); + ui.render_system(&format!("*** inbox: {n} new 
invite(s)")); + for inv in &drained { + for line in crate::cmd::chat::inbox_monitor::format_invite_lines( + inv, + profile_name, + &known, + ) { + ui.render_system(&line); + } + } + } + } + }, + SlashCommand::Unknown(s) => { + ui.render_system(&format!("*** unknown command: /{s}")); + ui.render_system(commands::help_text()); + } + SlashCommand::Empty => { + ui.render_system(commands::help_text()); + } + } + false +} diff --git a/peeroxide-cli/src/cmd/chat/tui/commands.rs b/peeroxide-cli/src/cmd/chat/tui/commands.rs new file mode 100644 index 0000000..29559a8 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/commands.rs @@ -0,0 +1,178 @@ +//! Slash-command parsing for the chat input box. +//! +//! Slash commands run in the foreground process and operate on local state +//! (the ignore set, the friends file). The dispatcher in `join.rs` translates +//! the parsed `SlashCommand` into the appropriate action; this module is pure +//! parsing. + +/// A parsed slash command. The actual side effects (resolving names, updating +/// the friends file, mutating the ignore set) happen in the dispatcher. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SlashCommand { + /// `/quit`, `/exit` — clean shutdown. + Quit, + /// `/help` — list available commands. + Help, + /// `/ignore` — print current ignore set. + IgnoreList, + /// `/ignore ` — add to ignore set. `name` is the unresolved + /// identifier the user typed; the dispatcher resolves it. + Ignore(String), + /// `/unignore ` — remove from ignore set. + Unignore(String), + /// `/friend` — print current friends list. + FriendList, + /// `/friend ` — add to friends. + Friend(String), + /// `/unfriend ` — remove from friends. + Unfriend(String), + /// `/inbox` — dump unread invites to the chat region as system notices. + Inbox, + /// `/foo` — unknown command. Stored verbatim (without leading `/`) so the + /// dispatcher can print a useful message. + Unknown(String), + /// `/` alone or only whitespace after the slash. 
+ Empty, +} + +/// Parse a line of user input as a slash command. +/// +/// Returns `None` if `line` does not start with `/`. Otherwise always returns +/// some `SlashCommand` (`Unknown` for unrecognised verbs, `Empty` for a bare +/// `/`). +pub fn parse(line: &str) -> Option { + let trimmed = line.trim(); + let rest = trimmed.strip_prefix('/')?; + let rest = rest.trim(); + if rest.is_empty() { + return Some(SlashCommand::Empty); + } + + // Split on first whitespace run into verb + argument. + let (verb, arg) = match rest.split_once(char::is_whitespace) { + Some((v, a)) => (v, a.trim()), + None => (rest, ""), + }; + + let cmd = match verb { + "quit" | "exit" => SlashCommand::Quit, + "help" | "?" => SlashCommand::Help, + "ignore" => { + if arg.is_empty() { + SlashCommand::IgnoreList + } else { + SlashCommand::Ignore(arg.to_string()) + } + } + "unignore" => { + if arg.is_empty() { + SlashCommand::Unknown("unignore: missing argument".to_string()) + } else { + SlashCommand::Unignore(arg.to_string()) + } + } + "friend" => { + if arg.is_empty() { + SlashCommand::FriendList + } else { + SlashCommand::Friend(arg.to_string()) + } + } + "unfriend" => { + if arg.is_empty() { + SlashCommand::Unknown("unfriend: missing argument".to_string()) + } else { + SlashCommand::Unfriend(arg.to_string()) + } + } + "inbox" => SlashCommand::Inbox, + other => SlashCommand::Unknown(other.to_string()), + }; + Some(cmd) +} + +/// One-line help text listing every command. 
+pub fn help_text() -> &'static str { + "available commands: /help, /quit (alias /exit), /ignore [name], /unignore , /friend [name], /unfriend , /inbox" +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn non_slash_returns_none() { + assert_eq!(parse("hello world"), None); + assert_eq!(parse(""), None); + assert_eq!(parse(" hello /quit"), None); + } + + #[test] + fn quit_aliases() { + assert_eq!(parse("/quit"), Some(SlashCommand::Quit)); + assert_eq!(parse("/exit"), Some(SlashCommand::Quit)); + assert_eq!(parse(" /quit "), Some(SlashCommand::Quit)); + } + + #[test] + fn help() { + assert_eq!(parse("/help"), Some(SlashCommand::Help)); + assert_eq!(parse("/?"), Some(SlashCommand::Help)); + } + + #[test] + fn ignore_with_and_without_arg() { + assert_eq!(parse("/ignore"), Some(SlashCommand::IgnoreList)); + assert_eq!(parse("/ignore alice"), Some(SlashCommand::Ignore("alice".to_string()))); + assert_eq!(parse("/ignore alice "), Some(SlashCommand::Ignore("alice".to_string()))); + } + + #[test] + fn unignore_requires_arg() { + assert!(matches!(parse("/unignore"), Some(SlashCommand::Unknown(_)))); + assert_eq!( + parse("/unignore bob"), + Some(SlashCommand::Unignore("bob".to_string())) + ); + } + + #[test] + fn friend_with_and_without_arg() { + assert_eq!(parse("/friend"), Some(SlashCommand::FriendList)); + assert_eq!( + parse("/friend alice"), + Some(SlashCommand::Friend("alice".to_string())) + ); + } + + #[test] + fn unfriend_requires_arg() { + assert!(matches!(parse("/unfriend"), Some(SlashCommand::Unknown(_)))); + assert_eq!( + parse("/unfriend alice"), + Some(SlashCommand::Unfriend("alice".to_string())) + ); + } + + #[test] + fn inbox_no_args() { + assert_eq!(parse("/inbox"), Some(SlashCommand::Inbox)); + assert_eq!(parse(" /inbox "), Some(SlashCommand::Inbox)); + // Args after /inbox are currently ignored — only the verb is meaningful. 
+ assert_eq!(parse("/inbox extra"), Some(SlashCommand::Inbox)); + } + + #[test] + fn unknown_verb() { + assert_eq!( + parse("/foo bar"), + Some(SlashCommand::Unknown("foo".to_string())) + ); + } + + #[test] + fn bare_slash() { + assert_eq!(parse("/"), Some(SlashCommand::Empty)); + assert_eq!(parse("/ "), Some(SlashCommand::Empty)); + } +} diff --git a/peeroxide-cli/src/cmd/chat/tui/input.rs b/peeroxide-cli/src/cmd/chat/tui/input.rs new file mode 100644 index 0000000..841b6f0 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/input.rs @@ -0,0 +1,514 @@ +//! Multi-line input editor with readline-style keybindings. +//! +//! Maintained as a `Vec` of logical lines plus a `(line_idx, col)` +//! cursor. Pure data structure — no terminal I/O. The interactive renderer +//! draws this view; this module just mutates state in response to +//! `KeyEvent`s and reports the resulting `EditOutcome`. + +use crossterm::event::{KeyCode, KeyEvent, KeyEventKind, KeyModifiers}; + +/// Outcome of feeding a [`KeyEvent`] to the editor. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EditOutcome { + /// Buffer or cursor changed; the renderer should redraw the input area. + Redraw, + /// Buffer is being submitted as a single multi-line string. The editor + /// is cleared. + Submit(String), + /// Ctrl-C — the session should shut down. + Interrupt, + /// Ctrl-D on an empty buffer — propagate as EOF. + Eof, + /// User wants a full repaint (`Ctrl-L`). + ForceRepaint, + /// No change (e.g. an unmapped key). + Noop, +} + +/// Multi-line input editor. Initially one empty line, cursor at column 0. +pub struct InputEditor { + /// Logical lines. Always non-empty; an empty buffer is `vec![String::new()]`. + lines: Vec, + /// Cursor row (`0..lines.len()`). + row: usize, + /// Cursor column within `lines[row]` (`0..=lines[row].chars().count()`). 
+ col: usize, +} + +impl Default for InputEditor { + fn default() -> Self { + Self::new() + } +} + +impl InputEditor { + pub fn new() -> Self { + Self { + lines: vec![String::new()], + row: 0, + col: 0, + } + } + + /// Number of logical lines (always ≥ 1). + pub fn line_count(&self) -> usize { + self.lines.len() + } + + /// Logical lines, for the renderer to draw. + pub fn lines(&self) -> &[String] { + &self.lines + } + + /// Current cursor position (row, column). + pub fn cursor(&self) -> (usize, usize) { + (self.row, self.col) + } + + /// True if there's nothing typed. + pub fn is_empty(&self) -> bool { + self.lines.len() == 1 && self.lines[0].is_empty() + } + + /// Reset the buffer to an empty editor (one empty line, cursor at 0,0). + /// Used by the Ctrl-C "clear input line" path when there's unsent text. + pub fn clear(&mut self) { + self.lines.clear(); + self.lines.push(String::new()); + self.row = 0; + self.col = 0; + } + + /// Insert a literal character at the cursor (e.g. for pasted content). + pub fn insert_char(&mut self, ch: char) { + if ch == '\n' { + self.split_line(); + return; + } + let line = &mut self.lines[self.row]; + let byte_idx = byte_index(line, self.col); + line.insert(byte_idx, ch); + self.col += 1; + } + + /// Insert a multi-line string at the cursor (used for bracketed paste). + pub fn insert_str(&mut self, s: &str) { + for ch in s.chars() { + self.insert_char(ch); + } + } + + /// Apply a key event. Mutates state and returns what the renderer should do. 
+ pub fn handle_key(&mut self, ev: KeyEvent) -> EditOutcome { + if !matches!(ev.kind, KeyEventKind::Press | KeyEventKind::Repeat) { + return EditOutcome::Noop; + } + let ctrl = ev.modifiers.contains(KeyModifiers::CONTROL); + let shift = ev.modifiers.contains(KeyModifiers::SHIFT); + let alt = ev.modifiers.contains(KeyModifiers::ALT); + + match ev.code { + KeyCode::Char(c) if ctrl => match c { + 'a' => { + self.col = 0; + EditOutcome::Redraw + } + 'e' => { + self.col = self.lines[self.row].chars().count(); + EditOutcome::Redraw + } + 'u' => { + let line = &mut self.lines[self.row]; + let byte_idx = byte_index(line, self.col); + line.replace_range(..byte_idx, ""); + self.col = 0; + EditOutcome::Redraw + } + 'k' => { + let line = &mut self.lines[self.row]; + let byte_idx = byte_index(line, self.col); + line.truncate(byte_idx); + EditOutcome::Redraw + } + 'w' => { + self.delete_prev_word(); + EditOutcome::Redraw + } + 'l' => EditOutcome::ForceRepaint, + 'c' => EditOutcome::Interrupt, + 'd' => { + if self.is_empty() { + EditOutcome::Eof + } else { + // Forward delete + self.delete_forward(); + EditOutcome::Redraw + } + } + _ => EditOutcome::Noop, + }, + KeyCode::Enter => { + if shift || alt { + self.split_line(); + EditOutcome::Redraw + } else { + let text = self.take_buffer(); + if text.is_empty() { + EditOutcome::Noop + } else { + EditOutcome::Submit(text) + } + } + } + KeyCode::Char(c) => { + self.insert_char(c); + EditOutcome::Redraw + } + KeyCode::Backspace => { + self.delete_backward(); + EditOutcome::Redraw + } + KeyCode::Delete => { + self.delete_forward(); + EditOutcome::Redraw + } + KeyCode::Left => { + self.move_left(); + EditOutcome::Redraw + } + KeyCode::Right => { + self.move_right(); + EditOutcome::Redraw + } + KeyCode::Up => { + self.move_up(); + EditOutcome::Redraw + } + KeyCode::Down => { + self.move_down(); + EditOutcome::Redraw + } + KeyCode::Home => { + self.col = 0; + EditOutcome::Redraw + } + KeyCode::End => { + self.col = 
self.lines[self.row].chars().count(); + EditOutcome::Redraw + } + _ => EditOutcome::Noop, + } + } + + /// Drain the buffer into a single string with `\n` between logical lines, + /// resetting the editor to empty. + fn take_buffer(&mut self) -> String { + let joined = self.lines.join("\n"); + self.lines.clear(); + self.lines.push(String::new()); + self.row = 0; + self.col = 0; + joined + } + + fn split_line(&mut self) { + let line = &mut self.lines[self.row]; + let byte_idx = byte_index(line, self.col); + let rest = line.split_off(byte_idx); + self.lines.insert(self.row + 1, rest); + self.row += 1; + self.col = 0; + } + + fn delete_backward(&mut self) { + if self.col > 0 { + let line = &mut self.lines[self.row]; + let from = byte_index(line, self.col - 1); + let to = byte_index(line, self.col); + line.replace_range(from..to, ""); + self.col -= 1; + } else if self.row > 0 { + // Join with previous line + let cur = self.lines.remove(self.row); + self.row -= 1; + self.col = self.lines[self.row].chars().count(); + self.lines[self.row].push_str(&cur); + } + } + + fn delete_forward(&mut self) { + let line_len = self.lines[self.row].chars().count(); + if self.col < line_len { + let line = &mut self.lines[self.row]; + let from = byte_index(line, self.col); + let to = byte_index(line, self.col + 1); + line.replace_range(from..to, ""); + } else if self.row + 1 < self.lines.len() { + // Join with next line + let next = self.lines.remove(self.row + 1); + self.lines[self.row].push_str(&next); + } + } + + fn delete_prev_word(&mut self) { + // Walk backwards over whitespace, then over non-whitespace. + let line = &mut self.lines[self.row]; + if self.col == 0 { + // Same as backspace on a line boundary. 
+ if self.row > 0 { + let cur = self.lines.remove(self.row); + self.row -= 1; + self.col = self.lines[self.row].chars().count(); + self.lines[self.row].push_str(&cur); + } + return; + } + let chars: Vec = line.chars().collect(); + let mut i = self.col; + while i > 0 && chars[i - 1].is_whitespace() { + i -= 1; + } + while i > 0 && !chars[i - 1].is_whitespace() { + i -= 1; + } + let from = byte_index(line, i); + let to = byte_index(line, self.col); + line.replace_range(from..to, ""); + self.col = i; + } + + fn move_left(&mut self) { + if self.col > 0 { + self.col -= 1; + } else if self.row > 0 { + self.row -= 1; + self.col = self.lines[self.row].chars().count(); + } + } + + fn move_right(&mut self) { + let line_len = self.lines[self.row].chars().count(); + if self.col < line_len { + self.col += 1; + } else if self.row + 1 < self.lines.len() { + self.row += 1; + self.col = 0; + } + } + + fn move_up(&mut self) { + if self.row > 0 { + self.row -= 1; + let line_len = self.lines[self.row].chars().count(); + if self.col > line_len { + self.col = line_len; + } + } + } + + fn move_down(&mut self) { + if self.row + 1 < self.lines.len() { + self.row += 1; + let line_len = self.lines[self.row].chars().count(); + if self.col > line_len { + self.col = line_len; + } + } + } +} + +/// Convert a char-index into a byte-index inside `s` for safe `String::insert` +/// / `replace_range`. Saturates at `s.len()` on out-of-range. 
+fn byte_index(s: &str, char_idx: usize) -> usize { + s.char_indices() + .nth(char_idx) + .map(|(i, _)| i) + .unwrap_or(s.len()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crossterm::event::{KeyCode, KeyEvent, KeyEventKind, KeyEventState, KeyModifiers}; + + fn key(code: KeyCode) -> KeyEvent { + KeyEvent { + code, + modifiers: KeyModifiers::NONE, + kind: KeyEventKind::Press, + state: KeyEventState::NONE, + } + } + + fn key_mod(code: KeyCode, mods: KeyModifiers) -> KeyEvent { + KeyEvent { + code, + modifiers: mods, + kind: KeyEventKind::Press, + state: KeyEventState::NONE, + } + } + + #[test] + fn typing_inserts_chars() { + let mut ed = InputEditor::new(); + for c in "hi".chars() { + assert_eq!(ed.handle_key(key(KeyCode::Char(c))), EditOutcome::Redraw); + } + assert_eq!(ed.lines(), &["hi".to_string()]); + assert_eq!(ed.cursor(), (0, 2)); + } + + #[test] + fn enter_submits_and_clears() { + let mut ed = InputEditor::new(); + ed.handle_key(key(KeyCode::Char('h'))); + ed.handle_key(key(KeyCode::Char('i'))); + assert_eq!( + ed.handle_key(key(KeyCode::Enter)), + EditOutcome::Submit("hi".to_string()) + ); + assert!(ed.is_empty()); + } + + #[test] + fn shift_enter_inserts_newline() { + let mut ed = InputEditor::new(); + ed.handle_key(key(KeyCode::Char('a'))); + ed.handle_key(key_mod(KeyCode::Enter, KeyModifiers::SHIFT)); + ed.handle_key(key(KeyCode::Char('b'))); + assert_eq!(ed.lines(), &["a".to_string(), "b".to_string()]); + assert_eq!(ed.cursor(), (1, 1)); + } + + #[test] + fn alt_enter_inserts_newline_as_fallback() { + let mut ed = InputEditor::new(); + ed.handle_key(key_mod(KeyCode::Enter, KeyModifiers::ALT)); + assert_eq!(ed.lines(), &["".to_string(), "".to_string()]); + } + + #[test] + fn enter_on_multiline_submits_with_newlines() { + let mut ed = InputEditor::new(); + for c in "a".chars() { + ed.handle_key(key(KeyCode::Char(c))); + } + ed.handle_key(key_mod(KeyCode::Enter, KeyModifiers::SHIFT)); + for c in "b".chars() { + 
ed.handle_key(key(KeyCode::Char(c))); + } + let out = ed.handle_key(key(KeyCode::Enter)); + assert_eq!(out, EditOutcome::Submit("a\nb".to_string())); + } + + #[test] + fn backspace_within_line() { + let mut ed = InputEditor::new(); + ed.insert_str("hello"); + assert_eq!(ed.cursor(), (0, 5)); + ed.handle_key(key(KeyCode::Backspace)); + assert_eq!(ed.lines(), &["hell".to_string()]); + assert_eq!(ed.cursor(), (0, 4)); + } + + #[test] + fn backspace_at_line_start_joins() { + let mut ed = InputEditor::new(); + ed.insert_str("ab\ncd"); + assert_eq!(ed.lines(), &["ab".to_string(), "cd".to_string()]); + // Move cursor to start of line 1 + ed.row = 1; + ed.col = 0; + ed.handle_key(key(KeyCode::Backspace)); + assert_eq!(ed.lines(), &["abcd".to_string()]); + assert_eq!(ed.cursor(), (0, 2)); + } + + #[test] + fn ctrl_u_clears_to_start() { + let mut ed = InputEditor::new(); + ed.insert_str("hello world"); + ed.col = 6; // after "hello " + ed.handle_key(key_mod(KeyCode::Char('u'), KeyModifiers::CONTROL)); + assert_eq!(ed.lines(), &["world".to_string()]); + assert_eq!(ed.cursor(), (0, 0)); + } + + #[test] + fn ctrl_w_deletes_word() { + let mut ed = InputEditor::new(); + ed.insert_str("hello world "); + ed.handle_key(key_mod(KeyCode::Char('w'), KeyModifiers::CONTROL)); + assert_eq!(ed.lines(), &["hello ".to_string()]); + } + + #[test] + fn ctrl_c_interrupts() { + let mut ed = InputEditor::new(); + ed.insert_str("hi"); + assert_eq!( + ed.handle_key(key_mod(KeyCode::Char('c'), KeyModifiers::CONTROL)), + EditOutcome::Interrupt + ); + } + + #[test] + fn ctrl_d_on_empty_is_eof() { + let mut ed = InputEditor::new(); + assert_eq!( + ed.handle_key(key_mod(KeyCode::Char('d'), KeyModifiers::CONTROL)), + EditOutcome::Eof + ); + } + + #[test] + fn ctrl_d_on_nonempty_is_forward_delete() { + let mut ed = InputEditor::new(); + ed.insert_str("ab"); + ed.col = 0; + assert_eq!( + ed.handle_key(key_mod(KeyCode::Char('d'), KeyModifiers::CONTROL)), + EditOutcome::Redraw + ); + assert_eq!(ed.lines(), 
&["b".to_string()]); + } + + #[test] + fn clear_resets_to_single_empty_line() { + let mut ed = InputEditor::new(); + ed.insert_str("line one\nline two\nline three"); + assert!(!ed.is_empty()); + assert!(ed.line_count() > 1); + ed.clear(); + assert!(ed.is_empty()); + assert_eq!(ed.line_count(), 1); + assert_eq!(ed.cursor(), (0, 0)); + assert_eq!(ed.lines(), &[String::new()]); + } + + #[test] + fn arrow_keys() { + let mut ed = InputEditor::new(); + ed.insert_str("a\nbc"); + ed.handle_key(key(KeyCode::Up)); + assert_eq!(ed.cursor(), (0, 1)); + ed.handle_key(key(KeyCode::Home)); + assert_eq!(ed.cursor(), (0, 0)); + ed.handle_key(key(KeyCode::End)); + assert_eq!(ed.cursor(), (0, 1)); + ed.handle_key(key(KeyCode::Down)); + assert_eq!(ed.cursor(), (1, 1)); + } + + #[test] + fn unicode_byte_indexing() { + // Multi-byte chars must not corrupt indexing. + let mut ed = InputEditor::new(); + for c in "café".chars() { + ed.handle_key(key(KeyCode::Char(c))); + } + ed.handle_key(key(KeyCode::Backspace)); + assert_eq!(ed.lines(), &["caf".to_string()]); + } +} diff --git a/peeroxide-cli/src/cmd/chat/tui/interactive.rs b/peeroxide-cli/src/cmd/chat/tui/interactive.rs new file mode 100644 index 0000000..2cdcca5 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/interactive.rs @@ -0,0 +1,1087 @@ +//! Interactive TTY chat UI: status bar pinned at the bottom of the terminal, +//! multi-line input area above it, chat history scrolling in the region above +//! that. +//! +//! ## Architecture +//! +//! Three concurrent tokio tasks (plus the caller's `join.rs` event loop): +//! +//! - **Renderer task** (`render_loop`): sole writer to stdout. Receives +//! [`UiOp`]s (incoming chat messages to print into the scroll region, +//! input-area repaint requests, resize events, shutdown signal) and +//! `StatusState::dirty` notifications. Coalesces work into ~30 fps idle +//! redraws. +//! - **Keyboard task** (`keyboard_loop`): reads `crossterm::event::Event`s, +//! 
feeds them to [`InputEditor`], and sends: +//! - to the renderer: an `InputRedraw` op so the cursor/text repaints +//! - to the consumer (`InteractiveUi::next_input`): a `UiInput` event when +//! the user submits a line, hits Ctrl-C, or hits Ctrl-D on empty input +//! - The **consumer** (`join.rs`) pulls `UiInput`s via `next_input()` and +//! pushes messages-to-render through `render_message` / `render_system` +//! (which produce `UiOp::Message` / `UiOp::System`). +//! +//! The scroll region (DECSTBM) reserves the bottom rows of the terminal for +//! the bar + input. Stdout writes into the upper region are managed by the +//! renderer with `MoveTo(0, region_bottom)` + `Print(line)` + `\n`; the +//! terminal handles the scroll. After every write the renderer repaints the +//! status bar and input area, then `MoveTo`s the cursor back to the editor +//! position. This way an inbound message never disturbs what the user is +//! typing. + +use std::collections::{HashSet, VecDeque}; +use std::io::{Stdout, Write, stdout}; +use std::sync::Arc; + +use crossterm::{ + cursor, + event::{Event, EventStream}, + queue, + style::{Color, ResetColor, SetBackgroundColor, SetForegroundColor}, + terminal::{self, Clear, ClearType}, +}; +use futures::StreamExt; +use futures::future::BoxFuture; +use tokio::sync::{Mutex, RwLock, mpsc}; +use tokio::task::JoinHandle; + +use crate::cmd::chat::display::{DisplayMessage, render_message_line}; +use crate::cmd::chat::tui::commands; +use crate::cmd::chat::tui::input::{EditOutcome, InputEditor}; +use crate::cmd::chat::tui::status::{self, SlotWidths, StatusState}; +use crate::cmd::chat::tui::{ChatUi, IgnoreSet, UiInput, UiOptions}; + +/// Renderer ops. Funneled through a single mpsc so only the renderer task +/// writes to stdout. +enum UiOp { + /// Print a chat message into the scroll region. + Message(String), + /// Print a system notice into the scroll region. + System(String), + /// Repaint the input area (cursor moved, text changed, etc.). 
+ InputRedraw, + /// Full repaint (terminal resize, Ctrl-L). + FullRepaint, + /// Show a transient overlay text on the status-bar row for `duration`. + /// While active the overlay replaces the normal bar with yellow-on-black + /// styling. Used by the keyboard task to surface the "press Ctrl-C + /// again…" prompt without disturbing the chat scrollback. A new overlay + /// while one is already active simply replaces it (new deadline). + ShowTransientOverlay { text: String, duration: std::time::Duration }, + /// Clear any active transient overlay (e.g. user typed something so the + /// armed Ctrl-C window should be cancelled). + ClearTransientOverlay, + /// Renderer should exit. + Shutdown, +} + +/// Snapshot of the editor state, passed renderer-bound so the renderer +/// doesn't need a lock on the editor. +#[derive(Clone, Default)] +struct EditorSnapshot { + lines: Vec, + row: usize, + col: usize, +} + +/// Public interactive UI handle. Owns the renderer + keyboard tasks; cleanup +/// on `shutdown` restores the terminal. +pub struct InteractiveUi { + status: Arc, + ignore: IgnoreSet, + ops_tx: mpsc::UnboundedSender, + input_rx: Mutex>, + renderer_handle: Option>, + keyboard_handle: Option>, + /// Shared editor state — written by the keyboard task, read (and only + /// read) by the renderer task to paint the input area. + editor_view: Arc>, +} + +impl InteractiveUi { + /// Attempt to enter interactive mode. Returns `Err` (with the original + /// error message) if the terminal does not support the required + /// operations — the factory will fall back to line mode. + /// + /// **Synchronously** completes the terminal setup (raw mode, scroll + /// region, initial paint) before returning, so by the time the caller + /// gets a handle the bottom rows are already claimed by the bar + input + /// area. 
Without this, the renderer task (started via `tokio::spawn`) + /// could be scheduled later than the caller's first `render_system` + /// call, and although the queued messages would still be processed + /// in-order, any *third-party* stderr write from a spawned task in the + /// gap would land at the cursor wherever the shell left it. + pub fn new(opts: &UiOptions) -> Result { + let status = StatusState::new(opts.channel_name.clone()); + let ignore: IgnoreSet = Arc::new(RwLock::new(HashSet::new())); + + let (ops_tx, ops_rx) = mpsc::unbounded_channel::(); + let (input_tx, input_rx) = mpsc::unbounded_channel::(); + + let editor_view = Arc::new(RwLock::new(EditorSnapshot { + lines: vec![String::new()], + row: 0, + col: 0, + })); + + // Do the terminal setup *synchronously* on this thread (we're inside + // a sync `new`, called from an async context). The TerminalGuard + + // initial layout happen here; the spawned renderer task only owns + // the steady-state paint loop. This guarantees that by the time + // `new()` returns, the scroll region is already in place. + use crate::cmd::chat::tui::terminal::TerminalGuard; + let mut guard = + TerminalGuard::enter().map_err(|e| format!("terminal init failed: {e}"))?; + let (cols, rows) = + crossterm::terminal::size().map_err(|e| format!("terminal::size failed: {e}"))?; + let input_height: u16 = 1; + // Reserve the bottom 1+input_height rows. + let reserved = 1 + input_height; + let region_bottom = if reserved < rows { rows - reserved } else { 1 }; + guard + .set_scroll_region(1, region_bottom.max(1)) + .map_err(|e| format!("scroll region setup failed: {e}"))?; + + // Initial paint of status bar + input area so the divider is visible + // immediately. Errors here aren't fatal — we'll just look ugly. 
+ { + let mut out = stdout(); + let mut slots = SlotWidths::default(); + let snap = EditorSnapshot { + lines: vec![String::new()], + row: 0, + col: 0, + }; + let _ = paint_status_and_input( + &mut out, &status, cols, rows, input_height, &snap, &mut slots, None, + ); + } + + let renderer_status = status.clone(); + let renderer_editor = editor_view.clone(); + let renderer_handle = tokio::spawn(async move { + if let Err(e) = render_loop( + ops_rx, + renderer_status, + renderer_editor, + guard, + cols, + rows, + input_height, + ) + .await + { + // Renderer error: TerminalGuard's drop will fire from inside + // the failing await chain, so the terminal restores cleanly. + eprintln!("*** interactive renderer error: {e}"); + } + }); + + let ops_tx_kb = ops_tx.clone(); + let editor_view_kb = editor_view.clone(); + let keyboard_handle = tokio::spawn(async move { + keyboard_loop(input_tx, ops_tx_kb, editor_view_kb).await; + }); + + Ok(Self { + status, + ignore, + ops_tx, + input_rx: Mutex::new(input_rx), + renderer_handle: Some(renderer_handle), + keyboard_handle: Some(keyboard_handle), + editor_view, + }) + } +} + +impl ChatUi for InteractiveUi { + fn render_message(&self, msg: &DisplayMessage) { + let rendered = render_message_line(msg); + for notice in rendered.system_notices { + let _ = self.ops_tx.send(UiOp::System(notice)); + } + let _ = self.ops_tx.send(UiOp::Message(rendered.message_line)); + } + + fn render_system(&self, line: &str) { + // System notices may already contain embedded newlines (e.g. the + // multi-line ignore-list dump from `dispatch_slash`). Split so each + // line independently scrolls into the region — otherwise the renderer + // would `\n` once and rely on terminal wrapping for the rest, which + // looks ragged. 
+ for sub in line.split('\n') { + let _ = self.ops_tx.send(UiOp::System(sub.to_string())); + } + } + + fn status(&self) -> Arc { + self.status.clone() + } + + fn ignore_set(&self) -> IgnoreSet { + self.ignore.clone() + } + + fn next_input(&mut self) -> BoxFuture<'_, Option> { + Box::pin(async move { + let mut rx = self.input_rx.lock().await; + rx.recv().await + }) + } + + fn shutdown(mut self: Box) -> BoxFuture<'static, ()> { + Box::pin(async move { + let _ = self.ops_tx.send(UiOp::Shutdown); + if let Some(h) = self.keyboard_handle.take() { + h.abort(); + let _ = h.await; + } + if let Some(h) = self.renderer_handle.take() { + let _ = tokio::time::timeout(std::time::Duration::from_millis(200), h).await; + } + // Belt-and-suspenders: even if the renderer's TerminalGuard didn't + // drop cleanly (e.g. it panicked), the panic hook installed inside + // `terminal::enter` will have run. Nothing to do here. + // Drop reference to the editor so the keyboard task's snapshot + // owner is the only one left; the keyboard task's abort releases + // it on its own. + let _ = self.editor_view; + }) + } +} + +// ===== Renderer task ===== + +/// Renderer entry point. Owns the terminal guard (already set up by +/// [`InteractiveUi::new`]), the editor view, and the status state. Returns +/// only after receiving [`UiOp::Shutdown`] or on a fatal I/O error. +async fn render_loop( + mut ops_rx: mpsc::UnboundedReceiver, + status: Arc, + editor: Arc>, + mut guard: crate::cmd::chat::tui::terminal::TerminalGuard, + mut cols: u16, + mut rows: u16, + mut input_height: u16, +) -> std::io::Result<()> { + let mut out = stdout(); + let mut slots = SlotWidths::default(); + + // In-memory chat-history ring buffer. Every `Message` / `System` line we + // write into the scroll region is pushed here too. 
On `FullRepaint` + // (terminal resize / Ctrl-L) we replay the tail of this buffer into the + // freshly-laid-out scroll region so the user's visible chat history + // survives the resize, instead of being wiped along with stale bar / + // input artifacts. + // + // Bounded to `HISTORY_CAP` lines (currently 500). Enough to refill the + // largest reasonable terminal a few times; cheap in memory. + let mut history: VecDeque = VecDeque::with_capacity(HISTORY_CAP); + + // Cache the last-rendered status snapshot so the idle timer arm can + // detect "the rendered bar would now differ" (e.g. the `recv_active` + // flash just decayed back to false) and trigger a repaint. Without + // this, the flash would stay on screen until the next inbound message + // forces a paint. + let mut last_rendered: Option = None; + + // Transient overlay painted in place of the status bar (e.g. the + // "press Ctrl-C again…" prompt). Cleared automatically once + // `expires_at` has elapsed (the idle tick checks every ~100ms). + let mut transient_overlay: Option<(String, std::time::Instant)> = None; + + loop { + tokio::select! { + biased; + op = ops_rx.recv() => { + let Some(op) = op else { break }; + match op { + UiOp::Shutdown => break, + UiOp::Message(line) | UiOp::System(line) => { + write_into_scroll_region(&mut out, &line, rows, input_height)?; + push_history(&mut history, line); + // After a scroll-region write the cursor sits at the + // bottom of the region; we still need to repaint the + // status bar (in case `Receiving...` count changed) + // and put the cursor back in the input area. 
+ let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + last_rendered = Some(status.snapshot()); + } + UiOp::InputRedraw => { + let editor_snap = editor.read().await.clone(); + let needed = compute_input_height(&editor_snap, rows); + if needed != input_height { + input_height = needed; + apply_layout(&mut guard, &mut out, cols, rows, input_height)?; + paint_full( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + } else { + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + } + last_rendered = Some(status.snapshot()); + } + UiOp::FullRepaint => { + let new_size = terminal::size()?; + cols = new_size.0; + rows = new_size.1; + slots.reset(); + let editor_snap = editor.read().await.clone(); + input_height = compute_input_height(&editor_snap, rows); + apply_layout(&mut guard, &mut out, cols, rows, input_height)?; + crossterm::queue!( + out, + cursor::MoveTo(0, 0), + Clear(ClearType::All), + )?; + replay_history(&mut out, &history, rows, input_height)?; + paint_full( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + last_rendered = Some(status.snapshot()); + } + UiOp::ShowTransientOverlay { text, duration } => { + transient_overlay = Some((text, std::time::Instant::now() + duration)); + let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + // Don't touch last_rendered — the next status-based + // tick should still trigger a real paint if the bar + // would otherwise differ. 
+ } + UiOp::ClearTransientOverlay => { + if transient_overlay.is_some() { + transient_overlay = None; + let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + None, + )?; + } + } + } + } + _ = status.dirty.notified() => { + let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + last_rendered = Some(status.snapshot()); + } + _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { + // Auto-expire the transient overlay if its deadline has + // passed. Triggers a repaint of the normal bar. + let overlay_expired = matches!( + transient_overlay, + Some((_, expires_at)) if std::time::Instant::now() >= expires_at + ); + if overlay_expired { + transient_overlay = None; + let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + None, + )?; + last_rendered = Some(status.snapshot()); + continue; + } + + // Idle tick: if the snapshot would now render differently + // from the last paint (e.g. the recv_active flash just + // decayed), repaint. Skipping the paint when nothing + // changed keeps the terminal quiet between activity. + let snap = status.snapshot(); + if last_rendered.as_ref() != Some(&snap) { + let editor_snap = editor.read().await.clone(); + paint_status_and_input( + &mut out, &status, cols, rows, input_height, &editor_snap, &mut slots, + overlay_text(&transient_overlay), + )?; + last_rendered = Some(snap); + } + } + } + } + + // TerminalGuard restores the terminal on drop. + drop(guard); + Ok(()) +} + +/// Compute desired input area height for the given editor snapshot, capped +/// at half the screen. 
+fn compute_input_height(snap: &EditorSnapshot, rows: u16) -> u16 { + let want = snap.lines.len().max(1) as u16; + let cap = (rows / 2).max(1); + want.min(cap) +} + +/// Apply (or re-apply) the scroll region for the current input height. The +/// bottom `1 + input_height` rows become the status bar + input area; the +/// rest is the chat scroll region. +fn apply_layout( + guard: &mut crate::cmd::chat::tui::terminal::TerminalGuard, + _out: &mut Stdout, + _cols: u16, + rows: u16, + input_height: u16, +) -> std::io::Result<()> { + let reserved = 1 + input_height; + if reserved >= rows { + // Pathological: terminal too short. Drop the bar; use the last row + // for input only. + guard.set_scroll_region(1, rows.saturating_sub(1).max(1))?; + return Ok(()); + } + let region_bottom = rows - reserved; + guard.set_scroll_region(1, region_bottom)?; + Ok(()) +} + +/// Project an `Option<(String, Instant)>` to an `Option<&str>` for the +/// paint helpers. Returns `None` if the overlay has already expired so the +/// callers paint the normal bar even if the auto-expire tick hasn't fired +/// yet (defensive — the tick should clear it within ~100 ms anyway). +fn overlay_text(overlay: &Option<(String, std::time::Instant)>) -> Option<&str> { + overlay + .as_ref() + .filter(|(_, expires_at)| std::time::Instant::now() < *expires_at) + .map(|(text, _)| text.as_str()) +} + +/// Full repaint: clears the bar + input rows then paints both. +#[allow(clippy::too_many_arguments)] +fn paint_full( + out: &mut Stdout, + status: &StatusState, + cols: u16, + rows: u16, + input_height: u16, + editor: &EditorSnapshot, + slots: &mut SlotWidths, + overlay: Option<&str>, +) -> std::io::Result<()> { + // Clear status + input rows (just paint them fresh). 
+ paint_status_and_input(out, status, cols, rows, input_height, editor, slots, overlay) +} + +#[allow(clippy::too_many_arguments)] +fn paint_status_and_input( + out: &mut Stdout, + status: &StatusState, + cols: u16, + rows: u16, + input_height: u16, + editor: &EditorSnapshot, + slots: &mut SlotWidths, + overlay: Option<&str>, +) -> std::io::Result<()> { + paint_status_bar(out, status, cols, rows, input_height, slots, overlay)?; + paint_input_area(out, cols, rows, input_height, editor)?; + Ok(()) +} + +fn paint_status_bar( + out: &mut Stdout, + status: &StatusState, + cols: u16, + rows: u16, + input_height: u16, + slots: &mut SlotWidths, + overlay: Option<&str>, +) -> std::io::Result<()> { + // Row index (1-based for DECSTBM, 0-based for crossterm). The bar lives + // at `rows - input_height - 1` in 0-based coords. + let bar_row = rows.saturating_sub(input_height + 1); + // Clear the row before painting so that on a terminal resize (when the + // previous bar was wider, in different columns, or at a different row + // index) no leftover bytes remain past the new bar's right edge. + queue!( + out, + cursor::Hide, + cursor::MoveTo(0, bar_row), + ResetColor, + Clear(ClearType::CurrentLine), + )?; + + if let Some(text) = overlay { + // Transient overlay: yellow background, black foreground. Pad/ + // truncate to exactly `cols` so the row is fully covered. + let body = fit_overlay(text, cols as usize); + queue!( + out, + SetBackgroundColor(Color::Yellow), + SetForegroundColor(Color::Black), + )?; + out.write_all(body.as_bytes())?; + } else { + // Normal status bar (grey background) with optional yellow-bg + // overlay on the INBOX segment. 
+ let snap = status.snapshot(); + let level = status::pick_level(&snap, cols as usize); + let bar = status::render_bar(&snap, level, cols as usize, slots); + paint_bar_body(out, &bar)?; + } + queue!(out, ResetColor)?; + Ok(()) +} + +/// Write a `BarRender` to stdout: grey background everywhere, except the +/// optional `inbox_highlight` byte range which is painted with the yellow +/// background + black foreground "attention" styling. +fn paint_bar_body(out: &mut Stdout, bar: &status::BarRender) -> std::io::Result<()> { + let body = &bar.body; + queue!( + out, + SetBackgroundColor(Color::Grey), + SetForegroundColor(Color::Black), + )?; + match &bar.inbox_highlight { + None => { + out.write_all(body.as_bytes())?; + } + Some(range) => { + // body is ASCII for the bar's structural chars; INBOX/inbox/I/i + // are ASCII too. Char index == byte index in this context — we + // verify by walking and slicing on char boundaries to be safe + // against any non-ASCII (e.g. the '●' dot or '…' ellipsis). + let mut byte_start = None; + let mut byte_end = None; + for (char_idx, (b_idx, _)) in body.char_indices().enumerate() { + if char_idx == range.start { + byte_start = Some(b_idx); + } + if char_idx == range.end { + byte_end = Some(b_idx); + break; + } + } + let bs = byte_start.unwrap_or(0); + let be = byte_end.unwrap_or(body.len()); + out.write_all(&body.as_bytes()[..bs])?; + queue!( + out, + SetBackgroundColor(Color::Yellow), + SetForegroundColor(Color::Black), + )?; + out.write_all(&body.as_bytes()[bs..be])?; + queue!( + out, + SetBackgroundColor(Color::Grey), + SetForegroundColor(Color::Black), + )?; + out.write_all(&body.as_bytes()[be..])?; + } + } + Ok(()) +} + +/// Pure helper: format a transient overlay text to exactly `cols` cells +/// (truncate if too long, right-pad with spaces if too short). One space +/// of left padding for visual breathing room when there's room. 
+fn fit_overlay(text: &str, cols: usize) -> String { + if cols == 0 { + return String::new(); + } + // Truncate by char count, then pad with spaces. We don't have a true + // visible-width crate in scope; chat content is ASCII-dominant so + // chars().count() approximates well for our overlay text. + let with_pad = format!(" {text} "); + let n = with_pad.chars().count(); + if n >= cols { + with_pad.chars().take(cols).collect() + } else { + let mut s = with_pad; + for _ in 0..(cols - n) { + s.push(' '); + } + s + } +} + +fn paint_input_area( + out: &mut Stdout, + cols: u16, + rows: u16, + input_height: u16, + editor: &EditorSnapshot, +) -> std::io::Result<()> { + let first_row = rows.saturating_sub(input_height); + // Render up to `input_height` editor lines, starting from the row + // containing the cursor and walking back. If there are fewer logical + // lines than `input_height`, pad with blanks. + let editor_lines = &editor.lines; + let total = editor_lines.len(); + // Determine the window of editor lines to display: keep the cursor + // visible. Strategy: start at line 0 if `total <= input_height`, + // otherwise scroll so the cursor row is the last visible line. + let start = if total as u16 <= input_height { + 0 + } else if editor.row as u16 >= input_height { + editor.row as u16 - (input_height - 1) + } else { + 0 + }; + for i in 0..input_height { + let row = first_row + i; + queue!( + out, + cursor::MoveTo(0, row), + Clear(ClearType::CurrentLine), + )?; + let line_idx = start as usize + i as usize; + if line_idx < total { + let line = &editor_lines[line_idx]; + // Truncate to cols-1 to keep room for the cursor at end-of-line. + let max_len = cols.saturating_sub(1) as usize; + let truncated: String = line.chars().take(max_len).collect(); + out.write_all(truncated.as_bytes())?; + } + } + // Position cursor. 
+ let cursor_row_window = (editor.row as u16).saturating_sub(start); + let cursor_col = (editor.col as u16).min(cols.saturating_sub(1)); + let cursor_row = first_row + cursor_row_window.min(input_height.saturating_sub(1)); + queue!(out, cursor::MoveTo(cursor_col, cursor_row), cursor::Show)?; + out.flush()?; + Ok(()) +} + +/// Print `line` into the chat scroll region. The terminal handles the +/// scroll for us — we just `MoveTo` the last row of the region and emit the +/// text plus `\n`. +fn write_into_scroll_region( + out: &mut Stdout, + line: &str, + rows: u16, + input_height: u16, +) -> std::io::Result<()> { + let region_bottom_zero = rows.saturating_sub(input_height + 2); + queue!( + out, + cursor::Hide, + cursor::MoveTo(0, region_bottom_zero), + ResetColor, + )?; + // Write content (truncate visually if needed; the terminal will wrap + // otherwise, which is fine for chat history). + out.write_all(line.as_bytes())?; + // Newline so the next message starts on a fresh line; this triggers the + // scroll within the region. + out.write_all(b"\r\n")?; + out.flush()?; + Ok(()) +} + +/// Maximum number of chat lines kept in the in-memory history buffer. +/// Lines older than this are evicted FIFO as new ones are pushed. +/// +/// Sized to comfortably cover any plausible terminal height (a 4K display +/// at a tiny font is on the order of 250-300 rows) with headroom. Bigger +/// values don't help — only the tail up to `region_height` is ever +/// replayed; older history is never visible again once it falls past the +/// top of the region. +const HISTORY_CAP: usize = 500; + +/// Push a chat line onto the bounded history buffer, evicting the oldest +/// line when the capacity is reached. +fn push_history(history: &mut VecDeque, line: String) { + if history.len() == HISTORY_CAP { + history.pop_front(); + } + history.push_back(line); +} + +/// Replay the tail of the in-memory chat history into the new scroll region. 
+/// Called from the `FullRepaint` arm after the screen has been cleared and +/// the scroll region applied for the new geometry. +/// +/// Each replayed line is emitted exactly the way fresh chat messages are +/// written by [`write_into_scroll_region`]: `MoveTo` the bottom row of the +/// region, write the bytes, then `\r\n`. The terminal handles wrap-on- +/// overflow and scroll-within-region naturally — the same machinery that +/// handles in-flight messages — so a long line that no longer fits in the +/// new `cols` simply wraps to multiple visible rows and the oldest replayed +/// content scrolls past the top of the region (which is fine: at most +/// `region_height` rows can ever be visible at once). +/// +/// We replay exactly `region_height` lines (or all of history, whichever +/// is smaller). That's the most that could ever be visible at one time; +/// any additional lines would just scroll off the top and add latency +/// without changing the final visible state. +fn replay_history( + out: &mut Stdout, + history: &VecDeque, + rows: u16, + input_height: u16, +) -> std::io::Result<()> { + let region_bottom_zero = rows.saturating_sub(input_height + 2); + let region_height = (region_bottom_zero + 1) as usize; + if history.is_empty() || region_height == 0 { + return Ok(()); + } + let replay_count = replay_count(history.len(), region_height); + let start = history.len() - replay_count; + queue!(out, cursor::Hide, ResetColor)?; + for line in history.iter().skip(start) { + queue!(out, cursor::MoveTo(0, region_bottom_zero), ResetColor)?; + out.write_all(line.as_bytes())?; + out.write_all(b"\r\n")?; + } + out.flush()?; + Ok(()) +} + +/// Pure helper: how many history entries [`replay_history`] should replay +/// given the buffer length and the new region height. At most +/// `region_height` lines can be visible in the region at once, so that's +/// the natural upper bound; extras would only scroll off the top. 
+fn replay_count(history_len: usize, region_height: usize) -> usize { + history_len.min(region_height) +} + +// ===== Keyboard task ===== + +/// Text shown when the user presses Ctrl-C on an empty input buffer, arming +/// the 2-second force-quit window. +const CTRL_C_ARM_OVERLAY: &str = + "*** press Ctrl-C again within 2 seconds to force quit — press Ctrl-D for graceful exit"; + +/// How long the force-quit arming stays hot after the first empty-input +/// Ctrl-C. A second Ctrl-C inside this window triggers `UiInput::Interrupt`; +/// after expiry a fresh double-press is required. +const CTRL_C_ARM_WINDOW: std::time::Duration = std::time::Duration::from_secs(2); + +/// Decision for a Ctrl-C press given the current editor / arming state. +/// Pure-function output so the logic is unit-testable. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum CtrlCAction { + /// Editor had unsent text — clear it and disarm any pending window. + ClearBuffer, + /// Editor was empty and the arming window is hot — force-quit. + ForceQuit, + /// Editor was empty and no hot arming — show the prompt overlay and + /// arm the window. + ArmAndPrompt, +} + +/// Classify the current Ctrl-C press given editor emptiness, whether the +/// force-quit window is armed, and the time elapsed since arming. +fn classify_ctrl_c( + editor_empty: bool, + armed_at: Option, + now: std::time::Instant, +) -> CtrlCAction { + if !editor_empty { + return CtrlCAction::ClearBuffer; + } + match armed_at { + Some(t) if now.duration_since(t) < CTRL_C_ARM_WINDOW => CtrlCAction::ForceQuit, + _ => CtrlCAction::ArmAndPrompt, + } +} + +async fn keyboard_loop( + input_tx: mpsc::UnboundedSender, + ops_tx: mpsc::UnboundedSender, + editor_view: Arc>, +) { + let mut editor = InputEditor::new(); + let mut events = EventStream::new(); + // Hot-timestamp for the double-Ctrl-C force-quit window. 
 `Some(t)` means
+    // the user pressed Ctrl-C at `t` on an empty buffer; another Ctrl-C
+    // within `CTRL_C_ARM_WINDOW` confirms the exit. Cleared on any other
+    // editing action (typing, paste, submit, Ctrl-D, etc.) so the prompt
+    // disappears as soon as the user shows they're still active.
+    let mut ctrl_c_armed_at: Option<std::time::Instant> = None;
+    while let Some(event) = events.next().await {
+        let event = match event {
+            Ok(e) => e,
+            Err(_) => continue,
+        };
+        match event {
+            Event::Key(k) => {
+                let outcome = editor.handle_key(k);
+                match outcome {
+                    EditOutcome::Submit(text) => {
+                        // Any active force-quit arming is cancelled — user
+                        // clearly didn't mean to quit.
+                        if ctrl_c_armed_at.take().is_some() {
+                            let _ = ops_tx.send(UiOp::ClearTransientOverlay);
+                        }
+                        // Snapshot the cleared editor and trigger a redraw
+                        // so the input area visibly empties before the
+                        // server round-trip.
+                        publish_view(&editor_view, &editor).await;
+                        let _ = ops_tx.send(UiOp::InputRedraw);
+                        // Slash command? Parse, otherwise it's a chat message.
+ let trimmed = text.trim().to_string(); + if trimmed.is_empty() { + continue; + } + let ev = match commands::parse(&trimmed) { + Some(cmd) => UiInput::Command(cmd), + None => UiInput::Message(trimmed), + }; + if input_tx.send(ev).is_err() { + return; + } + } + EditOutcome::Interrupt => { + let action = classify_ctrl_c( + editor.is_empty(), + ctrl_c_armed_at, + std::time::Instant::now(), + ); + match action { + CtrlCAction::ClearBuffer => { + editor.clear(); + if ctrl_c_armed_at.take().is_some() { + let _ = ops_tx.send(UiOp::ClearTransientOverlay); + } + publish_view(&editor_view, &editor).await; + let _ = ops_tx.send(UiOp::InputRedraw); + } + CtrlCAction::ForceQuit => { + let _ = input_tx.send(UiInput::Interrupt); + return; + } + CtrlCAction::ArmAndPrompt => { + ctrl_c_armed_at = Some(std::time::Instant::now()); + let _ = ops_tx.send(UiOp::ShowTransientOverlay { + text: CTRL_C_ARM_OVERLAY.to_string(), + duration: CTRL_C_ARM_WINDOW, + }); + } + } + } + EditOutcome::Eof => { + // User chose graceful exit while armed — drop the + // overlay so the prompt doesn't linger past EOF. + if ctrl_c_armed_at.take().is_some() { + let _ = ops_tx.send(UiOp::ClearTransientOverlay); + } + let _ = input_tx.send(UiInput::Eof); + // Don't return — user may continue if --stay-after-eof. + } + EditOutcome::Redraw => { + // Any active arming is invalidated — the user is + // editing again, so the prompt should disappear. 
+ if ctrl_c_armed_at.take().is_some() { + let _ = ops_tx.send(UiOp::ClearTransientOverlay); + } + publish_view(&editor_view, &editor).await; + let _ = ops_tx.send(UiOp::InputRedraw); + } + EditOutcome::ForceRepaint => { + if ctrl_c_armed_at.take().is_some() { + let _ = ops_tx.send(UiOp::ClearTransientOverlay); + } + publish_view(&editor_view, &editor).await; + let _ = ops_tx.send(UiOp::FullRepaint); + } + EditOutcome::Noop => {} + } + } + Event::Paste(s) => { + if ctrl_c_armed_at.take().is_some() { + let _ = ops_tx.send(UiOp::ClearTransientOverlay); + } + editor.insert_str(&s); + publish_view(&editor_view, &editor).await; + let _ = ops_tx.send(UiOp::InputRedraw); + } + Event::Resize(_, _) => { + let _ = ops_tx.send(UiOp::FullRepaint); + } + _ => {} + } + } +} + +async fn publish_view(view: &Arc>, editor: &InputEditor) { + let (row, col) = editor.cursor(); + let lines = editor.lines().to_vec(); + let mut w = view.write().await; + w.lines = lines; + w.row = row; + w.col = col; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn replay_count_capped_by_history_len() { + // Small history fits entirely. + assert_eq!(replay_count(3, 24), 3); + assert_eq!(replay_count(0, 24), 0); + } + + #[test] + fn replay_count_capped_by_region_height() { + // Large history is trimmed to region_height — extras would scroll + // off the top of the region anyway. + assert_eq!(replay_count(1000, 24), 24); + assert_eq!(replay_count(HISTORY_CAP, 200), 200); + } + + #[test] + fn replay_count_handles_zero_region() { + assert_eq!(replay_count(1000, 0), 0); + } + + #[test] + fn replay_count_handles_exact_fit() { + assert_eq!(replay_count(50, 50), 50); + } + + #[test] + fn push_history_evicts_oldest_when_full() { + let mut h: VecDeque = VecDeque::with_capacity(HISTORY_CAP); + // Fill to capacity. 
+ for i in 0..HISTORY_CAP { + push_history(&mut h, format!("line{i}")); + } + assert_eq!(h.len(), HISTORY_CAP); + assert_eq!(h.front().unwrap(), "line0"); + // One more push evicts the oldest. + push_history(&mut h, "new".to_string()); + assert_eq!(h.len(), HISTORY_CAP); + assert_eq!(h.front().unwrap(), "line1"); + assert_eq!(h.back().unwrap(), "new"); + } + + #[test] + fn push_history_grows_below_cap() { + let mut h: VecDeque = VecDeque::new(); + push_history(&mut h, "a".to_string()); + push_history(&mut h, "b".to_string()); + push_history(&mut h, "c".to_string()); + assert_eq!(h.len(), 3); + assert_eq!(h.iter().collect::>(), vec!["a", "b", "c"]); + } + + // ── classify_ctrl_c ──────────────────────────────────────────────── + + #[test] + fn classify_ctrl_c_clears_when_buffer_nonempty() { + let now = std::time::Instant::now(); + // Even with a hot arming, non-empty buffer means "clear". + assert_eq!( + classify_ctrl_c(false, Some(now), now), + CtrlCAction::ClearBuffer + ); + // Without arming too. + assert_eq!( + classify_ctrl_c(false, None, now), + CtrlCAction::ClearBuffer + ); + } + + #[test] + fn classify_ctrl_c_arms_when_empty_and_cold() { + let now = std::time::Instant::now(); + assert_eq!( + classify_ctrl_c(true, None, now), + CtrlCAction::ArmAndPrompt + ); + } + + #[test] + fn classify_ctrl_c_quits_when_empty_and_hot() { + let armed = std::time::Instant::now(); + let now = armed + std::time::Duration::from_millis(500); + assert_eq!( + classify_ctrl_c(true, Some(armed), now), + CtrlCAction::ForceQuit + ); + } + + #[test] + fn classify_ctrl_c_re_arms_after_window_expires() { + let armed = std::time::Instant::now(); + let now = armed + CTRL_C_ARM_WINDOW + std::time::Duration::from_millis(1); + assert_eq!( + classify_ctrl_c(true, Some(armed), now), + CtrlCAction::ArmAndPrompt + ); + } + + #[test] + fn classify_ctrl_c_quits_at_exactly_one_ms_before_window_end() { + // Boundary check: within the window means strictly less than. 
+ let armed = std::time::Instant::now(); + let now = armed + CTRL_C_ARM_WINDOW - std::time::Duration::from_millis(1); + assert_eq!( + classify_ctrl_c(true, Some(armed), now), + CtrlCAction::ForceQuit + ); + } + + // ── fit_overlay ──────────────────────────────────────────────────── + + #[test] + fn fit_overlay_zero_cols_returns_empty() { + assert_eq!(fit_overlay("hello", 0), ""); + } + + #[test] + fn fit_overlay_pads_short_text() { + // " hi " padded to 10 cells -> " hi ". + let out = fit_overlay("hi", 10); + assert_eq!(out.chars().count(), 10); + assert!(out.starts_with(" hi ")); + assert!(out.ends_with(" ") || out.ends_with(" ")); // padding trail + } + + #[test] + fn fit_overlay_truncates_long_text() { + let text = "this overlay is longer than the available room"; + let out = fit_overlay(text, 12); + assert_eq!(out.chars().count(), 12); + } + + #[test] + fn fit_overlay_exact_fit() { + let out = fit_overlay("hi", 4); // " hi " is exactly 4 + assert_eq!(out, " hi "); + } + + // ── overlay_text helper ──────────────────────────────────────────── + + #[test] + fn overlay_text_returns_none_when_expired() { + let past = std::time::Instant::now() + .checked_sub(std::time::Duration::from_secs(1)) + .unwrap_or_else(std::time::Instant::now); + let o = Some(("x".to_string(), past)); + assert!(overlay_text(&o).is_none()); + } + + #[test] + fn overlay_text_returns_some_when_active() { + let future = std::time::Instant::now() + std::time::Duration::from_secs(5); + let o = Some(("hello".to_string(), future)); + assert_eq!(overlay_text(&o), Some("hello")); + } + + #[test] + fn overlay_text_none_when_absent() { + let o: Option<(String, std::time::Instant)> = None; + assert!(overlay_text(&o).is_none()); + } +} diff --git a/peeroxide-cli/src/cmd/chat/tui/line.rs b/peeroxide-cli/src/cmd/chat/tui/line.rs new file mode 100644 index 0000000..2a6dcd0 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/line.rs @@ -0,0 +1,102 @@ +//! Line-oriented (non-TTY) chat UI. 
Preserves the historical +//! `chat join` stdout contract documented in `docs/src/chat/user-guide.md` — one message per +//! line in the format `[HH:MM:SS] [name]: content`, system notices on stderr. + +use std::collections::HashSet; +use std::sync::Arc; + +use futures::future::BoxFuture; +use tokio::io::{AsyncBufReadExt, BufReader}; +use tokio::sync::{Mutex, RwLock, mpsc}; + +use crate::cmd::chat::display::{DisplayMessage, render_message_line}; +use crate::cmd::chat::tui::{ChatUi, IgnoreSet, StatusState, UiInput, commands}; + +pub struct LineUi { + status: Arc, + ignore: IgnoreSet, + input_rx: Mutex>, + _stdin_task: tokio::task::JoinHandle<()>, +} + +impl LineUi { + pub fn new(opts: super::UiOptions) -> Self { + let status = StatusState::new(opts.channel_name); + let ignore: IgnoreSet = Arc::new(RwLock::new(HashSet::new())); + let (tx, rx) = mpsc::unbounded_channel(); + let stdin_task = tokio::spawn(stdin_task(tx)); + Self { + status, + ignore, + input_rx: Mutex::new(rx), + _stdin_task: stdin_task, + } + } +} + +impl ChatUi for LineUi { + fn render_message(&self, msg: &DisplayMessage) { + let rendered = render_message_line(msg); + for notice in &rendered.system_notices { + eprintln!("{notice}"); + } + println!("{}", rendered.message_line); + } + + fn render_system(&self, line: &str) { + eprintln!("{line}"); + } + + fn status(&self) -> Arc { + self.status.clone() + } + + fn ignore_set(&self) -> IgnoreSet { + self.ignore.clone() + } + + fn next_input(&mut self) -> BoxFuture<'_, Option> { + Box::pin(async move { + let mut rx = self.input_rx.lock().await; + rx.recv().await + }) + } + + fn shutdown(self: Box) -> BoxFuture<'static, ()> { + // Nothing to clean up — stdin task drops naturally with the struct. + Box::pin(async move {}) + } +} + +/// Read stdin line-by-line, classify each line, forward `UiInput` into the +/// channel. On EOF, emit `UiInput::Eof` and exit. 
+async fn stdin_task(tx: mpsc::UnboundedSender) { + let stdin = tokio::io::stdin(); + let mut lines = BufReader::new(stdin).lines(); + loop { + match lines.next_line().await { + Ok(Some(text)) => { + let trimmed = text.trim(); + if trimmed.is_empty() { + continue; + } + let event = match commands::parse(trimmed) { + Some(cmd) => UiInput::Command(cmd), + None => UiInput::Message(trimmed.to_string()), + }; + if tx.send(event).is_err() { + return; + } + } + Ok(None) => { + let _ = tx.send(UiInput::Eof); + return; + } + Err(e) => { + eprintln!("error reading stdin: {e}"); + let _ = tx.send(UiInput::Eof); + return; + } + } + } +} diff --git a/peeroxide-cli/src/cmd/chat/tui/mod.rs b/peeroxide-cli/src/cmd/chat/tui/mod.rs new file mode 100644 index 0000000..e6f2550 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/mod.rs @@ -0,0 +1,200 @@ +//! Interactive terminal UI for `peeroxide chat join`. +//! +//! Two implementations of [`ChatUi`] are provided: +//! +//! - [`line::LineUi`]: byte-compatible with the historical behaviour — +//! line-oriented stdin, `println!`/`eprintln!` to stdout/stderr. Used when +//! stdout is not a TTY, when `--line-mode` is passed, or when +//! `PEEROXIDE_LINE_MODE=1` is set in the environment. +//! - [`interactive::InteractiveUi`]: full TTY mode with a status bar pinned at +//! the bottom of the terminal, multi-line input area, slash commands, and +//! chat history flowing through a scroll region above. +//! +//! Pick one via [`make_ui`]. Callers (i.e. `join.rs`) interact only through +//! the [`ChatUi`] trait, so the two implementations are interchangeable. 
+ +pub mod commands; +pub mod input; +pub mod interactive; +pub mod line; +pub mod status; +pub mod terminal; + +use std::collections::HashSet; +use std::io::IsTerminal; +use std::sync::Arc; + +use tokio::sync::RwLock; + +use crate::cmd::chat::display::DisplayMessage; + +pub use commands::SlashCommand; +pub use status::{DhtActivityGuard, RecvFetchGuard, StatusState}; + +/// Cheap-to-clone handle used by spawned background tasks (publisher, reader, +/// nexus refresh, friend refresh, post.rs helpers) to surface a user-visible +/// system notice — e.g. `" nexus published (seq=…)"` or `"warning: feed +/// mutable_put failed: …"`. +/// +/// Notices flow into a `mpsc::UnboundedSender`; the main loop in +/// `join.rs` drains the corresponding receiver and forwards each line through +/// [`ChatUi::render_system`]. This keeps spawned tasks free of `ChatUi` +/// references and makes it impossible for a background task to accidentally +/// write directly into the terminal at the wrong cursor position (which would +/// land on top of the interactive UI's input area). +/// +/// In line mode the round-trip is byte-equivalent to the historical +/// `eprintln!` because `LineUi::render_system` is itself an `eprintln!`. +#[derive(Clone)] +pub struct NoticeSink { + tx: tokio::sync::mpsc::UnboundedSender, +} + +impl NoticeSink { + pub fn new() -> (Self, tokio::sync::mpsc::UnboundedReceiver) { + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); + (Self { tx }, rx) + } + + /// Send a notice. Silently drops if the receiver has been closed — there + /// is no value in panicking from a background task on a UI teardown race. + pub fn send(&self, line: impl Into) { + let _ = self.tx.send(line.into()); + } + + /// Equivalent to `send(format!(...))` but spelled to match the `eprintln!` + /// call sites it's replacing for grep-ability. + pub fn notify(&self, line: impl Into) { + self.send(line); + } +} + +mod notice_global { + //! 
 Process-wide notice sink for code paths that can't easily take a
+    //! `NoticeSink` parameter (probe traces deep inside helpers, etc.).
+    //!
+    //! Set once at session start by `join::run` (via [`install_global`]) and
+    //! never replaced. Concurrent calls from spawned tasks are safe — the
+    //! underlying `UnboundedSender` is `Clone + Send + Sync`.
+
+    use std::sync::OnceLock;
+
+    static GLOBAL: OnceLock<super::NoticeSink> = OnceLock::new();
+
+    pub fn install(sink: super::NoticeSink) {
+        // `set` returns Err if already initialized; we just leave the first
+        // one in place. That matches the "one session per process" model.
+        let _ = GLOBAL.set(sink);
+    }
+
+    pub fn try_get() -> Option<&'static super::NoticeSink> {
+        GLOBAL.get()
+    }
+}
+
+/// Register `sink` as the process-wide notice channel. Idempotent: the first
+/// caller wins; subsequent calls are no-ops (the session model is one chat
+/// loop per process). Used by deep helpers that emit probe / warning lines
+/// without taking a `NoticeSink` parameter.
+pub fn install_global_notice_sink(sink: NoticeSink) {
+    notice_global::install(sink);
+}
+
+/// Emit a single system-notice line. If a global sink has been registered
+/// (i.e. we're inside a `chat join` session), route through it so the
+/// interactive UI can paint the line into the scroll region. Otherwise
+/// fall back to `eprintln!` — that preserves behaviour for standalone
+/// subcommands and for tests that don't construct a `ChatUi`.
+pub fn emit_notice(line: impl Into<String>) {
+    let line = line.into();
+    match notice_global::try_get() {
+        Some(sink) => sink.send(line),
+        None => eprintln!("{line}"),
+    }
+}
+
+/// One unit of user input from the UI. `Message` and `Command` are produced by
+/// the input handler; `Eof` and `Interrupt` are signals from the terminal.
+#[derive(Debug)]
+pub enum UiInput {
+    /// User typed and submitted a chat message.
+    Message(String),
+    /// User typed a slash command (e.g. `/quit`, `/ignore alice`).
+    Command(SlashCommand),
+    /// stdin reached EOF (e.g. piped input completed).
+    Eof,
+    /// Ctrl-C or equivalent interrupt.
+    Interrupt,
+}
+
+/// Shared local-only state that survives across input lines: who the user is
+/// currently ignoring (consulted by the reader task before forwarding inbound
+/// messages to the display).
+pub type IgnoreSet = Arc<RwLock<HashSet<String>>>;
+
+/// Common surface that `join.rs` uses to interact with the user, regardless of
+/// whether we're in line mode or interactive TUI mode.
+pub trait ChatUi: Send {
+    /// Render an inbound (or self-echoed) chat message.
+    fn render_message(&self, msg: &DisplayMessage);
+
+    /// Render a system notice (`*** ...`, debug log, probe trace).
+    fn render_system(&self, line: &str);
+
+    /// Snapshot of observable status counters. Updated by publisher / reader /
+    /// dht-poll task; consumed by the status bar renderer.
+    fn status(&self) -> Arc<StatusState>;
+
+    /// Shared ignore set. The reader task should consult this before
+    /// forwarding a message to `render_message`.
+    fn ignore_set(&self) -> IgnoreSet;
+
+    /// Wait for the next input event from the user.
+    ///
+    /// Returns `None` once the input source is permanently closed (the UI is
+    /// shutting down). Callers should treat this as terminal.
+    fn next_input(&mut self) -> futures::future::BoxFuture<'_, Option<UiInput>>;
+
+    /// Tear down the UI cleanly. After this returns, the terminal must be in
+    /// a usable state (cursor visible, raw mode disabled, scroll region reset).
+    fn shutdown(self: Box<Self>) -> futures::future::BoxFuture<'static, ()>;
+}
+
+/// Options controlling which `ChatUi` is constructed.
+#[derive(Debug, Clone)]
+pub struct UiOptions {
+    /// Force line mode regardless of whether stdout is a TTY (`--line-mode`).
+    pub force_line_mode: bool,
+    /// Channel name to display on the status bar.
+    pub channel_name: String,
+    /// Profile name for `/friend` and `/unfriend` resolution.
+ pub profile_name: String, +} + +/// Build the appropriate `ChatUi` implementation based on the runtime +/// environment and command-line flags. +/// +/// Picks `InteractiveUi` only when **both** stdout and stdin are TTYs and the +/// user hasn't opted out via `--line-mode` / `PEEROXIDE_LINE_MODE`. Falls back +/// to `LineUi` on any error setting up the interactive renderer. +/// +/// Stdin must also be a TTY because interactive mode reads keystrokes via +/// `crossterm::event::EventStream`, which polls the controlling terminal — +/// when stdin is a pipe or redirected file the event reader returns errors +/// or stalls, so a pipeline like `cat msgs.txt | peeroxide chat join …` +/// would fail. Auto-detecting non-TTY stdin and falling back to line mode +/// lets such pipelines just work without needing `--line-mode`. +pub fn make_ui(opts: UiOptions) -> Box { + let stdout_is_tty = std::io::stdout().is_terminal(); + let stdin_is_tty = std::io::stdin().is_terminal(); + let want_interactive = !opts.force_line_mode && stdout_is_tty && stdin_is_tty; + if want_interactive { + match interactive::InteractiveUi::new(&opts) { + Ok(ui) => return Box::new(ui), + Err(e) => { + eprintln!("*** interactive UI unavailable ({e}); falling back to line mode"); + } + } + } + Box::new(line::LineUi::new(opts)) +} diff --git a/peeroxide-cli/src/cmd/chat/tui/status.rs b/peeroxide-cli/src/cmd/chat/tui/status.rs new file mode 100644 index 0000000..e8861f0 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/status.rs @@ -0,0 +1,1198 @@ +//! Shared status state observed by publisher / reader / DHT-poll task and +//! consumed by the status bar renderer. +//! +//! All counters are `AtomicUsize` with `Relaxed` ordering — these are +//! advisory display values, not synchronisation primitives. + +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; + +use arc_swap::ArcSwap; +use tokio::sync::Notify; + +/// Counters and labels shown on the status bar. 
+/// +/// Created once at session start and shared via `Arc` across: +/// - `join.rs` (channel name, dht peers polling task) +/// - `publisher.rs` (`send_pending`) +/// - `reader.rs` (`recv_pending`, `feed_count`) +/// - `tui::interactive` (renderer, reads all fields) +/// +/// Mutators call `dirty.notify_one()` after a write so the renderer can repaint +/// promptly. Renderers can also poll on an idle timer. +pub struct StatusState { + pub send_pending: AtomicUsize, + /// Number of DHT `immutable_get` requests currently outstanding for + /// **message or summary content**. This is the "Receiving (N)" count + /// the user sees — it represents content the reader is actively pulling + /// because it knows about new messages (FeedRecord listed unseen hashes, + /// summary-history walk, or predecessor refetch). Managed via + /// [`RecvFetchGuard`] which inc/decs atomically across an `await`. + /// + /// Background scans (`lookup` for new peers, `mutable_get` of FeedRecords + /// to *check* for new messages) are **not** counted here — those are + /// signalled separately by `dht_active`. + pub recv_pending: AtomicUsize, + /// Number of any-kind DHT requests currently outstanding (lookup, + /// mutable_get, immutable_get). Surfaces as a single-character activity + /// indicator at the far left of the bar so the user can tell when + /// background DHT chatter is happening even though no message is + /// incoming. Managed via [`DhtActivityGuard`]; `RecvFetchGuard` + /// additionally bumps `recv_pending`. + pub dht_active: AtomicUsize, + pub feed_count: AtomicUsize, + pub dht_peers: AtomicUsize, + /// True when inbox monitoring is active for this session (configured + /// via `chat join` flags). When false the inbox segment is omitted from + /// the bar layout entirely. When true the segment renders as 'inbox' / + /// 'i' (plain) when `inbox_unread == 0`, or 'INBOX' / 'I' (yellow-bg, + /// black-fg) when there's at least one unread invite. 
+ pub inbox_enabled: AtomicBool, + /// Count of invites surfaced by the inbox monitor that haven't yet been + /// displayed via `/inbox`. The bar uses this only as a boolean (lit / + /// not lit); the count itself isn't shown. + pub inbox_unread: AtomicUsize, + pub channel_name: ArcSwap, + pub dirty: Notify, +} + +impl StatusState { + pub fn new(channel_name: impl Into) -> Arc { + Arc::new(Self { + send_pending: AtomicUsize::new(0), + recv_pending: AtomicUsize::new(0), + dht_active: AtomicUsize::new(0), + feed_count: AtomicUsize::new(0), + dht_peers: AtomicUsize::new(0), + inbox_enabled: AtomicBool::new(false), + inbox_unread: AtomicUsize::new(0), + channel_name: ArcSwap::from_pointee(channel_name.into()), + dirty: Notify::new(), + }) + } + + /// Enable or disable the inbox segment on the status bar. + pub fn set_inbox_enabled(&self, enabled: bool) { + let prev = self.inbox_enabled.swap(enabled, Ordering::Relaxed); + if prev != enabled { + self.dirty.notify_one(); + } + } + + /// Set the count of unread invites; the bar lights up (yellow bg, + /// uppercase) when this is > 0. + pub fn set_inbox_unread(&self, count: usize) { + let prev = self.inbox_unread.swap(count, Ordering::Relaxed); + if prev != count { + self.dirty.notify_one(); + } + } + + /// Increment `send_pending` and notify the renderer. + pub fn inc_send_pending(&self) { + self.send_pending.fetch_add(1, Ordering::Relaxed); + self.dirty.notify_one(); + } + + /// Decrement `send_pending` (saturating) and notify. + pub fn dec_send_pending(&self) { + // saturating: don't wrap if mismatched inc/dec ever sneak in. + let _ = self.send_pending.fetch_update( + Ordering::Relaxed, + Ordering::Relaxed, + |v| Some(v.saturating_sub(1)), + ); + self.dirty.notify_one(); + } + + /// Set `recv_pending` to an absolute count and notify. 
+ pub fn set_recv_pending(&self, n: usize) { + let prev = self.recv_pending.swap(n, Ordering::Relaxed); + if prev != n { + self.dirty.notify_one(); + } + } + + /// Increment the in-flight `immutable_get` counter and notify the + /// renderer. Paired with [`StatusState::dec_recv_in_flight`]; prefer + /// using [`RecvFetchGuard`] which couples the two and survives early + /// returns / panics across an `await`. + pub fn inc_recv_in_flight(&self) { + self.recv_pending.fetch_add(1, Ordering::Relaxed); + self.dirty.notify_one(); + } + + /// Decrement the in-flight counter (saturating at zero). + pub fn dec_recv_in_flight(&self) { + let _ = self + .recv_pending + .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| { + Some(v.saturating_sub(1)) + }); + self.dirty.notify_one(); + } + + /// Increment the any-DHT-op counter (lookup / mutable_get / immutable_get). + /// Prefer [`DhtActivityGuard`]. + pub fn inc_dht_active(&self) { + self.dht_active.fetch_add(1, Ordering::Relaxed); + self.dirty.notify_one(); + } + + /// Decrement the any-DHT-op counter (saturating at zero). + pub fn dec_dht_active(&self) { + let _ = self + .dht_active + .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| { + Some(v.saturating_sub(1)) + }); + self.dirty.notify_one(); + } + + /// Set `feed_count` and notify. + pub fn set_feed_count(&self, n: usize) { + let prev = self.feed_count.swap(n, Ordering::Relaxed); + if prev != n { + self.dirty.notify_one(); + } + } + + /// Set `dht_peers` and notify. + pub fn set_dht_peers(&self, n: usize) { + let prev = self.dht_peers.swap(n, Ordering::Relaxed); + if prev != n { + self.dirty.notify_one(); + } + } + + /// Snapshot a consistent view of the counters for one render pass. 
+ pub fn snapshot(&self) -> StatusSnapshot { + StatusSnapshot { + send_pending: self.send_pending.load(Ordering::Relaxed), + recv_pending: self.recv_pending.load(Ordering::Relaxed), + dht_active: self.dht_active.load(Ordering::Relaxed) > 0, + feed_count: self.feed_count.load(Ordering::Relaxed), + dht_peers: self.dht_peers.load(Ordering::Relaxed), + inbox_enabled: self.inbox_enabled.load(Ordering::Relaxed), + inbox_unread: self.inbox_unread.load(Ordering::Relaxed), + channel_name: (**self.channel_name.load()).clone(), + } + } +} + +/// RAII guard that increments `recv_pending` on construction and decrements +/// on drop. Wrap each `immutable_get` call (for message / summary content) +/// in one of these so the in-flight count stays consistent across early +/// returns, errors, and panics that unwind through the await. +pub struct RecvFetchGuard { + status: Arc, +} + +impl RecvFetchGuard { + pub fn new(status: Arc) -> Self { + status.inc_recv_in_flight(); + Self { status } + } +} + +impl Drop for RecvFetchGuard { + fn drop(&mut self) { + self.status.dec_recv_in_flight(); + } +} + +/// RAII guard that increments `dht_active` on construction and decrements on +/// drop. Wrap **every** DHT read call (lookup / mutable_get / immutable_get) +/// in one of these so the left-edge activity dot lights up while any DHT op +/// is in flight. For content fetches that should also surface as +/// `Receiving (N)`, additionally use [`RecvFetchGuard`]. +pub struct DhtActivityGuard { + status: Arc, +} + +impl DhtActivityGuard { + pub fn new(status: Arc) -> Self { + status.inc_dht_active(); + Self { status } + } +} + +impl Drop for DhtActivityGuard { + fn drop(&mut self) { + self.status.dec_dht_active(); + } +} + +/// A point-in-time copy of the status counters and channel name. Cheap to +/// pass to the pure-function renderer in this module. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct StatusSnapshot { + pub send_pending: usize, + pub recv_pending: usize, + /// True when at least one DHT op (lookup / mutable_get / immutable_get) + /// is currently in flight. Drives the left-edge activity dot. + pub dht_active: bool, + pub feed_count: usize, + pub dht_peers: usize, + /// True when the inbox monitor is running for this session (omitted + /// from the bar entirely when false). + pub inbox_enabled: bool, + /// Number of unread inbox invites. `> 0` paints the inbox segment with + /// the highlighted (yellow-bg / uppercase) form. + pub inbox_unread: usize, + pub channel_name: String, +} + +/// Truncation level applied to the status bar based on terminal width. +/// +/// Levels are ordered from most-detailed (`Full`) to least (`ChannelOnly`). +/// The renderer chooses the most-detailed level whose natural width fits. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TruncLevel { + /// `Sending... (3) Receiving... (12)` ··· `Feeds: 7 DHT: 42 #room-name` + Full, + /// `Sending Receiving` ··· `Feeds: 7 DHT: 42 #room-name` + DropWords, + /// `S:3 R:12` ··· `F:7 D:42 #room-name` + Short, + /// `S:3 R:12` ··· `D:42 #room-name` + ShortDropF, + /// `S:3 R:12` ··· `#room-name` + ShortDropFD, + /// `Ready` ··· `#room-name` (or just `#room-name` if no left activity) + ChannelAndReady, + /// `#room-name` (possibly truncated with `…`) + ChannelOnly, +} + +/// Identifier for a status-bar segment, used to key sticky slot widths +/// across renders. Two segments are "the same slot" iff their `LeftSeg` / +/// `RightSeg` value is equal — so when a counter goes to zero and the +/// `Sending` segment disappears, its slot is released and the `Ready` +/// segment that takes its place starts fresh. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum LeftSeg { + Sending, + Receiving, + Ready, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum RightSeg { + Feeds, + Dht, + Channel, +} + +/// Sticky slot widths for the left and right segment groups. Once a slot has +/// grown to fit a value, it stays at that width until the terminal is resized +/// (which calls [`SlotWidths::reset`]). +/// +/// Left-side slots are also positionally sticky: once `Sending` or `Receiving` +/// has appeared at least once, its slot remains reserved in the bar (rendered +/// as padded blanks when the underlying counter is zero) so subsequent +/// segments don't visually shift left when an upstream segment goes idle. +/// Slot widths grow monotonically and are only released by [`Self::reset`]. +#[derive(Debug, Default)] +pub struct SlotWidths { + pub left: std::collections::HashMap, + pub right: std::collections::HashMap, +} + +impl SlotWidths { + pub fn reset(&mut self) { + self.left.clear(); + self.right.clear(); + } +} + +/// Pure function: choose a truncation level given a snapshot and terminal width. +/// +/// Returns the most detailed level whose natural rendered width fits within +/// `cols`, accounting for one space of padding on each end of the bar, the +/// activity-dot slot at the far left (2 cols: dot + separator), and a +/// minimum 2-column gap between the left and right groups. +pub fn pick_level(snap: &StatusSnapshot, cols: usize) -> TruncLevel { + // Padding (1 left + 1 right) + dot slot (2) + minimum gap (2) = 6 reserved. 
+ let avail = cols.saturating_sub(6); + + for level in [ + TruncLevel::Full, + TruncLevel::DropWords, + TruncLevel::Short, + TruncLevel::ShortDropF, + TruncLevel::ShortDropFD, + TruncLevel::ChannelAndReady, + TruncLevel::ChannelOnly, + ] { + let (l, r) = natural_widths(snap, level); + if l + r <= avail { + return level; + } + } + TruncLevel::ChannelOnly +} + +/// Natural rendered width of the left and right groups at a given truncation +/// level. Does not include sticky-slot padding (that's added at layout time) +/// nor end-padding/gap (those are added by `pick_level` / `render_bar`). +fn natural_widths(snap: &StatusSnapshot, level: TruncLevel) -> (usize, usize) { + let activity_present = snap.send_pending > 0 || snap.recv_pending > 0; + let l = match level { + TruncLevel::Full => { + let mut parts: Vec = Vec::new(); + if snap.send_pending > 0 { + parts.push(format!("Sending... ({})", snap.send_pending)); + } + if snap.recv_pending > 0 { + parts.push(format!("Receiving... ({})", snap.recv_pending)); + } + if parts.is_empty() { + "Ready".len() + } else { + parts.join(" ").len() + } + } + TruncLevel::DropWords => { + let mut parts: Vec<&str> = Vec::new(); + if snap.send_pending > 0 { + parts.push("Sending"); + } + if snap.recv_pending > 0 { + parts.push("Receiving"); + } + if parts.is_empty() { + "Ready".len() + } else { + parts.join(" ").len() + } + } + TruncLevel::Short + | TruncLevel::ShortDropF + | TruncLevel::ShortDropFD => { + let mut parts: Vec = Vec::new(); + if snap.send_pending > 0 { + parts.push(format!("S:{}", snap.send_pending)); + } + if snap.recv_pending > 0 { + parts.push(format!("R:{}", snap.recv_pending)); + } + parts.join(" ").len() + } + TruncLevel::ChannelAndReady => { + if activity_present { 0 } else { "Ready".len() } + } + TruncLevel::ChannelOnly => 0, + }; + let r = match level { + TruncLevel::Full | TruncLevel::DropWords => { + // Feeds: N DHT: N #channel + let f = format!("Feeds: {}", snap.feed_count); + let d = format!("DHT: {}", 
snap.dht_peers); + f.len() + 2 + d.len() + 2 + snap.channel_name.len() + } + TruncLevel::Short => { + let f = format!("F:{}", snap.feed_count); + let d = format!("D:{}", snap.dht_peers); + f.len() + 1 + d.len() + 1 + snap.channel_name.len() + } + TruncLevel::ShortDropF => { + let d = format!("D:{}", snap.dht_peers); + d.len() + 1 + snap.channel_name.len() + } + TruncLevel::ShortDropFD | TruncLevel::ChannelAndReady => snap.channel_name.len(), + TruncLevel::ChannelOnly => snap.channel_name.len(), + }; + (l, r) +} + +/// Result of rendering the status bar: the plain-text body (exactly `cols` +/// wide, padded with spaces) plus an optional character range within `body` +/// to be painted with the "attention" styling (yellow background, black +/// foreground) by the caller. +/// +/// Today only the INBOX segment uses `inbox_highlight`; the rest of the +/// body should be painted with the normal grey-background status-bar +/// styling. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BarRender { + pub body: String, + pub inbox_highlight: Option>, +} + +/// Convenience: deref to `str` so callers (and tests) can use the usual +/// `&str` methods (`contains`, `find`, `chars`, `len`, …) directly on a +/// `BarRender` value without unwrapping `.body`. The caller still needs to +/// reach into `inbox_highlight` explicitly when painting styles. +impl std::ops::Deref for BarRender { + type Target = str; + fn deref(&self) -> &str { + &self.body + } +} + +/// Render the plain-text content of the status bar (no terminal escapes) at +/// the chosen level, applying sticky slot widths. Returns a `BarRender` +/// whose `body` is exactly `cols` wide; the caller wraps the body in grey +/// styling and overlays yellow on the `inbox_highlight` range (when Some). +pub fn render_bar( + snap: &StatusSnapshot, + level: TruncLevel, + cols: usize, + slots: &mut SlotWidths, +) -> BarRender { + if cols < 4 { + // Pathological — terminal is essentially unusable for a bar. 
Return + // exactly `cols` spaces; caller still gets a coloured row. + return BarRender { + body: " ".repeat(cols), + inbox_highlight: None, + }; + } + + // Activity dot at the far left. Always 1 visible cell: '●' when any DHT + // op is in flight, ' ' otherwise. Followed by a 1-cell separator so left + // segments don't visually touch the dot. The slot is always reserved + // regardless of activity state — keeps left segment positions stable. + // Only included if cols ≥ 6 (otherwise the bar is too narrow and we drop + // the dot to preserve room for the channel name). + let show_dot_slot = cols >= 6; + let dot_slot_w: usize = if show_dot_slot { 2 } else { 0 }; + let dot_char = if snap.dht_active { '●' } else { ' ' }; + + // Build segment lists for both groups, tagged by segment kind so we can + // look up sticky slot widths. + let (left_segs, right_segs) = build_segments(snap, level); + + // Drop right-side slot entries for segments that aren't present this + // frame. + let right_kinds: std::collections::HashSet = + right_segs.iter().map(|(k, _)| *k).collect(); + slots.right.retain(|k, _| right_kinds.contains(k)); + + // Left side: positionally sticky until `slots.reset()` (called on + // resize). Grow sticky widths from the active set; never drop a Sending + // / Receiving slot once reserved. `Ready` is suppressed once a real + // activity slot exists. + for (k, s) in &left_segs { + let w = s.chars().count(); + let entry = slots.left.entry(*k).or_insert(0); + if w > *entry { + *entry = w; + } + } + let has_real_sticky = slots.left.contains_key(&LeftSeg::Sending) + || slots.left.contains_key(&LeftSeg::Receiving); + if has_real_sticky { + slots.left.remove(&LeftSeg::Ready); + } + + // Grow right-side slots to fit current values (monotonic). 
+ for (k, s) in &right_segs { + let w = s.chars().count(); + let entry = slots.right.entry(*k).or_insert(0); + if w > *entry { + *entry = w; + } + } + + let active_left: std::collections::HashMap = left_segs + .iter() + .map(|(k, s)| (*k, s.as_str())) + .collect(); + let left_rendered: Vec = if has_real_sticky { + [LeftSeg::Sending, LeftSeg::Receiving] + .iter() + .filter_map(|k| { + let w = *slots.left.get(k)?; + let s = active_left.get(k).copied().unwrap_or(""); + Some(pad_right(s, w)) + }) + .collect() + } else { + left_segs + .iter() + .map(|(k, s)| { + let w = slots + .left + .get(k) + .copied() + .unwrap_or_else(|| s.chars().count()); + pad_right(s, w) + }) + .collect() + }; + let right_rendered: Vec = right_segs + .iter() + .map(|(k, s)| { + let w = slots.right.get(k).copied().unwrap_or_else(|| s.chars().count()); + pad_left(s, w) + }) + .collect(); + let left_join = left_rendered.join(" "); + let right_join = right_rendered.join(" "); + + let inner = cols.saturating_sub(2 + dot_slot_w); + + // ChannelOnly + too-long channel name: ellipsis-truncate (preserved + // behaviour). No inbox segment at this level. + if matches!(level, TruncLevel::ChannelOnly) && right_join.chars().count() > inner { + let mut name = snap.channel_name.clone(); + if inner == 0 { + return BarRender { + body: " ".repeat(cols), + inbox_highlight: None, + }; + } + let take = inner.saturating_sub(1); + name = name.chars().take(take).collect::(); + name.push('…'); + let body = format!( + " {}{:>width$} ", + if show_dot_slot { + format!("{dot_char} ") + } else { + String::new() + }, + name, + width = inner + ); + return BarRender { + body, + inbox_highlight: None, + }; + } + + // ── Place all segments into a fixed-width char buffer ─────────────── + // + // Layout columns (0-based): + // col 0 — lead space + // col 1 — activity dot (when `show_dot_slot`) + // col 2 — dot/left separator + // col 1+dot_slot_w .. col 1+dot_slot_w+left_len — left segments + // col cols-1-right_len .. 
col cols-1 — right segments + // col cols-1 — trail space + // center cols/2 — anchor for the inbox segment (if placed) + // + // Inbox candidates (longest first): the level dictates the maximum + // form; we downgrade to single-char if the long form would collide + // with left/right (centre placement leaves at least 1 space margin on + // both sides) and drop entirely if even the single-char form can't + // fit. + + let mut buf: Vec = vec![' '; cols]; + + if show_dot_slot { + buf[1] = dot_char; + } + + let left_start = 1 + dot_slot_w; + let left_len = left_join.chars().count(); + for (i, c) in left_join.chars().enumerate() { + let col = left_start + i; + if col >= cols - 1 { + break; + } + buf[col] = c; + } + + let right_len = right_join.chars().count(); + let right_end = cols.saturating_sub(1); // exclusive + let right_start = right_end.saturating_sub(right_len); + for (i, c) in right_join.chars().enumerate() { + let col = right_start + i; + if col >= right_end { + break; + } + buf[col] = c; + } + + let inbox_highlight = place_inbox_segment( + &mut buf, + cols, + left_start + left_len, + right_start, + inbox_candidates(snap, level), + snap.inbox_unread > 0, + ); + + BarRender { + body: buf.into_iter().collect(), + inbox_highlight, + } +} + +/// Candidate strings for the INBOX segment, longest-to-shortest. Empty +/// when the level forbids it or inbox monitoring is disabled. 
+fn inbox_candidates(snap: &StatusSnapshot, level: TruncLevel) -> Vec<&'static str> { + if !snap.inbox_enabled { + return Vec::new(); + } + let highlighted = snap.inbox_unread > 0; + match level { + TruncLevel::Full | TruncLevel::DropWords => { + if highlighted { + vec!["INBOX", "I"] + } else { + vec!["inbox", "i"] + } + } + TruncLevel::Short | TruncLevel::ShortDropF | TruncLevel::ShortDropFD => { + if highlighted { + vec!["I"] + } else { + vec!["i"] + } + } + TruncLevel::ChannelAndReady | TruncLevel::ChannelOnly => Vec::new(), + } +} + +/// Attempt to place an inbox candidate at the centre of the bar without +/// colliding with the left or right segment groups. Tries each candidate +/// in order (longest to shortest); the first one that fits is written +/// into `buf` and its `Range` is returned. If none fit, returns `None`. +/// +/// The centre is anchored at `cols / 2`: an N-char candidate starts at +/// `cols/2 - N/2` and ends at `cols/2 - N/2 + N`. A minimum 1-cell +/// space gap is enforced on both sides between the inbox segment and the +/// nearest left / right segment characters. +/// +/// `left_end_exclusive` is the column index one past the last left-segment +/// char (i.e. the first column where placement could legally start, before +/// adding the gap). +/// `right_start` is the column index where the right-segment characters +/// begin (i.e. the first column where placement must NOT extend into, +/// before adding the gap). 
+fn place_inbox_segment( + buf: &mut [char], + cols: usize, + left_end_exclusive: usize, + right_start: usize, + candidates: Vec<&'static str>, + highlight: bool, +) -> Option> { + if candidates.is_empty() { + return None; + } + let bar_center = cols / 2; + for cand in &candidates { + let text_len = cand.chars().count(); + if text_len == 0 { + continue; + } + let start = bar_center.saturating_sub(text_len / 2); + let end = start.saturating_add(text_len); + // Stay within [1, cols-1) (col 0 and col cols-1 are lead/trail + // spaces). + if start < 1 || end > cols.saturating_sub(1) { + continue; + } + // Min 1-cell gap on each side. + if start <= left_end_exclusive { + continue; + } + if end + 1 > right_start { + continue; + } + for (i, ch) in cand.chars().enumerate() { + buf[start + i] = ch; + } + // Return the highlight range only when the bar should paint the + // attention styling. When inbox is enabled but empty, the + // placeholder text ('inbox' / 'i') is written into `buf` but no + // highlight range is returned, so the caller paints normal grey. + return if highlight { Some(start..end) } else { None }; + } + None +} + +type LeftSegments = Vec<(LeftSeg, String)>; +type RightSegments = Vec<(RightSeg, String)>; + +/// Build the ordered, kind-tagged list of segments for each group at the +/// chosen level. Segments are excluded when their underlying counter is zero +/// or omitted at that level. +fn build_segments(snap: &StatusSnapshot, level: TruncLevel) -> (LeftSegments, RightSegments) { + let activity = snap.send_pending > 0 || snap.recv_pending > 0; + let left: Vec<(LeftSeg, String)> = match level { + TruncLevel::Full => { + let mut v = Vec::new(); + if snap.send_pending > 0 { + v.push((LeftSeg::Sending, format!("Sending... ({})", snap.send_pending))); + } + if snap.recv_pending > 0 { + v.push(( + LeftSeg::Receiving, + format!("Receiving... 
({})", snap.recv_pending), + )); + } + if v.is_empty() { + vec![(LeftSeg::Ready, "Ready".to_string())] + } else { + v + } + } + TruncLevel::DropWords => { + let mut v = Vec::new(); + if snap.send_pending > 0 { + v.push((LeftSeg::Sending, "Sending".to_string())); + } + if snap.recv_pending > 0 { + v.push((LeftSeg::Receiving, "Receiving".to_string())); + } + if v.is_empty() { + vec![(LeftSeg::Ready, "Ready".to_string())] + } else { + v + } + } + TruncLevel::Short | TruncLevel::ShortDropF | TruncLevel::ShortDropFD => { + let mut v = Vec::new(); + if snap.send_pending > 0 { + v.push((LeftSeg::Sending, format!("S:{}", snap.send_pending))); + } + if snap.recv_pending > 0 { + v.push((LeftSeg::Receiving, format!("R:{}", snap.recv_pending))); + } + v + } + TruncLevel::ChannelAndReady => { + if activity { + Vec::new() + } else { + vec![(LeftSeg::Ready, "Ready".to_string())] + } + } + TruncLevel::ChannelOnly => Vec::new(), + }; + let right: Vec<(RightSeg, String)> = match level { + TruncLevel::Full | TruncLevel::DropWords => vec![ + (RightSeg::Feeds, format!("Feeds: {}", snap.feed_count)), + (RightSeg::Dht, format!("DHT: {}", snap.dht_peers)), + (RightSeg::Channel, snap.channel_name.clone()), + ], + TruncLevel::Short => vec![ + (RightSeg::Feeds, format!("F:{}", snap.feed_count)), + (RightSeg::Dht, format!("D:{}", snap.dht_peers)), + (RightSeg::Channel, snap.channel_name.clone()), + ], + TruncLevel::ShortDropF => vec![ + (RightSeg::Dht, format!("D:{}", snap.dht_peers)), + (RightSeg::Channel, snap.channel_name.clone()), + ], + TruncLevel::ShortDropFD | TruncLevel::ChannelAndReady | TruncLevel::ChannelOnly => { + vec![(RightSeg::Channel, snap.channel_name.clone())] + } + }; + (left, right) +} + +fn pad_right(s: &str, width: usize) -> String { + let len = s.chars().count(); + if len >= width { + s.to_string() + } else { + let mut out = String::from(s); + for _ in 0..(width - len) { + out.push(' '); + } + out + } +} + +fn pad_left(s: &str, width: usize) -> String { + let len = 
s.chars().count(); + if len >= width { + s.to_string() + } else { + let mut out = String::new(); + for _ in 0..(width - len) { + out.push(' '); + } + out.push_str(s); + out + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn snap(s: usize, r: usize, f: usize, d: usize, name: &str) -> StatusSnapshot { + StatusSnapshot { + send_pending: s, + recv_pending: r, + dht_active: false, + feed_count: f, + dht_peers: d, + inbox_enabled: false, + inbox_unread: 0, + channel_name: name.to_string(), + } + } + + fn snap_active(s: usize, r: usize, f: usize, d: usize, name: &str) -> StatusSnapshot { + StatusSnapshot { + send_pending: s, + recv_pending: r, + dht_active: true, + feed_count: f, + dht_peers: d, + inbox_enabled: false, + inbox_unread: 0, + channel_name: name.to_string(), + } + } + + fn snap_inbox(s: usize, r: usize, f: usize, d: usize, name: &str, inbox_unread: usize) -> StatusSnapshot { + StatusSnapshot { + send_pending: s, + recv_pending: r, + dht_active: false, + feed_count: f, + dht_peers: d, + inbox_enabled: true, + inbox_unread, + channel_name: name.to_string(), + } + } + + #[test] + fn picks_full_when_room() { + let s = snap(3, 12, 7, 42, "#room-name"); + assert_eq!(pick_level(&s, 120), TruncLevel::Full); + } + + #[test] + fn falls_back_progressively() { + let s = snap(3, 12, 7, 42, "#room-name"); + // 120 → Full; shrink to find each level. + let levels: Vec = (10..=120) + .map(|w| pick_level(&s, w)) + .collect(); + // Should be monotone-non-increasing in "detail" (i.e. as we go from + // narrow to wide, level moves Full → ChannelOnly direction). + // Spot-check: at very narrow widths we end at ChannelOnly. + assert_eq!(pick_level(&s, 10), TruncLevel::ChannelOnly); + // Sanity: somewhere in between we hit Short. 
+ assert!(levels.iter().any(|l| matches!(l, TruncLevel::Short))); + } + + #[test] + fn idle_shows_ready() { + let s = snap(0, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert_eq!(bar.chars().count(), 80); + assert!(bar.contains("Ready"), "bar = {bar:?}"); + assert!(bar.contains("Feeds: 7")); + assert!(bar.contains("DHT: 42")); + assert!(bar.contains("#room")); + } + + #[test] + fn activity_dot_shows_when_dht_active() { + let s = snap_active(0, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + // Bar layout: " ● {body} " + assert!(bar.starts_with(" ● "), "bar = {bar:?}"); + } + + #[test] + fn activity_dot_hidden_when_idle() { + let s = snap(0, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + // Idle: same slot width but ' ' instead of '●'. Bar starts with " " + // (1 lead pad + 1 dot slot + 1 separator). + assert!(!bar.contains('●'), "bar = {bar:?}"); + assert!(bar.starts_with(" ")); + } + + #[test] + fn activity_dot_dropped_in_extreme_narrow() { + // cols < 6: dot slot is dropped entirely to give channel name room. + let s = snap_active(0, 0, 0, 0, "#r"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::ChannelOnly, 5, &mut slots); + assert!(!bar.contains('●'), "bar = {bar:?}"); + assert_eq!(bar.chars().count(), 5); + } + + #[test] + fn recv_in_flight_shows_count() { + // recv_pending now represents in-flight DHT immutable_gets. A value + // of 3 should render as "Receiving... (3)". + let s = snap(0, 3, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert!(bar.contains("Receiving... 
(3)"), "bar = {bar:?}"); + assert!(!bar.contains("Ready")); + } + + #[test] + fn active_shows_counts() { + let s = snap(3, 12, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert!(bar.contains("Sending... (3)")); + assert!(bar.contains("Receiving... (12)")); + assert!(!bar.contains("Ready")); + } + + #[test] + fn short_level_uses_abbreviations() { + let s = snap(3, 12, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Short, 40, &mut slots); + assert!(bar.contains("S:3")); + assert!(bar.contains("R:12")); + assert!(bar.contains("F:7")); + assert!(bar.contains("D:42")); + assert!(bar.contains("#room")); + } + + #[test] + fn channel_only_truncates_with_ellipsis() { + let s = snap(0, 0, 0, 0, "#very-long-channel-name"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::ChannelOnly, 12, &mut slots); + assert_eq!(bar.chars().count(), 12); + assert!(bar.contains('…'), "bar = {bar:?}"); + } + + #[test] + fn sticky_slots_grow_monotonically() { + let s1 = snap(3, 0, 7, 42, "#room"); + let s2 = snap(123456, 0, 7, 42, "#room"); + let s3 = snap(3, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let _ = render_bar(&s1, TruncLevel::Full, 80, &mut slots); + let w1 = *slots.left.get(&LeftSeg::Sending).unwrap(); + let _ = render_bar(&s2, TruncLevel::Full, 80, &mut slots); + let w2 = *slots.left.get(&LeftSeg::Sending).unwrap(); + assert!(w2 > w1, "slot should grow with bigger value"); + let _ = render_bar(&s3, TruncLevel::Full, 80, &mut slots); + let w3 = *slots.left.get(&LeftSeg::Sending).unwrap(); + assert_eq!(w3, w2, "slot should NOT shrink when value shrinks"); + } + + #[test] + fn slot_kept_sticky_when_segment_disappears() { + // Sticky semantics: once Sending has appeared, its slot stays + // reserved (rendered as blanks when idle) until `reset()` is called + // — which happens on terminal resize. 
Ready is suppressed once any + // real activity slot is reserved. + let active = snap(3, 0, 7, 42, "#room"); + let idle = snap(0, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let _ = render_bar(&active, TruncLevel::Full, 80, &mut slots); + assert!(slots.left.contains_key(&LeftSeg::Sending)); + assert!(!slots.left.contains_key(&LeftSeg::Ready)); + let _ = render_bar(&idle, TruncLevel::Full, 80, &mut slots); + assert!( + slots.left.contains_key(&LeftSeg::Sending), + "Sending slot should remain sticky after going idle" + ); + assert!( + !slots.left.contains_key(&LeftSeg::Ready), + "Ready should not appear once a real activity slot is reserved" + ); + + // After reset (simulates a terminal resize) the sticky state clears + // and the next idle render picks Ready up again. + slots.reset(); + let _ = render_bar(&idle, TruncLevel::Full, 80, &mut slots); + assert!(!slots.left.contains_key(&LeftSeg::Sending)); + assert_eq!( + slots.left.get(&LeftSeg::Ready).copied(), + Some("Ready".len()) + ); + } + + #[test] + fn receiving_position_sticky_when_sending_goes_idle() { + // The collapse-left bug: once both Sending and Receiving have been + // seen, Receiving must keep its slot position even after Sending's + // counter drops to zero. + let both = snap(3, 5, 7, 42, "#room"); + let only_recv = snap(0, 5, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar_both = render_bar(&both, TruncLevel::Full, 80, &mut slots); + // Find columns of "Receiving" while both are active. + let col_both = bar_both + .find("Receiving") + .expect("Receiving present when active"); + + let bar_recv = render_bar(&only_recv, TruncLevel::Full, 80, &mut slots); + let col_recv = bar_recv + .find("Receiving") + .expect("Receiving still present after Sending goes idle"); + assert_eq!( + col_both, col_recv, + "Receiving column must be sticky when Sending goes idle\n both: {bar_both:?}\n recv: {bar_recv:?}" + ); + // And Sending's slot is now blanks of its previous width. 
+ let sending_w = slots.left.get(&LeftSeg::Sending).copied().unwrap(); + assert!(sending_w >= "Sending... (3)".len()); + } + + #[test] + fn bar_is_always_cols_wide() { + for cols in [4_usize, 10, 20, 40, 80, 120] { + let s = snap(3, 12, 7, 42, "#room"); + let level = pick_level(&s, cols); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, level, cols, &mut slots); + assert_eq!(bar.chars().count(), cols, "level={level:?} cols={cols}"); + } + } + + #[test] + fn pad_helpers() { + assert_eq!(pad_right("ab", 5), "ab "); + assert_eq!(pad_left("ab", 5), " ab"); + assert_eq!(pad_right("abcdef", 3), "abcdef"); + } + + // ── inbox segment ───────────────────────────────────────────────── + + #[test] + fn inbox_omitted_when_disabled() { + // No inbox_enabled in this snapshot → no INBOX/inbox anywhere. + let s = snap(0, 0, 7, 42, "#room"); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert!(!bar.body.contains("INBOX")); + assert!(!bar.body.contains("inbox")); + assert_eq!(bar.inbox_highlight, None); + } + + #[test] + fn inbox_lowercase_when_enabled_and_no_unread() { + let s = snap_inbox(0, 0, 7, 42, "#room", 0); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert!( + bar.body.contains("inbox"), + "expected lowercase 'inbox' in {:?}", + bar.body + ); + assert!(!bar.body.contains("INBOX")); + assert_eq!( + bar.inbox_highlight, None, + "no highlight when unread = 0" + ); + } + + #[test] + fn inbox_uppercase_when_unread_present() { + let s = snap_inbox(0, 0, 7, 42, "#room", 3); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Full, 80, &mut slots); + assert!( + bar.body.contains("INBOX"), + "expected uppercase 'INBOX' in {:?}", + bar.body + ); + assert!(!bar.body.contains("inbox")); + let range = bar.inbox_highlight.expect("highlight should be Some when unread > 0"); + assert_eq!(range.end - range.start, 
"INBOX".len()); + // Body slice at the range should equal "INBOX". + let chars: Vec = bar.body.chars().collect(); + let slice: String = chars[range.start..range.end].iter().collect(); + assert_eq!(slice, "INBOX"); + } + + #[test] + fn inbox_centered_at_cols_div_two() { + let s = snap_inbox(0, 0, 7, 42, "#room", 1); + let mut slots = SlotWidths::default(); + let cols = 80; + let bar = render_bar(&s, TruncLevel::Full, cols, &mut slots); + let range = bar.inbox_highlight.expect("highlight"); + let center = cols / 2; + // 'INBOX' has 5 chars; center anchor places start = center - 5/2 = 38. + assert_eq!(range.start, center - "INBOX".len() / 2); + assert_eq!(range.end, range.start + "INBOX".len()); + } + + #[test] + fn inbox_downgrades_to_single_char_at_short_level() { + let s = snap_inbox(3, 12, 7, 42, "#room", 5); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Short, 50, &mut slots); + assert!( + !bar.body.contains("INBOX"), + "INBOX shouldn't appear at Short level: {:?}", + bar.body + ); + // 'I' should appear, highlighted. + let range = bar.inbox_highlight.expect("highlight should be Some"); + assert_eq!(range.end - range.start, 1); + } + + #[test] + fn inbox_lowercase_single_char_when_no_unread_at_short() { + let s = snap_inbox(3, 12, 7, 42, "#room", 0); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::Short, 50, &mut slots); + // 'i' should be present in the body somewhere around the centre, + // and no highlight range returned. + assert_eq!(bar.inbox_highlight, None); + // The body should still contain a lowercase 'i' centred. + let cols = 50; + let center = cols / 2; + let center_char = bar.body.chars().nth(center).unwrap(); + // At cols=50, center=25; 'i' starts at 25 - 0 = 25. 
+ assert_eq!(center_char, 'i'); + } + + #[test] + fn inbox_dropped_at_channel_only() { + let s = snap_inbox(0, 0, 0, 0, "#room", 7); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::ChannelOnly, 30, &mut slots); + assert!(!bar.body.contains("INBOX")); + assert!(!bar.body.contains("inbox")); + assert_eq!(bar.inbox_highlight, None); + } + + #[test] + fn inbox_dropped_at_channel_and_ready() { + let s = snap_inbox(0, 0, 0, 0, "#room", 7); + let mut slots = SlotWidths::default(); + let bar = render_bar(&s, TruncLevel::ChannelAndReady, 35, &mut slots); + assert!(!bar.body.contains("INBOX")); + assert_eq!(bar.inbox_highlight, None); + } + + #[test] + fn inbox_downgrades_when_centre_would_overlap_left() { + // Construct a scenario where the left group is wide enough that + // 'INBOX' (5 chars) at centre would overlap, but 'I' fits. + // At cols=40, centre=20. 'INBOX' wants cols 18..23. + // If left group occupies cols 3..18 (i.e. left_len=15), overlap. + // We synthesize this via a huge channel name to push the right + // group out and a Sending counter that makes left wide. Simpler: + // just verify the downgrade logic with a tighter cols. + let s = snap_inbox(123456, 0, 7, 42, "#room-name", 1); + let mut slots = SlotWidths::default(); + // Narrow enough that 'INBOX' won't fit, but the bar is still in + // the Full level for the test's setup. At cols=30 with a long + // send-pending, the centre is squeezed. + let bar = render_bar(&s, TruncLevel::Full, 30, &mut slots); + // Either INBOX downgraded to I, or omitted entirely. Both are + // acceptable; we just assert the result is consistent (range + // length matches what's in `body`). 
+ if let Some(range) = bar.inbox_highlight { + let len = range.end - range.start; + assert!( + len == 1 || len == 5, + "highlight should be 1 or 5 chars, got {len}" + ); + } + } +} diff --git a/peeroxide-cli/src/cmd/chat/tui/terminal.rs b/peeroxide-cli/src/cmd/chat/tui/terminal.rs new file mode 100644 index 0000000..efdefff --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/tui/terminal.rs @@ -0,0 +1,144 @@ +//! RAII terminal-state guard. +//! +//! When the interactive UI starts it must: +//! +//! 1. Enable raw mode (so stdin produces individual key events rather than +//! cooked lines, and so the program's own `Ctrl-C` handling supersedes the +//! tty's). +//! 2. Reserve the bottom rows for the status bar + input area by setting the +//! terminal's scroll region (DECSTBM, `ESC[top;bottom r`). All `Print` +//! calls that follow flow naturally within the upper region. +//! 3. Enable bracketed paste so multi-line pastes arrive as a single bursty +//! sequence rather than triggering Enter handling on every newline. +//! 4. Hide the cursor while painting (the renderer restores it explicitly at +//! the input cursor when each frame ends — see `interactive.rs`). +//! +//! On drop — including panics, Ctrl-C, normal shutdown — *all* of those need +//! to be undone, otherwise the user's shell prompt comes back to a scroll- +//! constrained, raw-mode, hidden-cursor terminal. This guard owns the +//! lifetime. + +use std::io::{Write, stdout}; + +use crossterm::{ + cursor, event, + style::ResetColor, + terminal::{self, ClearType}, +}; + +/// RAII handle for the terminal's interactive-mode state. The guard's `drop` +/// implementation restores the terminal regardless of how we leave the +/// session — clean exit, Ctrl-C, panic. +pub struct TerminalGuard { + /// Last-applied scroll region (top, bottom) using 1-based row indices, or + /// `None` if no scroll region has been set yet. Stored so the restore + /// path can emit a matching reset. 
+    scroll_region: Option<(u16, u16)>,
+}
+
+impl TerminalGuard {
+    /// Enter interactive mode. Installs a panic hook chained on top of the
+    /// existing one so that even an unexpected panic restores the terminal.
+    ///
+    /// Before flipping into raw mode this also scrolls the existing visible
+    /// screen content up into the terminal's scrollback buffer, so the user
+    /// doesn't lose their shell prompt + recent command history when the
+    /// status bar / input area paints over the bottom rows. This is a
+    /// best-effort courtesy — terminals that don't honour line scrolling
+    /// into scrollback simply lose the content, which matches the
+    /// pre-existing behaviour.
+    pub fn enter() -> std::io::Result<Self> {
+        let mut out = stdout();
+
+        // Push the current visible screen into scrollback by moving to the
+        // bottom row and emitting `rows` newlines. Each newline at the
+        // terminal's bottom row scrolls the viewport up by one and (on
+        // scrollback-capable terminals) preserves the displaced line.
+        //
+        // Done BEFORE raw mode so the tty driver's ONLCR is still in effect
+        // and `\n` produces a proper line feed; with cooked mode the cursor
+        // column is normalised back to 0 by terminals that translate to
+        // CR-LF, which is what we want when leaving the screen clean for
+        // our first paint. Run before any state we'd need to unwind, so a
+        // failure here doesn't leave the user in a partial state.
+        if let Ok((_cols, rows)) = terminal::size() {
+            let _ = crossterm::execute!(out, cursor::MoveTo(0, rows.saturating_sub(1)));
+            for _ in 0..rows {
+                let _ = writeln!(out);
+            }
+            let _ = out.flush();
+        }
+
+        terminal::enable_raw_mode()?;
+
+        // Best-effort bracketed paste — some terminals don't support it but
+        // failing here would be hostile. Errors are swallowed.
+ let _ = crossterm::execute!(out, event::EnableBracketedPaste, cursor::Hide); + out.flush().ok(); + + install_panic_hook(); + Ok(Self { + scroll_region: None, + }) + } + + /// Set (or update) the scroll region to rows `top..=bottom` (1-based, + /// inclusive). All subsequent normal output scrolls within this region; + /// rows above and below remain untouched. + pub fn set_scroll_region(&mut self, top: u16, bottom: u16) -> std::io::Result<()> { + // Top must be <= bottom and both within 1..=rows. The caller is + // responsible for sanity; we just emit the escape. + let mut out = stdout(); + write!(out, "\x1b[{top};{bottom}r")?; + out.flush()?; + self.scroll_region = Some((top, bottom)); + Ok(()) + } + + /// Reset the scroll region to the full screen. + pub fn reset_scroll_region(&mut self) { + let mut out = stdout(); + let _ = write!(out, "\x1b[r"); + let _ = out.flush(); + self.scroll_region = None; + } +} + +impl Drop for TerminalGuard { + fn drop(&mut self) { + restore_terminal(); + } +} + +/// Best-effort restore: reset scroll region, show cursor, disable bracketed +/// paste, leave raw mode. Idempotent so it's safe to call from the panic hook +/// AND `Drop`. +fn restore_terminal() { + let mut out = stdout(); + // Reset scroll region to full screen. + let _ = write!(out, "\x1b[r"); + // Move to a sane spot, clear from cursor down so the status bar / input + // area artefacts don't leak into the user's shell prompt. + if let Ok((_cols, rows)) = terminal::size() { + let _ = crossterm::queue!(out, cursor::MoveTo(0, rows.saturating_sub(1))); + let _ = crossterm::queue!(out, terminal::Clear(ClearType::CurrentLine)); + } + let _ = crossterm::queue!(out, ResetColor, cursor::Show, event::DisableBracketedPaste); + let _ = out.flush(); + let _ = terminal::disable_raw_mode(); +} + +/// Install a panic hook that restores the terminal before delegating to the +/// previous hook. Idempotent — repeated calls replace the previous chained +/// hook with a fresh one. 
+fn install_panic_hook() { + use std::sync::OnceLock; + static INSTALLED: OnceLock<()> = OnceLock::new(); + INSTALLED.get_or_init(|| { + let previous = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |info| { + restore_terminal(); + previous(info); + })); + }); +} diff --git a/peeroxide-cli/src/cmd/chat/wire.rs b/peeroxide-cli/src/cmd/chat/wire.rs new file mode 100644 index 0000000..5cd5ac4 --- /dev/null +++ b/peeroxide-cli/src/cmd/chat/wire.rs @@ -0,0 +1,1039 @@ +//! Wire format serialization/deserialization for all chat protocol record types, +//! plus XSalsa20Poly1305 encryption/decryption wrappers. +//! +//! Record layout specifications documented in `docs/src/chat/wire-format.md`. + +use std::fmt; + +use peeroxide_dht::crypto::{sign_detached, verify_detached}; +use rand::RngCore; +use xsalsa20poly1305::aead::AeadInPlace; +use xsalsa20poly1305::{KeyInit, Nonce, Tag, XSalsa20Poly1305}; + +// --------------------------------------------------------------------------- +// Constants +// --------------------------------------------------------------------------- + +pub const CONTENT_TYPE_TEXT: u8 = 0x01; +pub const INVITE_TYPE_DM: u8 = 0x01; +pub const INVITE_TYPE_PRIVATE: u8 = 0x02; + +pub const MAX_RECORD_SIZE: usize = 1000; + +pub const MSG_FIXED_OVERHEAD: usize = 180; +pub const MAX_SCREEN_NAME_CONTENT: usize = 820; + +const NONCE_SIZE: usize = 24; +const TAG_SIZE: usize = 16; + +// --------------------------------------------------------------------------- +// Error type +// --------------------------------------------------------------------------- + +#[derive(Debug)] +pub enum WireError { + BufferTooShort { need: usize, got: usize }, + RecordTooLarge { size: usize }, + InvalidContentType(u8), + InvalidInviteType(u8), + InvalidUtf8(String), + DecryptionFailed, + SignatureInvalid, +} + +impl fmt::Display for WireError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + WireError::BufferTooShort { need, got } => { + 
write!(f, "buffer too short: need {need} bytes, got {got}") + } + WireError::RecordTooLarge { size } => { + write!(f, "record too large: {size} bytes exceeds {MAX_RECORD_SIZE} byte limit") + } + WireError::InvalidContentType(b) => { + write!(f, "invalid content type: {b}") + } + WireError::InvalidInviteType(b) => { + write!(f, "invalid invite type: {b}") + } + WireError::InvalidUtf8(field) => { + write!(f, "invalid UTF-8 in field: {field}") + } + WireError::DecryptionFailed => write!(f, "decryption failed"), + WireError::SignatureInvalid => write!(f, "signature verification failed"), + } + } +} + +impl std::error::Error for WireError {} + +// --------------------------------------------------------------------------- +// §7.1 MessageEnvelope +// --------------------------------------------------------------------------- +// +// Plaintext layout: +// 0 32 id_pubkey +// 32 32 prev_msg_hash +// 64 8 timestamp (u64 LE) +// 72 1 content_type +// 73 1 screen_name_len +// 74 N screen_name (UTF-8) +// 74+N 2 content_len (u16 LE) +// 76+N M content (UTF-8) +// 76+N+M 64 signature +// +// Signature covers: +// b"peeroxide-chat:msg:v1:" || prev_msg_hash(32) || timestamp(8 LE) +// || content_type(1) || screen_name_len(1) || screen_name(N) || content(M) + +/// A signed, encrypted chat message envelope. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MessageEnvelope { + pub id_pubkey: [u8; 32], + pub prev_msg_hash: [u8; 32], + pub timestamp: u64, + pub content_type: u8, + pub screen_name: String, + pub content: String, + pub signature: [u8; 64], +} + +impl MessageEnvelope { + /// Serialize to plaintext bytes per the §7.1 layout. 
+    pub fn serialize(&self) -> Vec<u8> {
+        let sn = self.screen_name.as_bytes();
+        let ct = self.content.as_bytes();
+        let total = 32 + 32 + 8 + 1 + 1 + sn.len() + 2 + ct.len() + 64;
+        let mut buf = Vec::with_capacity(total);
+
+        buf.extend_from_slice(&self.id_pubkey);
+        buf.extend_from_slice(&self.prev_msg_hash);
+        buf.extend_from_slice(&self.timestamp.to_le_bytes());
+        buf.push(self.content_type);
+        buf.push(sn.len() as u8);
+        buf.extend_from_slice(sn);
+        buf.extend_from_slice(&(ct.len() as u16).to_le_bytes());
+        buf.extend_from_slice(ct);
+        buf.extend_from_slice(&self.signature);
+        buf
+    }
+
+    /// Deserialize from plaintext bytes.
+    pub fn deserialize(data: &[u8]) -> Result<Self, WireError> {
+        // Minimum: 32+32+8+1+1+2+64 = 140 bytes (zero-length screen_name + content)
+        let min_len = 140;
+        if data.len() < min_len {
+            return Err(WireError::BufferTooShort { need: min_len, got: data.len() });
+        }
+
+        let mut pos = 0usize;
+
+        let mut id_pubkey = [0u8; 32];
+        id_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let mut prev_msg_hash = [0u8; 32];
+        prev_msg_hash.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let timestamp = u64::from_le_bytes(data[pos..pos + 8].try_into().unwrap());
+        pos += 8;
+
+        let content_type = data[pos];
+        pos += 1;
+        if content_type != CONTENT_TYPE_TEXT {
+            return Err(WireError::InvalidContentType(content_type));
+        }
+
+        let sn_len = data[pos] as usize;
+        pos += 1;
+
+        if data.len() < pos + sn_len + 2 {
+            return Err(WireError::BufferTooShort {
+                need: pos + sn_len + 2,
+                got: data.len(),
+            });
+        }
+        let screen_name = std::str::from_utf8(&data[pos..pos + sn_len])
+            .map_err(|_| WireError::InvalidUtf8("screen_name".into()))?
+ .to_owned(); + pos += sn_len; + + let ct_len = u16::from_le_bytes([data[pos], data[pos + 1]]) as usize; + pos += 2; + + if data.len() < pos + ct_len + 64 { + return Err(WireError::BufferTooShort { + need: pos + ct_len + 64, + got: data.len(), + }); + } + let content = std::str::from_utf8(&data[pos..pos + ct_len]) + .map_err(|_| WireError::InvalidUtf8("content".into()))? + .to_owned(); + pos += ct_len; + + let mut signature = [0u8; 64]; + signature.copy_from_slice(&data[pos..pos + 64]); + + Ok(MessageEnvelope { + id_pubkey, + prev_msg_hash, + timestamp, + content_type, + screen_name, + content, + signature, + }) + } + + /// Builds and signs a new `MessageEnvelope`. + /// + /// `id_secret` is the 64-byte Ed25519 secret key (seed || pubkey as produced + /// by `ed25519-dalek`); `id_pubkey` is the corresponding 32-byte public key. + pub fn sign( + id_secret: &[u8; 64], + id_pubkey: [u8; 32], + prev_msg_hash: [u8; 32], + timestamp: u64, + content_type: u8, + screen_name: &str, + content: &str, + ) -> Self { + let sn = screen_name.as_bytes(); + let ct = content.as_bytes(); + + let msg = build_msg_signable(&prev_msg_hash, timestamp, content_type, sn, ct); + let signature = sign_detached(&msg, id_secret); + + MessageEnvelope { + id_pubkey, + prev_msg_hash, + timestamp, + content_type, + screen_name: screen_name.to_owned(), + content: content.to_owned(), + signature, + } + } + + /// Verifies the signature against the contained `id_pubkey`. + pub fn verify(&self) -> bool { + let sn = self.screen_name.as_bytes(); + let ct = self.content.as_bytes(); + let msg = + build_msg_signable(&self.prev_msg_hash, self.timestamp, self.content_type, sn, ct); + verify_detached(&self.signature, &msg, &self.id_pubkey) + } +} + +/// Build the byte buffer that is signed for a `MessageEnvelope`. 
+fn build_msg_signable(
+    prev_msg_hash: &[u8; 32],
+    timestamp: u64,
+    content_type: u8,
+    screen_name: &[u8],
+    content: &[u8],
+) -> Vec<u8> {
+    let prefix = b"peeroxide-chat:msg:v1:";
+    let mut msg = Vec::with_capacity(
+        prefix.len() + 32 + 8 + 1 + 1 + screen_name.len() + content.len(),
+    );
+    msg.extend_from_slice(prefix);
+    msg.extend_from_slice(prev_msg_hash);
+    msg.extend_from_slice(&timestamp.to_le_bytes());
+    msg.push(content_type);
+    msg.push(screen_name.len() as u8);
+    msg.extend_from_slice(screen_name);
+    msg.extend_from_slice(content);
+    msg
+}
+
+// ---------------------------------------------------------------------------
+// §7.2 FeedRecord
+// ---------------------------------------------------------------------------
+//
+// Plaintext layout:
+//   0    32   id_pubkey
+//   32   64   ownership_proof
+//   96   32   next_feed_pubkey (32 zeros if none)
+//   128  32   summary_hash (32 zeros if none)
+//   160  1    msg_count
+//   161  N×32 msg_hashes (newest first)
+
+/// Mutable-put value for a user's feed head record.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct FeedRecord {
+    pub id_pubkey: [u8; 32],
+    pub ownership_proof: [u8; 64],
+    pub next_feed_pubkey: [u8; 32], // 32 zeros if none
+    pub summary_hash: [u8; 32],     // 32 zeros if none
+    pub msg_count: u8,              // 0–26
+    pub msg_hashes: Vec<[u8; 32]>,  // newest first
+}
+
+impl FeedRecord {
+    /// Serialize to bytes, returning `Err` if the result would exceed 1000 bytes.
+    pub fn serialize(&self) -> Result<Vec<u8>, WireError> {
+        let total = 32 + 64 + 32 + 32 + 1 + self.msg_hashes.len() * 32;
+        if total > MAX_RECORD_SIZE {
+            return Err(WireError::RecordTooLarge { size: total });
+        }
+        let mut buf = Vec::with_capacity(total);
+        buf.extend_from_slice(&self.id_pubkey);
+        buf.extend_from_slice(&self.ownership_proof);
+        buf.extend_from_slice(&self.next_feed_pubkey);
+        buf.extend_from_slice(&self.summary_hash);
+        buf.push(self.msg_count);
+        for h in &self.msg_hashes {
+            buf.extend_from_slice(h);
+        }
+        Ok(buf)
+    }
+
+    /// Deserialize from bytes.
+    pub fn deserialize(data: &[u8]) -> Result<Self, WireError> {
+        let min_len = 32 + 64 + 32 + 32 + 1; // 161
+        if data.len() < min_len {
+            return Err(WireError::BufferTooShort { need: min_len, got: data.len() });
+        }
+
+        let mut pos = 0usize;
+
+        let mut id_pubkey = [0u8; 32];
+        id_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let mut ownership_proof = [0u8; 64];
+        ownership_proof.copy_from_slice(&data[pos..pos + 64]);
+        pos += 64;
+
+        let mut next_feed_pubkey = [0u8; 32];
+        next_feed_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let mut summary_hash = [0u8; 32];
+        summary_hash.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let msg_count = data[pos] as usize;
+        pos += 1;
+
+        if data.len() < pos + msg_count * 32 {
+            return Err(WireError::BufferTooShort {
+                need: pos + msg_count * 32,
+                got: data.len(),
+            });
+        }
+
+        let mut msg_hashes = Vec::with_capacity(msg_count);
+        for _ in 0..msg_count {
+            let mut h = [0u8; 32];
+            h.copy_from_slice(&data[pos..pos + 32]);
+            msg_hashes.push(h);
+            pos += 32;
+        }
+
+        Ok(FeedRecord {
+            id_pubkey,
+            ownership_proof,
+            next_feed_pubkey,
+            summary_hash,
+            msg_count: msg_count as u8,
+            msg_hashes,
+        })
+    }
+}
+
+// ---------------------------------------------------------------------------
+// §7.3 SummaryBlock
+// ---------------------------------------------------------------------------
+//
+// Plaintext layout:
+//   0       32   id_pubkey
+//   32      32   prev_summary_hash (32 zeros if first)
+//   64      1    msg_count
+//   65      N×32 msg_hashes (oldest first, max 27)
+//   65+N×32 64   signature
+//
+// Signature covers:
+//   b"peeroxide-chat:summary:v1:" || prev_summary_hash(32) || msg_hashes(N×32)
+
+/// Immutable-put value for a historical summary block.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct SummaryBlock {
+    pub id_pubkey: [u8; 32],
+    pub prev_summary_hash: [u8; 32], // 32 zeros if first
+    pub msg_count: u8,
+    pub msg_hashes: Vec<[u8; 32]>, // oldest first, max 27
+    pub signature: [u8; 64],
+}
+
+impl SummaryBlock {
+    /// Serialize to bytes, returning `Err` if the result would exceed 1000 bytes.
+    pub fn serialize(&self) -> Result<Vec<u8>, WireError> {
+        let total = 32 + 32 + 1 + self.msg_hashes.len() * 32 + 64;
+        if total > MAX_RECORD_SIZE {
+            return Err(WireError::RecordTooLarge { size: total });
+        }
+        let mut buf = Vec::with_capacity(total);
+        buf.extend_from_slice(&self.id_pubkey);
+        buf.extend_from_slice(&self.prev_summary_hash);
+        buf.push(self.msg_count);
+        for h in &self.msg_hashes {
+            buf.extend_from_slice(h);
+        }
+        buf.extend_from_slice(&self.signature);
+        Ok(buf)
+    }
+
+    /// Deserialize from bytes.
+    pub fn deserialize(data: &[u8]) -> Result<Self, WireError> {
+        let min_len = 32 + 32 + 1 + 64; // 129 (zero hashes)
+        if data.len() < min_len {
+            return Err(WireError::BufferTooShort { need: min_len, got: data.len() });
+        }
+
+        let mut pos = 0usize;
+
+        let mut id_pubkey = [0u8; 32];
+        id_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let mut prev_summary_hash = [0u8; 32];
+        prev_summary_hash.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let msg_count = data[pos] as usize;
+        pos += 1;
+
+        if data.len() < pos + msg_count * 32 + 64 {
+            return Err(WireError::BufferTooShort {
+                need: pos + msg_count * 32 + 64,
+                got: data.len(),
+            });
+        }
+
+        let mut msg_hashes = Vec::with_capacity(msg_count);
+        for _ in 0..msg_count {
+            let mut h = [0u8; 32];
+            h.copy_from_slice(&data[pos..pos + 32]);
+            msg_hashes.push(h);
+            pos += 32;
+        }
+
+        let mut signature = [0u8; 64];
+        signature.copy_from_slice(&data[pos..pos + 64]);
+
+        Ok(SummaryBlock {
+            id_pubkey,
+            prev_summary_hash,
+            msg_count: msg_count as u8,
+            msg_hashes,
+            signature,
+        })
+    }
+
+    /// Build and sign a new `SummaryBlock`.
+    pub fn sign(
+        id_secret: &[u8; 64],
+        id_pubkey: [u8; 32],
+        prev_summary_hash: [u8; 32],
+        msg_hashes: Vec<[u8; 32]>,
+    ) -> Self {
+        let msg = build_summary_signable(&prev_summary_hash, &msg_hashes);
+        let signature = sign_detached(&msg, id_secret);
+        let msg_count = msg_hashes.len() as u8;
+        SummaryBlock {
+            id_pubkey,
+            prev_summary_hash,
+            msg_count,
+            msg_hashes,
+            signature,
+        }
+    }
+
+    /// Verify the signature against the contained `id_pubkey`.
+    pub fn verify(&self) -> bool {
+        let msg = build_summary_signable(&self.prev_summary_hash, &self.msg_hashes);
+        verify_detached(&self.signature, &msg, &self.id_pubkey)
+    }
+}
+
+/// Build the byte buffer that is signed for a `SummaryBlock`.
+fn build_summary_signable(prev_summary_hash: &[u8; 32], msg_hashes: &[[u8; 32]]) -> Vec<u8> {
+    let prefix = b"peeroxide-chat:summary:v1:";
+    let mut msg = Vec::with_capacity(prefix.len() + 32 + msg_hashes.len() * 32);
+    msg.extend_from_slice(prefix);
+    msg.extend_from_slice(prev_summary_hash);
+    for h in msg_hashes {
+        msg.extend_from_slice(h);
+    }
+    msg
+}
+
+// ---------------------------------------------------------------------------
+// §7.4 NexusRecord
+// ---------------------------------------------------------------------------
+//
+// Plaintext layout:
+//   0    1  name_len
+//   1    N  name (UTF-8)
+//   1+N  2  bio_len (u16 LE)
+//   3+N  M  bio (UTF-8)
+
+/// Mutable-put value for a user's public profile (name + bio).
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct NexusRecord {
+    pub name: String,
+    pub bio: String,
+}
+
+impl NexusRecord {
+    /// Serialize to bytes, returning `Err` if the result would exceed 1000 bytes.
+    pub fn serialize(&self) -> Result<Vec<u8>, WireError> {
+        let name_bytes = self.name.as_bytes();
+        let bio_bytes = self.bio.as_bytes();
+        let total = 1 + name_bytes.len() + 2 + bio_bytes.len();
+        if total > MAX_RECORD_SIZE {
+            return Err(WireError::RecordTooLarge { size: total });
+        }
+        let mut buf = Vec::with_capacity(total);
+        buf.push(name_bytes.len() as u8);
+        buf.extend_from_slice(name_bytes);
+        buf.extend_from_slice(&(bio_bytes.len() as u16).to_le_bytes());
+        buf.extend_from_slice(bio_bytes);
+        Ok(buf)
+    }
+
+    /// Deserialize from bytes.
+    pub fn deserialize(data: &[u8]) -> Result<Self, WireError> {
+        if data.is_empty() {
+            return Err(WireError::BufferTooShort { need: 1, got: 0 });
+        }
+
+        let mut pos = 0usize;
+        let name_len = data[pos] as usize;
+        pos += 1;
+
+        if data.len() < pos + name_len + 2 {
+            return Err(WireError::BufferTooShort {
+                need: pos + name_len + 2,
+                got: data.len(),
+            });
+        }
+        let name = std::str::from_utf8(&data[pos..pos + name_len])
+            .map_err(|_| WireError::InvalidUtf8("name".into()))?
+            .to_owned();
+        pos += name_len;
+
+        let bio_len = u16::from_le_bytes([data[pos], data[pos + 1]]) as usize;
+        pos += 2;
+
+        if data.len() < pos + bio_len {
+            return Err(WireError::BufferTooShort {
+                need: pos + bio_len,
+                got: data.len(),
+            });
+        }
+        let bio = std::str::from_utf8(&data[pos..pos + bio_len])
+            .map_err(|_| WireError::InvalidUtf8("bio".into()))?
+            .to_owned();
+
+        Ok(NexusRecord { name, bio })
+    }
+}
+
+// ---------------------------------------------------------------------------
+// §7.5 InviteRecord
+// ---------------------------------------------------------------------------
+//
+// Plaintext layout:
+//   0    32  id_pubkey
+//   32   64  ownership_proof
+//   96   32  next_feed_pubkey
+//   128  1   invite_type
+//   129  2   payload_len (u16 LE)
+//   131  N   payload
+
+/// Encrypted invite record, carried inside an encrypted envelope.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct InviteRecord {
+    pub id_pubkey: [u8; 32],
+    pub ownership_proof: [u8; 64],
+    pub next_feed_pubkey: [u8; 32],
+    pub invite_type: u8,
+    pub payload: Vec<u8>,
+}
+
+impl InviteRecord {
+    /// Serialize to bytes, returning `Err` if the result would exceed 1000 bytes.
+    pub fn serialize(&self) -> Result<Vec<u8>, WireError> {
+        let total = 32 + 64 + 32 + 1 + 2 + self.payload.len();
+        if total > MAX_RECORD_SIZE {
+            return Err(WireError::RecordTooLarge { size: total });
+        }
+        let mut buf = Vec::with_capacity(total);
+        buf.extend_from_slice(&self.id_pubkey);
+        buf.extend_from_slice(&self.ownership_proof);
+        buf.extend_from_slice(&self.next_feed_pubkey);
+        buf.push(self.invite_type);
+        buf.extend_from_slice(&(self.payload.len() as u16).to_le_bytes());
+        buf.extend_from_slice(&self.payload);
+        Ok(buf)
+    }
+
+    /// Deserialize from bytes.
+    pub fn deserialize(data: &[u8]) -> Result<Self, WireError> {
+        let min_len = 32 + 64 + 32 + 1 + 2; // 131
+        if data.len() < min_len {
+            return Err(WireError::BufferTooShort { need: min_len, got: data.len() });
+        }
+
+        let mut pos = 0usize;
+
+        let mut id_pubkey = [0u8; 32];
+        id_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let mut ownership_proof = [0u8; 64];
+        ownership_proof.copy_from_slice(&data[pos..pos + 64]);
+        pos += 64;
+
+        let mut next_feed_pubkey = [0u8; 32];
+        next_feed_pubkey.copy_from_slice(&data[pos..pos + 32]);
+        pos += 32;
+
+        let invite_type = data[pos];
+        pos += 1;
+        if invite_type != INVITE_TYPE_DM && invite_type != INVITE_TYPE_PRIVATE {
+            return Err(WireError::InvalidInviteType(invite_type));
+        }
+
+        let payload_len = u16::from_le_bytes([data[pos], data[pos + 1]]) as usize;
+        pos += 2;
+
+        if data.len() < pos + payload_len {
+            return Err(WireError::BufferTooShort {
+                need: pos + payload_len,
+                got: data.len(),
+            });
+        }
+        let payload = data[pos..pos + payload_len].to_vec();
+
+        Ok(InviteRecord {
+            id_pubkey,
+            ownership_proof,
+            next_feed_pubkey,
+            invite_type,
+            payload,
+        })
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Encryption wrappers
+// ---------------------------------------------------------------------------
+
+/// Encrypt `plaintext` using XSalsa20Poly1305 with a random nonce.
+///
+/// Wire format: `nonce(24) || tag(16) || ciphertext`
+pub fn encrypt_message(key: &[u8; 32], plaintext: &[u8]) -> Result<Vec<u8>, WireError> {
+    let mut nonce_bytes = [0u8; NONCE_SIZE];
+    rand::rng().fill_bytes(&mut nonce_bytes);
+    let nonce = Nonce::from(nonce_bytes);
+    let cipher = XSalsa20Poly1305::new(key.into());
+
+    let mut ciphertext = plaintext.to_vec();
+    let tag = cipher
+        .encrypt_in_place_detached(&nonce, b"", &mut ciphertext)
+        .map_err(|_| WireError::DecryptionFailed)?;
+
+    let mut result = Vec::with_capacity(NONCE_SIZE + TAG_SIZE + ciphertext.len());
+    result.extend_from_slice(&nonce_bytes);
+    result.extend_from_slice(tag.as_slice());
+    result.extend_from_slice(&ciphertext);
+    Ok(result)
+}
+
+/// Decrypt data in wire format `nonce(24) || tag(16) || ciphertext`.
+///
+/// Returns the plaintext on success.
+pub fn decrypt_message(key: &[u8; 32], data: &[u8]) -> Result<Vec<u8>, WireError> {
+    if data.len() < NONCE_SIZE + TAG_SIZE {
+        return Err(WireError::BufferTooShort {
+            need: NONCE_SIZE + TAG_SIZE,
+            got: data.len(),
+        });
+    }
+
+    let nonce = Nonce::from_slice(&data[..NONCE_SIZE]);
+    let tag = Tag::from_slice(&data[NONCE_SIZE..NONCE_SIZE + TAG_SIZE]);
+    let mut plaintext = data[NONCE_SIZE + TAG_SIZE..].to_vec();
+
+    let cipher = XSalsa20Poly1305::new(key.into());
+    cipher
+        .decrypt_in_place_detached(nonce, b"", &mut plaintext, tag)
+        .map_err(|_| WireError::DecryptionFailed)?;
+
+    Ok(plaintext)
+}
+
+pub fn encrypt_invite(invite_key: &[u8; 32], plaintext: &[u8]) -> Result<Vec<u8>, WireError> {
+    let encrypted = encrypt_message(invite_key, plaintext)?;
+    if encrypted.len() > MAX_RECORD_SIZE {
+        return Err(WireError::RecordTooLarge {
+            size: encrypted.len(),
+        });
+    }
+    Ok(encrypted)
+}
+
+pub fn decrypt_invite(invite_key: &[u8; 32], data: &[u8]) -> Result<Vec<u8>, WireError> {
+    decrypt_message(invite_key, data)
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use rand::RngCore;
+
+    fn random_key() -> [u8; 32] {
+        let mut k = [0u8; 32];
+        rand::rng().fill_bytes(&mut k);
+        k
+    }
+
+    fn random_bytes<const N: usize>() -> [u8; N] {
+        let mut b = [0u8; N];
+        rand::rng().fill_bytes(&mut b);
+        b
+    }
+
+    /// Generate a deterministic-ish Ed25519 keypair using ed25519-dalek for tests.
+ fn make_keypair() -> ([u8; 64], [u8; 32]) { + use ed25519_dalek::{SigningKey, VerifyingKey}; + let seed: [u8; 32] = random_bytes(); + let sk = SigningKey::from_bytes(&seed); + let pk: VerifyingKey = (&sk).into(); + // ed25519-dalek's to_keypair_bytes() gives seed||pubkey + let mut secret = [0u8; 64]; + secret[..32].copy_from_slice(&seed); + secret[32..].copy_from_slice(pk.as_bytes()); + let pubkey: [u8; 32] = *pk.as_bytes(); + (secret, pubkey) + } + + // --- MessageEnvelope --- + + #[test] + fn message_envelope_round_trip() { + let prev = random_bytes::<32>(); + let ts = 1_700_000_000u64; + let (secret, pubkey) = make_keypair(); + + let env = MessageEnvelope::sign(&secret, pubkey, prev, ts, CONTENT_TYPE_TEXT, "alice", "hello"); + let bytes = env.serialize(); + let env2 = MessageEnvelope::deserialize(&bytes).expect("deserialize"); + assert_eq!(env, env2); + } + + #[test] + fn message_envelope_sign_verify() { + let prev = random_bytes::<32>(); + let (secret, pubkey) = make_keypair(); + let env = MessageEnvelope::sign(&secret, pubkey, prev, 42, CONTENT_TYPE_TEXT, "bob", "world"); + assert!(env.verify(), "signature must be valid"); + } + + #[test] + fn message_envelope_verify_rejects_tampered() { + let prev = random_bytes::<32>(); + let (secret, pubkey) = make_keypair(); + let mut env = + MessageEnvelope::sign(&secret, pubkey, prev, 42, CONTENT_TYPE_TEXT, "carol", "secret"); + env.content = "tampered".to_owned(); + assert!(!env.verify(), "tampered content must fail verification"); + } + + #[test] + fn message_envelope_max_content_fits() { + let prev = random_bytes::<32>(); + let (secret, pubkey) = make_keypair(); + let content = "x".repeat(819); + let env = + MessageEnvelope::sign(&secret, pubkey, prev, 0, CONTENT_TYPE_TEXT, "a", &content); + let bytes = env.serialize(); + assert!(bytes.len() <= MAX_RECORD_SIZE - 40, "plaintext must fit"); + } + + #[test] + fn message_envelope_bad_content_type() { + let prev = random_bytes::<32>(); + let (secret, pubkey) = 
make_keypair(); + let mut env = MessageEnvelope::sign(&secret, pubkey, prev, 0, CONTENT_TYPE_TEXT, "a", "b"); + env.content_type = 0xFF; + let bytes = env.serialize(); + let result = MessageEnvelope::deserialize(&bytes); + assert!(matches!(result, Err(WireError::InvalidContentType(0xFF)))); + } + + #[test] + fn message_envelope_buffer_too_short() { + let result = MessageEnvelope::deserialize(&[0u8; 10]); + assert!(matches!(result, Err(WireError::BufferTooShort { .. }))); + } + + // --- FeedRecord --- + + #[test] + fn feed_record_round_trip() { + let rec = FeedRecord { + id_pubkey: random_bytes::<32>(), + ownership_proof: random_bytes::<64>(), + next_feed_pubkey: [0u8; 32], + summary_hash: [0u8; 32], + msg_count: 3, + msg_hashes: vec![random_bytes::<32>(), random_bytes::<32>(), random_bytes::<32>()], + }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = FeedRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn feed_record_empty_hashes() { + let rec = FeedRecord { + id_pubkey: [1u8; 32], + ownership_proof: [2u8; 64], + next_feed_pubkey: [0u8; 32], + summary_hash: [0u8; 32], + msg_count: 0, + msg_hashes: vec![], + }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = FeedRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn feed_record_too_large() { + // 27 hashes → 32+64+32+32+1+27*32 = 1025 > 1000 + let rec = FeedRecord { + id_pubkey: [0u8; 32], + ownership_proof: [0u8; 64], + next_feed_pubkey: [0u8; 32], + summary_hash: [0u8; 32], + msg_count: 27, + msg_hashes: vec![[0u8; 32]; 27], + }; + assert!(matches!(rec.serialize(), Err(WireError::RecordTooLarge { .. }))); + } + + #[test] + fn feed_record_buffer_too_short() { + let result = FeedRecord::deserialize(&[0u8; 10]); + assert!(matches!(result, Err(WireError::BufferTooShort { .. 
}))); + } + + // --- SummaryBlock --- + + #[test] + fn summary_block_round_trip() { + let (secret, pubkey) = make_keypair(); + let prev = random_bytes::<32>(); + let hashes: Vec<[u8; 32]> = (0..5).map(|_| random_bytes::<32>()).collect(); + let blk = SummaryBlock::sign(&secret, pubkey, prev, hashes); + let bytes = blk.serialize().expect("serialize"); + let blk2 = SummaryBlock::deserialize(&bytes).expect("deserialize"); + assert_eq!(blk, blk2); + } + + #[test] + fn summary_block_sign_verify() { + let (secret, pubkey) = make_keypair(); + let prev = random_bytes::<32>(); + let hashes: Vec<[u8; 32]> = (0..3).map(|_| random_bytes::<32>()).collect(); + let blk = SummaryBlock::sign(&secret, pubkey, prev, hashes); + assert!(blk.verify()); + } + + #[test] + fn summary_block_verify_rejects_tampered() { + let (secret, pubkey) = make_keypair(); + let prev = random_bytes::<32>(); + let hashes: Vec<[u8; 32]> = (0..3).map(|_| random_bytes::<32>()).collect(); + let mut blk = SummaryBlock::sign(&secret, pubkey, prev, hashes); + blk.msg_hashes[0] = [0xFF; 32]; + assert!(!blk.verify()); + } + + #[test] + fn summary_block_buffer_too_short() { + assert!(matches!( + SummaryBlock::deserialize(&[0u8; 5]), + Err(WireError::BufferTooShort { .. 
}) + )); + } + + // --- NexusRecord --- + + #[test] + fn nexus_record_round_trip() { + let rec = NexusRecord { + name: "Alice".to_owned(), + bio: "Hello, world!".to_owned(), + }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = NexusRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn nexus_record_empty_fields() { + let rec = NexusRecord { name: "".to_owned(), bio: "".to_owned() }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = NexusRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn nexus_record_too_large() { + let rec = NexusRecord { + name: "a".repeat(255), + bio: "b".repeat(750), + }; + assert!(matches!(rec.serialize(), Err(WireError::RecordTooLarge { .. }))); + } + + #[test] + fn nexus_record_buffer_too_short() { + assert!(matches!( + NexusRecord::deserialize(&[]), + Err(WireError::BufferTooShort { .. }) + )); + } + + // --- InviteRecord --- + + #[test] + fn invite_record_dm_round_trip() { + let rec = InviteRecord { + id_pubkey: random_bytes::<32>(), + ownership_proof: random_bytes::<64>(), + next_feed_pubkey: random_bytes::<32>(), + invite_type: INVITE_TYPE_DM, + payload: b"some dm payload".to_vec(), + }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = InviteRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn invite_record_private_round_trip() { + let rec = InviteRecord { + id_pubkey: random_bytes::<32>(), + ownership_proof: random_bytes::<64>(), + next_feed_pubkey: random_bytes::<32>(), + invite_type: INVITE_TYPE_PRIVATE, + payload: vec![0xDE, 0xAD, 0xBE, 0xEF], + }; + let bytes = rec.serialize().expect("serialize"); + let rec2 = InviteRecord::deserialize(&bytes).expect("deserialize"); + assert_eq!(rec, rec2); + } + + #[test] + fn invite_record_invalid_type() { + let rec = InviteRecord { + id_pubkey: [0u8; 32], + ownership_proof: [0u8; 64], + next_feed_pubkey: [0u8; 32], + 
invite_type: INVITE_TYPE_DM, + payload: vec![], + }; + let mut bytes = rec.serialize().expect("serialize"); + // corrupt the invite_type byte (offset 128) + bytes[128] = 0x99; + assert!(matches!( + InviteRecord::deserialize(&bytes), + Err(WireError::InvalidInviteType(0x99)) + )); + } + + #[test] + fn invite_record_buffer_too_short() { + assert!(matches!( + InviteRecord::deserialize(&[0u8; 10]), + Err(WireError::BufferTooShort { .. }) + )); + } + + // --- Encryption --- + + #[test] + fn encrypt_decrypt_roundtrip() { + let key = random_key(); + let plaintext = b"the quick brown fox jumps over the lazy dog"; + let ciphertext = encrypt_message(&key, plaintext).expect("encrypt"); + let decrypted = decrypt_message(&key, &ciphertext).expect("decrypt"); + assert_eq!(decrypted, plaintext); + } + + #[test] + fn decrypt_wrong_key_fails() { + let key1 = random_key(); + let key2 = random_key(); + let plaintext = b"secret message"; + let ciphertext = encrypt_message(&key1, plaintext).expect("encrypt"); + let result = decrypt_message(&key2, &ciphertext); + assert!(matches!(result, Err(WireError::DecryptionFailed))); + } + + #[test] + fn decrypt_too_short_fails() { + let key = random_key(); + let result = decrypt_message(&key, &[0u8; 10]); + assert!(matches!(result, Err(WireError::BufferTooShort { .. 
}))); + } + + #[test] + fn encrypt_empty_plaintext() { + let key = random_key(); + let ct = encrypt_message(&key, b"").expect("encrypt"); + let pt = decrypt_message(&key, &ct).expect("decrypt"); + assert_eq!(pt, b""); + } + + #[test] + fn encrypt_invite_roundtrip() { + let key = random_key(); + let plaintext = b"invite data here"; + let ct = encrypt_invite(&key, plaintext).expect("encrypt_invite"); + let pt = decrypt_invite(&key, &ct).expect("decrypt_invite"); + assert_eq!(pt, plaintext); + } + + #[test] + fn encrypt_invite_rejects_oversized() { + let key = random_key(); + let plaintext = vec![0xAB; 980]; + let result = encrypt_invite(&key, &plaintext); + assert!(matches!(result, Err(WireError::RecordTooLarge { .. }))); + } + + #[test] + fn encrypt_invite_max_size_boundary() { + let key = random_key(); + let max_plaintext_size = MAX_RECORD_SIZE - NONCE_SIZE - TAG_SIZE; + let plaintext = vec![0u8; max_plaintext_size]; + let result = encrypt_invite(&key, &plaintext); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), MAX_RECORD_SIZE); + } +} diff --git a/peeroxide-cli/src/cmd/config.rs b/peeroxide-cli/src/cmd/config.rs deleted file mode 100644 index e4a8db6..0000000 --- a/peeroxide-cli/src/cmd/config.rs +++ /dev/null @@ -1,104 +0,0 @@ -use clap::{Args, Subcommand}; - -#[derive(Subcommand)] -pub enum ConfigCommands { - /// Generate a config file with sane defaults and documentation - Init(InitArgs), -} - -#[derive(Args)] -pub struct InitArgs { - /// Write to file instead of stdout - #[arg(long)] - output: Option, -} - -pub async fn run(cmd: ConfigCommands) -> i32 { - match cmd { - ConfigCommands::Init(args) => run_init(args).await, - } -} - -async fn run_init(args: InitArgs) -> i32 { - let content = generate_default_config(); - - if let Some(path) = args.output { - let parent = std::path::Path::new(&path).parent(); - if let Some(dir) = parent { - if !dir.as_os_str().is_empty() { - if let Err(e) = std::fs::create_dir_all(dir) { - eprintln!("error: cannot 
create directory: {e}"); - return 1; - } - } - } - if let Err(e) = std::fs::write(&path, &content) { - eprintln!("error: failed to write config: {e}"); - return 1; - } - eprintln!("Config written to {path}"); - } else { - print!("{content}"); - } - 0 -} - -fn generate_default_config() -> String { - r#"# Peeroxide configuration file -# Place at ~/.config/peeroxide/config.toml or set PEEROXIDE_CONFIG env var - -[network] -# Whether this node is publicly reachable (not behind NAT/firewall) -# public = false - -# Bootstrap node addresses (host:port). If empty and public=true, uses default public bootstrap. -# bootstrap = ["bootstrap1.example.com:49737"] - -[node] -# Bind port for the DHT node (default: 49737) -# port = 49737 - -# Bind address (default: 0.0.0.0) -# host = "0.0.0.0" - -# How often to log stats in seconds (default: 60) -# stats_interval = 60 - -# Max announcement records stored (default: 65536) -# max_records = 65536 - -# Max entries per LRU cache (default: 65536) -# max_lru_size = 65536 - -# Max peer announcements per topic (default: 20) -# max_per_key = 20 - -# TTL for announcement records in seconds (default: 1200) -# max_record_age = 1200 - -# TTL for LRU cache entries in seconds (default: 1200) -# max_lru_age = 1200 - -[announce] -# (No configurable options currently) - -[cp] -# (No configurable options currently) -"#.to_string() -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::ConfigFile; - - #[test] - fn generated_config_is_valid_toml() { - let content = generate_default_config(); - let parsed: ConfigFile = toml::from_str(&content).unwrap(); - assert!(parsed.network.public.is_none()); - assert!(parsed.network.bootstrap.is_none()); - assert!(parsed.node.port.is_none()); - assert!(parsed.node.host.is_none()); - } -} diff --git a/peeroxide-cli/src/cmd/cp.rs b/peeroxide-cli/src/cmd/cp.rs index cce3fba..648fafe 100644 --- a/peeroxide-cli/src/cmd/cp.rs +++ b/peeroxide-cli/src/cmd/cp.rs @@ -7,7 +7,7 @@ use tokio::signal; use 
tokio::io::AsyncWriteExt; use crate::config::ResolvedConfig; -use super::{build_dht_config, parse_topic, to_hex, FIREWALL_CONSISTENT, FIREWALL_OPEN}; +use super::{build_dht_config, parse_topic, to_hex}; const CHUNK_SIZE: usize = 65536; @@ -122,11 +122,6 @@ async fn run_send(args: SendArgs, cfg: &ResolvedConfig) -> i32 { let dht_config = build_dht_config(cfg); let mut swarm_config = SwarmConfig::default(); swarm_config.dht = dht_config; - if cfg.public { - swarm_config.firewall = FIREWALL_OPEN; - } else if cfg.firewalled { - swarm_config.firewall = FIREWALL_CONSISTENT; - } let (task, handle, mut conn_rx) = match spawn(swarm_config).await { Ok(v) => v, @@ -354,11 +349,6 @@ async fn run_recv(args: RecvArgs, cfg: &ResolvedConfig) -> i32 { let dht_config = build_dht_config(cfg); let mut swarm_config = SwarmConfig::default(); swarm_config.dht = dht_config; - if cfg.public { - swarm_config.firewall = FIREWALL_OPEN; - } else if cfg.firewalled { - swarm_config.firewall = FIREWALL_CONSISTENT; - } let (task, handle, mut conn_rx) = match spawn(swarm_config).await { Ok(v) => v, diff --git a/peeroxide-cli/src/cmd/deaddrop/mod.rs b/peeroxide-cli/src/cmd/deaddrop/mod.rs new file mode 100644 index 0000000..74b7aa7 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/mod.rs @@ -0,0 +1,489 @@ +pub mod progress; +pub mod v1; +pub mod v2; + +use clap::{Args, Subcommand}; +use libudx::UdxRuntime; +use peeroxide::KeyPair; +use peeroxide_dht::hyperdht::{self, HyperDhtHandle, MutablePutResult}; +use std::collections::HashSet; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::signal; +use tokio::sync::{Mutex, Semaphore}; + +use crate::config::ResolvedConfig; +use super::{build_dht_config, to_hex}; +use crate::cmd::deaddrop::progress::reporter::ProgressReporter; +use crate::cmd::deaddrop::progress::state::{Phase, ProgressState}; + +const MAX_PAYLOAD: usize = 1000; + +#[derive(Subcommand)] +pub enum DdCommands { 
+ /// Store data at a dead drop location on the DHT + Put(PutArgs), + /// Retrieve data from a dead drop location on the DHT + Get(GetArgs), +} + +#[derive(Args)] +pub struct PutArgs { + /// File path or - for stdin + file: String, + + /// Hard cap on outbound byte rate (e.g. 100k, 1m) + #[arg(long)] + max_speed: Option, + + /// Refresh interval in seconds (default: 600) + #[arg(long, default_value_t = 600)] + refresh_interval: u64, + + /// Stop refreshing after this duration + #[arg(long)] + ttl: Option, + + /// Exit after N pickups detected + #[arg(long)] + max_pickups: Option, + + /// Derive keypair from passphrase (provided on command line) + #[arg(long, conflicts_with = "interactive_passphrase")] + passphrase: Option, + + /// Derive keypair from passphrase (prompted interactively, hidden input) + #[arg(long, conflicts_with = "passphrase")] + interactive_passphrase: bool, + + /// Disable progress output + #[arg(long)] + pub no_progress: bool, + + /// Emit JSON progress/output + #[arg(long)] + pub json: bool, + + /// Use legacy v1 protocol (default: v2) + #[arg(long)] + pub v1: bool, +} + +#[derive(Args)] +pub struct GetArgs { + /// Pickup key (64-char hex or passphrase text) + #[arg(required_unless_present_any = ["passphrase", "interactive_passphrase"])] + key: Option, + + /// Derive pickup key from passphrase (provided on command line) + #[arg(long, conflicts_with = "interactive_passphrase")] + passphrase: Option, + + /// Derive pickup key from passphrase (prompted interactively, hidden input) + #[arg(long, conflicts_with = "passphrase")] + interactive_passphrase: bool, + + /// Disable progress output + #[arg(long)] + pub no_progress: bool, + + /// Write output to file (default: stdout) + #[arg(long)] + output: Option, + + /// Emit JSON progress/output + #[arg(long, requires = "output")] + pub json: bool, + + /// Abort if no progress is made for this duration (sliding window, default: 1200s). + /// Steady-progressing downloads have no hard wall-clock limit. 
+ #[arg(long, default_value_t = 1200)] + timeout: u64, + + /// Don't announce pickup acknowledgement + #[arg(long)] + no_ack: bool, +} + +pub async fn run(cmd: DdCommands, cfg: &ResolvedConfig) -> i32 { + match cmd { + DdCommands::Put(args) => { + if args.v1 { + v1::run_put(&args, cfg).await + } else { + v2::run_put(&args, cfg).await + } + } + DdCommands::Get(args) => run_get(args, cfg).await, + } +} + +async fn run_get(args: GetArgs, cfg: &ResolvedConfig) -> i32 { + if args.timeout == 0 { + eprintln!("error: --timeout must be greater than 0"); + return 1; + } + + let root_public_key = if let Some(ref phrase) = args.passphrase { + if phrase.is_empty() { + eprintln!("error: passphrase cannot be empty"); + return 1; + } + derive_pk_from_passphrase(phrase) + } else if args.interactive_passphrase { + eprintln!("Enter passphrase: "); + let passphrase = rpassword_read(); + if passphrase.is_empty() { + eprintln!("error: passphrase cannot be empty"); + return 1; + } + derive_pk_from_passphrase(&passphrase) + } else { + let key = args.key.as_ref().unwrap(); + if key.len() == 64 { + match hex::decode(key) { + Ok(bytes) if bytes.len() == 32 => { + let mut pk = [0u8; 32]; + pk.copy_from_slice(&bytes); + pk + } + _ => derive_pk_from_passphrase(key), + } + } else { + derive_pk_from_passphrase(key) + } + }; + + let pk_hex = to_hex(&root_public_key); + eprintln!("DD GET @{}...", &pk_hex[..8]); + + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: failed to create UDP runtime: {e}"); + return 1; + } + }; + + let (task_handle, handle, _rx) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: failed to start DHT: {e}"); + return 1; + } + }; + + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + return 1; + } + + let chunk_timeout = Duration::from_secs(args.timeout); + + let root_data = match 
fetch_with_retry(&handle, &root_public_key, chunk_timeout).await { + Some(d) => d, + None => { + eprintln!("error: root chunk not found (timeout after {}s)", args.timeout); + let _ = handle.destroy().await; + let _ = task_handle.await; + return 1; + } + }; + + if root_data.is_empty() { + eprintln!("error: root chunk is empty"); + let _ = handle.destroy().await; + let _ = task_handle.await; + return 1; + } + + match root_data[0] { + 0x01 => { + let get_filename: Arc = match args.output.as_deref() { + None => Arc::from(""), + Some(p) => { + let base = std::path::Path::new(p) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(p); + Arc::from(base) + } + }; + let state = ProgressState::new_with_wire( + Phase::Get, + 0x01, + get_filename, + handle.wire_counters(), + ); + let reporter = + ProgressReporter::from_args(state.clone(), args.no_progress, args.json); + reporter.on_start(); + v1::get_from_root(root_data, root_public_key, handle, task_handle, &args, state, reporter).await + } + 0x02 => { + let get_filename: Arc = match args.output.as_deref() { + None => Arc::from(""), + Some(p) => { + let base = std::path::Path::new(p) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(p); + Arc::from(base) + } + }; + let state = ProgressState::new_with_wire( + Phase::Get, + 0x02, + get_filename, + handle.wire_counters(), + ); + let reporter = + ProgressReporter::from_args(state.clone(), args.no_progress, args.json); + reporter.on_start(); + v2::get_from_root(root_data, root_public_key, handle, task_handle, &args, state, reporter).await + } + v => { + eprintln!("error: unknown dead drop version 0x{v:02x}"); + let _ = handle.destroy().await; + let _ = task_handle.await; + 1 + } + } +} + +fn compute_crc32c(data: &[u8]) -> u32 { + crc32c::crc32c(data) +} + +fn parse_max_speed(s: &str) -> Result { + let s = s.trim().to_lowercase(); + if let Some(num) = s.strip_suffix('m') { + num.parse::() + .map(|n| n * 1_000_000) + .map_err(|e| format!("invalid --max-speed: {e}")) + } 
else if let Some(num) = s.strip_suffix('k') { + num.parse::() + .map(|n| n * 1_000) + .map_err(|e| format!("invalid --max-speed: {e}")) + } else { + s.parse::() + .map_err(|e| format!("invalid --max-speed: {e}")) + } +} + +fn rpassword_read() -> String { + use std::io::{BufRead, BufReader}; + let tty = match std::fs::File::open("/dev/tty") { + Ok(f) => f, + Err(_) => { + let mut line = String::new(); + std::io::stdin().read_line(&mut line).unwrap_or(0); + return line.trim_end_matches('\n').trim_end_matches('\r').to_string(); + } + }; + let mut reader = BufReader::new(tty); + let mut line = String::new(); + reader.read_line(&mut line).unwrap_or(0); + line.trim_end_matches('\n').trim_end_matches('\r').to_string() +} + +fn derive_pk_from_passphrase(passphrase: &str) -> [u8; 32] { + let seed = peeroxide::discovery_key(passphrase.as_bytes()); + let kp = KeyPair::from_seed(seed); + kp.public_key +} + +async fn fetch_with_retry( + handle: &HyperDhtHandle, + public_key: &[u8; 32], + timeout: Duration, +) -> Option> { + let deadline = tokio::time::Instant::now() + timeout; + let mut backoff = Duration::from_secs(1); + let max_backoff = Duration::from_secs(30); + + loop { + match handle.mutable_get(public_key, 0).await { + Ok(Some(result)) => return Some(result.value), + Ok(None) => {} + Err(_) => {} + } + + if tokio::time::Instant::now() >= deadline { + return None; + } + + tokio::time::sleep(backoff.min(deadline - tokio::time::Instant::now())).await; + backoff = (backoff * 2).min(max_backoff); + } +} + +#[derive(Clone)] +pub(crate) struct ChunkData { + keypair: KeyPair, + encoded: Vec, +} + +struct AimdController { + current: usize, + max_cap: Option, + window_size: usize, + degraded_in_window: u32, + total_in_window: u32, +} + +impl AimdController { + fn new(initial: usize, max_cap: Option) -> Self { + Self { + current: initial, + max_cap, + window_size: 10, + degraded_in_window: 0, + total_in_window: 0, + } + } + + fn record(&mut self, degraded: bool) -> Option { + if 
degraded { + self.degraded_in_window += 1; + } + self.total_in_window += 1; + + if self.total_in_window >= self.window_size as u32 { + let ratio = self.degraded_in_window as f64 / self.total_in_window as f64; + self.degraded_in_window = 0; + self.total_in_window = 0; + + if ratio > 0.3 { + self.current = (self.current / 2).max(1); + } else if ratio == 0.0 { + let next = self.current + 1; + self.current = match self.max_cap { + Some(cap) => next.min(cap), + None => next, + }; + } + Some(self.current) + } else { + None + } + } +} + +async fn publish_chunks( + handle: &HyperDhtHandle, + chunks: &[ChunkData], + max_concurrency: Option, + dispatch_delay: Option, + progress: Option>, +) -> Result<(), String> { + let initial_concurrency = 4usize; + let sem = Arc::new(Semaphore::new(initial_concurrency)); + let active_target = Arc::new(AtomicUsize::new(initial_concurrency)); + let permits_to_forget = Arc::new(AtomicUsize::new(0)); + let controller = Arc::new(Mutex::new(AimdController::new(initial_concurrency, max_concurrency))); + + let mut handles: Vec>> = Vec::new(); + let mut chunk_byte_sizes: Vec = Vec::new(); + + for chunk in chunks { + let permit = loop { + let p = sem.clone().acquire_owned().await.unwrap(); + let forget_pending = permits_to_forget.load(Ordering::Relaxed); + if forget_pending > 0 && permits_to_forget.fetch_sub(1, Ordering::Relaxed) > 0 { + p.forget(); + } else { + break p; + } + }; + + let h = handle.clone(); + let kp = chunk.keypair.clone(); + let data = chunk.encoded.clone(); + let chunk_size = data.len(); + + let seq = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let sem_inner = sem.clone(); + let active_target_inner = active_target.clone(); + let permits_to_forget_inner = permits_to_forget.clone(); + let controller_inner = controller.clone(); + + handles.push(tokio::spawn(async move { + let result = h.mutable_put(&kp, &data, seq).await; + let put_result = match result { + Ok(r) => r, + Err(e) => { + 
drop(permit); + return Err(format!("mutable_put failed: {e}")); + } + }; + + let degraded = put_result.commit_timeouts > 0; + let new_target = { + let mut ctrl = controller_inner.lock().await; + ctrl.record(degraded) + }; + + if let Some(target) = new_target { + let current_target = active_target_inner.load(Ordering::Relaxed); + if target > current_target { + let add = target - current_target; + sem_inner.add_permits(add); + active_target_inner.store(target, Ordering::Relaxed); + } else if target < current_target { + let remove = current_target - target; + permits_to_forget_inner.fetch_add(remove, Ordering::Relaxed); + active_target_inner.store(target, Ordering::Relaxed); + } + } + + drop(permit); + Ok(put_result) + })); + chunk_byte_sizes.push(chunk_size); + + if let Some(delay) = dispatch_delay { + tokio::time::sleep(delay).await; + } + + let mut i = 0; + while i < handles.len() { + if handles[i].is_finished() { + let chunk_bytes = chunk_byte_sizes.swap_remove(i); + let h = handles.swap_remove(i); + match h.await { + Ok(Ok(_)) => { + if let Some(ref state) = progress { + state.inc_data(chunk_bytes as u64); + } + } + Ok(Err(e)) => return Err(e), + Err(e) => return Err(format!("task panicked: {e}")), + } + } else { + i += 1; + } + } + } + + for (h, chunk_bytes) in handles.into_iter().zip(chunk_byte_sizes) { + match h.await { + Ok(Ok(_)) => { + if let Some(ref state) = progress { + state.inc_data(chunk_bytes as u64); + } + } + Ok(Err(e)) => return Err(e), + Err(e) => return Err(format!("task panicked: {e}")), + } + } + + Ok(()) +} + diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/bar.rs b/peeroxide-cli/src/cmd/deaddrop/progress/bar.rs new file mode 100644 index 0000000..4f01d68 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/bar.rs @@ -0,0 +1,275 @@ +#![allow(dead_code)] + +use std::sync::Arc; +use std::sync::atomic::Ordering; +use std::time::Duration; + +use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; +use tokio::sync::{Mutex, Notify}; 
+use tokio::task::JoinHandle; + +use crate::cmd::deaddrop::progress::{ + format::{ + render_bar_line, render_data_line, render_index_line, render_overall_line, + render_wire_line, + }, + rate::RateCalculator, + state::{Phase, ProgressState}, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum BarLayout { + Single, + V2GetMulti, +} + +/// indicatif-driven renderer that ticks a background task to refresh the +/// progress bar(s). +/// +/// Layout: +/// Single (v1 + v2 PUT): 2 bars — main bar line, wire-stats line. +/// V2GetMulti (v2 GET): 4 bars — index, data, wire, overall. +/// +/// The wire line samples `state.wire_bytes_sent` / `state.wire_bytes_received` +/// (which are `Arc` shared with `peeroxide_dht::io::WireCounters`) +/// and renders rates plus an amplification factor (wire bytes / payload bytes). +pub struct BarRenderer { + layout: BarLayout, + #[allow(dead_code)] + mp: Option, + bars: Vec, + state: Arc, + #[allow(dead_code)] + rate: Arc>, + stop: Arc, + tick_handle: Option>, + finished: bool, +} + +impl BarRenderer { + pub fn new(state: Arc) -> Self { + let layout = if state.phase == Phase::Get && state.version == 2 { + BarLayout::V2GetMulti + } else { + BarLayout::Single + }; + + let style = ProgressStyle::with_template("{msg}").expect("static template is valid"); + + // All layouts now use MultiProgress because we add a wire-stats bar. + let bar_count = match layout { + BarLayout::Single => 2, // main + wire + BarLayout::V2GetMulti => 4, // index + data + wire + overall + }; + let mp = MultiProgress::new(); + let mut bars = Vec::with_capacity(bar_count); + for _ in 0..bar_count { + let bar = mp.add(ProgressBar::new(0)); + bar.set_style(style.clone()); + bar.enable_steady_tick(Duration::from_millis(100)); + bars.push(bar); + } + let mp = Some(mp); + + let rate = Arc::new(Mutex::new(RateCalculator::new())); + // Separate rate calculators for wire-up and wire-down. They share the + // same window/sample policy but track distinct atomic counters. 
+ let wire_up_rate = Arc::new(Mutex::new(RateCalculator::new())); + let wire_down_rate = Arc::new(Mutex::new(RateCalculator::new())); + let stop = Arc::new(Notify::new()); + + let stop_clone = stop.clone(); + let state_clone = state.clone(); + let rate_clone = rate.clone(); + let wire_up_clone = wire_up_rate.clone(); + let wire_down_clone = wire_down_rate.clone(); + let bars_clone = bars.clone(); + let layout_clone = layout; + + let tick_handle = tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_millis(100)); + loop { + tokio::select! { + _ = interval.tick() => { + let now = std::time::Instant::now(); + + // Payload-throughput rate calc. + let mut rate_guard = rate_clone.lock().await; + let bytes_done = state_clone.bytes_done.load(Ordering::Relaxed); + rate_guard.record(now, bytes_done); + let smoothed = rate_guard.rate_bps(); + let total = state_clone.bytes_total.load(Ordering::Relaxed); + let eta = rate_guard.eta_secs(total, bytes_done); + drop(rate_guard); + + // Wire-byte rate calcs (independent up/down). 
+ let wire_sent = state_clone.wire_bytes_sent.load(Ordering::Relaxed); + let wire_recv = state_clone.wire_bytes_received.load(Ordering::Relaxed); + let mut up_guard = wire_up_clone.lock().await; + up_guard.record(now, wire_sent); + let up_bps = up_guard.rate_bps(); + drop(up_guard); + let mut down_guard = wire_down_clone.lock().await; + down_guard.record(now, wire_recv); + let down_bps = down_guard.rate_bps(); + drop(down_guard); + let wire_total = wire_sent.saturating_add(wire_recv); + let wire_line = render_wire_line( + &state_clone, up_bps, down_bps, wire_total, + ); + + match layout_clone { + BarLayout::Single => { + let msg = render_bar_line(&state_clone, smoothed, eta); + if let Some(bar) = bars_clone.first() { + bar.set_message(msg); + } + if let Some(bar) = bars_clone.get(1) { + bar.set_message(wire_line); + } + } + BarLayout::V2GetMulti => { + if let Some(bar) = bars_clone.first() { + bar.set_message(render_index_line(&state_clone, smoothed)); + } + if let Some(bar) = bars_clone.get(1) { + bar.set_message(render_data_line(&state_clone, smoothed, eta)); + } + if let Some(bar) = bars_clone.get(2) { + bar.set_message(wire_line); + } + if let Some(bar) = bars_clone.get(3) { + bar.set_message(render_overall_line(&state_clone)); + } + } + } + } + _ = stop_clone.notified() => break, + } + } + }); + + Self { + layout, + mp, + bars, + state, + rate, + stop, + tick_handle: Some(tick_handle), + finished: false, + } + } + + /// Stop the tick task and clear the bars without consuming `self`. + /// Idempotent — calling twice is a no-op. + pub async fn finish_initial(&mut self) { + if self.finished { + return; + } + self.stop.notify_one(); + if let Some(handle) = self.tick_handle.take() { + let _ = handle.await; + } + for bar in &self.bars { + bar.finish_with_message(""); + } + self.finished = true; + } + + /// Full cleanup, consuming `self`. 
+ pub async fn finish(mut self) { + self.finish_initial().await; + } + + /// Stop the tick task and remove the bar lines from the terminal, + /// consuming `self`. Used for transient per-operation bars where we + /// don't want empty placeholder lines left behind. + pub async fn finish_and_clear(mut self) { + if !self.finished { + self.stop.notify_one(); + if let Some(handle) = self.tick_handle.take() { + let _ = handle.await; + } + self.finished = true; + } + for bar in &self.bars { + bar.finish_and_clear(); + } + } + + pub fn state(&self) -> &Arc { + &self.state + } +} + +impl Drop for BarRenderer { + fn drop(&mut self) { + // Cancellation signal is sync-safe; the spawned tick task will + // observe it on its next select poll. We do NOT await here. + self.stop.notify_one(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + use tokio::time::timeout; + + fn put_v1_state() -> Arc { + ProgressState::new(Phase::Put, 1, Arc::::from("file.txt")) + } + + fn put_v2_state() -> Arc { + ProgressState::new(Phase::Put, 2, Arc::::from("file.txt")) + } + + fn get_v2_state() -> Arc { + ProgressState::new(Phase::Get, 2, Arc::::from("file.txt")) + } + + #[tokio::test] + async fn single_layout_for_v1() { + let renderer = BarRenderer::new(put_v1_state()); + assert_eq!(renderer.layout, BarLayout::Single); + // 2 bars: main line + wire-stats line. + assert_eq!(renderer.bars.len(), 2); + assert!(renderer.mp.is_some()); + renderer.finish().await; + } + + #[tokio::test] + async fn multi_layout_for_v2_get() { + let renderer = BarRenderer::new(get_v2_state()); + assert_eq!(renderer.layout, BarLayout::V2GetMulti); + // 4 bars: index + data + wire + overall. 
+ assert_eq!(renderer.bars.len(), 4); + assert!(renderer.mp.is_some()); + renderer.finish().await; + } + + #[tokio::test] + async fn single_layout_for_v2_put() { + let renderer = BarRenderer::new(put_v2_state()); + assert_eq!(renderer.layout, BarLayout::Single); + assert_eq!(renderer.bars.len(), 2); + assert!(renderer.mp.is_some()); + renderer.finish().await; + } + + #[tokio::test] + async fn finish_initial_idempotent() { + let mut renderer = BarRenderer::new(put_v1_state()); + renderer.finish_initial().await; + renderer.finish_initial().await; + assert!(renderer.finished); + } + + #[tokio::test] + async fn finish_completes_within_timeout() { + let renderer = BarRenderer::new(get_v2_state()); + let result = timeout(Duration::from_millis(500), renderer.finish()).await; + assert!(result.is_ok(), "finish() should complete within 500ms"); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/events.rs b/peeroxide-cli/src/cmd/deaddrop/progress/events.rs new file mode 100644 index 0000000..004e6f2 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/events.rs @@ -0,0 +1,277 @@ +#![allow(dead_code)] + +use std::sync::atomic::Ordering; + +use serde::Serialize; + +use super::state::{Phase, ProgressState}; + +fn now_rfc3339() -> String { + use chrono::Utc; + + Utc::now().to_rfc3339() +} + +#[derive(Serialize, Debug)] +#[serde(tag = "type", rename_all = "lowercase")] +pub enum ProgressEvent<'a> { + Start { + phase: Phase, + version: u8, + filename: &'a str, + bytes_total: u64, + indexes_total: u32, + indexes_done: u32, + data_total: u32, + data_done: u32, + ts: String, + }, + Progress { + phase: Phase, + version: u8, + filename: &'a str, + bytes_done: u64, + bytes_total: u64, + indexes_done: u32, + indexes_total: u32, + data_done: u32, + data_total: u32, + rate_bytes_per_sec: f64, + #[serde(skip_serializing_if = "Option::is_none")] + eta_seconds: Option, + elapsed_seconds: f64, + ts: String, + }, + Done { + phase: Phase, + version: u8, + filename: &'a str, + 
bytes_done: u64, + bytes_total: u64, + indexes_done: u32, + indexes_total: u32, + data_done: u32, + data_total: u32, + elapsed_seconds: f64, + ts: String, + }, + #[serde(rename = "result")] + PutResult { + phase: Phase, + version: u8, + pickup_key: String, + bytes: u64, + chunks: u32, + ts: String, + }, + #[serde(rename = "result")] + GetResult { + phase: Phase, + version: u8, + bytes: u64, + crc: String, + output: String, + ts: String, + }, + Ack { + pickup_number: u64, + peer: String, + ts: String, + }, +} + +pub fn snapshot_start<'a>(state: &'a ProgressState) -> ProgressEvent<'a> { + ProgressEvent::Start { + phase: state.phase, + version: state.version, + filename: &state.filename, + bytes_total: state.bytes_total.load(Ordering::Relaxed), + indexes_total: state.indexes_total.load(Ordering::Relaxed), + indexes_done: state.indexes_done.load(Ordering::Relaxed), + data_total: state.data_total.load(Ordering::Relaxed), + data_done: state.data_done.load(Ordering::Relaxed), + ts: now_rfc3339(), + } +} + +pub fn snapshot_progress<'a>(state: &'a ProgressState, rate: f64, eta: Option) -> ProgressEvent<'a> { + ProgressEvent::Progress { + phase: state.phase, + version: state.version, + filename: &state.filename, + bytes_done: state.bytes_done.load(Ordering::Relaxed), + bytes_total: state.bytes_total.load(Ordering::Relaxed), + indexes_done: state.indexes_done.load(Ordering::Relaxed), + indexes_total: state.indexes_total.load(Ordering::Relaxed), + data_done: state.data_done.load(Ordering::Relaxed), + data_total: state.data_total.load(Ordering::Relaxed), + rate_bytes_per_sec: rate, + eta_seconds: eta, + elapsed_seconds: state.start_instant.elapsed().as_secs_f64(), + ts: now_rfc3339(), + } +} + +pub fn snapshot_done<'a>(state: &'a ProgressState) -> ProgressEvent<'a> { + ProgressEvent::Done { + phase: state.phase, + version: state.version, + filename: &state.filename, + bytes_done: state.bytes_done.load(Ordering::Relaxed), + bytes_total: state.bytes_total.load(Ordering::Relaxed), 
+ indexes_done: state.indexes_done.load(Ordering::Relaxed), + indexes_total: state.indexes_total.load(Ordering::Relaxed), + data_done: state.data_done.load(Ordering::Relaxed), + data_total: state.data_total.load(Ordering::Relaxed), + elapsed_seconds: state.start_instant.elapsed().as_secs_f64(), + ts: now_rfc3339(), + } +} + +pub fn put_result( + phase: Phase, + version: u8, + pickup_key: String, + bytes: u64, + chunks: u32, +) -> ProgressEvent<'static> { + ProgressEvent::PutResult { + phase, + version, + pickup_key, + bytes, + chunks, + ts: now_rfc3339(), + } +} + +pub fn get_result( + phase: Phase, + version: u8, + bytes: u64, + crc: String, + output: String, +) -> ProgressEvent<'static> { + ProgressEvent::GetResult { + phase, + version, + bytes, + crc, + output, + ts: now_rfc3339(), + } +} + +pub fn ack(pickup_number: u64, peer: String) -> ProgressEvent<'static> { + ProgressEvent::Ack { + pickup_number, + peer, + ts: now_rfc3339(), + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::time::Duration; + + use serde_json::Value; + + use super::*; + + fn assert_has_type(json: &str) -> Value { + let value: Value = serde_json::from_str(json).unwrap(); + assert!(value.get("type").is_some()); + value + } + + #[test] + fn serialize_all() { + let state = ProgressState::new(Phase::Put, 2, Arc::::from("file.txt")); + state.set_length(4500, 5, 5); + state.inc_data(900); + state.inc_index(); + + let events = [ + serde_json::to_string(&snapshot_start(&state)).unwrap(), + serde_json::to_string(&snapshot_progress(&state, 12.5, Some(7.5))).unwrap(), + serde_json::to_string(&snapshot_done(&state)).unwrap(), + serde_json::to_string(&put_result(Phase::Put, 2, "pickup".into(), 4500, 5)).unwrap(), + serde_json::to_string(&get_result(Phase::Get, 2, 4500, "abcd".into(), "stdout".into())).unwrap(), + serde_json::to_string(&ack(1, "abc".into())).unwrap(), + ]; + + for json in events { + assert_has_type(&json); + } + } + + #[test] + fn result_variants() { + let put = 
serde_json::to_string(&put_result(Phase::Put, 1, "k".into(), 10, 2)).unwrap(); + let get = serde_json::to_string(&get_result(Phase::Get, 1, 10, "crc".into(), "stdout".into())).unwrap(); + + let put_v = assert_has_type(&put); + let get_v = assert_has_type(&get); + + assert_eq!(put_v["type"], "result"); + assert_eq!(get_v["type"], "result"); + assert!(put_v.get("pickup_key").is_some()); + assert!(get_v.get("output").is_some()); + } + + #[test] + fn omits_none_eta() { + let state = ProgressState::new(Phase::Get, 2, Arc::::from("file.txt")); + state.set_length(100, 1, 1); + state.inc_data(20); + let json = serde_json::to_string(&ProgressEvent::Progress { + phase: state.phase, + version: state.version, + filename: &state.filename, + bytes_done: 20, + bytes_total: 100, + indexes_done: 0, + indexes_total: 1, + data_done: 1, + data_total: 1, + rate_bytes_per_sec: 1.0, + eta_seconds: None, + elapsed_seconds: 0.0, + ts: "2026-01-01T00:00:00Z".into(), + }) + .unwrap(); + let value = assert_has_type(&json); + assert!(value.get("eta_seconds").is_none()); + } + + #[test] + fn v1_done_includes_indexes() { + let state = ProgressState::new(Phase::Put, 1, Arc::::from("file.txt")); + state.set_length(123, 0, 0); + state.inc_data(123); + std::thread::sleep(Duration::from_millis(1)); + let json = serde_json::to_string(&snapshot_done(&state)).unwrap(); + let value = assert_has_type(&json); + assert_eq!(value["indexes_total"], 0); + } + + #[test] + fn ack_natural_fields() { + let json = serde_json::to_string(&ProgressEvent::Ack { + pickup_number: 1, + peer: "abc".into(), + ts: "2026-01-01T00:00:00Z".into(), + }) + .unwrap(); + let value = assert_has_type(&json); + assert_eq!(value["type"], "ack"); + assert!(value.get("pickup_number").is_some()); + assert!(value.get("peer").is_some()); + assert!(value.get("ts").is_some()); + assert!(value.get("phase").is_none()); + assert!(value.get("version").is_none()); + assert!(value.get("indexes_total").is_none()); + 
assert!(value.get("data_total").is_none()); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/format.rs b/peeroxide-cli/src/cmd/deaddrop/progress/format.rs new file mode 100644 index 0000000..288dfa9 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/format.rs @@ -0,0 +1,354 @@ +#![allow(dead_code)] + +use std::sync::atomic::Ordering; + +use crate::cmd::deaddrop::progress::state::{Phase, ProgressState}; + +fn snapshot(state: &ProgressState) -> (u64, u64, u32, u32, u32, u32) { + ( + state.bytes_done.load(Ordering::Relaxed), + state.bytes_total.load(Ordering::Relaxed), + state.indexes_done.load(Ordering::Relaxed), + state.indexes_total.load(Ordering::Relaxed), + state.data_done.load(Ordering::Relaxed), + state.data_total.load(Ordering::Relaxed), + ) +} + +fn pct(done: u64, total: u64) -> f64 { + if total == 0 { + 0.0 + } else { + ((done as f64 / total as f64) * 100.0).min(100.0) + } +} + +pub fn human_bytes(b: u64) -> String { + const KIB: f64 = 1024.0; + const MIB: f64 = KIB * 1024.0; + const GIB: f64 = MIB * 1024.0; + + match b { + 0..=1023 => format!("{b} B"), + 1024..=1_048_575 => format!("{:.1} KiB", b as f64 / KIB), + 1_048_576..=1_073_741_823 => format!("{:.1} MiB", b as f64 / MIB), + _ => format!("{:.1} GiB", b as f64 / GIB), + } +} + +pub fn human_rate(bps: f64) -> String { + const KIB: f64 = 1024.0; + const MIB: f64 = KIB * 1024.0; + const GIB: f64 = MIB * 1024.0; + + if bps <= 0.0 { + return "0 B/s".to_string(); + } + + if bps < KIB { + format!("{:.0} B/s", bps) + } else if bps < MIB { + format!("{:.1} KiB/s", bps / KIB) + } else if bps < GIB { + format!("{:.1} MiB/s", bps / MIB) + } else { + format!("{:.1} GiB/s", bps / GIB) + } +} + +pub fn human_eta(eta: Option) -> String { + let Some(eta) = eta else { return "—".to_string(); }; + if eta <= 0.0 { + return "0s".to_string(); + } + let secs = eta.floor() as u64; + let mins = secs / 60; + let rem = secs % 60; + if mins == 0 { + format!("{rem}s") + } else { + format!("{mins}m{rem}s") + } 
+} + +pub fn draw_bar(done: u64, total: u64) -> String { + const WIDTH: usize = 20; + let filled = if total == 0 { + 0 + } else { + (((done as f64 / total as f64).min(1.0)) * WIDTH as f64).floor() as usize + }; + let filled = filled.min(WIDTH); + let empty = WIDTH - filled; + format!("{}{}", "█".repeat(filled), "░".repeat(empty)) +} + +pub fn render_bar_line(state: &ProgressState, smoothed_rate: f64, eta: Option) -> String { + let (bytes_done, bytes_total, indexes_done, indexes_total, _, _) = snapshot(state); + let bar = draw_bar(bytes_done, bytes_total); + let pct = pct(bytes_done, bytes_total); + let rate = human_rate(smoothed_rate); + let eta = human_eta(eta); + + if indexes_total == 0 { + format!( + "↑ {} D({}/{}) [{}] {:.0}% {} ETA {}", + state.filename, + human_bytes(bytes_done), + human_bytes(bytes_total), + bar, + pct, + rate, + eta + ) + } else { + format!( + "↑ {} I[{}/{}] D({}/{}) [{}] {:.0}% {} ETA {}", + state.filename, + indexes_done, + indexes_total, + human_bytes(bytes_done), + human_bytes(bytes_total), + bar, + pct, + rate, + eta + ) + } +} + +pub fn render_index_line(state: &ProgressState, smoothed_rate: f64) -> String { + let (_, _, indexes_done, indexes_total, _, _) = snapshot(state); + format!( + "I[{}/{}] {}", + indexes_done, + indexes_total, + human_rate(smoothed_rate) + ) +} + +pub fn render_data_line(state: &ProgressState, smoothed_rate: f64, eta: Option) -> String { + let (bytes_done, bytes_total, _, _, _, _) = snapshot(state); + format!( + "D({}/{}) [{}] {:.0}% {} ETA {}", + human_bytes(bytes_done), + human_bytes(bytes_total), + draw_bar(bytes_done, bytes_total), + pct(bytes_done, bytes_total), + human_rate(smoothed_rate), + human_eta(eta) + ) +} + +pub fn render_overall_line(state: &ProgressState) -> String { + let (bytes_done, bytes_total, _, _, _, _) = snapshot(state); + format!( + "{} {}/{} {:.0}%", + state.filename, + human_bytes(bytes_done), + human_bytes(bytes_total), + pct(bytes_done, bytes_total) + ) +} + +/// Render the 
wire-level network metrics line. +/// +/// Displays raw UDP send/receive rates from the DHT IO layer alongside +/// an "amplification factor" — the ratio of total wire bytes to useful +/// payload bytes. Returns an empty string when wire stats are unavailable +/// (e.g. v1 path where `ProgressState::new` was used without wire counters). +/// +/// `up_bps` and `down_bps` are pre-computed by the caller (smoothed across +/// a rate window). `wire_total` is the cumulative `wire_sent + wire_received` +/// since the operation started; used to compute the amplification ratio +/// against the cumulative payload bytes from `state.bytes_done`. +/// +/// Format: "W ↑ {up} ↓ {down} (×{amp} amplification)" +pub fn render_wire_line( + state: &ProgressState, + up_bps: f64, + down_bps: f64, + wire_total: u64, +) -> String { + let bytes_done = state.bytes_done.load(Ordering::Relaxed); + let amp_str = if bytes_done == 0 { + String::new() + } else { + let amp = wire_total as f64 / bytes_done as f64; + format!(" (×{amp:.1} amplification)") + }; + format!( + "W ↑ {} ↓ {}{}", + human_rate(up_bps), + human_rate(down_bps), + amp_str + ) +} + +pub fn render_log_line(state: &ProgressState, smoothed_rate: f64, eta: Option) -> String { + let (bytes_done, bytes_total, indexes_done, indexes_total, data_done, data_total) = snapshot(state); + let phase = match state.phase { + Phase::Put => "put", + Phase::Get => "get", + }; + format!( + "[dd-{phase}] indexes {}/{}, data {}/{}, {}/{} ({:.0}%), {}, eta {}", + indexes_done, + indexes_total, + data_done, + data_total, + human_bytes(bytes_done), + human_bytes(bytes_total), + pct(bytes_done, bytes_total), + human_rate(smoothed_rate), + human_eta(eta) + ) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::*; + + fn state() -> Arc { + let state = ProgressState::new(Phase::Put, 2, Arc::::from("file.bin")); + state.set_length(10 * 1024, 2, 4); + state.bytes_done.store(5 * 1024, Ordering::Relaxed); + state.indexes_done.store(1, 
Ordering::Relaxed); + state.data_done.store(2, Ordering::Relaxed); + state + } + + #[test] + fn human_bytes_thresholds() { + assert_eq!(human_bytes(0), "0 B"); + assert_eq!(human_bytes(1023), "1023 B"); + assert_eq!(human_bytes(1024), "1.0 KiB"); + assert_eq!(human_bytes(1536), "1.5 KiB"); + assert_eq!(human_bytes(1_048_576), "1.0 MiB"); + assert_eq!(human_bytes(1_073_741_824), "1.0 GiB"); + } + + #[test] + fn human_rate_thresholds() { + assert_eq!(human_rate(0.0), "0 B/s"); + assert_eq!(human_rate(500.0), "500 B/s"); + assert_eq!(human_rate(1.4 * 1024.0 * 1024.0), "1.4 MiB/s"); + } + + #[test] + fn human_eta_cases() { + assert_eq!(human_eta(None), "—"); + assert_eq!(human_eta(Some(3.0)), "3s"); + assert_eq!(human_eta(Some(63.0)), "1m3s"); + assert_eq!(human_eta(Some(3661.0)), "61m1s"); + } + + #[test] + fn draw_bar_cases() { + assert_eq!(draw_bar(0, 0), "░░░░░░░░░░░░░░░░░░░░"); + assert_eq!(draw_bar(0, 10), "░░░░░░░░░░░░░░░░░░░░"); + assert_eq!(draw_bar(5, 10), "██████████░░░░░░░░░░"); + assert_eq!(draw_bar(10, 10), "████████████████████"); + assert_eq!(draw_bar(20, 10), "████████████████████"); + } + + #[test] + fn render_bar_line_v1_omits_indexes() { + let state = ProgressState::new(Phase::Put, 1, Arc::::from("a.txt")); + state.set_length(2000, 0, 0); + state.bytes_done.store(1000, Ordering::Relaxed); + let s = render_bar_line(&state, 2048.0, Some(12.0)); + assert!(s.starts_with("↑ a.txt D(")); + assert!(!s.contains("I[")); + assert!(s.contains(" ETA 12s")); + } + + #[test] + fn render_bar_line_v2_put_includes_indexes() { + let state = state(); + let s = render_bar_line(&state, 2048.0, Some(12.0)); + assert!(s.starts_with("↑ file.bin I[1/2] D(")); + assert!(s.contains("ETA 12s")); + } + + #[test] + fn render_bar_line_byte_values_and_pct_are_bytes_not_chunks() { + // Regression for the snapshot-destructure bug: render_bar_line was + // pulling data_done (a chunk count) into a variable named bytes_done + // and formatting it as bytes. 
Verify that with N=4 chunks done out of + // 4 totalling 10 KiB, the displayed byte count is 5 KiB (real bytes), + // NOT "2 B" (the chunk count formatted as bytes), and the percentage + // reflects 5/10 = 50% (bytes), not 2/10240 ≈ 0% (chunks vs bytes). + let state = ProgressState::new(Phase::Put, 2, Arc::::from("file.bin")); + state.set_length(10 * 1024, 2, 4); + state.bytes_done.store(5 * 1024, Ordering::Relaxed); + state.indexes_done.store(1, Ordering::Relaxed); + state.data_done.store(2, Ordering::Relaxed); + let s = render_bar_line(&state, 0.0, None); + assert!( + s.contains("D(5.0 KiB/10.0 KiB)"), + "expected D(5.0 KiB/10.0 KiB) in: {s}" + ); + assert!(s.contains("50%"), "expected 50% in: {s}"); + } + + #[test] + fn render_data_line_byte_values_and_pct_are_bytes_not_chunks() { + // Same regression for the v2-GET multi-bar path. + let state = ProgressState::new(Phase::Get, 2, Arc::::from("out.bin")); + state.set_length(10 * 1024, 2, 4); + state.bytes_done.store(5 * 1024, Ordering::Relaxed); + state.data_done.store(2, Ordering::Relaxed); + let s = render_data_line(&state, 0.0, None); + assert!( + s.contains("D(5.0 KiB/10.0 KiB)"), + "expected D(5.0 KiB/10.0 KiB) in: {s}" + ); + assert!(s.contains("50%"), "expected 50% in: {s}"); + } + + #[test] + fn render_log_line_shape() { + let s = render_log_line(&state(), 500.0, Some(4.0)); + assert!(s.starts_with("[dd-put] indexes 1/2, data 2/4, 5.0 KiB/10.0 KiB (50%), 500 B/s, eta 4s")); + } + + #[test] + fn render_wire_line_shows_rates_and_amplification() { + let s = ProgressState::new(Phase::Get, 2, Arc::::from("file.bin")); + s.set_length(10 * 1024, 0, 0); + // 1 KiB of useful payload, 35 KiB of wire traffic → ×35 amplification. 
+ s.bytes_done.store(1024, Ordering::Relaxed); + let line = render_wire_line(&s, 12_345.0, 67_890.0, 35 * 1024); + assert!(line.starts_with("W ↑ "), "got: {line}"); + assert!(line.contains(" ↓ "), "got: {line}"); + assert!(line.contains("×35.0 amplification"), "got: {line}"); + } + + #[test] + fn render_wire_line_omits_amplification_when_no_payload() { + let s = ProgressState::new(Phase::Get, 2, Arc::::from("file.bin")); + let line = render_wire_line(&s, 0.0, 1024.0, 4096); + assert!(line.starts_with("W ↑ "), "got: {line}"); + assert!(!line.contains("amplification"), "should be omitted: {line}"); + } + + #[test] + fn render_wire_line_zero_rates_renders_cleanly() { + let s = ProgressState::new(Phase::Get, 2, Arc::::from("file.bin")); + let line = render_wire_line(&s, 0.0, 0.0, 0); + assert_eq!(line, "W ↑ 0 B/s ↓ 0 B/s"); + } + + #[test] + fn pct_caps_and_zero_total_is_safe() { + let state = ProgressState::new(Phase::Get, 2, Arc::::from("b.bin")); + state.set_length(0, 0, 0); + state.bytes_done.store(10, Ordering::Relaxed); + let s = render_bar_line(&state, 0.0, None); + assert!(s.contains("100%") || s.contains("0%")); + assert_eq!(draw_bar(1, 0), "░░░░░░░░░░░░░░░░░░░░"); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/json.rs b/peeroxide-cli/src/cmd/deaddrop/progress/json.rs new file mode 100644 index 0000000..bb3a55a --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/json.rs @@ -0,0 +1,142 @@ +#![allow(dead_code)] + +//! `JsonEmitter` — on-demand stdout JSON-Lines event emitter. +//! +//! The emitter is synchronous: the orchestrator calls `emit_*` helpers +//! explicitly for start, per-chunk progress, result, ack, and done events. +//! There is no background tick task. Each event is serialized to JSON and +//! written to stdout with a trailing newline via `println!`. JSON events +//! own stdout per the docs convention; bar/log renderers own stderr. 
+
+use std::sync::Arc;
+
+use super::events::{
+    ProgressEvent, ack, get_result, put_result, snapshot_done, snapshot_progress, snapshot_start,
+};
+use super::state::ProgressState;
+
+pub struct JsonEmitter {
+    pub state: Arc<ProgressState>,
+}
+
+impl JsonEmitter {
+    pub fn new(state: Arc<ProgressState>) -> Self {
+        Self { state }
+    }
+
+    /// Serialize the event to JSON and write one line to stdout.
+    /// Serialization/write failures are ignored — emission must not panic.
+    pub fn emit(&self, event: &ProgressEvent<'_>) {
+        use std::io::Write as _; // for `writeln!` on stdout
+        if let Ok(json) = serde_json::to_string(event) {
+            let _ = writeln!(std::io::stdout(), "{json}"); // `println!` panics on EPIPE
+        }
+    }
+
+    pub fn emit_start(&self) {
+        let event = snapshot_start(&self.state);
+        self.emit(&event);
+    }
+
+    pub fn emit_progress(&self, rate: f64, eta: Option<f64>) {
+        let event = snapshot_progress(&self.state, rate, eta);
+        self.emit(&event);
+    }
+
+    pub fn emit_done(&self) {
+        let event = snapshot_done(&self.state);
+        self.emit(&event);
+    }
+
+    pub fn emit_put_result(&self, pickup_key: &str) {
+        let bytes = self
+            .state
+            .bytes_total
+            .load(std::sync::atomic::Ordering::Relaxed);
+        let chunks = self
+            .state
+            .data_total
+            .load(std::sync::atomic::Ordering::Relaxed);
+        let event = put_result(
+            self.state.phase,
+            self.state.version,
+            pickup_key.to_string(),
+            bytes,
+            chunks,
+        );
+        self.emit(&event);
+    }
+
+    pub fn emit_get_result(&self, bytes: u64, crc: &str, output: Option<&str>) {
+        let output_str = output.unwrap_or("stdout").to_string();
+        let event = get_result(
+            self.state.phase,
+            self.state.version,
+            bytes,
+            crc.to_string(),
+            output_str,
+        );
+        self.emit(&event);
+    }
+
+    pub fn emit_ack(&self, pickup_number: u64, peer: &str) {
+        let event = ack(pickup_number, peer.to_string());
+        self.emit(&event);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::cmd::deaddrop::progress::state::Phase;
+
+    fn make_state(phase: Phase) -> Arc<ProgressState> {
+        let state = ProgressState::new(phase, 2, Arc::<str>::from("file.txt"));
+        
state.set_length(1000, 2, 3); + state + } + + #[test] + fn emit_silently_handles_serialization() { + let emitter = JsonEmitter::new(make_state(Phase::Put)); + emitter.emit_start(); + } + + #[test] + fn emit_progress_no_panic() { + let state = make_state(Phase::Put); + state.inc_data(100); + let emitter = JsonEmitter::new(state); + emitter.emit_progress(50.0, Some(18.0)); + } + + #[test] + fn emit_done_no_panic() { + let emitter = JsonEmitter::new(make_state(Phase::Put)); + emitter.emit_done(); + } + + #[test] + fn emit_put_result_no_panic() { + let emitter = JsonEmitter::new(make_state(Phase::Put)); + emitter.emit_put_result("abc123deadbeef"); + } + + #[test] + fn emit_get_result_no_panic() { + let emitter = JsonEmitter::new(make_state(Phase::Get)); + emitter.emit_get_result(5000, "deadbeef", Some("/tmp/out.bin")); + } + + #[test] + fn emit_get_result_stdout_default_no_panic() { + let emitter = JsonEmitter::new(make_state(Phase::Get)); + emitter.emit_get_result(5000, "deadbeef", None); + } + + #[test] + fn emit_ack_no_panic() { + let emitter = JsonEmitter::new(make_state(Phase::Put)); + emitter.emit_ack(1, "abc"); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/log.rs b/peeroxide-cli/src/cmd/deaddrop/progress/log.rs new file mode 100644 index 0000000..3873cb0 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/log.rs @@ -0,0 +1,152 @@ +#![allow(dead_code)] + +use std::sync::Arc; +use std::sync::atomic::Ordering; +use std::time::Duration; + +use tokio::sync::{Mutex, Notify}; +use tokio::task::JoinHandle; + +use crate::cmd::deaddrop::progress::{ + format::render_log_line, + rate::RateCalculator, + state::ProgressState, +}; + +/// Non-TTY progress renderer that prints one formatted line to stderr every +/// 2 seconds. Mirrors the cancellation pattern used by `BarRenderer`: +/// `Arc` + `tokio::select!`, with `Drop` issuing a sync +/// `notify_one()` so the tick task exits cleanly without async-in-Drop. 
+pub struct PeriodicLogRenderer {
+    state: Arc<ProgressState>,
+    #[allow(dead_code)]
+    rate: Arc<Mutex<RateCalculator>>,
+    stop: Arc<Notify>,
+    tick_handle: Option<JoinHandle<()>>,
+    finished: bool,
+}
+
+impl PeriodicLogRenderer {
+    pub fn new(state: Arc<ProgressState>) -> Self {
+        let rate = Arc::new(Mutex::new(RateCalculator::new()));
+        let stop = Arc::new(Notify::new());
+
+        let stop_clone = stop.clone();
+        let state_clone = state.clone();
+        let rate_clone = rate.clone();
+
+        let tick_handle = tokio::spawn(async move {
+            let mut interval = tokio::time::interval(Duration::from_secs(2));
+            interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
+            // First tick fires immediately by default; advance past it so the
+            // first log line lands ~2s after construction.
+            interval.tick().await;
+            loop {
+                tokio::select! {
+                    _ = interval.tick() => {
+                        let now = std::time::Instant::now();
+                        let bytes_done = state_clone.bytes_done.load(Ordering::Relaxed);
+                        let mut rate_guard = rate_clone.lock().await;
+                        rate_guard.record(now, bytes_done);
+                        let smoothed = rate_guard.rate_bps();
+                        let total = state_clone.bytes_total.load(Ordering::Relaxed);
+                        // Reuse `bytes_done` read above — avoid a second atomic load.
+                        let eta = rate_guard.eta_secs(total, bytes_done);
+                        drop(rate_guard);
+                        let line = render_log_line(&state_clone, smoothed, eta);
+                        eprintln!("{line}");
+                    }
+                    _ = stop_clone.notified() => break,
+                }
+            }
+        });
+
+        Self {
+            state,
+            rate,
+            stop,
+            tick_handle: Some(tick_handle),
+            finished: false,
+        }
+    }
+
+    /// Stop the tick task without consuming `self`. Idempotent — calling
+    /// twice is a no-op. Lets the reporter survive a PUT refresh-loop
+    /// handoff before final cleanup.
+    pub async fn finish_initial(&mut self) {
+        if self.finished {
+            return;
+        }
+        self.stop.notify_one();
+        if let Some(handle) = self.tick_handle.take() {
+            let _ = handle.await;
+        }
+        self.finished = true;
+    }
+
+    /// Full cleanup, consuming `self`.
+ pub async fn finish(mut self) { + self.finish_initial().await; + } + + pub fn state(&self) -> &Arc { + &self.state + } +} + +impl Drop for PeriodicLogRenderer { + fn drop(&mut self) { + // Cancellation signal is sync-safe; the spawned tick task will + // observe it on its next select poll. We do NOT await here. + self.stop.notify_one(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::deaddrop::progress::state::Phase; + use tokio::time::timeout; + + fn put_v2_state() -> Arc { + ProgressState::new(Phase::Put, 2, Arc::::from("file.txt")) + } + + #[tokio::test] + async fn new_creates_renderer() { + let renderer = PeriodicLogRenderer::new(put_v2_state()); + assert!(!renderer.finished); + assert!(renderer.tick_handle.is_some()); + renderer.finish().await; + } + + #[tokio::test] + async fn finish_initial_idempotent() { + let mut renderer = PeriodicLogRenderer::new(put_v2_state()); + renderer.finish_initial().await; + renderer.finish_initial().await; + assert!(renderer.finished); + assert!(renderer.tick_handle.is_none()); + } + + #[tokio::test] + async fn finish_completes_within_timeout() { + let renderer = PeriodicLogRenderer::new(put_v2_state()); + let result = timeout(Duration::from_millis(500), renderer.finish()).await; + assert!(result.is_ok(), "finish() should complete within 500ms"); + } + + #[tokio::test] + async fn drop_does_not_panic() { + drop(PeriodicLogRenderer::new(put_v2_state())); + tokio::time::sleep(Duration::from_millis(10)).await; + } + + #[tokio::test] + async fn tick_does_not_fire_before_first_interval() { + let renderer = PeriodicLogRenderer::new(put_v2_state()); + assert!(!renderer.finished); + assert!(renderer.tick_handle.is_some()); + renderer.finish().await; + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/mod.rs b/peeroxide-cli/src/cmd/deaddrop/progress/mod.rs new file mode 100644 index 0000000..716bdd1 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/mod.rs @@ -0,0 +1,12 @@ +pub mod bar; +pub mod 
format;
+pub mod events;
+pub mod json;
+pub mod log;
+pub mod mode;
+pub mod rate;
+pub mod reporter;
+pub mod state;
+
+#[allow(unused_imports)]
+pub use reporter::ProgressReporter;
diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/mode.rs b/peeroxide-cli/src/cmd/deaddrop/progress/mode.rs
new file mode 100644
index 0000000..4618fc5
--- /dev/null
+++ b/peeroxide-cli/src/cmd/deaddrop/progress/mode.rs
@@ -0,0 +1,64 @@
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ProgressMode {
+    Bar,
+    PeriodicLog,
+    Json,
+    Off,
+}
+
+/// Choose the progress channel from CLI flags and stderr TTY status.
+/// Precedence: `--json` beats `--no-progress`, which beats TTY detection;
+/// a TTY gets live bars, a pipe gets periodic log lines.
+pub fn select(stderr_is_tty: bool, no_progress: bool, json: bool) -> ProgressMode {
+    match (json, no_progress, stderr_is_tty) {
+        (true, _, _) => ProgressMode::Json,
+        (false, true, _) => ProgressMode::Off,
+        (false, false, true) => ProgressMode::Bar,
+        (false, false, false) => ProgressMode::PeriodicLog,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{select, ProgressMode};
+
+    #[test]
+    fn tty_and_no_progress_off_with_json() {
+        assert_eq!(select(false, true, true), ProgressMode::Json);
+    }
+
+    #[test]
+    fn tty_and_progress_bar() {
+        assert_eq!(select(true, false, false), ProgressMode::Bar);
+    }
+
+    #[test]
+    fn tty_and_no_progress_off() {
+        assert_eq!(select(true, true, false), ProgressMode::Off);
+    }
+
+    #[test]
+    fn tty_and_json_wins() {
+        assert_eq!(select(true, false, true), ProgressMode::Json);
+    }
+
+    #[test]
+    fn non_tty_and_periodic_log() {
+        assert_eq!(select(false, false, false), ProgressMode::PeriodicLog);
+    }
+
+    #[test]
+    fn non_tty_and_no_progress_off() {
+        assert_eq!(select(false, true, false), ProgressMode::Off);
+    }
+
+    #[test]
+    fn non_tty_and_json_wins() {
+        assert_eq!(select(false, false, true), ProgressMode::Json);
+    }
+
+    #[test]
+    fn non_tty_no_progress_json_still_wins() {
+        assert_eq!(select(false, true, true), ProgressMode::Json);
+    }
+}
diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/rate.rs b/peeroxide-cli/src/cmd/deaddrop/progress/rate.rs
new file mode 100644
index 0000000..1b9b596
--- /dev/null
+++ b/peeroxide-cli/src/cmd/deaddrop/progress/rate.rs
@@ -0,0 
+1,186 @@
+#![allow(dead_code)]
+
+use std::collections::VecDeque;
+use std::time::{Duration, Instant};
+
+pub struct RateCalculator {
+    window: VecDeque<(Instant, u64)>,
+    window_secs: f64,
+    max_samples: usize,
+}
+
+impl RateCalculator {
+    pub fn new() -> Self {
+        Self::new_with_window(5.0, 200)
+    }
+
+    pub fn new_with_window(window_secs: f64, max_samples: usize) -> Self {
+        Self {
+            window: VecDeque::new(),
+            window_secs: window_secs.max(0.0), // negative window is meaningless; clamp
+            max_samples,
+        }
+    }
+
+    pub fn record(&mut self, now: Instant, bytes_so_far: u64) {
+        self.window.push_back((now, bytes_so_far));
+
+        let window_secs = self.window_secs;
+        while let Some((instant, _)) = self.window.front() {
+            let Some(age) = now.checked_duration_since(*instant) else {
+                break; // front is newer than `now` (clock anomaly) — stop evicting
+            };
+            if age > Duration::from_secs_f64(window_secs) {
+                self.window.pop_front();
+            } else {
+                break;
+            }
+        }
+
+        while self.window.len() > self.max_samples {
+            self.window.pop_front();
+        }
+    }
+
+    pub fn rate_bps(&self) -> f64 {
+        if self.window_secs <= 0.0 || self.window.len() < 2 {
+            return 0.0;
+        }
+
+        let Some((latest_instant, latest_bytes)) = self.window.back() else {
+            return 0.0;
+        };
+        let Some((oldest_instant, oldest_bytes)) = self.window.front() else {
+            return 0.0;
+        };
+
+        let Some(window) = latest_instant.checked_duration_since(*oldest_instant) else {
+            return 0.0;
+        };
+        if window.is_zero() || latest_bytes < oldest_bytes {
+            return 0.0; // zero span or non-monotonic counter — no meaningful rate
+        }
+
+        let bytes = latest_bytes - oldest_bytes;
+        bytes as f64 / window.as_secs_f64()
+    }
+
+    pub fn eta_secs(&self, total: u64, done: u64) -> Option<f64> {
+        if total == 0 || done >= total {
+            return None;
+        }
+
+        let rate = self.rate_bps();
+        if rate < 1e-3 {
+            return None; // effectively stalled — no meaningful ETA
+        }
+
+        Some((total - done) as f64 / rate)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn base() -> Instant {
+        Instant::now()
+    }
+
+    #[test]
+    fn constant_rate() {
+        let start = base();
+        let mut rate = RateCalculator::new();
+
+        for i in 0..51_u64 {
+            rate.record(start + Duration::from_millis(i * 
100), i * 100_000); + } + + let bps = rate.rate_bps(); + assert!((950_000.0..=1_050_000.0).contains(&bps), "rate={bps}"); + } + + #[test] + fn burst_then_idle() { + let start = base(); + let mut rate = RateCalculator::new(); + + for i in 0..10_u64 { + rate.record(start + Duration::from_millis(i * 100), (i + 1) * 1_000_000); + } + rate.record(start + Duration::from_secs(6), 10_000_000); + + assert!(rate.rate_bps() < 500_000.0, "rate={}", rate.rate_bps()); + } + + #[test] + fn single_sample() { + let mut rate = RateCalculator::new(); + rate.record(base(), 123); + assert_eq!(rate.rate_bps(), 0.0); + } + + #[test] + fn zero_rate_eta() { + let mut rate = RateCalculator::new(); + rate.record(base(), 123); + assert_eq!(rate.eta_secs(100, 0), None); + } + + #[test] + fn done_equals_total() { + let mut rate = RateCalculator::new(); + rate.record(base(), 123); + assert_eq!(rate.eta_secs(100, 100), None); + } + + #[test] + fn done_greater_total() { + let mut rate = RateCalculator::new(); + rate.record(base(), 123); + assert_eq!(rate.eta_secs(100, 150), None); + } + + #[test] + fn reversed_samples() { + let t = base(); + let mut rate = RateCalculator::new(); + rate.record(t, 100); + rate.record(t, 200); + assert_eq!(rate.rate_bps(), 0.0); + } + + #[test] + fn sample_cap() { + let t = base(); + let mut rate = RateCalculator::new(); + + for i in 0..300_u64 { + rate.record(t, i); + } + + assert!(rate.window.len() <= 200, "len={}", rate.window.len()); + } + + #[test] + fn eviction_by_age() { + let start = base(); + let mut rate = RateCalculator::new(); + + for i in 0..100_u64 { + rate.record(start + Duration::from_millis(i * 100), i); + } + rate.record(start + Duration::from_secs(10), 100); + + assert!((50..=51).contains(&rate.window.len()), "len={}", rate.window.len()); + assert!( + rate.window.front().is_some_and(|(instant, _)| { + instant + .checked_duration_since(start + Duration::from_secs(5)) + .is_none_or(|age| age <= Duration::from_millis(100)) + }), + "front={:?}", + 
rate.window.front() + ); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/reporter.rs b/peeroxide-cli/src/cmd/deaddrop/progress/reporter.rs new file mode 100644 index 0000000..908562a --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/reporter.rs @@ -0,0 +1,465 @@ +#![allow(dead_code)] + +//! `ProgressReporter` — enum facade over the four progress channels. +//! +//! The rest of the codebase only ever interacts with `ProgressReporter`. +//! Construction picks a variant based on `ProgressMode`, and lifecycle / +//! event-dispatch methods fan out to the underlying renderer (or no-op +//! for `Off`). The `Bar` and `Log` renderers run their own internal tick +//! tasks; the `Json` variant is caller-driven and owns a `RateCalculator` +//! so it can fill the rate/eta fields on each progress snapshot. + +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; + +use tokio::sync::Mutex; + +use crate::cmd::deaddrop::progress::{ + bar::BarRenderer, + json::JsonEmitter, + log::PeriodicLogRenderer, + mode::ProgressMode, + rate::RateCalculator, + state::{Phase, ProgressState}, +}; + +pub enum ProgressReporter { + Bar(BarRenderer), + Log(PeriodicLogRenderer), + Json { + emitter: JsonEmitter, + rate: Arc>, + }, + Off, +} + +impl ProgressReporter { + pub fn new(mode: ProgressMode, state: Arc) -> Self { + match mode { + ProgressMode::Bar => Self::Bar(BarRenderer::new(state)), + ProgressMode::PeriodicLog => Self::Log(PeriodicLogRenderer::new(state)), + ProgressMode::Json => Self::Json { + emitter: JsonEmitter::new(state), + rate: Arc::new(Mutex::new(RateCalculator::new())), + }, + ProgressMode::Off => Self::Off, + } + } + + /// Convenience constructor: reads stderr TTY status and args flags, selects mode. 
+ pub fn from_args(state: Arc, no_progress: bool, json: bool) -> Self { + use std::io::IsTerminal; + let mode = crate::cmd::deaddrop::progress::mode::select( + std::io::stderr().is_terminal(), + no_progress, + json, + ); + Self::new(mode, state) + } + + /// Called after initial PUT publish completes. + /// - Bar/Log: stops the tick, then prints pickup key to stdout. + /// - Json: emits a `put_result` event (which includes the pickup key). + /// - Off: prints pickup key to stdout. + /// + /// Does NOT consume self — the reporter stays alive for the refresh/ack loop. + pub async fn emit_initial_publish_complete(&mut self, pickup_key: &str) { + match self { + Self::Bar(r) => { + r.finish_initial().await; + println!("{pickup_key}"); + } + Self::Log(r) => { + r.finish_initial().await; + println!("{pickup_key}"); + } + Self::Json { emitter, .. } => { + emitter.emit_put_result(pickup_key); + } + Self::Off => { + println!("{pickup_key}"); + } + } + } + + /// Stop the tick task; leave `self` alive for the PUT refresh-loop + /// handoff. For Json, emit a `done` event since there is no tick to + /// stop. Off is a no-op. + pub async fn finish_initial(&mut self) { + match self { + Self::Bar(r) => r.finish_initial().await, + Self::Log(r) => r.finish_initial().await, + Self::Json { emitter, .. } => emitter.emit_done(), + Self::Off => {} + } + } + + /// Full shutdown — consumes `self`. + pub async fn finish(self) { + match self { + Self::Bar(r) => r.finish().await, + Self::Log(r) => r.finish().await, + Self::Json { emitter, .. } => emitter.emit_done(), + Self::Off => {} + } + } + + /// Called after each data chunk is fetched/stored. Bar and Log + /// renderers have their own internal tick — this is a no-op for + /// them. The caller drives explicit progress emission for Json via + /// `emit_progress_snapshot`. + pub fn on_chunk_done(&self) {} + + /// Called after each index chunk is fetched (v2 GET). 
Same as + /// `on_chunk_done` — internal tick handles Bar/Log; caller drives + /// Json. + pub fn on_index_done(&self) {} + + /// Emit a `start` event. Json only; other variants no-op. + pub fn on_start(&self) { + if let Self::Json { emitter, .. } = self { + emitter.emit_start(); + } + } + + /// Signal completion to the active channel. Equivalent to + /// `finish_initial` — emits a Json `done` event or stops the + /// renderer tick task. + pub async fn on_done(&mut self) { + self.finish_initial().await; + } + + /// Emit a `put_result` event with the assembled pickup key. Json + /// only. + pub fn on_put_result(&self, key: &str) { + if let Self::Json { emitter, .. } = self { + emitter.emit_put_result(key); + } + } + + /// Emit a `get_result` event. Json only. + pub fn on_get_result(&self, bytes: u64, crc: &str, output: Option<&str>) { + if let Self::Json { emitter, .. } = self { + emitter.emit_get_result(bytes, crc, output); + } + } + + /// Emit an `ack` event for a notify-pickup. Json only. + pub fn on_ack(&self, pickup_number: u64, peer: &str) { + if let Self::Json { emitter, .. } = self { + emitter.emit_ack(pickup_number, peer); + } + } + + /// Emit a periodic progress snapshot for the Json channel. Bar/Log + /// have their own tick tasks and ignore this; Off no-ops. The + /// caller is expected to invoke this from the orchestrator's tick + /// loop so the rate/eta fields stay fresh. 
+ pub async fn emit_progress_snapshot(&self) { + if let Self::Json { emitter, rate } = self { + let now = std::time::Instant::now(); + let bytes_done = emitter.state.bytes_done.load(Ordering::Relaxed); + let total = emitter.state.bytes_total.load(Ordering::Relaxed); + let mut r = rate.lock().await; + r.record(now, bytes_done); + let rate_bps = r.rate_bps(); + let eta = r.eta_secs(total, bytes_done); + drop(r); + emitter.emit_progress(rate_bps, eta); + } + } + + /// Build a clonable factory for spawning short-lived per-operation + /// progress bars after the initial publish has finished. Use this + /// for refresh ticks and need-list republishes — `begin_operation` + /// returns a fresh transient bar that the caller drives by + /// `inc_data`/`inc_index` on the returned state and then disposes + /// with `OperationHandle::finish`. + /// + /// The factory inherits wire counters / filename / version from the + /// reporter so wire-throughput readings stay continuous across + /// operations. + pub fn operation_factory(&self) -> OperationFactory { + let kind = match self { + Self::Bar(r) => { + let st = r.state(); + OperationFactoryKind::Bar { + wire_sent: st.wire_bytes_sent.clone(), + wire_received: st.wire_bytes_received.clone(), + filename: st.filename.clone(), + version: st.version, + } + } + Self::Log(_) | Self::Json { .. } | Self::Off => OperationFactoryKind::Quiet, + }; + OperationFactory { kind } + } +} + +/// Cloneable handle that can spawn transient per-operation progress +/// bars. Safe to pass into background tasks (e.g. the need-list +/// watcher) so they can show their own progress without holding a +/// reference to the main reporter. +#[derive(Clone)] +pub struct OperationFactory { + kind: OperationFactoryKind, +} + +#[derive(Clone)] +enum OperationFactoryKind { + Bar { + wire_sent: Arc, + wire_received: Arc, + filename: Arc, + version: u8, + }, + /// Log / Json / Off: no visible per-operation UI. 
The handle still + /// exposes a `ProgressState` so publish helpers can call + /// `inc_data`/`inc_index` unconditionally without branching. + Quiet, +} + +impl OperationFactory { + /// Begin a per-operation progress display. The returned handle owns + /// a fresh `ProgressState` that callers should hand to publish + /// helpers via `handle.state()`. Drop or call `finish()` when the + /// operation completes. + pub fn begin_operation( + &self, + bytes_total: u64, + indexes_total: u32, + data_total: u32, + ) -> OperationHandle { + match &self.kind { + OperationFactoryKind::Bar { + wire_sent, + wire_received, + filename, + version, + } => { + let wire = peeroxide_dht::io::WireCounters { + bytes_sent: wire_sent.clone(), + bytes_received: wire_received.clone(), + }; + let state = ProgressState::new_with_wire( + Phase::Put, + *version, + filename.clone(), + wire, + ); + state.set_length(bytes_total, indexes_total, data_total); + let renderer = BarRenderer::new(state.clone()); + OperationHandle { + state, + inner: OperationInner::Bar(Some(renderer)), + } + } + OperationFactoryKind::Quiet => { + let state = ProgressState::new(Phase::Put, 2, Arc::::from("")); + state.set_length(bytes_total, indexes_total, data_total); + OperationHandle { + state, + inner: OperationInner::Quiet, + } + } + } + } +} + +/// Handle to an in-flight per-operation progress display. +pub struct OperationHandle { + state: Arc, + inner: OperationInner, +} + +enum OperationInner { + Bar(Option), + Quiet, +} + +impl OperationHandle { + /// Shared progress state for this operation. Hand it to publish + /// helpers so they can increment data/index counters as work + /// completes. + pub fn state(&self) -> Arc { + self.state.clone() + } + + /// Stop the per-operation bar and clear its lines from the + /// terminal. Quiet variants no-op. Consumes `self`. 
+ pub async fn finish(mut self) { + match &mut self.inner { + OperationInner::Bar(slot) => { + if let Some(renderer) = slot.take() { + renderer.finish_and_clear().await; + } + } + OperationInner::Quiet => {} + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::deaddrop::progress::state::Phase; + + fn make_state() -> Arc { + let s = ProgressState::new(Phase::Put, 1, Arc::::from("test.bin")); + s.set_length(1000, 0, 2); + s + } + + #[test] + fn from_args_off_when_no_progress() { + let state = make_state(); + let r = ProgressReporter::from_args(state, true, false); + assert!(matches!(r, ProgressReporter::Off)); + } + + #[test] + fn from_args_json_when_json_flag() { + let state = make_state(); + let r = ProgressReporter::from_args(state, false, true); + assert!(matches!(r, ProgressReporter::Json { .. })); + } + + #[test] + fn off_variant_constructs() { + let r = ProgressReporter::new(ProgressMode::Off, make_state()); + assert!(matches!(r, ProgressReporter::Off)); + } + + #[tokio::test] + async fn off_variant_lifecycle_is_noop() { + let mut r = ProgressReporter::new(ProgressMode::Off, make_state()); + r.finish_initial().await; + r.finish().await; + } + + #[tokio::test] + async fn off_event_methods_noop() { + let r = ProgressReporter::new(ProgressMode::Off, make_state()); + r.on_start(); + r.on_chunk_done(); + r.on_index_done(); + r.on_put_result("key"); + r.on_get_result(100, "crc", None); + r.on_ack(1, "peer"); + r.emit_progress_snapshot().await; + } + + #[test] + fn json_variant_constructs() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + assert!(matches!(r, ProgressReporter::Json { .. 
})); + } + + #[tokio::test] + async fn json_on_start_no_panic() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.on_start(); + } + + #[tokio::test] + async fn json_on_put_result_no_panic() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.on_put_result("abc123key"); + } + + #[tokio::test] + async fn json_on_get_result_no_panic() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.on_get_result(5000, "deadbeef", Some("/tmp/out.bin")); + } + + #[tokio::test] + async fn json_on_ack_no_panic() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.on_ack(2, "peer-id"); + } + + #[tokio::test] + async fn json_emit_progress_snapshot_no_panic() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.emit_progress_snapshot().await; + } + + #[tokio::test] + async fn json_finish_initial_emits_done() { + let mut r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.finish_initial().await; + } + + #[tokio::test] + async fn json_finish_consumes_and_emits_done() { + let r = ProgressReporter::new(ProgressMode::Json, make_state()); + r.finish().await; + } + + #[tokio::test] + async fn bar_variant_constructs() { + let r = ProgressReporter::new(ProgressMode::Bar, make_state()); + assert!(matches!(r, ProgressReporter::Bar(_))); + r.finish().await; + } + + #[tokio::test] + async fn log_variant_constructs() { + let r = ProgressReporter::new(ProgressMode::PeriodicLog, make_state()); + assert!(matches!(r, ProgressReporter::Log(_))); + r.finish().await; + } + + #[tokio::test] + async fn bar_finish_initial_then_finish() { + let mut r = ProgressReporter::new(ProgressMode::Bar, make_state()); + r.finish_initial().await; + r.finish().await; + } + + #[tokio::test] + async fn bar_operation_factory_begin_creates_visible_bar() { + let r = ProgressReporter::new(ProgressMode::Bar, make_state()); + let factory = r.operation_factory(); + let op = factory.begin_operation(500, 1, 4); + // 
The op state should have been initialized with the requested totals. + assert_eq!(op.state().bytes_total.load(Ordering::Relaxed), 500); + assert_eq!(op.state().indexes_total.load(Ordering::Relaxed), 1); + assert_eq!(op.state().data_total.load(Ordering::Relaxed), 4); + // Incrementing the state should be reflected. + op.state().inc_data(100); + assert_eq!(op.state().bytes_done.load(Ordering::Relaxed), 100); + assert_eq!(op.state().data_done.load(Ordering::Relaxed), 1); + op.finish().await; + r.finish().await; + } + + #[tokio::test] + async fn quiet_operation_factory_returns_usable_state() { + for mode in [ProgressMode::Off, ProgressMode::PeriodicLog, ProgressMode::Json] { + let r = ProgressReporter::new(mode, make_state()); + let factory = r.operation_factory(); + let op = factory.begin_operation(100, 0, 1); + op.state().inc_data(50); + assert_eq!(op.state().bytes_done.load(Ordering::Relaxed), 50); + op.finish().await; + r.finish().await; + } + } + + #[tokio::test] + async fn operation_factory_is_clone_and_send() { + let r = ProgressReporter::new(ProgressMode::Bar, make_state()); + let factory = r.operation_factory(); + let f2 = factory.clone(); + let task = tokio::spawn(async move { + let op = f2.begin_operation(10, 0, 1); + op.finish().await; + }); + task.await.unwrap(); + let op = factory.begin_operation(20, 0, 1); + op.finish().await; + r.finish().await; + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/progress/state.rs b/peeroxide-cli/src/cmd/deaddrop/progress/state.rs new file mode 100644 index 0000000..7df9fdd --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/progress/state.rs @@ -0,0 +1,162 @@ +#![allow(dead_code)] + +use std::sync::Arc; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::time::Instant; + +use serde::Serialize; + +#[derive(Serialize, Clone, Copy, Debug, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +pub enum Phase { + Put, + Get, +} + +pub struct ProgressState { + pub phase: Phase, + pub version: u8, + pub filename: 
Arc, + pub bytes_total: AtomicU64, + pub bytes_done: AtomicU64, + pub indexes_total: AtomicU32, + pub indexes_done: AtomicU32, + pub data_total: AtomicU32, + pub data_done: AtomicU32, + /// Cumulative UDP bytes sent at the DHT IO layer. Shared `Arc` + /// with `peeroxide_dht::io::WireCounters` so the display can sample the + /// live counter without going through a getter call. Default-constructed + /// states have an unconnected counter that stays at 0 (useful for v1 + /// where wire stats aren't displayed). + pub wire_bytes_sent: Arc, + /// Cumulative UDP bytes received at the DHT IO layer. See `wire_bytes_sent`. + pub wire_bytes_received: Arc, + pub start_instant: Instant, +} + +impl ProgressState { + pub fn new(phase: Phase, version: u8, filename: Arc) -> Arc { + Self::new_with_wire( + phase, + version, + filename, + peeroxide_dht::io::WireCounters::default(), + ) + } + + /// Construct a `ProgressState` connected to a live `WireCounters` so the + /// renderer can display real DHT wire-byte rates alongside payload rates. 
+ pub fn new_with_wire( + phase: Phase, + version: u8, + filename: Arc, + wire: peeroxide_dht::io::WireCounters, + ) -> Arc { + Arc::new(Self { + phase, + version, + filename, + bytes_total: AtomicU64::new(0), + bytes_done: AtomicU64::new(0), + indexes_total: AtomicU32::new(0), + indexes_done: AtomicU32::new(0), + data_total: AtomicU32::new(0), + data_done: AtomicU32::new(0), + wire_bytes_sent: wire.bytes_sent, + wire_bytes_received: wire.bytes_received, + start_instant: Instant::now(), + }) + } + + pub fn set_length(&self, bytes_total: u64, indexes_total: u32, data_total: u32) { + self.bytes_total.store(bytes_total, Ordering::Relaxed); + self.indexes_total.store(indexes_total, Ordering::Relaxed); + self.data_total.store(data_total, Ordering::Relaxed); + } + + pub fn inc_index(&self) { + self.indexes_done.fetch_add(1, Ordering::Relaxed); + } + + pub fn inc_data(&self, chunk_bytes: u64) { + self.data_done.fetch_add(1, Ordering::Relaxed); + self.bytes_done.fetch_add(chunk_bytes, Ordering::Relaxed); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn phase_serde() { + assert_eq!(serde_json::to_string(&Phase::Put).unwrap(), "\"put\""); + assert_eq!(serde_json::to_string(&Phase::Get).unwrap(), "\"get\""); + } + + #[test] + fn set_length_after_start() { + let state = ProgressState::new(Phase::Put, 2, Arc::::from("file.txt")); + assert_eq!(state.bytes_total.load(Ordering::Relaxed), 0); + state.set_length(1000, 3, 5); + assert_eq!(state.bytes_total.load(Ordering::Relaxed), 1000); + assert_eq!(state.indexes_total.load(Ordering::Relaxed), 3); + assert_eq!(state.data_total.load(Ordering::Relaxed), 5); + } + + #[test] + fn no_panic_on_zero_bytes() { + let state = ProgressState::new(Phase::Get, 2, Arc::::from("file.txt")); + state.inc_data(0); + assert_eq!(state.data_done.load(Ordering::Relaxed), 1); + assert_eq!(state.bytes_done.load(Ordering::Relaxed), 0); + } + + #[test] + fn new_with_wire_shares_atomics_with_counters() { + // Verify that incrementing the 
WireCounters' atomics is visible from + // the ProgressState (i.e. the Arcs are shared, not cloned by value). + use std::sync::atomic::AtomicU64; + let wire = peeroxide_dht::io::WireCounters { + bytes_sent: Arc::new(AtomicU64::new(0)), + bytes_received: Arc::new(AtomicU64::new(0)), + }; + let state = ProgressState::new_with_wire( + Phase::Put, + 2, + Arc::::from("file.txt"), + wire.clone(), + ); + wire.bytes_sent.store(12_345, Ordering::Relaxed); + wire.bytes_received.store(67_890, Ordering::Relaxed); + assert_eq!(state.wire_bytes_sent.load(Ordering::Relaxed), 12_345); + assert_eq!(state.wire_bytes_received.load(Ordering::Relaxed), 67_890); + } + + #[test] + fn new_default_wire_is_unconnected_zero() { + // Plain `new` produces a state whose wire counters are independent + // and stay at 0 forever — useful for v1 paths that don't display + // wire stats. + let state = ProgressState::new(Phase::Put, 1, Arc::::from("file.txt")); + assert_eq!(state.wire_bytes_sent.load(Ordering::Relaxed), 0); + assert_eq!(state.wire_bytes_received.load(Ordering::Relaxed), 0); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn concurrent_inc() { + let state = ProgressState::new(Phase::Put, 2, Arc::::from("file.txt")); + let mut tasks = Vec::with_capacity(64); + for _ in 0..64 { + let state = Arc::clone(&state); + tasks.push(tokio::spawn(async move { + state.inc_data(65536); + })); + } + for task in tasks { + task.await.unwrap(); + } + assert_eq!(state.data_done.load(Ordering::Relaxed), 64); + assert_eq!(state.bytes_done.load(Ordering::Relaxed), 64 * 65536); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop.rs b/peeroxide-cli/src/cmd/deaddrop/v1.rs similarity index 57% rename from peeroxide-cli/src/cmd/deaddrop.rs rename to peeroxide-cli/src/cmd/deaddrop/v1.rs index af543e3..0c76f04 100644 --- a/peeroxide-cli/src/cmd/deaddrop.rs +++ b/peeroxide-cli/src/cmd/deaddrop/v1.rs @@ -1,97 +1,17 @@ -use clap::{Args, Subcommand}; -use libudx::UdxRuntime; -use 
peeroxide::KeyPair; -use peeroxide_dht::hyperdht::{self, HyperDhtHandle, MutablePutResult}; -use std::collections::HashSet; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use tokio::signal; -use tokio::sync::{Mutex, Semaphore}; - -use crate::config::ResolvedConfig; -use super::{build_dht_config, to_hex}; +use super::*; +use crate::cmd::sigterm_recv; +use crate::cmd::deaddrop::progress::{ + state::{Phase, ProgressState}, + reporter::ProgressReporter, +}; const MAX_CHUNKS: usize = 65535; const ROOT_HEADER_SIZE: usize = 39; const NON_ROOT_HEADER_SIZE: usize = 33; -const MAX_PAYLOAD: usize = 1000; const ROOT_PAYLOAD_MAX: usize = MAX_PAYLOAD - ROOT_HEADER_SIZE; const NON_ROOT_PAYLOAD_MAX: usize = MAX_PAYLOAD - NON_ROOT_HEADER_SIZE; const VERSION: u8 = 0x01; -#[derive(Subcommand)] -pub enum DeaddropCommands { - /// Store data on DHT, print pickup key - Leave(LeaveArgs), - /// Retrieve data from DHT using pickup key - Pickup(PickupArgs), -} - -#[derive(Args)] -pub struct LeaveArgs { - /// File path or - for stdin - file: String, - - /// Hard cap on outbound byte rate (e.g. 
100k, 1m) - #[arg(long)] - max_speed: Option, - - /// Refresh interval in seconds (default: 600) - #[arg(long, default_value_t = 600)] - refresh_interval: u64, - - /// Stop refreshing after this duration - #[arg(long)] - ttl: Option, - - /// Exit after N pickups detected - #[arg(long)] - max_pickups: Option, - - /// Derive keypair from passphrase (provided on command line) - #[arg(long, conflicts_with = "interactive_passphrase")] - passphrase: Option, - - /// Derive keypair from passphrase (prompted interactively, hidden input) - #[arg(long, conflicts_with = "passphrase")] - interactive_passphrase: bool, -} - -#[derive(Args)] -pub struct PickupArgs { - /// Pickup key (64-char hex or passphrase text) - #[arg(required_unless_present_any = ["passphrase", "interactive_passphrase"])] - key: Option, - - /// Derive pickup key from passphrase (provided on command line) - #[arg(long, conflicts_with = "interactive_passphrase")] - passphrase: Option, - - /// Derive pickup key from passphrase (prompted interactively, hidden input) - #[arg(long, conflicts_with = "passphrase")] - interactive_passphrase: bool, - - /// Write output to file (default: stdout) - #[arg(long)] - output: Option, - - /// Give up on any single chunk after this duration (default: 1200s) - #[arg(long, default_value_t = 1200)] - timeout: u64, - - /// Don't announce pickup acknowledgement - #[arg(long)] - no_ack: bool, -} - -pub async fn run(cmd: DeaddropCommands, cfg: &ResolvedConfig) -> i32 { - match cmd { - DeaddropCommands::Leave(args) => run_leave(args, cfg).await, - DeaddropCommands::Pickup(args) => run_pickup(args, cfg).await, - } -} - fn derive_chunk_keypair(root_seed: &[u8; 32], chunk_index: u16) -> KeyPair { let mut input = Vec::with_capacity(34); input.extend_from_slice(root_seed); @@ -100,10 +20,6 @@ fn derive_chunk_keypair(root_seed: &[u8; 32], chunk_index: u16) -> KeyPair { KeyPair::from_seed(hash) } -fn compute_crc32c(data: &[u8]) -> u32 { - crc32c::crc32c(data) -} - fn 
encode_root_chunk(total_chunks: u16, crc: u32, next_pk: &[u8; 32], payload: &[u8]) -> Vec { let mut buf = Vec::with_capacity(ROOT_HEADER_SIZE + payload.len()); buf.push(VERSION); @@ -122,23 +38,7 @@ fn encode_non_root_chunk(next_pk: &[u8; 32], payload: &[u8]) -> Vec { buf } -fn parse_max_speed(s: &str) -> Result { - let s = s.trim().to_lowercase(); - if let Some(num) = s.strip_suffix('m') { - num.parse::() - .map(|n| n * 1_000_000) - .map_err(|e| format!("invalid --max-speed: {e}")) - } else if let Some(num) = s.strip_suffix('k') { - num.parse::() - .map(|n| n * 1_000) - .map_err(|e| format!("invalid --max-speed: {e}")) - } else { - s.parse::() - .map_err(|e| format!("invalid --max-speed: {e}")) - } -} - -async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { +pub async fn run_put(args: &PutArgs, cfg: &ResolvedConfig) -> i32 { if args.refresh_interval == 0 { eprintln!("error: --refresh-interval must be greater than 0"); return 1; @@ -240,22 +140,36 @@ async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { (None, None) }; - eprintln!("DEADDROP LEAVE {} chunks ({} bytes)", total_chunks, data.len()); + let filename: Arc = if args.file == "-" { + Arc::from("") + } else { + let base = std::path::Path::new(&args.file) + .file_name() + .map(|n| n.to_string_lossy().into_owned()) + .unwrap_or_else(|| args.file.clone()); + Arc::from(base.as_str()) + }; + let state = ProgressState::new_with_wire(Phase::Put, 1, filename, handle.wire_counters()); + state.set_length(data.len() as u64, 0, total_chunks as u32); + let mut reporter = ProgressReporter::from_args(state.clone(), args.no_progress, args.json); + reporter.on_start(); - if let Err(e) = publish_chunks(&handle, &chunks, max_concurrency, dispatch_delay, true).await { + if let Err(e) = publish_chunks(&handle, &chunks, max_concurrency, dispatch_delay, Some(state.clone())).await { eprintln!("error: publish failed: {e}"); + reporter.finish().await; let _ = handle.destroy().await; let _ = task.await; return 
1; } let pickup_key = to_hex(&root_kp.public_key); - println!("{pickup_key}"); + reporter.emit_initial_publish_complete(&pickup_key).await; eprintln!(" published to DHT (best-effort)"); eprintln!(" pickup key printed to stdout"); eprintln!(" refreshing every {}s, monitoring for acks...", args.refresh_interval); + let op_factory = reporter.operation_factory(); let ack_topic = peeroxide::discovery_key(&[root_kp.public_key.as_slice(), b"ack"].concat()); let mut seen_acks: HashSet<[u8; 32]> = HashSet::new(); let mut pickup_count: u64 = 0; @@ -269,7 +183,7 @@ async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { loop { tokio::select! { _ = signal::ctrl_c() => break, - _ = super::sigterm_recv() => break, + _ = sigterm_recv() => break, _ = async { if let Some(deadline) = ttl_deadline { tokio::time::sleep_until(deadline).await; @@ -279,7 +193,18 @@ async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { } => break, _ = refresh_interval.tick() => { eprintln!(" refreshing {} chunks...", chunks.len()); - if let Err(e) = publish_chunks(&handle, &chunks, max_concurrency, dispatch_delay, true).await { + let bytes_total: u64 = chunks.iter().map(|c| c.encoded.len() as u64).sum(); + let op = op_factory.begin_operation(bytes_total, 0, chunks.len() as u32); + let refresh_result = publish_chunks( + &handle, + &chunks, + max_concurrency, + dispatch_delay, + Some(op.state()), + ) + .await; + op.finish().await; + if let Err(e) = refresh_result { eprintln!(" warning: refresh failed: {e}"); } } @@ -289,10 +214,12 @@ async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { for peer in &result.peers { if seen_acks.insert(peer.public_key) { pickup_count += 1; + reporter.on_ack(pickup_count, &to_hex(&peer.public_key)); eprintln!(" [ack] pickup #{pickup_count} detected"); if let Some(max) = args.max_pickups { if pickup_count >= max { eprintln!(" max pickups reached, stopping"); + reporter.finish().await; let _ = handle.destroy().await; let _ = task.await; 
return 0; @@ -307,27 +234,12 @@ async fn run_leave(args: LeaveArgs, cfg: &ResolvedConfig) -> i32 { } eprintln!(" stopped refreshing; records expire in ~20m"); + reporter.finish().await; let _ = handle.destroy().await; let _ = task.await; 0 } -fn rpassword_read() -> String { - use std::io::{BufRead, BufReader}; - let tty = match std::fs::File::open("/dev/tty") { - Ok(f) => f, - Err(_) => { - let mut line = String::new(); - std::io::stdin().read_line(&mut line).unwrap_or(0); - return line.trim_end_matches('\n').trim_end_matches('\r').to_string(); - } - }; - let mut reader = BufReader::new(tty); - let mut line = String::new(); - reader.read_line(&mut line).unwrap_or(0); - line.trim_end_matches('\n').trim_end_matches('\r').to_string() -} - fn compute_chunk_count(data_len: usize) -> usize { if data_len <= ROOT_PAYLOAD_MAX { 1 @@ -337,11 +249,6 @@ fn compute_chunk_count(data_len: usize) -> usize { } } -struct ChunkData { - keypair: KeyPair, - encoded: Vec, -} - fn split_into_chunks(data: &[u8], total: u16, crc: u32, root_seed: &[u8; 32]) -> Vec { let mut chunks = Vec::new(); let root_kp = KeyPair::from_seed(*root_seed); @@ -387,251 +294,22 @@ fn split_into_chunks(data: &[u8], total: u16, crc: u32, root_seed: &[u8; 32]) -> chunks } -struct AimdController { - current: usize, - max_cap: Option, - window_size: usize, - degraded_in_window: u32, - total_in_window: u32, -} - -impl AimdController { - fn new(initial: usize, max_cap: Option) -> Self { - Self { - current: initial, - max_cap, - window_size: 10, - degraded_in_window: 0, - total_in_window: 0, - } - } - - fn record(&mut self, degraded: bool) -> Option { - if degraded { - self.degraded_in_window += 1; - } - self.total_in_window += 1; - - if self.total_in_window >= self.window_size as u32 { - let ratio = self.degraded_in_window as f64 / self.total_in_window as f64; - self.degraded_in_window = 0; - self.total_in_window = 0; - - if ratio > 0.3 { - self.current = (self.current / 2).max(1); - } else if ratio == 0.0 { - let 
next = self.current + 1; - self.current = match self.max_cap { - Some(cap) => next.min(cap), - None => next, - }; - } - Some(self.current) - } else { - None - } - } -} - -async fn publish_chunks( - handle: &HyperDhtHandle, - chunks: &[ChunkData], - max_concurrency: Option, - dispatch_delay: Option, - show_progress: bool, -) -> Result<(), String> { - let initial_concurrency = 4usize; - let sem = Arc::new(Semaphore::new(initial_concurrency)); - let active_target = Arc::new(AtomicUsize::new(initial_concurrency)); - let permits_to_forget = Arc::new(AtomicUsize::new(0)); - let controller = Arc::new(Mutex::new(AimdController::new(initial_concurrency, max_concurrency))); - - let total = chunks.len(); - let mut completed = 0usize; - - let mut handles: Vec>> = Vec::new(); - for chunk in chunks { - let permit = loop { - let p = sem.clone().acquire_owned().await.unwrap(); - let forget_pending = permits_to_forget.load(Ordering::Relaxed); - if forget_pending > 0 && permits_to_forget.fetch_sub(1, Ordering::Relaxed) > 0 { - p.forget(); - } else { - break p; - } - }; - - let h = handle.clone(); - let kp = chunk.keypair.clone(); - let data = chunk.encoded.clone(); - - let seq = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_secs(); - - let sem_inner = sem.clone(); - let active_target_inner = active_target.clone(); - let permits_to_forget_inner = permits_to_forget.clone(); - let controller_inner = controller.clone(); - - handles.push(tokio::spawn(async move { - let result = h.mutable_put(&kp, &data, seq).await; - let put_result = match result { - Ok(r) => r, - Err(e) => { - drop(permit); - return Err(format!("mutable_put failed: {e}")); - } - }; - - let degraded = put_result.commit_timeouts > 0; - let new_target = { - let mut ctrl = controller_inner.lock().await; - ctrl.record(degraded) - }; - - if let Some(target) = new_target { - let current_target = active_target_inner.load(Ordering::Relaxed); - if target > current_target { - let add = target - 
current_target; - sem_inner.add_permits(add); - active_target_inner.store(target, Ordering::Relaxed); - } else if target < current_target { - let remove = current_target - target; - permits_to_forget_inner.fetch_add(remove, Ordering::Relaxed); - active_target_inner.store(target, Ordering::Relaxed); - } - } - - drop(permit); - Ok(put_result) - })); - - if let Some(delay) = dispatch_delay { - tokio::time::sleep(delay).await; - } - - let mut i = 0; - while i < handles.len() { - if handles[i].is_finished() { - let h = handles.swap_remove(i); - match h.await { - Ok(Ok(_)) => { - completed += 1; - if show_progress { - eprintln!(" published chunk {completed}/{total}"); - } - } - Ok(Err(e)) => return Err(e), - Err(e) => return Err(format!("task panicked: {e}")), - } - } else { - i += 1; - } - } - } - - for h in handles { - match h.await { - Ok(Ok(_)) => { - completed += 1; - if show_progress { - eprintln!(" published chunk {completed}/{total}"); - } - } - Ok(Err(e)) => return Err(e), - Err(e) => return Err(format!("task panicked: {e}")), - } - } - - Ok(()) -} - -async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { - if args.timeout == 0 { - eprintln!("error: --timeout must be greater than 0"); - return 1; - } - - let root_public_key = if let Some(ref phrase) = args.passphrase { - if phrase.is_empty() { - eprintln!("error: passphrase cannot be empty"); - return 1; - } - derive_pk_from_passphrase(phrase) - } else if args.interactive_passphrase { - eprintln!("Enter passphrase: "); - let passphrase = rpassword_read(); - if passphrase.is_empty() { - eprintln!("error: passphrase cannot be empty"); - return 1; - } - derive_pk_from_passphrase(&passphrase) - } else { - let key = args.key.as_ref().unwrap(); - if key.len() == 64 { - match hex::decode(key) { - Ok(bytes) if bytes.len() == 32 => { - let mut pk = [0u8; 32]; - pk.copy_from_slice(&bytes); - pk - } - _ => derive_pk_from_passphrase(key), - } - } else { - derive_pk_from_passphrase(key) - } - }; - - let pk_hex 
= to_hex(&root_public_key); - eprintln!("DEADDROP PICKUP @{}...", &pk_hex[..8]); - - let dht_config = build_dht_config(cfg); - let runtime = match UdxRuntime::new() { - Ok(r) => r, - Err(e) => { - eprintln!("error: failed to create UDP runtime: {e}"); - return 1; - } - }; - - let (task, handle, _rx) = match hyperdht::spawn(&runtime, dht_config).await { - Ok(v) => v, - Err(e) => { - eprintln!("error: failed to start DHT: {e}"); - return 1; - } - }; - - if let Err(e) = handle.bootstrapped().await { - eprintln!("error: bootstrap failed: {e}"); - return 1; - } - +pub async fn get_from_root( + root_data: Vec, + root_pk: [u8; 32], + handle: HyperDhtHandle, + task_handle: tokio::task::JoinHandle>, + args: &GetArgs, + progress: Arc, + reporter: ProgressReporter, +) -> i32 { let chunk_timeout = Duration::from_secs(args.timeout); - let root_data = match fetch_with_retry(&handle, &root_public_key, chunk_timeout).await { - Some(d) => d, - None => { - eprintln!("error: root chunk not found (timeout after {}s)", args.timeout); - let _ = handle.destroy().await; - let _ = task.await; - return 1; - } - }; - - if root_data.is_empty() || root_data[0] != VERSION { - eprintln!("error: invalid root chunk (bad version)"); - let _ = handle.destroy().await; - let _ = task.await; - return 1; - } - if root_data.len() < ROOT_HEADER_SIZE { eprintln!("error: root chunk too small"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } @@ -643,36 +321,47 @@ async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { if total_chunks == 0 || total_chunks > MAX_CHUNKS { eprintln!("error: invalid chunk count: {total_chunks}"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } - eprintln!(" fetching chunk 1/{total_chunks}..."); - let mut payload_data = Vec::new(); payload_data.extend_from_slice(root_payload); + // Estimated total file size: cannot be 
exactly computed before the final chunk + // arrives (last chunk may be short), so use the maximum-possible upper bound + // (root payload + (total-1) * non-root payload). This drives the bar; the + // bytes-done counter is exact via inc_data per chunk. + let estimated_total: u64 = if total_chunks == 1 { + root_payload.len() as u64 + } else { + ROOT_PAYLOAD_MAX as u64 + ((total_chunks - 1) as u64) * NON_ROOT_PAYLOAD_MAX as u64 + }; + progress.set_length(estimated_total, 0, total_chunks as u32); + progress.inc_data(root_payload.len() as u64); + let mut seen_keys: HashSet<[u8; 32]> = HashSet::new(); - seen_keys.insert(root_public_key); + seen_keys.insert(root_pk); for i in 1..total_chunks { - eprintln!(" fetching chunk {}/{}...", i + 1, total_chunks); - if next_pk == [0u8; 32] { if i == total_chunks - 1 { break; } eprintln!("error: chain ended prematurely at chunk {i}"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } if !seen_keys.insert(next_pk) { eprintln!("error: loop detected in chunk chain"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } @@ -680,48 +369,52 @@ async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { Some(d) => d, None => { eprintln!("error: chunk {} not found (timeout)", i + 1); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } }; if chunk_data.is_empty() || chunk_data[0] != VERSION { eprintln!("error: invalid chunk {} (bad version)", i + 1); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } if chunk_data.len() < NON_ROOT_HEADER_SIZE { eprintln!("error: chunk {} too small", i + 1); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } next_pk.copy_from_slice(&chunk_data[1..33]); 
let chunk_payload = &chunk_data[33..]; + progress.inc_data(chunk_payload.len() as u64); payload_data.extend_from_slice(chunk_payload); } if total_chunks > 1 && next_pk != [0u8; 32] { eprintln!("error: final chunk does not terminate chain (next != zeros)"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } let computed_crc = compute_crc32c(&payload_data); if computed_crc != stored_crc { eprintln!("error: CRC mismatch (expected {stored_crc:08x}, got {computed_crc:08x})"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } - eprintln!(" reassembled {} bytes", payload_data.len()); - if let Some(ref output_path) = args.output { let dir = std::path::Path::new(output_path) .parent() @@ -730,16 +423,18 @@ async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { if let Err(e) = tokio::fs::write(&temp_path, &payload_data).await { eprintln!("error: failed to write temp file: {e}"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } if let Err(e) = tokio::fs::rename(&temp_path, output_path).await { let _ = tokio::fs::remove_file(&temp_path).await; eprintln!("error: failed to rename: {e}"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } @@ -748,15 +443,16 @@ async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { use std::io::Write; if let Err(e) = std::io::stdout().write_all(&payload_data) { eprintln!("error: failed to write to stdout: {e}"); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; return 1; } } if !args.no_ack { let ack_topic = - peeroxide::discovery_key(&[root_public_key.as_slice(), b"ack"].concat()); + peeroxide::discovery_key(&[root_pk.as_slice(), b"ack"].concat()); let ack_kp = 
KeyPair::generate(); let _ = handle.announce(ack_topic, &ack_kp, &[]).await; eprintln!(" ack sent (ephemeral identity)"); @@ -765,42 +461,14 @@ async fn run_pickup(args: PickupArgs, cfg: &ResolvedConfig) -> i32 { } eprintln!(" done"); + let crc_hex = format!("{computed_crc:08x}"); + reporter.on_get_result(payload_data.len() as u64, &crc_hex, args.output.as_deref()); + reporter.finish().await; let _ = handle.destroy().await; - let _ = task.await; + let _ = task_handle.await; 0 } -fn derive_pk_from_passphrase(passphrase: &str) -> [u8; 32] { - let seed = peeroxide::discovery_key(passphrase.as_bytes()); - let kp = KeyPair::from_seed(seed); - kp.public_key -} - -async fn fetch_with_retry( - handle: &HyperDhtHandle, - public_key: &[u8; 32], - timeout: Duration, -) -> Option> { - let deadline = tokio::time::Instant::now() + timeout; - let mut backoff = Duration::from_secs(1); - let max_backoff = Duration::from_secs(30); - - loop { - match handle.mutable_get(public_key, 0).await { - Ok(Some(result)) => return Some(result.value), - Ok(None) => {} - Err(_) => {} - } - - if tokio::time::Instant::now() >= deadline { - return None; - } - - tokio::time::sleep(backoff.min(deadline - tokio::time::Instant::now())).await; - backoff = (backoff * 2).min(max_backoff); - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/build.rs b/peeroxide-cli/src/cmd/deaddrop/v2/build.rs new file mode 100644 index 0000000..8dd2d38 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/build.rs @@ -0,0 +1,401 @@ +//! v2 sender-side tree construction. +//! +//! Bottom-up greedy. Spec: see *Tree Shape (normative)* section of +//! `DEADDROP_V2.md (and `docs/src/dd/`)`. The construction is fully determined by `file_size`; +//! senders MUST produce exactly this shape. 
+ +#![allow(dead_code)] + +use peeroxide::KeyPair; + +use super::keys::{data_chunk_address, derive_index_keypair, salt as compute_salt}; +use super::tree::{compute_layout, TreeLayout}; +use super::wire::{ + encode_data_chunk, encode_non_root_index, encode_root_index, DATA_PAYLOAD_MAX, HASH_LEN, + NON_ROOT_INDEX_SLOT_CAP, +}; + +/// A single index chunk that the sender will publish via `mutable_put`. +#[derive(Clone)] +pub struct IndexChunk { + /// Sender-assigned linear index in the keypair derivation scheme. + pub keypair_index: u32, + /// The keypair used to sign this chunk (`derive_index_keypair(seed, keypair_index)`). + pub keypair: KeyPair, + /// Encoded chunk bytes. + pub encoded: Vec, + /// Tree-position metadata: `0` = leaf-index level, higher values = closer to root. + pub layer: u32, + /// Position within `layer` (0-indexed; layout traversal order matches build order). + pub position_in_layer: u64, +} + +/// A single data chunk that the sender will publish via `immutable_put`. +#[derive(Clone)] +pub struct DataChunk { + /// File-order position (0-indexed). + pub file_position: u64, + /// Content address: `discovery_key(encoded)`. + pub address: [u8; HASH_LEN], + /// Encoded chunk bytes (`[VERSION][salt][payload]`). + pub encoded: Vec, +} + +/// The fully built v2 tree, ready to publish. +pub struct BuiltTree { + /// Encoded root chunk bytes. + pub root_encoded: Vec, + /// Root keypair (derived from `root_seed` directly). + pub root_keypair: KeyPair, + /// Non-root index chunks, in bottom-up build order matching their `keypair_index`. + pub index_chunks: Vec, + /// Data chunks in file order. + pub data_chunks: Vec, + /// Layout metadata. + pub layout: TreeLayout, + /// CRC-32C of the reassembled file payload. + pub crc32c: u32, +} + +/// Errors that can arise while building. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BuildError { + DataCountMismatch { expected: u64, got: u64 }, + EmptyChunkInNonEmptyFile, +} + +impl std::fmt::Display for BuildError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BuildError::DataCountMismatch { expected, got } => write!( + f, + "data chunk count mismatch: expected {expected}, got {got}" + ), + BuildError::EmptyChunkInNonEmptyFile => { + write!(f, "received an empty data chunk for a non-empty file") + } + } + } +} + +impl std::error::Error for BuildError {} + +/// Build the v2 tree for a file. +/// +/// `data_payloads` is an iterator over the file's data-chunk payloads in +/// file order. Each payload must be ≤ `DATA_PAYLOAD_MAX` bytes and (apart +/// from the last) exactly that size; the iterator must yield `data_chunk_count(file_size)` +/// items. +/// +/// `crc32c` is the CRC-32C of the entire reassembled file payload. +pub fn build_tree( + root_seed: &[u8; 32], + file_size: u64, + crc32c: u32, + data_payloads: I, +) -> Result +where + I: IntoIterator, + I::Item: AsRef<[u8]>, +{ + let salt = compute_salt(root_seed); + let root_keypair = KeyPair::from_seed(*root_seed); + let layout = compute_layout(file_size); + let n = layout.data_chunk_count; + + // Encode all data chunks. + let mut data_chunks: Vec = Vec::with_capacity(n as usize); + for (i, payload) in data_payloads.into_iter().enumerate() { + let payload = payload.as_ref(); + debug_assert!( + payload.len() <= DATA_PAYLOAD_MAX, + "data payload {} exceeds DATA_PAYLOAD_MAX", + payload.len() + ); + let encoded = encode_data_chunk(salt, payload); + let address = data_chunk_address(&encoded); + data_chunks.push(DataChunk { + file_position: i as u64, + address, + encoded, + }); + } + if data_chunks.len() as u64 != n { + return Err(BuildError::DataCountMismatch { + expected: n, + got: data_chunks.len() as u64, + }); + } + + // Special case: empty file. Root has zero slots, no non-root index chunks. 
+ if n == 0 { + let root_encoded = encode_root_index(file_size, crc32c, &[]); + return Ok(BuiltTree { + root_encoded, + root_keypair, + index_chunks: Vec::new(), + data_chunks, + layout, + crc32c, + }); + } + + // Special case: N ≤ 30. Root holds data hashes directly; no non-root chunks. + if layout.depth == 0 { + let slots: Vec<[u8; HASH_LEN]> = data_chunks.iter().map(|d| d.address).collect(); + let root_encoded = encode_root_index(file_size, crc32c, &slots); + return Ok(BuiltTree { + root_encoded, + root_keypair, + index_chunks: Vec::new(), + data_chunks, + layout, + crc32c, + }); + } + + // General case: bottom-up greedy. + // + // Layer 0 is leaf-index (each holds up to 31 data hashes from `data_chunks`). + // Layer L > 0 holds up to 31 child pubkeys from layer L-1. + // The top layer (`depth - 1`) becomes the root's children. + let mut index_chunks: Vec = Vec::new(); + let mut next_keypair_index: u32 = 0; + + // Build leaf-index layer (layer 0). + let leaf_count = layout.layer_counts[0]; + let mut leaf_pubkeys: Vec<[u8; HASH_LEN]> = Vec::with_capacity(leaf_count as usize); + + for leaf_pos in 0..leaf_count { + let start = (leaf_pos * NON_ROOT_INDEX_SLOT_CAP as u64) as usize; + let end = ((leaf_pos + 1) * NON_ROOT_INDEX_SLOT_CAP as u64).min(n) as usize; + let slots: Vec<[u8; HASH_LEN]> = + data_chunks[start..end].iter().map(|d| d.address).collect(); + let encoded = encode_non_root_index(&slots); + let kp = derive_index_keypair(root_seed, next_keypair_index); + leaf_pubkeys.push(kp.public_key); + index_chunks.push(IndexChunk { + keypair_index: next_keypair_index, + keypair: kp, + encoded, + layer: 0, + position_in_layer: leaf_pos, + }); + next_keypair_index += 1; + } + + // Build higher layers. 
+ let mut child_pubkeys = leaf_pubkeys; + for layer_idx in 1..layout.depth { + let layer_chunk_count = layout.layer_counts[layer_idx as usize]; + let mut layer_pubkeys: Vec<[u8; HASH_LEN]> = Vec::with_capacity(layer_chunk_count as usize); + let prev_count = child_pubkeys.len(); + + for chunk_pos in 0..layer_chunk_count { + let start = (chunk_pos * NON_ROOT_INDEX_SLOT_CAP as u64) as usize; + let end = ((chunk_pos + 1) * NON_ROOT_INDEX_SLOT_CAP as u64) + .min(prev_count as u64) as usize; + let slots: Vec<[u8; HASH_LEN]> = child_pubkeys[start..end].to_vec(); + let encoded = encode_non_root_index(&slots); + let kp = derive_index_keypair(root_seed, next_keypair_index); + layer_pubkeys.push(kp.public_key); + index_chunks.push(IndexChunk { + keypair_index: next_keypair_index, + keypair: kp, + encoded, + layer: layer_idx, + position_in_layer: chunk_pos, + }); + next_keypair_index += 1; + } + + child_pubkeys = layer_pubkeys; + } + + // Root holds the top layer's pubkeys. + let root_encoded = encode_root_index(file_size, crc32c, &child_pubkeys); + + Ok(BuiltTree { + root_encoded, + root_keypair, + index_chunks, + data_chunks, + layout, + crc32c, + }) +} + +/// Convenience wrapper: split an in-memory byte slice into payloads and +/// build the tree in one shot. CRC32C is computed over the whole file. 
+pub fn build_tree_from_bytes(root_seed: &[u8; 32], file: &[u8]) -> Result { + let crc = crc32c::crc32c(file); + let payloads = file.chunks(DATA_PAYLOAD_MAX); + build_tree(root_seed, file.len() as u64, crc, payloads) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::deaddrop::v2::tree::canonical_depth; + use crate::cmd::deaddrop::v2::wire::{decode_root_index, ROOT_INDEX_SLOT_CAP}; + + fn make_data(n_chunks: usize, last_partial: usize) -> Vec { + let mut data = Vec::new(); + for i in 0..n_chunks { + let len = if i + 1 == n_chunks && last_partial > 0 { + last_partial + } else { + DATA_PAYLOAD_MAX + }; + data.extend(std::iter::repeat_n((i % 256) as u8, len)); + } + data + } + + #[test] + fn build_empty_file() { + let seed = [0u8; 32]; + let tree = build_tree_from_bytes(&seed, &[]).unwrap(); + assert!(tree.data_chunks.is_empty()); + assert!(tree.index_chunks.is_empty()); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.file_size, 0); + assert_eq!(decoded.crc32c, 0); + assert!(decoded.slots.is_empty()); + } + + #[test] + fn build_tiny_file_n_1() { + let seed = [1u8; 32]; + let data = make_data(1, 100); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + assert_eq!(tree.data_chunks.len(), 1); + assert!(tree.index_chunks.is_empty()); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.slots.len(), 1); + assert_eq!(decoded.slots[0], tree.data_chunks[0].address); + } + + #[test] + fn build_n_eq_30() { + let seed = [2u8; 32]; + let data = make_data(30, 0); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + assert_eq!(tree.data_chunks.len(), 30); + assert!(tree.index_chunks.is_empty()); + assert_eq!(tree.layout.depth, 0); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.slots.len(), 30); + for (i, slot) in decoded.slots.iter().enumerate() { + assert_eq!(*slot, tree.data_chunks[i].address); + } + } + + #[test] + fn build_n_eq_31_depth_1() 
{ + let seed = [3u8; 32]; + let data = make_data(31, 0); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + assert_eq!(tree.data_chunks.len(), 31); + assert_eq!(tree.layout.depth, 1); + // 1 leaf-index chunk holding 31 data hashes; root has 1 child slot. + assert_eq!(tree.index_chunks.len(), 1); + let leaf = &tree.index_chunks[0]; + assert_eq!(leaf.layer, 0); + assert_eq!(leaf.position_in_layer, 0); + + let decoded_root = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded_root.slots.len(), 1); + assert_eq!(decoded_root.slots[0], leaf.keypair.public_key); + } + + #[test] + fn build_n_eq_70_depth_1_three_leaves() { + let seed = [4u8; 32]; + let data = make_data(70, 0); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + assert_eq!(tree.layout.depth, 1); + assert_eq!(tree.index_chunks.len(), 3); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.slots.len(), 3); + for (i, leaf) in tree.index_chunks.iter().enumerate() { + assert_eq!(leaf.layer, 0); + assert_eq!(leaf.position_in_layer, i as u64); + assert_eq!(decoded.slots[i], leaf.keypair.public_key); + } + } + + #[test] + fn build_n_eq_931_triggers_depth_2() { + let seed = [5u8; 32]; + let data = make_data(931, 100); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + assert_eq!(tree.layout.depth, 2); + // 931 data → 31 leaves → 1 L1 → root holds 1 child. 
+ let leaves: Vec<_> = tree.index_chunks.iter().filter(|c| c.layer == 0).collect(); + let l1: Vec<_> = tree.index_chunks.iter().filter(|c| c.layer == 1).collect(); + assert_eq!(leaves.len(), 31); + assert_eq!(l1.len(), 1); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.slots.len(), 1); + assert_eq!(decoded.slots[0], l1[0].keypair.public_key); + } + + #[test] + fn keypair_indices_are_dense_in_build_order() { + let seed = [7u8; 32]; + let data = make_data(70, 0); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + for (i, chunk) in tree.index_chunks.iter().enumerate() { + assert_eq!(chunk.keypair_index, i as u32); + } + } + + #[test] + fn rejects_too_few_payloads() { + let seed = [0u8; 32]; + // Claim file_size of 100 (1 chunk) but pass no payloads. + let result = build_tree(&seed, 100, 0, std::iter::empty::<&[u8]>()); + assert!(matches!(result, Err(BuildError::DataCountMismatch { .. }))); + } + + #[test] + fn data_chunks_in_file_order() { + let seed = [9u8; 32]; + let data = make_data(70, 0); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + for (i, dc) in tree.data_chunks.iter().enumerate() { + assert_eq!(dc.file_position, i as u64); + } + } + + #[test] + fn root_carries_correct_file_size_and_crc() { + let seed = [11u8; 32]; + let data = b"some content"; + let tree = build_tree_from_bytes(&seed, data).unwrap(); + let decoded = decode_root_index(&tree.root_encoded).unwrap(); + assert_eq!(decoded.file_size, data.len() as u64); + assert_eq!(decoded.crc32c, crc32c::crc32c(data)); + } + + #[test] + fn salt_propagates_to_data_chunks() { + let mut seed = [0u8; 32]; + seed[0] = 0xAA; + let data = make_data(2, 100); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + for dc in &tree.data_chunks { + assert_eq!(dc.encoded[0], 0x02); // version + assert_eq!(dc.encoded[1], 0x00); // salt (forced to 0) + } + } + + #[test] + fn root_slot_cap_boundary() { + // Use ROOT_INDEX_SLOT_CAP to make the boundary explicit. 
+ assert_eq!(ROOT_INDEX_SLOT_CAP, 30); + // Just past the boundary triggers depth 1. + assert_eq!(canonical_depth(31), 1); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/fetch.rs b/peeroxide-cli/src/cmd/deaddrop/v2/fetch.rs new file mode 100644 index 0000000..25dfa6b --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/fetch.rs @@ -0,0 +1,785 @@ +//! v2 receiver: BFS fetch over the index tree with mmap output (`--output`) +//! or streaming stdout output. +//! +//! Spec: see *Fetch Protocol (Receiver)* in `DEADDROP_V2.md` (and `docs/src/dd/`). + +#![allow(dead_code)] + +use std::collections::HashSet; +use std::sync::Arc; +use std::time::Duration; + +use peeroxide::KeyPair; +use peeroxide_dht::hyperdht::HyperDhtHandle; +use tokio::sync::{Mutex, Semaphore}; +use tokio::task::JoinSet; + +use crate::cmd::deaddrop::progress::reporter::ProgressReporter; +use crate::cmd::deaddrop::progress::state::ProgressState; + +use super::super::GetArgs; +use super::keys::{ack_topic, need_topic}; +use super::need::{coalesce_missing_ranges, encode_need_list}; +use super::stream::StreamSink; +use super::tree::{compute_layout, data_chunk_count}; +use super::wire::{ + decode_data_chunk, decode_non_root_index, decode_root_index, NON_ROOT_INDEX_SLOT_CAP, +}; +use super::PARALLEL_FETCH_CAP; + +/// Per-task fetch result variants. +enum TaskOutcome { + Index { + remaining_depth: u32, + base: u64, + end: u64, + result: Result<Vec<u8>, String>, + }, + Data { + position: u64, + result: Result<Vec<u8>, String>, + }, +} + +/// Per-data-position state. Drives need-list publishing: only `Failed` +/// positions are advertised, so the sender is not asked to re-publish +/// chunks the receiver hasn't actually attempted yet. +#[derive(Clone, Debug)] +enum ChunkState { + /// No fetch has been scheduled — the parent index chunk hasn't been + /// decoded yet, so we don't know the chunk's address. + Unscheduled, + /// A fetch task is running (or queued behind the parallel-fetch + /// semaphore) for this address.
+ InFlight { address: [u8; 32] }, + /// The fetch task returned an error. Eligible for re-spawn on the + /// next need-list publish cycle. + Failed { address: [u8; 32] }, + /// The chunk has been successfully decoded and written. + Done, +} + +/// Output destination strategy. +enum OutputSink { + /// Memory-mapped output file (write-by-position). + File { + mmap: memmap2::MmapMut, + temp_path: std::path::PathBuf, + final_path: std::path::PathBuf, + }, + /// Empty output file: no mmap, just create-and-rename at finalize. + EmptyFile { + temp_path: std::path::PathBuf, + final_path: std::path::PathBuf, + }, + /// Streaming stdout via reorder buffer. + Stdout(StreamSink), + /// Empty stdout: write nothing. + EmptyStdout, +} + +impl OutputSink { + /// Accept a data chunk's payload at its file-order position. + /// Returns Err if I/O fails. + fn accept(&mut self, position: u64, payload: &[u8]) -> Result<(), String> { + match self { + OutputSink::File { mmap, .. } => { + use super::wire::DATA_PAYLOAD_MAX; + let offset = (position * DATA_PAYLOAD_MAX as u64) as usize; + if offset + payload.len() > mmap.len() { + return Err(format!( + "chunk at position {position} extends past mmap end" + )); + } + mmap[offset..offset + payload.len()].copy_from_slice(payload); + Ok(()) + } + OutputSink::Stdout(sink) => { + let to_emit = sink.accept(position, payload.to_vec()); + use std::io::Write; + let mut out = std::io::stdout().lock(); + for bytes in to_emit { + out.write_all(&bytes) + .map_err(|e| format!("stdout write failed: {e}"))?; + } + out.flush().map_err(|e| format!("stdout flush failed: {e}"))?; + Ok(()) + } + OutputSink::EmptyFile { .. } | OutputSink::EmptyStdout => { + // Nothing to write — empty-file callers shouldn't pass any chunks + // (N=0 means no data layer). Be permissive: just no-op. + Ok(()) + } + } + } + + /// Finalize the output (flush mmap + atomic rename, or no-op for stdout). 
+ fn finalize(self) -> Result<(), String> { + match self { + OutputSink::File { + mmap, + temp_path, + final_path, + } => { + mmap.flush().map_err(|e| format!("mmap flush failed: {e}"))?; + drop(mmap); + std::fs::rename(&temp_path, &final_path) + .map_err(|e| format!("rename to {final_path:?} failed: {e}"))?; + Ok(()) + } + OutputSink::EmptyFile { + temp_path, + final_path, + } => { + // Create an empty file at temp_path, then rename. + std::fs::write(&temp_path, []) + .map_err(|e| format!("failed to write empty temp file: {e}"))?; + std::fs::rename(&temp_path, &final_path) + .map_err(|e| format!("rename to {final_path:?} failed: {e}"))?; + Ok(()) + } + OutputSink::Stdout(sink) => { + use std::io::Write; + let _ = sink; // ensure consumed + std::io::stdout() + .flush() + .map_err(|e| format!("stdout flush failed: {e}"))?; + Ok(()) + } + OutputSink::EmptyStdout => Ok(()), + } + } + + /// Discard the output without committing (used on error before finalize). + fn discard(self) { + match self { + OutputSink::File { + mmap, temp_path, .. + } => { + drop(mmap); + let _ = std::fs::remove_file(&temp_path); + } + OutputSink::EmptyFile { temp_path, .. } => { + let _ = std::fs::remove_file(&temp_path); + } + OutputSink::Stdout(_) | OutputSink::EmptyStdout => {} + } + } +} + +/// Build the appropriate `OutputSink` for the user's request. +fn open_output_sink(args: &GetArgs, file_size: u64) -> Result { + use super::wire::DATA_PAYLOAD_MAX; + if let Some(path) = args.output.as_ref() { + let path = std::path::PathBuf::from(path); + let dir = path.parent().unwrap_or_else(|| std::path::Path::new(".")).to_path_buf(); + let temp_name = format!(".peeroxide-pickup-{}", std::process::id()); + let temp_path = dir.join(temp_name); + + if file_size == 0 { + return Ok(OutputSink::EmptyFile { + temp_path, + final_path: path, + }); + } + + // Allocate output file. 
We size it to N * DATA_PAYLOAD_MAX so that + // each chunk writes to its position * 998 byte offset; the last + // chunk may overshoot file_size by up to 998 bytes. We truncate + // to file_size before rename. + let n = data_chunk_count(file_size); + let alloc_size = (n.saturating_mul(DATA_PAYLOAD_MAX as u64)).max(file_size); + + let file = std::fs::OpenOptions::new() + .create(true) + .read(true) + .write(true) + .truncate(true) + .open(&temp_path) + .map_err(|e| format!("failed to open temp file {temp_path:?}: {e}"))?; + file.set_len(alloc_size) + .map_err(|e| format!("failed to size temp file: {e}"))?; + let mmap = unsafe { + memmap2::MmapMut::map_mut(&file).map_err(|e| format!("mmap failed: {e}"))? + }; + drop(file); // mmap holds the underlying mapping + Ok(OutputSink::File { + mmap, + temp_path, + final_path: path, + }) + } else if file_size == 0 { + Ok(OutputSink::EmptyStdout) + } else { + let n = data_chunk_count(file_size); + Ok(OutputSink::Stdout(StreamSink::new(n))) + } +} + +/// Fetch a single mutable record with exponential backoff, bounded by `deadline`. +async fn fetch_mutable_with_retry( + handle: &HyperDhtHandle, + pk: &[u8; 32], + deadline: tokio::time::Instant, +) -> Result, String> { + let mut backoff = Duration::from_millis(500); + let max_backoff = Duration::from_secs(15); + loop { + match handle.mutable_get(pk, 0).await { + Ok(Some(r)) => return Ok(r.value), + Ok(None) => {} + Err(e) => { + let now = tokio::time::Instant::now(); + if now >= deadline { + return Err(format!("mutable_get failed: {e}")); + } + } + } + let now = tokio::time::Instant::now(); + if now >= deadline { + return Err("timeout".to_string()); + } + let sleep = backoff.min(deadline.saturating_duration_since(now)); + tokio::time::sleep(sleep).await; + backoff = (backoff * 2).min(max_backoff); + } +} + +/// Fetch a single immutable record (data chunk) with exponential backoff. 
+async fn fetch_immutable_with_retry( + handle: &HyperDhtHandle, + address: &[u8; 32], + deadline: tokio::time::Instant, +) -> Result, String> { + let mut backoff = Duration::from_millis(500); + let max_backoff = Duration::from_secs(15); + loop { + match handle.immutable_get(*address).await { + Ok(Some(bytes)) => return Ok(bytes), + Ok(None) => {} + Err(e) => { + let now = tokio::time::Instant::now(); + if now >= deadline { + return Err(format!("immutable_get failed: {e}")); + } + } + } + let now = tokio::time::Instant::now(); + if now >= deadline { + return Err("timeout".to_string()); + } + let sleep = backoff.min(deadline.saturating_duration_since(now)); + tokio::time::sleep(sleep).await; + backoff = (backoff * 2).min(max_backoff); + } +} + +/// Receiver-side need-list keepalive: announces the receiver's ephemeral +/// keypair on the need topic on a refresh cycle while the get is in +/// progress. +async fn run_need_announcer( + handle: HyperDhtHandle, + need_topic_key: [u8; 32], + need_kp: KeyPair, + shutdown: Arc, +) { + let interval = Duration::from_secs(60); + loop { + tokio::select! { + _ = shutdown.notified() => break, + _ = async { + if let Err(e) = handle.announce(need_topic_key, &need_kp, &[]).await { + eprintln!(" warning: need-topic announce failed: {e}"); + } + tokio::time::sleep(interval).await; + } => {} + } + } +} + +/// Top-level GET entry point. Already given the fetched root chunk bytes +/// from `mod.rs::run_get` (which had to read the version byte to dispatch). +#[allow(clippy::too_many_arguments)] +pub async fn get_from_root( + root_data: Vec, + root_pk: [u8; 32], + handle: HyperDhtHandle, + task_handle: tokio::task::JoinHandle< + Result<(), peeroxide_dht::hyperdht::HyperDhtError>, + >, + args: &GetArgs, + progress: Arc, + reporter: ProgressReporter, +) -> i32 { + if args.timeout == 0 { + eprintln!("error: --timeout must be greater than 0"); + return cleanup(handle, task_handle, reporter, None, 1).await; + } + + // 1. 
Decode the root index chunk. + let root = match decode_root_index(&root_data) { + Ok(r) => r, + Err(e) => { + eprintln!("error: invalid root index chunk: {e}"); + return cleanup(handle, task_handle, reporter, None, 1).await; + } + }; + let layout = compute_layout(root.file_size); + let n = layout.data_chunk_count; + let tree_depth = layout.depth; + + // Sanity: root.slots should match the canonical layer 0 (data direct) or + // top-non-root layer (root's children) shape. + let expected_root_slots: u64 = if tree_depth == 0 { + n + } else { + *layout.layer_counts.last().unwrap() + }; + if root.slots.len() as u64 != expected_root_slots { + eprintln!( + "error: root slot count mismatch: got {}, expected {} (file_size={}, depth={})", + root.slots.len(), + expected_root_slots, + root.file_size, + tree_depth + ); + return cleanup(handle, task_handle, reporter, None, 1).await; + } + + // 2. Update progress state with totals. + let total_index_chunks = super::tree::total_non_root_index_chunks(root.file_size) + 1; + progress.set_length(root.file_size, total_index_chunks as u32, n as u32); + progress.inc_index(); // root accounted for + + // 3. Open output sink. + let mut output = match open_output_sink(args, root.file_size) { + Ok(o) => o, + Err(e) => { + eprintln!("error: {e}"); + return cleanup(handle, task_handle, reporter, None, 1).await; + } + }; + + // 4. BFS fetch. + // + // `chunk_timeout` is a *sliding* no-progress window: if no chunk has been + // successfully decoded for this long, the operation aborts. It's *not* a + // wall-clock budget for the whole download — a steady-progressing fetch + // can run as long as it needs to. + // + // Per-fetch-task deadlines are anchored at spawn time (see + // `schedule_children_from_index`), so each individual `mutable_get` / + // `immutable_get` retry loop has up to `chunk_timeout` to land its + // chunk. 
Combined with the outer sliding window, this gives: + // - Any single chunk: up to chunk_timeout from when its task started. + // - Whole operation: aborts only when no progress is being made + // anywhere for chunk_timeout. + let chunk_timeout = Duration::from_secs(args.timeout); + let sem = Arc::new(Semaphore::new(PARALLEL_FETCH_CAP)); + let mut tasks: JoinSet = JoinSet::new(); + let seen_index = Arc::new(Mutex::new(HashSet::<[u8; 32]>::new())); + seen_index.lock().await.insert(root_pk); + + // Per-data-position state. Drives need-list publishing and re-spawn + // of failed fetches; see `ChunkState` for the state machine. + let mut chunk_states: Vec = vec![ChunkState::Unscheduled; n as usize]; + + // Schedule all of root's children first (or root data slots if depth 0). + schedule_children_from_index( + &handle, + &mut tasks, + sem.clone(), + root.slots.clone(), + tree_depth, + 0, + n, + chunk_timeout, + &mut chunk_states, + ) + .await; + + // 5. Setup need-list keepalive. + let need_kp = KeyPair::generate(); + let need_topic_key = need_topic(&root_pk); + let need_shutdown = Arc::new(tokio::sync::Notify::new()); + let need_announce_handle = tokio::spawn(run_need_announcer( + handle.clone(), + need_topic_key, + need_kp.clone(), + need_shutdown.clone(), + )); + let mut need_seq: u64 = 0; + let mut last_need_publish = tokio::time::Instant::now(); + let need_publish_interval = Duration::from_secs(20); + // Skip republishing identical need-list content; keep a single + // keepalive republish so the DHT record (which expires in ~20m) stays + // alive even when the missing-set hasn't changed. 10m = half the TTL. + let mut last_published_encoded: Option> = None; + let mut last_actual_publish_at: Option = None; + let need_keepalive_interval = Duration::from_secs(600); + + // Sliding no-progress window: updated on every successful index/data + // decode. The drain loop aborts only if this stops moving forward for + // `chunk_timeout` seconds. 
+ let mut last_progress_at = tokio::time::Instant::now(); + + // 6. Drain results. + let mut had_error = false; + while !tasks.is_empty() { + let outcome = match tokio::time::timeout(Duration::from_secs(1), tasks.join_next()).await { + Ok(Some(joined)) => match joined { + Ok(o) => Some(o), + Err(e) => { + eprintln!(" warning: fetch task panicked: {e}"); + None + } + }, + Ok(None) => break, + Err(_) => None, + }; + + if let Some(outcome) = outcome { + match outcome { + TaskOutcome::Index { + remaining_depth, + base, + end, + result, + } => match result { + Ok(bytes) => { + match decode_non_root_index(&bytes) { + Ok(slots) => { + progress.inc_index(); + last_progress_at = tokio::time::Instant::now(); + let mut seen = seen_index.lock().await; + // No-op for loop detection; we already + // de-duplicate at schedule time below. + let _ = &mut *seen; + drop(seen); + schedule_children_from_index( + &handle, + &mut tasks, + sem.clone(), + slots, + remaining_depth, + base, + end, + chunk_timeout, + &mut chunk_states, + ) + .await; + } + Err(e) => { + eprintln!( + "error: invalid non-root index at base={base}: {e}" + ); + had_error = true; + break; + } + } + } + Err(e) => { + eprintln!("error: failed to fetch index chunk: {e}"); + had_error = true; + break; + } + }, + TaskOutcome::Data { position, result } => match result { + Ok(bytes) => match decode_data_chunk(&bytes) { + Ok(payload) => { + // Drop late duplicates if this position was + // re-spawned and an earlier task also returned. + if matches!(chunk_states[position as usize], ChunkState::Done) { + continue; + } + // Trim payload for the last chunk if necessary. 
+ let trim_len = if (position + 1) * super::wire::DATA_PAYLOAD_MAX as u64 + > root.file_size + { + let already = position * super::wire::DATA_PAYLOAD_MAX as u64; + (root.file_size - already) as usize + } else { + payload.len() + }; + let trimmed = &payload[..trim_len.min(payload.len())]; + if let Err(e) = output.accept(position, trimmed) { + eprintln!("error: {e}"); + had_error = true; + break; + } + progress.inc_data(trimmed.len() as u64); + last_progress_at = tokio::time::Instant::now(); + chunk_states[position as usize] = ChunkState::Done; + } + Err(e) => { + eprintln!("error: invalid data chunk at position {position}: {e}"); + had_error = true; + break; + } + }, + Err(e) => { + eprintln!( + " warning: failed to fetch data chunk at position {position}: {e}" + ); + // Transition InFlight → Failed so the next need-list + // cycle advertises it and re-spawns a fetch. + if let ChunkState::InFlight { address } = + chunk_states[position as usize] + { + chunk_states[position as usize] = + ChunkState::Failed { address }; + } + } + }, + } + } + + // Periodically publish need-list for chunks the receiver has + // actually attempted and confirmed missing (Failed state). Chunks + // that haven't been scheduled yet, or whose fetch is still in + // flight, are deliberately excluded — we don't want to ask the + // sender to re-publish what the normal DHT get path may still + // deliver. The 20s cadence is a batching window so a burst of + // failures gets coalesced into one need-list update. + if tokio::time::Instant::now() - last_need_publish >= need_publish_interval { + let missing: Vec = chunk_states + .iter() + .enumerate() + .filter_map(|(p, s)| match s { + ChunkState::Failed { .. 
} => Some(p as u32), + _ => None, + }) + .collect(); + if !missing.is_empty() { + let entries = coalesce_missing_ranges(&missing); + let encoded = encode_need_list(&entries); + let unchanged = last_published_encoded.as_deref() == Some(encoded.as_slice()); + let needs_keepalive = last_actual_publish_at + .is_none_or(|t| t.elapsed() >= need_keepalive_interval); + if !unchanged || needs_keepalive { + need_seq += 1; + let _ = handle.mutable_put(&need_kp, &encoded, need_seq).await; + last_actual_publish_at = Some(tokio::time::Instant::now()); + last_published_encoded = Some(encoded); + } + // Re-spawn fetch tasks for Failed positions: with the + // need-list now published, the sender will republish the + // missing chunks, and the in-flight retry loop in the new + // task gets a fresh chunk_timeout window to pick them up. + for (pos, state) in chunk_states.iter_mut().enumerate() { + if let ChunkState::Failed { address } = *state { + let h = handle.clone(); + let permit_sem = sem.clone(); + tasks.spawn(async move { + let _permit = permit_sem.acquire_owned().await.unwrap(); + let task_deadline = + tokio::time::Instant::now() + chunk_timeout; + let result = + fetch_immutable_with_retry(&h, &address, task_deadline) + .await; + TaskOutcome::Data { + position: pos as u64, + result, + } + }); + *state = ChunkState::InFlight { address }; + } + } + } + last_need_publish = tokio::time::Instant::now(); + } + + // Sliding-window timeout: abort only if no chunk has decoded in + // the last `chunk_timeout` seconds. Steady-progressing downloads + // can run as long as they need. + if tokio::time::Instant::now() - last_progress_at >= chunk_timeout { + eprintln!( + "error: no progress for {}s; aborting", + chunk_timeout.as_secs() + ); + had_error = true; + break; + } + } + + // 7. Finalize. 
+ need_shutdown.notify_one(); + let _ = need_announce_handle.await; + + if had_error { + output.discard(); + return cleanup(handle, task_handle, reporter, Some(need_kp), 1).await; + } + + // Verify all data positions arrived. + let done_count = chunk_states + .iter() + .filter(|s| matches!(s, ChunkState::Done)) + .count() as u64; + if done_count != n { + eprintln!( + "error: only {} of {} data chunks received", + done_count, n + ); + output.discard(); + return cleanup(handle, task_handle, reporter, Some(need_kp), 1).await; + } + + // CRC verification: read back from output (only meaningful for File mode; + // streaming stdout has emitted bytes already). + if let Err(e) = verify_crc(&output, root.file_size, root.crc32c) { + eprintln!("error: {e}"); + output.discard(); + return cleanup(handle, task_handle, reporter, Some(need_kp), 1).await; + } + + if let OutputSink::File { temp_path, .. } = &output { + // Truncate the temp file to file_size before rename. + if let Ok(file) = std::fs::OpenOptions::new().write(true).open(temp_path) { + let _ = file.set_len(root.file_size); + } + } + + if let Err(e) = output.finalize() { + eprintln!("error: {e}"); + return cleanup(handle, task_handle, reporter, Some(need_kp), 1).await; + } + + // Send empty need-list as the done sentinel, plus an ack. + need_seq += 1; + let _ = handle.mutable_put(&need_kp, &[], need_seq).await; + if !args.no_ack { + let ack = ack_topic(&root_pk); + let ack_kp = KeyPair::generate(); + let _ = handle.announce(ack, &ack_kp, &[]).await; + } + + cleanup(handle, task_handle, reporter, Some(need_kp), 0).await +} + +#[allow(clippy::too_many_arguments)] +async fn schedule_children_from_index( + handle: &HyperDhtHandle, + tasks: &mut JoinSet, + sem: Arc, + slots: Vec<[u8; 32]>, + remaining_depth: u32, + base: u64, + end: u64, + chunk_timeout: Duration, + chunk_states: &mut [ChunkState], +) { + if remaining_depth == 0 { + // Slots are data hashes. Position[i] = base + i. 
+ for (i, address) in slots.into_iter().enumerate() { + let pos = base + i as u64; + if pos >= end { + break; + } + chunk_states[pos as usize] = ChunkState::InFlight { address }; + let h = handle.clone(); + let permit_sem = sem.clone(); + tasks.spawn(async move { + let _permit = permit_sem.acquire_owned().await.unwrap(); + // Per-task deadline anchored at when the task actually + // starts running, so chunks scheduled mid-fetch get a + // full budget rather than inheriting the original + // operation-start deadline. + let task_deadline = tokio::time::Instant::now() + chunk_timeout; + let result = fetch_immutable_with_retry(&h, &address, task_deadline).await; + TaskOutcome::Data { + position: pos, + result, + } + }); + } + return; + } + + // Slots are child index pubkeys. Each child covers a subtree. + // Subtree size at remaining_depth r = NON_ROOT_INDEX_SLOT_CAP^r. + let child_remaining = remaining_depth - 1; + let mut subtree_size: u64 = 1; + for _ in 0..=child_remaining { + subtree_size = subtree_size.saturating_mul(NON_ROOT_INDEX_SLOT_CAP as u64); + } + + let mut child_base = base; + for (i, child_pk) in slots.into_iter().enumerate() { + if child_base >= end { + break; + } + // Last child of a parent may have a smaller range (due to N being + // less than the full canonical capacity at this layer). Compute + // the child's end as min(child_base + subtree_size, end). + let child_end = (child_base + subtree_size).min(end); + let h = handle.clone(); + let permit_sem = sem.clone(); + tasks.spawn(async move { + let _permit = permit_sem.acquire_owned().await.unwrap(); + let task_deadline = tokio::time::Instant::now() + chunk_timeout; + let result = fetch_mutable_with_retry(&h, &child_pk, task_deadline).await; + TaskOutcome::Index { + remaining_depth: child_remaining, + base: child_base, + end: child_end, + result, + } + }); + child_base = child_end; + let _ = i; // suppress unused + } +} + +/// CRC-verify the reassembled output. 
For File mode, reads the mmap; for +/// Stdout mode, this is a no-op (bytes are downstream already). For empty +/// outputs, verifies that `expected_crc` matches `crc32c(&[])`. +fn verify_crc(output: &OutputSink, file_size: u64, expected_crc: u32) -> Result<(), String> { + match output { + OutputSink::File { mmap, .. } => { + let bytes = &mmap[..file_size as usize]; + let computed = crc32c::crc32c(bytes); + if computed != expected_crc { + return Err(format!( + "CRC mismatch: expected {expected_crc:08x}, got {computed:08x}" + )); + } + } + OutputSink::EmptyFile { .. } | OutputSink::EmptyStdout => { + let computed = crc32c::crc32c(&[]); + if computed != expected_crc { + return Err(format!( + "CRC mismatch on empty file: expected {expected_crc:08x}, got {computed:08x}" + )); + } + } + OutputSink::Stdout(_) => { + // Streaming has already emitted; CRC mismatch is best-effort. + // We don't recompute (would require buffering the entire file). + } + } + Ok(()) +} + +/// Cleanup helper: drains DHT handle, awaits the runtime task, finishes the +/// reporter, and returns the exit code. +async fn cleanup( + handle: HyperDhtHandle, + task_handle: tokio::task::JoinHandle< + Result<(), peeroxide_dht::hyperdht::HyperDhtError>, + >, + reporter: ProgressReporter, + _need_kp: Option, + code: i32, +) -> i32 { + reporter.finish().await; + let _ = handle.destroy().await; + let _ = task_handle.await; + code +} + +#[cfg(test)] +mod tests { + // Most fetch.rs logic requires a running DHT; integration tests cover + // the end-to-end roundtrip in `peeroxide-cli/tests/local_commands.rs`. +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/keys.rs b/peeroxide-cli/src/cmd/deaddrop/v2/keys.rs new file mode 100644 index 0000000..1d02273 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/keys.rs @@ -0,0 +1,131 @@ +//! v2 key derivation. +//! +//! Spec: see *Key Derivation* section of `DEADDROP_V2.md` (and `docs/src/dd/`). +//! +//! root_keypair = KeyPair::from_seed(root_seed) +//! 
index_keypair[i] = KeyPair::from_seed(blake2b(root_seed || b"idx" || i_le)) +//! where i is u32 little-endian +//! salt = root_seed[0] + +#![allow(dead_code)] + +use peeroxide::{discovery_key, KeyPair}; + +/// Per-deaddrop salt byte. Embedded in every data chunk header. +/// +/// Currently forced to `0x00`: the original intent was DHT address-space +/// isolation between unrelated deaddrops with identical content, but in +/// practice this is unnecessary. The header byte is retained so the wire +/// format does not change. +pub fn salt(_root_seed: &[u8; 32]) -> u8 { + 0x00 +} + +/// Derive the keypair for non-root index chunk number `i`. +/// +/// `i` is a sender-assigned linear number in `[0, 2^32 - 1]`. The order +/// in which the sender assigns numbers is unspecified by the protocol; +/// the reference sender uses bottom-up build order. Tree position is +/// not encoded in the keypair. +pub fn derive_index_keypair(root_seed: &[u8; 32], i: u32) -> KeyPair { + let mut input = Vec::with_capacity(32 + 3 + 4); + input.extend_from_slice(root_seed); + input.extend_from_slice(b"idx"); + input.extend_from_slice(&i.to_le_bytes()); + let seed = discovery_key(&input); + KeyPair::from_seed(seed) +} + +/// Topic for need-list publishing: `discovery_key(root_pk || b"need")`. +pub fn need_topic(root_pk: &[u8; 32]) -> [u8; 32] { + let mut input = Vec::with_capacity(32 + 4); + input.extend_from_slice(root_pk); + input.extend_from_slice(b"need"); + discovery_key(&input) +} + +/// Topic for pickup acknowledgements: `discovery_key(root_pk || b"ack")`. +pub fn ack_topic(root_pk: &[u8; 32]) -> [u8; 32] { + let mut input = Vec::with_capacity(32 + 3); + input.extend_from_slice(root_pk); + input.extend_from_slice(b"ack"); + discovery_key(&input) +} + +/// Compute the DHT address (BLAKE2b-256 of the encoded chunk) for a data chunk. +/// +/// Same as `discovery_key` of the encoded bytes, but named to make intent +/// clear at call sites. 
+pub fn data_chunk_address(encoded_chunk: &[u8]) -> [u8; 32] { + discovery_key(encoded_chunk) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn salt_is_zero() { + let mut seed = [0u8; 32]; + seed[0] = 0xAB; + seed[1] = 0xCD; + assert_eq!(salt(&seed), 0x00); + } + + #[test] + fn derive_index_keypair_deterministic() { + let seed = [42u8; 32]; + let kp_a = derive_index_keypair(&seed, 7); + let kp_b = derive_index_keypair(&seed, 7); + assert_eq!(kp_a.public_key, kp_b.public_key); + assert_eq!(kp_a.secret_key, kp_b.secret_key); + } + + #[test] + fn derive_index_keypair_distinct_per_index() { + let seed = [42u8; 32]; + let kp_0 = derive_index_keypair(&seed, 0); + let kp_1 = derive_index_keypair(&seed, 1); + let kp_2 = derive_index_keypair(&seed, 2); + assert_ne!(kp_0.public_key, kp_1.public_key); + assert_ne!(kp_1.public_key, kp_2.public_key); + assert_ne!(kp_0.public_key, kp_2.public_key); + } + + #[test] + fn derive_index_keypair_distinct_per_seed() { + let kp_a = derive_index_keypair(&[1u8; 32], 0); + let kp_b = derive_index_keypair(&[2u8; 32], 0); + assert_ne!(kp_a.public_key, kp_b.public_key); + } + + #[test] + fn derive_index_keypair_supports_high_indices() { + // Sanity: u32 max should not panic. + let _ = derive_index_keypair(&[0u8; 32], u32::MAX); + let _ = derive_index_keypair(&[0u8; 32], 1_000_000); + } + + #[test] + fn need_topic_deterministic() { + let pk = [99u8; 32]; + assert_eq!(need_topic(&pk), need_topic(&pk)); + } + + #[test] + fn need_and_ack_topics_differ() { + let pk = [42u8; 32]; + assert_ne!(need_topic(&pk), ack_topic(&pk)); + } + + #[test] + fn data_chunk_address_changes_with_salt() { + // Same payload, different salt → different address (the whole point). 
+ let payload = b"identical content"; + let mut chunk_a = vec![0x02, 0xAA]; + chunk_a.extend_from_slice(payload); + let mut chunk_b = vec![0x02, 0xBB]; + chunk_b.extend_from_slice(payload); + assert_ne!(data_chunk_address(&chunk_a), data_chunk_address(&chunk_b)); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/mod.rs b/peeroxide-cli/src/cmd/deaddrop/v2/mod.rs new file mode 100644 index 0000000..e588a06 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/mod.rs @@ -0,0 +1,66 @@ +//! Dead Drop v2 (wire-byte 0x02). +//! +//! Tree-indexed storage protocol: the index layer is a tree of mutable +//! signed records; the data layer is a flat collection of immutable, +//! content-addressed records, each carrying a per-deaddrop salt slot for +//! DHT address-space isolation. +//! +//! See `peeroxide-cli/DEADDROP_V2.md` (and `docs/src/dd/`) +//! for the wire-format specification. + +#![allow(dead_code)] + +pub mod build; +pub mod fetch; +pub mod keys; +pub mod need; +pub mod publish; +pub mod queue; +pub mod stream; +pub mod tree; +pub mod wire; + +use super::{GetArgs, PutArgs}; +use crate::cmd::deaddrop::progress::reporter::ProgressReporter; +use crate::cmd::deaddrop::progress::state::ProgressState; +use crate::config::ResolvedConfig; +use peeroxide_dht::hyperdht::HyperDhtHandle; +use std::sync::Arc; + +#[allow(unused_imports)] +pub use wire::VERSION; + +/// Concurrency cap shared between fetch and put pipelines. +pub const PARALLEL_FETCH_CAP: usize = 64; + +/// PUT entry point: dispatched from `cmd::deaddrop::run_put` when the +/// user's command is `dd put` and `--v1` is not set. +pub async fn run_put(args: &PutArgs, cfg: &ResolvedConfig) -> i32 { + publish::run_put(args, cfg).await +} + +/// GET entry point: dispatched from `cmd::deaddrop::run_get` when the +/// fetched root chunk's first byte is `0x02`. 
+#[allow(clippy::too_many_arguments)] +pub async fn get_from_root( + root_data: Vec, + root_pk: [u8; 32], + handle: HyperDhtHandle, + task_handle: tokio::task::JoinHandle< + Result<(), peeroxide_dht::hyperdht::HyperDhtError>, + >, + args: &GetArgs, + progress: Arc, + reporter: ProgressReporter, +) -> i32 { + fetch::get_from_root( + root_data, + root_pk, + handle, + task_handle, + args, + progress, + reporter, + ) + .await +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/need.rs b/peeroxide-cli/src/cmd/deaddrop/v2/need.rs new file mode 100644 index 0000000..b4f17bb --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/need.rs @@ -0,0 +1,455 @@ +//! v2 need-list channel. +//! +//! Spec: see *Need-List Feedback Channel* in `DEADDROP_V2.md` (and `docs/src/dd/`). +//! +//! Wire format: +//! `[VERSION][count: u16 LE][count × {start: u32 LE, end: u32 LE}]` +//! +//! Each entry expresses a half-open `[start, end)` range of *data chunk +//! indices in DFS file order*. The receiver expresses missing pieces in +//! these terms; the sender translates them into the data chunks plus the +//! full path of index chunks they require. + +#![allow(dead_code)] + +use super::build::{BuiltTree, IndexChunk}; +use super::tree::{compute_layout, TreeLayout}; +use super::wire::{ + NEED_ENTRY_SIZE, NEED_LIST_ENTRY_CAP, NEED_LIST_HEADER_SIZE, NON_ROOT_INDEX_SLOT_CAP, VERSION, + WireError, +}; + +/// A `[start, end)` range of data chunk indices in DFS file order. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct NeedEntry { + pub start: u32, + pub end: u32, +} + +impl NeedEntry { + pub fn new(start: u32, end: u32) -> Self { + debug_assert!(start < end, "NeedEntry requires start < end"); + Self { start, end } + } +} + +/// Encode a need-list record. Length is `3 + entries.len() * 8` bytes. +/// +/// Returns the raw bytes to publish via `mutable_put` to the receiver's +/// ephemeral need-keypair. 
+pub fn encode_need_list(entries: &[NeedEntry]) -> Vec { + let count = entries.len(); + debug_assert!( + count <= NEED_LIST_ENTRY_CAP, + "need-list entry count {} exceeds cap {}", + count, + NEED_LIST_ENTRY_CAP + ); + let mut buf = Vec::with_capacity(NEED_LIST_HEADER_SIZE + count * NEED_ENTRY_SIZE); + buf.push(VERSION); + buf.extend_from_slice(&(count as u16).to_le_bytes()); + for entry in entries { + buf.extend_from_slice(&entry.start.to_le_bytes()); + buf.extend_from_slice(&entry.end.to_le_bytes()); + } + buf +} + +/// Decode a need-list record. +/// +/// An empty `bytes` slice is the receiver-done sentinel and decodes as an +/// empty entry list (Ok with empty Vec). +pub fn decode_need_list(bytes: &[u8]) -> Result, WireError> { + if bytes.is_empty() { + return Ok(Vec::new()); + } + if bytes[0] != VERSION { + return Err(WireError::BadVersion(bytes[0])); + } + if bytes.len() < NEED_LIST_HEADER_SIZE { + return Err(WireError::Truncated { + needed: NEED_LIST_HEADER_SIZE, + got: bytes.len(), + }); + } + let declared = u16::from_le_bytes(bytes[1..3].try_into().unwrap()); + let entry_bytes = &bytes[NEED_LIST_HEADER_SIZE..]; + if entry_bytes.len() % NEED_ENTRY_SIZE != 0 { + return Err(WireError::Truncated { + needed: NEED_LIST_HEADER_SIZE + + entry_bytes.len().div_ceil(NEED_ENTRY_SIZE) * NEED_ENTRY_SIZE, + got: bytes.len(), + }); + } + let computed = entry_bytes.len() / NEED_ENTRY_SIZE; + if computed != declared as usize { + return Err(WireError::BadCount { + declared, + computed, + }); + } + + let mut entries = Vec::with_capacity(computed); + for chunk in entry_bytes.chunks_exact(NEED_ENTRY_SIZE) { + let start = u32::from_le_bytes(chunk[0..4].try_into().unwrap()); + let end = u32::from_le_bytes(chunk[4..8].try_into().unwrap()); + if start >= end { + return Err(WireError::InvalidEntry { start, end }); + } + entries.push(NeedEntry { start, end }); + } + Ok(entries) +} + +/// Coalesce a sorted list of missing chunk positions into `[start, end)` ranges. 
+/// +/// Input MUST be sorted ascending and unique. Output is the minimal set of +/// half-open ranges covering the input. +pub fn coalesce_missing_ranges(missing_positions: &[u32]) -> Vec { + if missing_positions.is_empty() { + return Vec::new(); + } + let mut out = Vec::new(); + let mut start = missing_positions[0]; + let mut prev = missing_positions[0]; + for &p in &missing_positions[1..] { + if p == prev + 1 { + prev = p; + } else { + out.push(NeedEntry::new(start, prev + 1)); + start = p; + prev = p; + } + } + out.push(NeedEntry::new(start, prev + 1)); + out +} + +/// Tree-position metadata for a non-root index chunk's *contribution* to a +/// data chunk index range. +/// +/// Used by `full_path_chunks_for` to look up which index chunks back which +/// data chunks. Senders precompute this at tree-build time; receivers don't +/// need it. +pub struct ChunkPath<'a> { + /// All non-root index chunks on the path from root → leaf, in + /// root-to-leaf order (root itself excluded). + pub index_chain: Vec<&'a IndexChunk>, +} + +/// Compute the set of chunks the sender MUST republish in response to a +/// need-list entry, per the spec's *full-path republish* requirement. +/// +/// Returns indices into `tree.data_chunks` and `tree.index_chunks` (NOT +/// the root). Caller fans those out via `mutable_put` / `immutable_put`. +pub struct ResponseChunks { + pub data_chunk_indices: Vec, + pub index_chunk_indices: Vec, +} + +/// Compute the response chunk set for a single need-list entry. +/// +/// The full-path republish covers: +/// 1. Data chunks `entry.start..entry.end`. +/// 2. Every leaf-index chunk that holds any of those data hashes. +/// 3. Every ancestor non-root index chunk whose subtree intersects the entry. 
+pub fn response_chunks_for_entry(tree: &BuiltTree, entry: NeedEntry) -> ResponseChunks { + let n = tree.data_chunks.len() as u64; + let start = entry.start as u64; + let end = (entry.end as u64).min(n); + if start >= end { + return ResponseChunks { + data_chunk_indices: Vec::new(), + index_chunk_indices: Vec::new(), + }; + } + + // Data chunks are easy. + let data_chunk_indices: Vec = (start as usize..end as usize).collect(); + + if tree.layout.depth == 0 { + // No non-root index chunks exist. + return ResponseChunks { + data_chunk_indices, + index_chunk_indices: Vec::new(), + }; + } + + // Compute touched chunk ranges per layer, bottom-up. + let mut touched_at_layer: Vec<(u64, u64)> = Vec::with_capacity(tree.layout.depth as usize); + + // Leaf layer (layer 0): data hashes are packed 31 per chunk in file order, + // so data position p sits in leaf-index `p / 31`. + let leaf_lo = start / NON_ROOT_INDEX_SLOT_CAP as u64; + let leaf_hi_inclusive = (end - 1) / NON_ROOT_INDEX_SLOT_CAP as u64; + touched_at_layer.push((leaf_lo, leaf_hi_inclusive + 1)); + + // Higher layers: each higher chunk holds 31 lower chunks, packed in order. + for _ in 1..tree.layout.depth { + let (prev_lo, prev_hi_excl) = *touched_at_layer.last().unwrap(); + let prev_hi_inclusive = prev_hi_excl - 1; + let lo = prev_lo / NON_ROOT_INDEX_SLOT_CAP as u64; + let hi_inclusive = prev_hi_inclusive / NON_ROOT_INDEX_SLOT_CAP as u64; + touched_at_layer.push((lo, hi_inclusive + 1)); + } + + // Translate (layer, position_in_layer) → IndexChunk slice index. + // `tree.index_chunks` is in bottom-up build order: all of layer 0, + // then all of layer 1, etc. 
+ let mut layer_offset: Vec = Vec::with_capacity(tree.layout.depth as usize + 1); + let mut acc = 0u64; + layer_offset.push(0); + for &count in &tree.layout.layer_counts { + acc += count; + layer_offset.push(acc); + } + + let mut index_chunk_indices: Vec = Vec::new(); + for (layer, &(lo, hi)) in touched_at_layer.iter().enumerate() { + let layer_chunks = tree.layout.layer_counts[layer]; + let lo = lo.min(layer_chunks); + let hi = hi.min(layer_chunks); + for pos in lo..hi { + let abs_index = (layer_offset[layer] + pos) as usize; + index_chunk_indices.push(abs_index); + } + } + + ResponseChunks { + data_chunk_indices, + index_chunk_indices, + } +} + +/// Compute response chunks for an entire need-list record. +/// +/// Deduplicates so that overlapping ranges don't produce duplicate +/// republishes. Output indices are sorted ascending. +pub fn response_chunks_for_list(tree: &BuiltTree, entries: &[NeedEntry]) -> ResponseChunks { + use std::collections::BTreeSet; + let mut data_set: BTreeSet = BTreeSet::new(); + let mut index_set: BTreeSet = BTreeSet::new(); + + for &entry in entries { + let r = response_chunks_for_entry(tree, entry); + data_set.extend(r.data_chunk_indices); + index_set.extend(r.index_chunk_indices); + } + + ResponseChunks { + data_chunk_indices: data_set.into_iter().collect(), + index_chunk_indices: index_set.into_iter().collect(), + } +} + +/// Helper: data-chunk-index range that a tree of given layout covers. +pub fn full_data_range(layout: &TreeLayout) -> NeedEntry { + NeedEntry { + start: 0, + end: layout.data_chunk_count as u32, + } +} + +/// Convenience: build a need-list covering all chunks (for "send me everything" +/// initial requests if a receiver wants to bootstrap fully). Not normally used. 
+pub fn full_need_list(file_size: u64) -> Vec { + let layout = compute_layout(file_size); + if layout.data_chunk_count == 0 { + Vec::new() + } else { + vec![NeedEntry { + start: 0, + end: layout.data_chunk_count as u32, + }] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmd::deaddrop::v2::build::build_tree_from_bytes; + use crate::cmd::deaddrop::v2::wire::{DATA_PAYLOAD_MAX, NEED_LIST_ENTRY_CAP}; + + #[test] + fn need_list_roundtrip_empty() { + let encoded = encode_need_list(&[]); + assert_eq!(encoded.len(), NEED_LIST_HEADER_SIZE); + let decoded = decode_need_list(&encoded).unwrap(); + assert!(decoded.is_empty()); + } + + #[test] + fn need_list_roundtrip_single_entry() { + let entries = vec![NeedEntry::new(10, 20)]; + let encoded = encode_need_list(&entries); + assert_eq!(encoded.len(), NEED_LIST_HEADER_SIZE + 8); + let decoded = decode_need_list(&encoded).unwrap(); + assert_eq!(decoded, entries); + } + + #[test] + fn need_list_roundtrip_many_entries() { + let entries: Vec = (0..50) + .map(|i| NeedEntry::new(i * 100, i * 100 + 50)) + .collect(); + let encoded = encode_need_list(&entries); + assert_eq!(encoded.len(), NEED_LIST_HEADER_SIZE + 50 * 8); + assert_eq!(decode_need_list(&encoded).unwrap(), entries); + } + + #[test] + fn need_list_at_capacity() { + let entries: Vec = (0..NEED_LIST_ENTRY_CAP as u32) + .map(|i| NeedEntry::new(i, i + 1)) + .collect(); + let encoded = encode_need_list(&entries); + assert!(encoded.len() <= 1000); + assert_eq!(decode_need_list(&encoded).unwrap(), entries); + } + + #[test] + fn empty_bytes_is_done_sentinel() { + let decoded = decode_need_list(&[]).unwrap(); + assert!(decoded.is_empty()); + } + + #[test] + fn rejects_bad_version() { + let bad = vec![0x01, 0x00, 0x00]; + assert_eq!(decode_need_list(&bad), Err(WireError::BadVersion(0x01))); + } + + #[test] + fn rejects_count_mismatch() { + let mut bytes = vec![VERSION, 0x05, 0x00]; // says 5 entries + bytes.extend_from_slice(&[0u8; 8]); // but only 1 + 
assert!(matches!( + decode_need_list(&bytes), + Err(WireError::BadCount { declared: 5, computed: 1 }) + )); + } + + #[test] + fn rejects_invalid_entry() { + let entries = [ + 5u32.to_le_bytes(), + 5u32.to_le_bytes(), // start == end + ]; + let mut bytes = vec![VERSION, 0x01, 0x00]; + bytes.extend_from_slice(&entries[0]); + bytes.extend_from_slice(&entries[1]); + assert!(matches!( + decode_need_list(&bytes), + Err(WireError::InvalidEntry { start: 5, end: 5 }) + )); + } + + #[test] + fn coalesce_empty() { + assert!(coalesce_missing_ranges(&[]).is_empty()); + } + + #[test] + fn coalesce_single() { + assert_eq!(coalesce_missing_ranges(&[5]), vec![NeedEntry::new(5, 6)]); + } + + #[test] + fn coalesce_contiguous() { + assert_eq!( + coalesce_missing_ranges(&[1, 2, 3, 4, 5]), + vec![NeedEntry::new(1, 6)] + ); + } + + #[test] + fn coalesce_gaps() { + assert_eq!( + coalesce_missing_ranges(&[1, 2, 5, 6, 7, 10]), + vec![ + NeedEntry::new(1, 3), + NeedEntry::new(5, 8), + NeedEntry::new(10, 11), + ] + ); + } + + fn make_data(n_chunks: usize) -> Vec { + let mut data = Vec::new(); + for i in 0..n_chunks { + data.extend(std::iter::repeat_n((i % 256) as u8, DATA_PAYLOAD_MAX)); + } + data + } + + #[test] + fn response_chunks_depth_0_no_index() { + let seed = [1u8; 32]; + let data = make_data(20); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + let r = response_chunks_for_entry(&tree, NeedEntry::new(5, 10)); + assert_eq!(r.data_chunk_indices, vec![5, 6, 7, 8, 9]); + assert!(r.index_chunk_indices.is_empty()); + } + + #[test] + fn response_chunks_depth_1_one_leaf() { + let seed = [2u8; 32]; + let data = make_data(70); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + // Need data chunks 5..10 — all in leaf 0 (data 0..30). + let r = response_chunks_for_entry(&tree, NeedEntry::new(5, 10)); + assert_eq!(r.data_chunk_indices, vec![5, 6, 7, 8, 9]); + // Should include exactly leaf 0 (index_chunks[0]). 
+ assert_eq!(r.index_chunk_indices, vec![0]); + } + + #[test] + fn response_chunks_depth_1_spans_leaves() { + let seed = [3u8; 32]; + let data = make_data(70); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + // Need data 25..40 — spans leaf 0 (data 0..31) and leaf 1 (data 31..62). + let r = response_chunks_for_entry(&tree, NeedEntry::new(25, 40)); + assert_eq!(r.data_chunk_indices, (25..40).collect::>()); + assert_eq!(r.index_chunk_indices, vec![0, 1]); + } + + #[test] + fn response_chunks_depth_2_full_path() { + let seed = [4u8; 32]; + let data = make_data(931); // depth 2: 31 leaves + 1 L1 + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + // Need just data position 0 — should pull leaf 0 + L1 0. + let r = response_chunks_for_entry(&tree, NeedEntry::new(0, 1)); + assert_eq!(r.data_chunk_indices, vec![0]); + // Layer offsets: leaves at 0..31, L1 at 31..32. + // Leaf 0 → index_chunks[0]; L1 0 → index_chunks[31]. + assert_eq!(r.index_chunk_indices, vec![0, 31]); + } + + #[test] + fn response_chunks_dedup_across_entries() { + let seed = [5u8; 32]; + let data = make_data(70); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + // Two entries that both touch leaf 0. + let entries = vec![NeedEntry::new(0, 5), NeedEntry::new(10, 15)]; + let r = response_chunks_for_list(&tree, &entries); + // Leaf 0 should only appear once. + assert_eq!(r.index_chunk_indices, vec![0]); + let mut expected_data: Vec = (0..5).chain(10..15).collect(); + expected_data.sort(); + assert_eq!(r.data_chunk_indices, expected_data); + } + + #[test] + fn response_clamps_to_file_size() { + let seed = [6u8; 32]; + let data = make_data(20); + let tree = build_tree_from_bytes(&seed, &data).unwrap(); + // Request bytes past the end. 
+ let r = response_chunks_for_entry(&tree, NeedEntry::new(15, 100)); + assert_eq!(r.data_chunk_indices, (15..20).collect::>()); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/publish.rs b/peeroxide-cli/src/cmd/deaddrop/v2/publish.rs new file mode 100644 index 0000000..ff4aa58 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/publish.rs @@ -0,0 +1,1085 @@ +//! v2 sender: tree build + dependency-ordered publish + refresh + need-watch. + +#![allow(dead_code)] + +use std::collections::HashMap; +use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use libudx::UdxRuntime; +use peeroxide::KeyPair; +use peeroxide_dht::hyperdht::{self, HyperDhtHandle}; +use rand::RngCore; +use tokio::signal; +use tokio::sync::{Mutex, Notify, Semaphore}; + +use crate::cmd::deaddrop::progress::reporter::{OperationFactory, ProgressReporter}; +use crate::cmd::deaddrop::progress::state::{Phase, ProgressState}; +use crate::cmd::sigterm_recv; +use crate::config::ResolvedConfig; + +use super::super::{build_dht_config, to_hex, PutArgs}; +use super::build::{build_tree, BuiltTree}; +use super::keys::{ack_topic, need_topic}; +use super::need::{decode_need_list, response_chunks_for_list}; +use super::queue::{ChunkId, Lane, Operation, WorkQueue}; +use super::tree::data_chunk_count; +use super::wire::DATA_PAYLOAD_MAX; + +/// Maximum tree depth the sender will produce by default. Beyond this, +/// the sender refuses to build the tree. Depth 4 supports up to +/// 27,705,630 data chunks (~27 GB). +pub const SOFT_DEPTH_CAP: u32 = 4; + +/// How often the sender polls for need-list publishers from receivers. +const NEED_POLL_INTERVAL: Duration = Duration::from_secs(5); + +/// Hard wall-clock cap on a single DHT put. The DHT layer has no terminal +/// timeout on `query` — a degenerate convergence can keep iterating +/// indefinitely while holding the publish-pipeline permit. 
Healthy puts +/// finish in 4–6s; 30s is ~5–7× that, well outside healthy variance. +/// On timeout the future is dropped (freeing the permit) and the outcome +/// is reported as degraded so AIMD reacts. +const PUT_TIMEOUT: Duration = Duration::from_secs(30); + +/// How often the sender re-announces its presence on the need topic +/// (this is on the receiver side; we keep the constant here for the +/// equivalent receiver-side use). +#[allow(dead_code)] +const NEED_REANNOUNCE_INTERVAL: Duration = Duration::from_secs(60); + +/// Cooperative cancellation primitive shared across every long-running task +/// and `.await` in this module. +/// +/// `Notify::notified()` is edge-triggered — a late waiter misses a past +/// `notify_waiters()` call — so we pair it with an `AtomicBool` flag so the +/// signal is sticky: once tripped, every future call to `cancelled()` +/// resolves immediately. +/// +/// **Discipline rule:** any `.await` inside a `tokio::select!` arm body +/// must itself `select!` against `shutdown.cancelled()`. Once a select arm +/// body starts executing, the other arms are dropped, so an unguarded +/// `.await` deep inside one will deafen ctrl-c. The double-ctrl-c hard +/// exit in `spawn_signal_handler` is insurance against this rule being +/// violated — graceful shutdown still depends on the rule itself. +#[derive(Clone)] +pub(super) struct Shutdown { + flag: Arc, + notify: Arc, +} + +impl Shutdown { + pub fn new() -> Self { + Self { + flag: Arc::new(AtomicBool::new(false)), + notify: Arc::new(Notify::new()), + } + } + + /// Trip the signal. Idempotent; safe to call from any task. + pub fn cancel(&self) { + if !self.flag.swap(true, Ordering::SeqCst) { + self.notify.notify_waiters(); + } + } + + pub fn is_cancelled(&self) -> bool { + self.flag.load(Ordering::SeqCst) + } + + /// Resolves once shutdown has been requested. Safe to await repeatedly. 
+ pub async fn cancelled(&self) { + if self.is_cancelled() { + return; + } + // Subscribe before the second flag check to close the race between + // `is_cancelled()` returning false and `notify_waiters()` firing. + let waiter = self.notify.notified(); + if self.is_cancelled() { + return; + } + waiter.await; + } +} + +/// Spawn the global signal handler. +/// +/// First SIGINT/SIGTERM trips `shutdown` (graceful). A second signal after +/// that — with no timer — calls `std::process::exit(130)` unconditionally. +/// The user's patience is the timer; if they're hitting ctrl-c twice they +/// want out regardless of whether graceful shutdown is making progress. +fn spawn_signal_handler(shutdown: Shutdown) { + tokio::spawn(async move { + tokio::select! { + _ = signal::ctrl_c() => {} + _ = sigterm_recv() => {} + } + eprintln!("\n shutting down (press ctrl-c again to force-exit)..."); + shutdown.cancel(); + + tokio::select! { + _ = signal::ctrl_c() => {} + _ = sigterm_recv() => {} + } + eprintln!(" force-exit"); + std::process::exit(130); + }); +} + +/// AIMD controller: monitors put-result degradation and adjusts an effective +/// concurrency target. +/// +/// Reacts continuously via an EWMA of the degraded-put rate, with two +/// decision paths: +/// +/// 1. **Normal**: every `decision_interval` samples, consult the EWMA and +/// shrink (>30%), grow (<5%), or hold (the dead band in between). +/// 2. **Fast trip**: if `fast_trip_threshold` degraded puts accumulate within +/// a single decision interval, shrink immediately without waiting for the +/// boundary. This catches sudden cliffs (e.g. a DHT region going dark) +/// that the EWMA alone would smear over. +/// +/// A one-shot `shrink_cooldown` damps back-to-back shrinks so in-flight puts +/// from the larger target have a chance to drain before the next contraction. +struct AimdController { + current: usize, + /// Original target chosen at startup. 
Used by the stall watchdog as the + /// reference for its recovery floor. + initial: usize, + max_cap: Option, + /// EWMA of degradation in [0.0, 1.0]. Updated on every sample. + ewma: f64, + /// EWMA smoothing factor (per-sample weight). Smaller = smoother / slower + /// to react; larger = more reactive but jumpier. + alpha: f64, + /// Samples observed since the last decision (gates the normal path). + samples_since_decision: u32, + /// Make a normal decision every `decision_interval` samples. + decision_interval: u32, + /// Degraded samples observed since the last decision (gates fast-trip). + degraded_since_decision: u32, + /// If degraded count reaches this *within* a decision interval, shrink + /// immediately rather than waiting for the boundary. + fast_trip_threshold: u32, + /// If true, the previous decision shrank; suppress the next shrink so + /// the system can drain before contracting again. + shrink_cooldown: bool, +} + +impl AimdController { + fn new(initial: usize, max_cap: Option) -> Self { + Self { + current: initial, + initial, + max_cap, + ewma: 0.0, + // alpha = 0.1 → ~7-sample half-life; comparable reactivity to the + // old 10-sample tumbling window but smooth and never blind. + alpha: 0.1, + samples_since_decision: 0, + decision_interval: 20, + degraded_since_decision: 0, + // 50% degraded inside one decision interval → emergency shrink. + fast_trip_threshold: 10, + shrink_cooldown: false, + } + } + + /// Watchdog escape hatch: forcibly lift `current` to a recovery floor + /// (half of initial) and clear adaptive state so the next real samples + /// drive the decision afresh. Only returns Some when it actually raises + /// current — if we're already at/above the floor, the stall is not an + /// AIMD-wedge problem and we leave things alone. 
+ fn kick_stall(&mut self) -> Option { + let floor = (self.initial / 2).max(1); + if self.current >= floor { + return None; + } + self.current = floor; + self.ewma = 0.0; + self.shrink_cooldown = false; + self.samples_since_decision = 0; + self.degraded_since_decision = 0; + Some(self.current) + } + + fn shrink_step(&mut self) -> usize { + self.current = ((self.current as f64 * 0.75) as usize).max(1); + self.shrink_cooldown = true; + self.current + } + + fn grow_step(&mut self) -> usize { + let next = self.current + 2; + self.current = match self.max_cap { + Some(cap) => next.min(cap), + None => next, + }; + self.shrink_cooldown = false; + self.current + } + + fn reset_decision_window(&mut self) { + self.samples_since_decision = 0; + self.degraded_since_decision = 0; + } + + fn record(&mut self, degraded: bool) -> Option { + // Continuous EWMA update — never blind between decisions. + let sample = if degraded { 1.0 } else { 0.0 }; + self.ewma = self.alpha * sample + (1.0 - self.alpha) * self.ewma; + self.samples_since_decision += 1; + if degraded { + self.degraded_since_decision += 1; + } + + // Fast-trip path: a burst of degradation mid-interval triggers an + // immediate shrink (still honoring back-to-back cooldown). + if self.degraded_since_decision >= self.fast_trip_threshold { + self.reset_decision_window(); + if self.shrink_cooldown { + self.shrink_cooldown = false; + return None; + } + return Some(self.shrink_step()); + } + + // Normal decision boundary. + if self.samples_since_decision >= self.decision_interval { + let ewma = self.ewma; + self.reset_decision_window(); + if ewma > 0.3 { + if self.shrink_cooldown { + self.shrink_cooldown = false; + return None; + } + return Some(self.shrink_step()); + } else if ewma < 0.05 { + return Some(self.grow_step()); + } else { + // Dead band: hold the line, but clear cooldown so a real + // spike afterwards can react without delay. 
+ self.shrink_cooldown = false; + return None; + } + } + + None + } +} + +/// Single shared concurrency state between the publish pipeline and the AIMD +/// controller. Permits are forgotten on shrink and added back on grow. +#[derive(Clone)] +pub(super) struct ConcurrencyState { + sem: Arc, + target: Arc, + forget_pending: Arc, + aimd: Arc>, + /// Unix-ms timestamp of the most recent `record()`. Drives the stall + /// watchdog: if this stops moving, no put is resolving (success or + /// failure), which usually means AIMD has wedged itself low. + last_record_ms: Arc, + /// Unix-ms timestamp of the most recent watchdog kick. Used to + /// rate-limit kicks so a genuinely overloaded link can settle. + last_kick_ms: Arc, +} + +impl ConcurrencyState { + fn new(initial: usize, max_cap: Option) -> Self { + Self { + sem: Arc::new(Semaphore::new(initial)), + target: Arc::new(AtomicUsize::new(initial)), + forget_pending: Arc::new(AtomicUsize::new(0)), + aimd: Arc::new(Mutex::new(AimdController::new(initial, max_cap))), + last_record_ms: Arc::new(AtomicU64::new(now_ms())), + last_kick_ms: Arc::new(AtomicU64::new(0)), + } + } + + /// Acquire a permit, honoring any pending shrink (forget). + async fn acquire(&self) -> tokio::sync::OwnedSemaphorePermit { + loop { + let permit = self.sem.clone().acquire_owned().await.unwrap(); + let pending = self.forget_pending.load(Ordering::Relaxed); + if pending > 0 + && self + .forget_pending + .fetch_sub(1, Ordering::Relaxed) + > 0 + { + permit.forget(); + } else { + return permit; + } + } + } + + /// Record an outcome and rebalance permits if AIMD has changed the target. + async fn record(&self, degraded: bool) { + self.last_record_ms.store(now_ms(), Ordering::Relaxed); + let new_target = { + let mut ctrl = self.aimd.lock().await; + ctrl.record(degraded) + }; + if let Some(target) = new_target { + self.apply_target(target); + } + } + + /// Watchdog entry. 
If no put has resolved in `stall_threshold` and we + /// haven't kicked recently, ask AIMD to lift off the floor and rebalance + /// permits. Returns the new target on a successful kick (for logging). + async fn kick_if_stalled( + &self, + stall_threshold: Duration, + min_kick_interval: Duration, + ) -> Option { + let now = now_ms(); + let since_record = now.saturating_sub(self.last_record_ms.load(Ordering::Relaxed)); + if since_record < stall_threshold.as_millis() as u64 { + return None; + } + let since_kick = now.saturating_sub(self.last_kick_ms.load(Ordering::Relaxed)); + if since_kick < min_kick_interval.as_millis() as u64 { + return None; + } + let new_target = { + let mut ctrl = self.aimd.lock().await; + ctrl.kick_stall() + }; + if let Some(target) = new_target { + self.last_kick_ms.store(now, Ordering::Relaxed); + // Refresh the record clock so we don't immediately re-kick while + // the new permits work their way through the system. + self.last_record_ms.store(now, Ordering::Relaxed); + self.apply_target(target); + } + new_target + } + + fn apply_target(&self, target: usize) { + let current_target = self.target.load(Ordering::Relaxed); + match target.cmp(¤t_target) { + std::cmp::Ordering::Greater => { + self.sem.add_permits(target - current_target); + self.target.store(target, Ordering::Relaxed); + } + std::cmp::Ordering::Less => { + self.forget_pending + .fetch_add(current_target - target, Ordering::Relaxed); + self.target.store(target, Ordering::Relaxed); + } + std::cmp::Ordering::Equal => {} + } + } +} + +fn now_ms() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_millis() as u64) + .unwrap_or(0) +} + +/// Publish a single mutable record (signed by `kp` with the current Unix +/// timestamp as `seq`). Returns whether the put was degraded (commit +/// timeouts > 0). 
+async fn put_mutable( + handle: &HyperDhtHandle, + kp: &KeyPair, + bytes: &[u8], +) -> Result { + let seq = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + match handle.mutable_put(kp, bytes, seq).await { + Ok(r) => Ok(r.commit_timeouts > 0), + Err(e) => Err(format!("mutable_put failed: {e}")), + } +} + +/// One unit of work for the publish pipeline. +pub(super) enum PublishUnit { + /// An immutable data chunk (`immutable_put`). + Data { encoded: Vec }, + /// A signed mutable index chunk (`mutable_put`). + Index { keypair: KeyPair, encoded: Vec }, +} + +/// Long-lived dispatcher: pull from the queue, acquire a permit, spawn the +/// put. Exits cleanly on `shutdown`. +async fn dispatcher( + handle: HyperDhtHandle, + queue: Arc, + state: ConcurrencyState, + shutdown: Shutdown, +) { + loop { + let pop_fut = queue.pop(); + tokio::pin!(pop_fut); + let (id, unit, _subs) = tokio::select! { + _ = shutdown.cancelled() => break, + r = &mut pop_fut => r, + }; + let permit = state.acquire().await; + let h = handle.clone(); + let st = state.clone(); + let q = queue.clone(); + tokio::spawn(async move { + let (degraded, bytes, is_data) = match unit { + PublishUnit::Data { encoded } => { + let len = encoded.len() as u64; + let r = tokio::time::timeout(PUT_TIMEOUT, h.immutable_put(&encoded)).await; + let degraded = match r { + Ok(Ok(_)) => false, + Ok(Err(_)) | Err(_) => true, + }; + (degraded, len, true) + } + PublishUnit::Index { keypair, encoded } => { + let r = tokio::time::timeout(PUT_TIMEOUT, put_mutable(&h, &keypair, &encoded)) + .await; + let degraded = match r { + Ok(Ok(d)) => d, + Ok(Err(_)) | Err(_) => true, + }; + (degraded, 0, false) + } + }; + st.record(degraded).await; + q.mark_done(id, bytes, is_data).await; + drop(permit); + }); + } +} + +/// Enqueue every non-root chunk of `tree` against `op` on the given lane. 
+async fn enqueue_tree_non_root( + queue: &WorkQueue, + tree: &BuiltTree, + lane: Lane, + op: &Operation, +) { + for (i, c) in tree.data_chunks.iter().enumerate() { + queue + .enqueue( + ChunkId::Data(i), + PublishUnit::Data { + encoded: c.encoded.clone(), + }, + lane, + op.subscriber(), + ) + .await; + } + for (i, c) in tree.index_chunks.iter().enumerate() { + queue + .enqueue( + ChunkId::Index(i), + PublishUnit::Index { + keypair: c.keypair.clone(), + encoded: c.encoded.clone(), + }, + lane, + op.subscriber(), + ) + .await; + } +} + +/// Enqueue only the chunks listed in `data_idx` / `index_idx` (need-list +/// response). Always uses the High lane. +async fn enqueue_partial( + queue: &WorkQueue, + tree: &BuiltTree, + data_idx: &[usize], + index_idx: &[usize], + op: &Operation, +) { + for &i in data_idx { + let c = &tree.data_chunks[i]; + queue + .enqueue( + ChunkId::Data(i), + PublishUnit::Data { + encoded: c.encoded.clone(), + }, + Lane::High, + op.subscriber(), + ) + .await; + } + for &i in index_idx { + let c = &tree.index_chunks[i]; + queue + .enqueue( + ChunkId::Index(i), + PublishUnit::Index { + keypair: c.keypair.clone(), + encoded: c.encoded.clone(), + }, + Lane::High, + op.subscriber(), + ) + .await; + } +} + +/// Enqueue the root index chunk. +async fn enqueue_root(queue: &WorkQueue, tree: &BuiltTree, lane: Lane, op: &Operation) { + queue + .enqueue( + ChunkId::Root, + PublishUnit::Index { + keypair: tree.root_keypair.clone(), + encoded: tree.root_encoded.clone(), + }, + lane, + op.subscriber(), + ) + .await; +} + +/// Per-peer state tracked by the need-watcher. Dedups by value-hash so a +/// receiver's 10-minute keepalive republish (identical content, bumped +/// seq) doesn't trigger a duplicate service. +#[derive(Default)] +struct PeerState { + /// Hash of the last value bytes we serviced (or empty-marker for done). 
+ last_value_hash: Option<[u8; 32]>, + /// True once we've observed the empty-need-list "done" sentinel; we + /// stop fetching from this peer thereafter. + completed: bool, + /// Last seq we observed — informational, only used for log clarity. + last_seq: Option, +} + +fn hash_bytes(bytes: &[u8]) -> [u8; 32] { + use sha2::{Digest, Sha256}; + let mut h = Sha256::new(); + h.update(bytes); + let out = h.finalize(); + let mut arr = [0u8; 32]; + arr.copy_from_slice(&out); + arr +} + +/// Background task: poll the need topic and enqueue chunks as receivers +/// request them. Dedups by per-peer value hash so identical keepalive +/// republishes from the receiver cost only a single `mutable_get`. +async fn run_need_watcher( + handle: HyperDhtHandle, + tree: Arc, + queue: Arc, + need_topic_key: [u8; 32], + op_factory: OperationFactory, + shutdown: Shutdown, +) { + let mut peers: HashMap<[u8; 32], PeerState> = HashMap::new(); + eprintln!( + " need-list watcher started (poll every {}s)", + NEED_POLL_INTERVAL.as_secs() + ); + loop { + tokio::select! 
{ + _ = shutdown.cancelled() => break, + _ = tokio::time::sleep(NEED_POLL_INTERVAL) => { + let lookup = match handle.lookup(need_topic_key).await { + Ok(r) => r, + Err(e) => { + eprintln!(" warning: need-topic lookup failed: {e}"); + continue; + } + }; + for result in &lookup { + for peer in &result.peers { + let pk_short = to_hex(&peer.public_key); + let entry = peers.entry(peer.public_key).or_insert_with(|| { + eprintln!( + " need-list peer discovered: {}", + &pk_short[..8] + ); + PeerState::default() + }); + if entry.completed { + continue; + } + let mv = match handle.mutable_get(&peer.public_key, 0).await { + Ok(Some(v)) => v, + Ok(None) => continue, + Err(e) => { + eprintln!( + " warning: need-list get from {} failed: {e}", + &pk_short[..8] + ); + continue; + } + }; + let value_hash = hash_bytes(&mv.value); + if entry.last_value_hash == Some(value_hash) { + // Same content as last time we serviced — keepalive + // republish from the receiver. Skip. + entry.last_seq = Some(mv.seq); + continue; + } + let entries = match decode_need_list(&mv.value) { + Ok(v) => v, + Err(e) => { + eprintln!( + " warning: malformed need-list from {}: {e}", + &pk_short[..8] + ); + continue; + } + }; + if entries.is_empty() { + entry.completed = true; + entry.last_value_hash = Some(value_hash); + entry.last_seq = Some(mv.seq); + eprintln!( + " need-list peer {} signaled done", + &pk_short[..8] + ); + continue; + } + let resp = response_chunks_for_list(&tree, &entries); + let n_data = resp.data_chunk_indices.len(); + let n_index = resp.index_chunk_indices.len(); + eprintln!( + " need-list received from {} (seq {}): {} data + {} index chunks to republish", + &pk_short[..8], + mv.seq, + n_data, + n_index + ); + let bytes_total: u64 = resp + .data_chunk_indices + .iter() + .map(|&i| tree.data_chunks[i].encoded.len() as u64) + .sum(); + let handle_op = op_factory.begin_operation( + bytes_total, + n_index as u32, + n_data as u32, + ); + let op = Operation::new(handle_op.state(), n_data + 
n_index); + enqueue_partial( + &queue, + &tree, + &resp.data_chunk_indices, + &resp.index_chunk_indices, + &op, + ) + .await; + // Mark as serviced on enqueue (not completion) — a + // failed put causes AIMD shrink, the receiver + // times out and publishes a fresh seq with the + // still-missing set, which we'll see as a new + // value hash and service again. + entry.last_value_hash = Some(value_hash); + entry.last_seq = Some(mv.seq); + tokio::select! { + _ = shutdown.cancelled() => return, + _ = op.await_done() => {} + } + handle_op.finish().await; + eprintln!( + " need-list republish complete: {n_data} data + {n_index} index" + ); + } + } + } + } + } +} + +fn parse_max_speed(s: &str) -> Result { + let s = s.trim().to_lowercase(); + if let Some(num) = s.strip_suffix('m') { + num.parse::() + .map(|n| n * 1_000_000) + .map_err(|e| format!("invalid --max-speed: {e}")) + } else if let Some(num) = s.strip_suffix('k') { + num.parse::() + .map(|n| n * 1_000) + .map_err(|e| format!("invalid --max-speed: {e}")) + } else { + s.parse::() + .map_err(|e| format!("invalid --max-speed: {e}")) + } +} + +fn rpassword_read() -> String { + use std::io::{BufRead, BufReader}; + let tty = match std::fs::File::open("/dev/tty") { + Ok(f) => f, + Err(_) => { + let mut line = String::new(); + std::io::stdin().read_line(&mut line).unwrap_or(0); + return line.trim_end_matches('\n').trim_end_matches('\r').to_string(); + } + }; + let mut reader = BufReader::new(tty); + let mut line = String::new(); + reader.read_line(&mut line).unwrap_or(0); + line.trim_end_matches('\n').trim_end_matches('\r').to_string() +} + +/// Read input bytes for the put operation. Uses mmap when reading from a +/// regular file (low RAM footprint); falls back to in-memory buffering for +/// stdin (where mmap is not applicable). 
+fn read_input(path: &str) -> Result, String> { + if path == "-" { + use std::io::Read; + let mut buf = Vec::new(); + std::io::stdin() + .read_to_end(&mut buf) + .map_err(|e| format!("failed to read stdin: {e}"))?; + Ok(buf) + } else { + // Open + mmap. We materialize into Vec here so build_tree's + // chunk iterator can hold simple slices. The mmap is dropped at + // function end. For the strict zero-RAM path, build_tree should + // accept a borrowed slice (which is what `Vec` provides via deref); + // future iteration could pass the Mmap's Deref directly through. + let file = std::fs::File::open(path).map_err(|e| format!("failed to open {path}: {e}"))?; + let metadata = file + .metadata() + .map_err(|e| format!("failed to stat {path}: {e}"))?; + if metadata.len() == 0 { + return Ok(Vec::new()); + } + let mmap = unsafe { + memmap2::Mmap::map(&file).map_err(|e| format!("mmap failed for {path}: {e}"))? + }; + Ok(mmap.to_vec()) + } +} + +/// Top-level PUT entry point. +pub async fn run_put(args: &PutArgs, cfg: &ResolvedConfig) -> i32 { + if args.refresh_interval == 0 { + eprintln!("error: --refresh-interval must be greater than 0"); + return 1; + } + if args.ttl == Some(0) { + eprintln!("error: --ttl must be greater than 0"); + return 1; + } + if args.max_pickups == Some(0) { + eprintln!("error: --max-pickups must be greater than 0"); + return 1; + } + + // 1. Read input. + let data = match read_input(&args.file) { + Ok(d) => d, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + // 2. Tree-shape soft cap check. + let n = data_chunk_count(data.len() as u64); + let depth = super::tree::canonical_depth(n); + if depth > SOFT_DEPTH_CAP { + eprintln!( + "error: file requires tree depth {depth}, which exceeds the soft cap of {SOFT_DEPTH_CAP} (~27 GB at the current 998-byte chunk size); refusing to build" + ); + return 1; + } + + // 3. Resolve root_seed. 
+ let root_seed: [u8; 32] = if let Some(ref phrase) = args.passphrase { + if phrase.is_empty() { + eprintln!("error: passphrase cannot be empty"); + return 1; + } + peeroxide::discovery_key(phrase.as_bytes()) + } else if args.interactive_passphrase { + eprintln!("Enter passphrase: "); + let passphrase = rpassword_read(); + if passphrase.is_empty() { + eprintln!("error: passphrase cannot be empty"); + return 1; + } + peeroxide::discovery_key(passphrase.as_bytes()) + } else { + let mut seed = [0u8; 32]; + rand::rng().fill_bytes(&mut seed); + seed + }; + + // 4. Build the tree. + let tree = match build_tree( + &root_seed, + data.len() as u64, + crc32c::crc32c(&data), + data.chunks(DATA_PAYLOAD_MAX), + ) { + Ok(t) => t, + Err(e) => { + eprintln!("error: {e}"); + return 1; + } + }; + + // 5. Spawn DHT. + let dht_config = build_dht_config(cfg); + let runtime = match UdxRuntime::new() { + Ok(r) => r, + Err(e) => { + eprintln!("error: failed to create UDP runtime: {e}"); + return 1; + } + }; + let (task, handle, _rx) = match hyperdht::spawn(&runtime, dht_config).await { + Ok(v) => v, + Err(e) => { + eprintln!("error: failed to start DHT: {e}"); + return 1; + } + }; + if let Err(e) = handle.bootstrapped().await { + eprintln!("error: bootstrap failed: {e}"); + let _ = handle.destroy().await; + let _ = task.await; + return 1; + } + + // 6. Concurrency / rate-limit setup. + let (max_concurrency, _dispatch_delay): (Option, Option) = + if let Some(ref speed_str) = args.max_speed { + match parse_max_speed(speed_str) { + Ok(speed) => { + let cap = ((speed / 22000) as usize).max(1); + let delay = Duration::from_secs_f64(22000.0 / speed as f64); + (Some(cap), Some(delay)) + } + Err(e) => { + eprintln!("error: {e}"); + let _ = handle.destroy().await; + let _ = task.await; + return 1; + } + } + } else { + (None, None) + }; + // Initial concurrency. 
AIMD will adjust based on observed degradation; + // starting higher gives better throughput on healthy networks while still + // allowing the controller to shrink if puts start timing out. + let initial_concurrency = 128usize; + let conc = ConcurrencyState::new(initial_concurrency, max_concurrency); + + // 7. Progress reporter. + let filename: Arc = if args.file == "-" { + Arc::from("") + } else { + let base = std::path::Path::new(&args.file) + .file_name() + .map(|n| n.to_string_lossy().into_owned()) + .unwrap_or_else(|| args.file.clone()); + Arc::from(base.as_str()) + }; + let state = ProgressState::new_with_wire(Phase::Put, 2, filename, handle.wire_counters()); + state.set_length( + data.len() as u64, + (tree.index_chunks.len() + 1) as u32, // include root + tree.data_chunks.len() as u32, + ); + let mut reporter = ProgressReporter::from_args(state.clone(), args.no_progress, args.json); + reporter.on_start(); + + // 8. Set up shared shutdown signal + signal handler, then spawn the + // dispatcher and run the initial publish through the queue. + let shutdown = Shutdown::new(); + spawn_signal_handler(shutdown.clone()); + + let queue = WorkQueue::new(); + let dispatcher_handle = tokio::spawn(dispatcher( + handle.clone(), + queue.clone(), + conc.clone(), + shutdown.clone(), + )); + + // Stall watchdog: if no put has resolved in 30s, kick AIMD off the floor. + // Rate-limited to once per 2 min so a genuinely overloaded link can settle + // at its true ceiling rather than oscillating around the kick target. + let watchdog_shutdown = shutdown.clone(); + let watchdog_conc = conc.clone(); + let watchdog_handle = tokio::spawn(async move { + let mut tick = tokio::time::interval(Duration::from_secs(5)); + tick.tick().await; + loop { + tokio::select! 
{ + _ = watchdog_shutdown.cancelled() => break, + _ = tick.tick() => { + if let Some(t) = watchdog_conc + .kick_if_stalled(Duration::from_secs(30), Duration::from_secs(120)) + .await + { + eprintln!(" stall watchdog: AIMD kicked → target {t}"); + } + } + } + } + }); + + // Initial publish: non-root chunks first (data + index layers), then + // the root last. The "root last" rule is the only ordering constraint + // in v2: until the root is published, no other pubkey is derivable. + // + // Everything below the initial publish runs inside a labeled block so + // any cancel-aware await can `break 'main 0` straight to cleanup. + let tree_arc = Arc::new(tree); + let op_factory = reporter.operation_factory(); + let mut watcher_handle: Option> = None; + + let exit_code: i32 = 'main: { + let non_root_count = tree_arc.data_chunks.len() + tree_arc.index_chunks.len(); + let initial_op = Operation::new(state.clone(), non_root_count); + enqueue_tree_non_root(&queue, &tree_arc, Lane::Normal, &initial_op).await; + tokio::select! { + biased; + _ = shutdown.cancelled() => break 'main 0, + _ = initial_op.await_done() => {} + } + + let root_op = Operation::new(state.clone(), 1); + enqueue_root(&queue, &tree_arc, Lane::Normal, &root_op).await; + tokio::select! { + biased; + _ = shutdown.cancelled() => break 'main 0, + _ = root_op.await_done() => {} + } + + // 9. Print pickup key. + let pickup_key = to_hex(&tree_arc.root_keypair.public_key); + reporter.emit_initial_publish_complete(&pickup_key).await; + + eprintln!(" published to DHT (best-effort)"); + eprintln!(" pickup key printed to stdout"); + eprintln!( + " refreshing every {}s, polling needs every {}s, monitoring for acks every 30s...", + args.refresh_interval, + NEED_POLL_INTERVAL.as_secs() + ); + + // 10. Spawn need-watcher. 
+ let need_topic_key = need_topic(&tree_arc.root_keypair.public_key); + watcher_handle = Some(tokio::spawn(run_need_watcher( + handle.clone(), + tree_arc.clone(), + queue.clone(), + need_topic_key, + op_factory.clone(), + shutdown.clone(), + ))); + + // 11. Refresh + ack loop. + let ack_topic_key = ack_topic(&tree_arc.root_keypair.public_key); + let mut seen_acks: std::collections::HashSet<[u8; 32]> = std::collections::HashSet::new(); + let mut pickup_count: u64 = 0; + let ttl_deadline = args + .ttl + .map(|t| tokio::time::Instant::now() + Duration::from_secs(t)); + let mut refresh_interval = + tokio::time::interval(Duration::from_secs(args.refresh_interval)); + refresh_interval.tick().await; + let mut ack_interval = tokio::time::interval(Duration::from_secs(30)); + ack_interval.tick().await; + + loop { + tokio::select! { + biased; + _ = shutdown.cancelled() => break 'main 0, + _ = async { + if let Some(deadline) = ttl_deadline { + tokio::time::sleep_until(deadline).await; + } else { + std::future::pending::<()>().await; + } + } => break 'main 0, + _ = refresh_interval.tick() => { + eprintln!( + " refreshing tree ({} index + {} data chunks)...", + tree_arc.index_chunks.len() + 1, + tree_arc.data_chunks.len() + ); + let bytes_total: u64 = tree_arc + .data_chunks + .iter() + .map(|c| c.encoded.len() as u64) + .sum(); + let idx_total = (tree_arc.index_chunks.len() + 1) as u32; + let data_total = tree_arc.data_chunks.len() as u32; + let total_chunks = + tree_arc.index_chunks.len() + tree_arc.data_chunks.len() + 1; + let handle_op = + op_factory.begin_operation(bytes_total, idx_total, data_total); + let op = Operation::new(handle_op.state(), total_chunks); + // Concurrent refresh ticks coalesce naturally: chunks still + // queued or in flight from a prior tick attach this op as a + // subscriber rather than producing a duplicate put. 
+ enqueue_tree_non_root(&queue, &tree_arc, Lane::Normal, &op).await; + enqueue_root(&queue, &tree_arc, Lane::Normal, &op).await; + tokio::select! { + _ = shutdown.cancelled() => break 'main 0, + _ = op.await_done() => {} + } + handle_op.finish().await; + } + _ = ack_interval.tick() => { + let lookup_fut = handle.lookup(ack_topic_key); + let lookup_res = tokio::select! { + _ = shutdown.cancelled() => break 'main 0, + r = lookup_fut => r, + }; + let mut max_reached = false; + if let Ok(results) = lookup_res { + 'outer: for result in &results { + for peer in &result.peers { + if seen_acks.insert(peer.public_key) { + pickup_count += 1; + reporter.on_ack(pickup_count, &to_hex(&peer.public_key)); + eprintln!(" [ack] pickup #{pickup_count} detected"); + if let Some(max) = args.max_pickups { + if pickup_count >= max { + eprintln!(" max pickups reached, stopping"); + max_reached = true; + break 'outer; + } + } + } + } + } + } + if max_reached { + break 'main 0; + } + } + } + } + }; + + // 12. Cleanup. A single `cancel()` notifies every subsystem; they all + // share the same `Shutdown`. Idempotent — safe if the signal handler + // already tripped it. + shutdown.cancel(); + if let Some(h) = watcher_handle { + let _ = h.await; + } + let _ = dispatcher_handle.await; + let _ = watchdog_handle.await; + eprintln!(" stopped refreshing; records expire in ~20m"); + reporter.finish().await; + let _ = handle.destroy().await; + let _ = task.await; + exit_code +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/queue.rs b/peeroxide-cli/src/cmd/deaddrop/v2/queue.rs new file mode 100644 index 0000000..4f9b56c --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/queue.rs @@ -0,0 +1,214 @@ +//! Shared, dedup'd, priority work queue for the v2 sender. +//! +//! A single dispatcher pulls `(ChunkId, PublishUnit, subscribers)` triples +//! out of the queue, acquires a permit from the shared `ConcurrencyState`, +//! and spawns a put. Triggers (initial publish, refresh tick, need-list +//! 
response) only *enqueue* — they never spawn put tasks themselves. +//! +//! Each trigger gets an [`Operation`] whose [`Operation::await_done`] +//! resolves when every chunk it asked for has been put — whether by this +//! trigger's enqueue or by an overlapping one that arrived first. That's +//! how a single physical put can satisfy a need-list response *and* a +//! concurrent refresh tick simultaneously. + +#![allow(dead_code)] + +use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; + +use tokio::sync::{Mutex, Notify}; + +use super::publish::PublishUnit; +use crate::cmd::deaddrop::progress::state::ProgressState; + +/// Identifies one chunk in the tree. Stable across re-enqueues. +#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)] +pub enum ChunkId { + /// Index into `BuiltTree::data_chunks`. + Data(usize), + /// Index into `BuiltTree::index_chunks`. + Index(usize), + /// The root index chunk. + Root, +} + +/// Priority lane. High drains before Normal. +#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug)] +pub enum Lane { + Normal = 0, + High = 1, +} + +/// One operation's hook on a chunk: progress state to advance + a remaining +/// counter to decrement + a notify to fire when the operation finishes. +#[derive(Clone)] +pub struct Subscriber { + state: Arc, + remaining: Arc, + done: Arc, +} + +struct Entry { + unit: PublishUnit, + lane: Lane, + subs: Vec, +} + +struct Inner { + queued: HashMap, + high: VecDeque, + normal: VecDeque, + /// Chunks currently being put. Subscribers attached while a chunk is in + /// flight are recorded here so they get fired on completion. 
+ inflight: HashMap>, +} + +pub struct WorkQueue { + inner: Mutex, + have_work: Notify, +} + +impl WorkQueue { + pub fn new() -> Arc { + Arc::new(Self { + inner: Mutex::new(Inner { + queued: HashMap::new(), + high: VecDeque::new(), + normal: VecDeque::new(), + inflight: HashMap::new(), + }), + have_work: Notify::new(), + }) + } + + /// Enqueue one chunk on behalf of `sub`. If the chunk is already queued + /// or in flight, `sub` attaches to the existing entry instead of + /// triggering a duplicate put. A lane upgrade (Normal → High) re-pushes + /// the id onto the High deque; stale Normal-deque ids are skipped at + /// pop time. + pub(super) async fn enqueue( + &self, + id: ChunkId, + unit: PublishUnit, + lane: Lane, + sub: Subscriber, + ) { + let mut inner = self.inner.lock().await; + if let Some(subs) = inner.inflight.get_mut(&id) { + subs.push(sub); + return; + } + if let Some(entry) = inner.queued.get_mut(&id) { + entry.subs.push(sub); + if lane > entry.lane { + entry.lane = lane; + inner.high.push_back(id); + } + return; + } + inner.queued.insert(id, Entry { unit, lane, subs: vec![sub] }); + match lane { + Lane::High => inner.high.push_back(id), + Lane::Normal => inner.normal.push_back(id), + } + drop(inner); + self.have_work.notify_one(); + } + + /// Pop the next chunk (High lane first). Awaits if the queue is empty. + /// Returns the unit to publish and the subscribers that should be + /// notified on completion. + pub(super) async fn pop(&self) -> (ChunkId, PublishUnit, Vec) { + loop { + { + let mut inner = self.inner.lock().await; + while let Some(id) = inner.high.pop_front() { + if let Some(entry) = inner.queued.remove(&id) { + inner.inflight.insert(id, entry.subs.clone()); + return (id, entry.unit, entry.subs); + } + } + while let Some(id) = inner.normal.pop_front() { + if let Some(entry) = inner.queued.remove(&id) { + if entry.lane == Lane::High { + // Was upgraded after being pushed onto Normal; + // the High-deque copy will handle it. 
Re-insert + // and skip this stale entry. + inner.queued.insert(id, entry); + continue; + } + inner.inflight.insert(id, entry.subs.clone()); + return (id, entry.unit, entry.subs); + } + } + } + self.have_work.notified().await; + } + } + + /// Mark a put complete. Fires every subscriber that was registered + /// either at pop time or while the chunk was in flight. + pub async fn mark_done(&self, id: ChunkId, bytes: u64, is_data: bool) { + let subs = { + let mut inner = self.inner.lock().await; + inner.inflight.remove(&id).unwrap_or_default() + }; + for sub in subs { + if is_data { + sub.state.inc_data(bytes); + } else { + sub.state.inc_index(); + } + if sub.remaining.fetch_sub(1, Ordering::AcqRel) == 1 { + sub.done.notify_waiters(); + } + } + } +} + +/// Trigger-side handle that registers chunks of interest and awaits their +/// completion. Each trigger (initial, refresh, need-list) creates one of +/// these, enqueues its chunks, then awaits. +pub struct Operation { + pub state: Arc, + remaining: Arc, + done: Arc, +} + +impl Operation { + pub fn new(state: Arc, chunk_count: usize) -> Self { + Self { + state, + remaining: Arc::new(AtomicUsize::new(chunk_count)), + done: Arc::new(Notify::new()), + } + } + + pub fn subscriber(&self) -> Subscriber { + Subscriber { + state: self.state.clone(), + remaining: self.remaining.clone(), + done: self.done.clone(), + } + } + + /// Block until every chunk this operation subscribed to has been + /// marked done. Fast-path returns immediately if `chunk_count == 0` + /// or all subscriptions completed before the await. + pub async fn await_done(&self) { + if self.remaining.load(Ordering::Acquire) == 0 { + return; + } + // `notify_waiters` fires once when `remaining` reaches zero; check + // again after registering to avoid the race where it fires between + // the load above and the registration below. 
+ let notified = self.done.notified(); + tokio::pin!(notified); + notified.as_mut().enable(); + if self.remaining.load(Ordering::Acquire) == 0 { + return; + } + notified.await; + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/stream.rs b/peeroxide-cli/src/cmd/deaddrop/v2/stream.rs new file mode 100644 index 0000000..305d11a --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/stream.rs @@ -0,0 +1,165 @@ +//! v2 streaming-stdout reorder buffer. +//! +//! Spec: see *Output Strategies* in `DEADDROP_V2.md (and `docs/src/dd/`)` (stdout case). +//! +//! The receiver maintains an `emit_pos` cursor indexing the next +//! data-chunk-in-DFS-order it will emit to stdout. Out-of-order arrivals +//! are held in a small reorder buffer keyed by file position. When the +//! awaited position arrives, it is emitted along with any contiguous +//! successors held in the buffer. +//! +//! Buffer size is bounded — at the default `PARALLEL_FETCH_CAP = 64`, +//! at most ~64 KB of in-flight data sits in the reorder buffer. + +#![allow(dead_code)] + +use std::collections::BTreeMap; + +/// In-memory reorder buffer that emits data chunks in DFS file order. +pub struct StreamSink { + /// Next file-order position to emit. + emit_pos: u64, + /// Total expected data chunks (so we know when we're done). + expected: u64, + /// Out-of-order chunks waiting for their turn, keyed by file position. + reorder: BTreeMap>, + /// Bytes emitted so far. Useful for caller bookkeeping. + emitted_bytes: u64, +} + +impl StreamSink { + pub fn new(expected_data_chunks: u64) -> Self { + Self { + emit_pos: 0, + expected: expected_data_chunks, + reorder: BTreeMap::new(), + emitted_bytes: 0, + } + } + + /// Accept a data chunk arrival. Returns the sequence of payloads (in + /// file order) that should be written to stdout right now. + /// + /// Calls beyond `expected` are silently ignored; calls with a position + /// already past `emit_pos` are buffered. 
+ pub fn accept(&mut self, position: u64, payload: Vec) -> Vec> { + if position >= self.expected || position < self.emit_pos { + // Already emitted or out of range — drop. + return Vec::new(); + } + + let mut out = Vec::new(); + if position == self.emit_pos { + self.emitted_bytes += payload.len() as u64; + out.push(payload); + self.emit_pos += 1; + // Drain any contiguous successors held in the buffer. + while let Some(next) = self.reorder.remove(&self.emit_pos) { + self.emitted_bytes += next.len() as u64; + out.push(next); + self.emit_pos += 1; + } + } else { + self.reorder.insert(position, payload); + } + out + } + + /// Have we emitted every expected chunk? + pub fn is_complete(&self) -> bool { + self.emit_pos >= self.expected + } + + /// Position of the next chunk we are waiting on (`expected` if done). + pub fn next_emit_pos(&self) -> u64 { + self.emit_pos + } + + /// Number of chunks held in the reorder buffer. + pub fn buffered_count(&self) -> usize { + self.reorder.len() + } + + /// Total bytes emitted so far. + pub fn emitted_bytes(&self) -> u64 { + self.emitted_bytes + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn p(n: u8, len: usize) -> Vec { + vec![n; len] + } + + #[test] + fn empty_sink_is_complete() { + let s = StreamSink::new(0); + assert!(s.is_complete()); + } + + #[test] + fn in_order_emits_immediately() { + let mut s = StreamSink::new(3); + let out = s.accept(0, p(1, 10)); + assert_eq!(out, vec![p(1, 10)]); + let out = s.accept(1, p(2, 20)); + assert_eq!(out, vec![p(2, 20)]); + let out = s.accept(2, p(3, 30)); + assert_eq!(out, vec![p(3, 30)]); + assert!(s.is_complete()); + assert_eq!(s.emitted_bytes(), 60); + } + + #[test] + fn out_of_order_waits_then_drains() { + let mut s = StreamSink::new(3); + // Position 2 arrives first — buffer it. + let out = s.accept(2, p(3, 30)); + assert!(out.is_empty()); + assert_eq!(s.buffered_count(), 1); + + // Position 1 arrives — buffer it (still waiting on 0). 
+ let out = s.accept(1, p(2, 20)); + assert!(out.is_empty()); + assert_eq!(s.buffered_count(), 2); + + // Position 0 arrives — drains everything in order. + let out = s.accept(0, p(1, 10)); + assert_eq!(out, vec![p(1, 10), p(2, 20), p(3, 30)]); + assert!(s.is_complete()); + assert_eq!(s.emitted_bytes(), 60); + } + + #[test] + fn reverse_order_full_drain() { + let mut s = StreamSink::new(5); + for pos in (1..5).rev() { + assert!(s.accept(pos, p(pos as u8, 10)).is_empty()); + } + assert_eq!(s.buffered_count(), 4); + let out = s.accept(0, p(0, 10)); + assert_eq!(out.len(), 5); + assert!(s.is_complete()); + } + + #[test] + fn duplicate_position_dropped() { + let mut s = StreamSink::new(2); + let out = s.accept(0, p(1, 10)); + assert_eq!(out, vec![p(1, 10)]); + // Replay position 0 (e.g. a duplicate fetch result) — ignored. + let out2 = s.accept(0, p(99, 10)); + assert!(out2.is_empty()); + } + + #[test] + fn position_past_expected_dropped() { + let mut s = StreamSink::new(2); + let out = s.accept(5, p(1, 10)); + assert!(out.is_empty()); + assert_eq!(s.buffered_count(), 0); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/tree.rs b/peeroxide-cli/src/cmd/deaddrop/v2/tree.rs new file mode 100644 index 0000000..4d8fe56 --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/tree.rs @@ -0,0 +1,234 @@ +//! v2 tree-shape rules. +//! +//! The shape of the index tree is fully determined by `file_size`. Both +//! senders and receivers compute it deterministically via `canonical_depth`. +//! The wire format encodes neither N (data chunk count) nor tree depth +//! directly; both derive from `file_size`. +//! +//! Slot kind (data hash vs child index pubkey) is determined by a chunk's +//! `remaining_depth` in the tree, which the receiver tracks during BFS: +//! - remaining_depth == 0 → leaf (slots are data hashes) +//! 
- remaining_depth > 0 → non-leaf (slots are child index pubkeys) + +#![allow(dead_code)] + +use super::wire::{DATA_PAYLOAD_MAX, NON_ROOT_INDEX_SLOT_CAP, ROOT_INDEX_SLOT_CAP}; + +/// Compute the total number of data chunks for a given file size. +/// +/// `0 → 0`. Otherwise `ceil(file_size / DATA_PAYLOAD_MAX)`. +pub fn data_chunk_count(file_size: u64) -> u64 { + if file_size == 0 { + 0 + } else { + file_size.div_ceil(DATA_PAYLOAD_MAX as u64) + } +} + +/// The canonical tree depth for `n` data chunks. +/// +/// Depth is the number of index layers below the root before reaching the +/// leaf-index level (or before reaching data, if N ≤ 30 and root holds +/// data hashes directly). +/// +/// Examples: +/// n = 0 → depth 0 (root holds zero slots) +/// n ≤ 30 → depth 0 (root holds data hashes directly) +/// n ≤ 930 → depth 1 (root → leaf-index → data) +/// n ≤ 28,830 → depth 2 +/// ... +pub fn canonical_depth(n: u64) -> u32 { + if n == 0 || n <= ROOT_INDEX_SLOT_CAP as u64 { + return 0; + } + // n > 30: at least one leaf-index layer. + let mut layer_count = div_ceil_u64(n, NON_ROOT_INDEX_SLOT_CAP as u64); + let mut depth = 1u32; + while layer_count > ROOT_INDEX_SLOT_CAP as u64 { + layer_count = div_ceil_u64(layer_count, NON_ROOT_INDEX_SLOT_CAP as u64); + depth += 1; + } + depth +} + +fn div_ceil_u64(a: u64, b: u64) -> u64 { + a.div_ceil(b) +} + +/// Maximum number of data chunks that fit in a tree of the given depth. +/// +/// `depth = 0 → 30` (root direct). +/// `depth = d → 30 × 31^d`. +pub fn max_data_chunks_for_depth(depth: u32) -> u64 { + let mut cap = ROOT_INDEX_SLOT_CAP as u64; + for _ in 0..depth { + cap = cap.saturating_mul(NON_ROOT_INDEX_SLOT_CAP as u64); + } + cap +} + +/// Layout description of a fully-built canonical tree. +/// +/// `layer_chunk_counts[0]` is the leaf-index layer (or data direct if depth 0). +/// Higher indices are further from the data, ending with the count of +/// children directly under the root (or N data chunks if depth 0). 
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct TreeLayout {
+    /// Total data chunks (`N`).
+    pub data_chunk_count: u64,
+    /// Tree depth (number of index layers below root).
+    pub depth: u32,
+    /// Per-layer chunk counts, indexed from leaf-index (`0`) upward.
+    /// For depth 0, this is empty (root contains data hashes directly).
+    /// For depth d ≥ 1, length is d. The last element is the root-children count.
+    pub layer_counts: Vec<u64>,
+}
+
+/// Compute the canonical layout for a file of size `file_size`.
+///
+/// Derives `N = data_chunk_count(file_size)` and `depth = canonical_depth(N)`,
+/// then fills `layer_counts` bottom-up: the leaf-index layer holds
+/// `ceil(N / 31)` chunks and each layer above groups the one below into
+/// 31-slot parents, matching the shape rules in this module's header.
+pub fn compute_layout(file_size: u64) -> TreeLayout {
+    let n = data_chunk_count(file_size);
+    let depth = canonical_depth(n);
+
+    let mut layer_counts = Vec::with_capacity(depth as usize);
+    if depth >= 1 {
+        // Leaf-index layer: ceil(N / 31).
+        let mut count = div_ceil_u64(n, NON_ROOT_INDEX_SLOT_CAP as u64);
+        layer_counts.push(count);
+        // Each higher layer: ceil(previous_layer / 31).
+        for _ in 1..depth {
+            count = div_ceil_u64(count, NON_ROOT_INDEX_SLOT_CAP as u64);
+            layer_counts.push(count);
+        }
+    }
+
+    TreeLayout {
+        data_chunk_count: n,
+        depth,
+        layer_counts,
+    }
+}
+
+/// Total non-root index chunk count for the canonical tree of a given file size.
+pub fn total_non_root_index_chunks(file_size: u64) -> u64 { + compute_layout(file_size).layer_counts.iter().sum() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn data_chunk_count_basic() { + assert_eq!(data_chunk_count(0), 0); + assert_eq!(data_chunk_count(1), 1); + assert_eq!(data_chunk_count(998), 1); + assert_eq!(data_chunk_count(999), 2); + assert_eq!(data_chunk_count(1996), 2); + assert_eq!(data_chunk_count(1997), 3); + } + + #[test] + fn canonical_depth_boundaries() { + assert_eq!(canonical_depth(0), 0); + assert_eq!(canonical_depth(1), 0); + assert_eq!(canonical_depth(29), 0); + assert_eq!(canonical_depth(30), 0); + assert_eq!(canonical_depth(31), 1); + assert_eq!(canonical_depth(930), 1); // 30 * 31 + assert_eq!(canonical_depth(931), 2); + assert_eq!(canonical_depth(28_830), 2); // 30 * 31^2 + assert_eq!(canonical_depth(28_831), 3); + assert_eq!(canonical_depth(893_730), 3); // 30 * 31^3 + assert_eq!(canonical_depth(893_731), 4); + assert_eq!(canonical_depth(27_705_630), 4); // 30 * 31^4 + assert_eq!(canonical_depth(27_705_631), 5); + } + + #[test] + fn max_data_chunks_matches_spec() { + assert_eq!(max_data_chunks_for_depth(0), 30); + assert_eq!(max_data_chunks_for_depth(1), 930); + assert_eq!(max_data_chunks_for_depth(2), 28_830); + assert_eq!(max_data_chunks_for_depth(3), 893_730); + assert_eq!(max_data_chunks_for_depth(4), 27_705_630); + assert_eq!(max_data_chunks_for_depth(5), 858_874_530); + assert_eq!(max_data_chunks_for_depth(6), 26_625_110_430); + } + + #[test] + fn layout_empty_file() { + let layout = compute_layout(0); + assert_eq!(layout.data_chunk_count, 0); + assert_eq!(layout.depth, 0); + assert!(layout.layer_counts.is_empty()); + } + + #[test] + fn layout_small_file_n_eq_1() { + let layout = compute_layout(100); + assert_eq!(layout.data_chunk_count, 1); + assert_eq!(layout.depth, 0); + assert!(layout.layer_counts.is_empty()); + } + + #[test] + fn layout_n_eq_30() { + let layout = compute_layout(30 * DATA_PAYLOAD_MAX as u64); 
+ assert_eq!(layout.data_chunk_count, 30); + assert_eq!(layout.depth, 0); + assert!(layout.layer_counts.is_empty()); + } + + #[test] + fn layout_n_eq_31() { + let layout = compute_layout(31 * DATA_PAYLOAD_MAX as u64); + assert_eq!(layout.data_chunk_count, 31); + assert_eq!(layout.depth, 1); + // 31 data → 1 leaf-index node → 1 root child. + assert_eq!(layout.layer_counts, vec![1]); + } + + #[test] + fn layout_n_eq_70() { + let layout = compute_layout(70 * DATA_PAYLOAD_MAX as u64); + assert_eq!(layout.data_chunk_count, 70); + assert_eq!(layout.depth, 1); + // 70 data → 3 leaf-index nodes (31 + 31 + 8) → 3 root children. + assert_eq!(layout.layer_counts, vec![3]); + } + + #[test] + fn layout_n_eq_930() { + // 930 = 30 * 31, exactly fills depth 1. + let layout = compute_layout(930 * DATA_PAYLOAD_MAX as u64); + assert_eq!(layout.data_chunk_count, 930); + assert_eq!(layout.depth, 1); + // 930 data → 30 leaf-index → 30 root children. + assert_eq!(layout.layer_counts, vec![30]); + } + + #[test] + fn layout_n_eq_931_triggers_depth_2() { + let layout = compute_layout(931 * DATA_PAYLOAD_MAX as u64); + assert_eq!(layout.data_chunk_count, 931); + assert_eq!(layout.depth, 2); + // 931 data → ceil(931/31) = 31 leaves → ceil(31/31) = 1 L1 node → 1 root child. + assert_eq!(layout.layer_counts, vec![31, 1]); + } + + #[test] + fn layout_1gb() { + let layout = compute_layout(1_073_741_824); + assert_eq!(layout.data_chunk_count, 1_075_894); + assert_eq!(layout.depth, 4); + // 1,075,894 data → 34,707 leaves → 1,120 L1 → 37 L2 → 2 L3 → root with K=2. 
+ assert_eq!(layout.layer_counts, vec![34_707, 1_120, 37, 2]); + } + + #[test] + fn total_non_root_index_chunks_1gb() { + let total = total_non_root_index_chunks(1_073_741_824); + assert_eq!(total, 34_707 + 1_120 + 37 + 2); + assert_eq!(total, 35_866); + } +} diff --git a/peeroxide-cli/src/cmd/deaddrop/v2/wire.rs b/peeroxide-cli/src/cmd/deaddrop/v2/wire.rs new file mode 100644 index 0000000..f67762c --- /dev/null +++ b/peeroxide-cli/src/cmd/deaddrop/v2/wire.rs @@ -0,0 +1,392 @@ +//! v2 wire-format encoders and decoders. +//! +//! Spec: see *Frame Formats* section of `DEADDROP_V2.md (and `docs/src/dd/`)`. +//! +//! Layouts: +//! data chunk: `[ver: 0x02][salt: u8][payload: ≤998 B]` +//! non-root index: `[ver: 0x02][N × 32 B slots]` with N ≤ 31 +//! root index: `[ver: 0x02][file_size: u64 LE][crc32c: u32 LE][N × 32 B slots]` with N ≤ 30 +//! need-list record: `[ver: 0x02][count: u16 LE][count × {start: u32 LE, end: u32 LE}]` +//! +//! Slot kind (data hash vs child index pubkey) is derived from the chunk's +//! tree position, not encoded in the chunk. See `tree.rs`. + +#![allow(dead_code)] + +/// All v2 frames begin with this version byte. +pub const VERSION: u8 = 0x02; + +/// DHT max-record size (set by hyperdht). Every encoded chunk must fit. +pub const MAX_CHUNK_SIZE: usize = 1000; + +/// Data chunk header: version + salt. +pub const DATA_HEADER_SIZE: usize = 2; + +/// Maximum payload bytes per data chunk (998 B). +pub const DATA_PAYLOAD_MAX: usize = MAX_CHUNK_SIZE - DATA_HEADER_SIZE; + +/// Non-root index chunk header: version only. +pub const NON_ROOT_INDEX_HEADER_SIZE: usize = 1; + +/// Maximum slots per non-root index chunk (31). +pub const NON_ROOT_INDEX_SLOT_CAP: usize = (MAX_CHUNK_SIZE - NON_ROOT_INDEX_HEADER_SIZE) / 32; + +/// Root index chunk header: version + file_size (u64) + crc32c (u32). +pub const ROOT_INDEX_HEADER_SIZE: usize = 1 + 8 + 4; + +/// Maximum slots per root index chunk (30). 
+pub const ROOT_INDEX_SLOT_CAP: usize = (MAX_CHUNK_SIZE - ROOT_INDEX_HEADER_SIZE) / 32;
+
+/// Need-list header: version + u16 count.
+pub const NEED_LIST_HEADER_SIZE: usize = 1 + 2;
+
+/// Bytes per `NeedEntry`: u32 start + u32 end.
+pub const NEED_ENTRY_SIZE: usize = 8;
+
+/// Maximum entries per need-list record (124).
+pub const NEED_LIST_ENTRY_CAP: usize =
+    (MAX_CHUNK_SIZE - NEED_LIST_HEADER_SIZE) / NEED_ENTRY_SIZE;
+
+/// SHA/BLAKE-256 size in bytes (for slot entries).
+pub const HASH_LEN: usize = 32;
+
+/// Errors that can arise when decoding v2 chunks.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum WireError {
+    /// Zero-length input.
+    Empty,
+    /// First byte is not the expected 0x02 version marker.
+    BadVersion(u8),
+    /// Input shorter than the fixed header for its chunk kind.
+    Truncated { needed: usize, got: usize },
+    /// Slot region length is not a multiple of 32.
+    BadSlotByteLength(usize),
+    /// Input longer than `MAX_CHUNK_SIZE`.
+    OversizedChunk(usize),
+    /// Need-list declared count disagrees with the byte length.
+    BadCount { declared: u16, computed: usize },
+    /// Need-list range entry with start >= end.
+    InvalidEntry { start: u32, end: u32 },
+}
+
+impl std::fmt::Display for WireError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            WireError::Empty => write!(f, "empty chunk"),
+            WireError::BadVersion(b) => write!(f, "bad version byte 0x{b:02x}, expected 0x02"),
+            WireError::Truncated { needed, got } => {
+                write!(f, "truncated chunk: need {needed} bytes, got {got}")
+            }
+            WireError::BadSlotByteLength(n) => {
+                write!(f, "slot byte length {n} not a multiple of 32")
+            }
+            WireError::OversizedChunk(n) => {
+                write!(f, "chunk size {n} exceeds MAX_CHUNK_SIZE ({MAX_CHUNK_SIZE})")
+            }
+            WireError::BadCount { declared, computed } => write!(
+                f,
+                "need-list count mismatch: declared {declared}, computed from length {computed}"
+            ),
+            WireError::InvalidEntry { start, end } => {
+                write!(f, "invalid need-list entry: start={start} end={end} (need start < end)")
+            }
+        }
+    }
+}
+
+impl std::error::Error for WireError {}
+
+// ── Data chunks ─────────────────────────────────────────────────────────────
+
+/// Encode a data chunk: `[VERSION][salt][payload]`.
+pub fn encode_data_chunk(salt: u8, payload: &[u8]) -> Vec<u8> {
+    debug_assert!(
+        payload.len() <= DATA_PAYLOAD_MAX,
+        "data payload {} exceeds DATA_PAYLOAD_MAX ({})",
+        payload.len(),
+        DATA_PAYLOAD_MAX
+    );
+    let mut buf = Vec::with_capacity(DATA_HEADER_SIZE + payload.len());
+    buf.push(VERSION);
+    buf.push(salt);
+    buf.extend_from_slice(payload);
+    buf
+}
+
+/// Verify a fetched data chunk and extract its payload bytes.
+///
+/// The DHT validates `discovery_key(chunk_bytes) == expected_address` before
+/// returning, so the bytes here are already content-verified. We just check
+/// that they have the right shape.
+pub fn decode_data_chunk(bytes: &[u8]) -> Result<&[u8], WireError> {
+    if bytes.is_empty() {
+        return Err(WireError::Empty);
+    }
+    if bytes[0] != VERSION {
+        return Err(WireError::BadVersion(bytes[0]));
+    }
+    if bytes.len() < DATA_HEADER_SIZE {
+        return Err(WireError::Truncated {
+            needed: DATA_HEADER_SIZE,
+            got: bytes.len(),
+        });
+    }
+    Ok(&bytes[DATA_HEADER_SIZE..])
+}
+
+// ── Index chunks ────────────────────────────────────────────────────────────
+
+/// Encode the root index chunk: `[VERSION][file_size_u64_le][crc32c_u32_le][slots]`.
+///
+/// Slots are 32 bytes each. Their kind (data hash vs child index pubkey) is
+/// determined by the canonical tree shape derived from `file_size`; the wire
+/// format does not encode it.
+pub fn encode_root_index(file_size: u64, crc32c: u32, slots: &[[u8; HASH_LEN]]) -> Vec<u8> {
+    debug_assert!(
+        slots.len() <= ROOT_INDEX_SLOT_CAP,
+        "root slot count {} exceeds ROOT_INDEX_SLOT_CAP ({})",
+        slots.len(),
+        ROOT_INDEX_SLOT_CAP
+    );
+    let mut buf = Vec::with_capacity(ROOT_INDEX_HEADER_SIZE + slots.len() * HASH_LEN);
+    buf.push(VERSION);
+    buf.extend_from_slice(&file_size.to_le_bytes());
+    buf.extend_from_slice(&crc32c.to_le_bytes());
+    for slot in slots {
+        buf.extend_from_slice(slot);
+    }
+    buf
+}
+
+/// Encode a non-root index chunk: `[VERSION][slots]`.
+pub fn encode_non_root_index(slots: &[[u8; HASH_LEN]]) -> Vec<u8> {
+    debug_assert!(
+        slots.len() <= NON_ROOT_INDEX_SLOT_CAP,
+        "non-root slot count {} exceeds NON_ROOT_INDEX_SLOT_CAP ({})",
+        slots.len(),
+        NON_ROOT_INDEX_SLOT_CAP
+    );
+    let mut buf = Vec::with_capacity(NON_ROOT_INDEX_HEADER_SIZE + slots.len() * HASH_LEN);
+    buf.push(VERSION);
+    for slot in slots {
+        buf.extend_from_slice(slot);
+    }
+    buf
+}
+
+/// Parsed root index chunk.
+#[derive(Debug, Clone)]
+pub struct RootIndex {
+    /// File size in bytes, as declared in the root header.
+    pub file_size: u64,
+    /// CRC-32C checksum from the root header.
+    pub crc32c: u32,
+    /// 32-byte slots; kind (data hash vs child pubkey) derives from tree shape.
+    pub slots: Vec<[u8; HASH_LEN]>,
+}
+
+/// Decode a root index chunk into its fields plus slot vector.
+pub fn decode_root_index(bytes: &[u8]) -> Result<RootIndex, WireError> {
+    if bytes.is_empty() {
+        return Err(WireError::Empty);
+    }
+    if bytes[0] != VERSION {
+        return Err(WireError::BadVersion(bytes[0]));
+    }
+    if bytes.len() < ROOT_INDEX_HEADER_SIZE {
+        return Err(WireError::Truncated {
+            needed: ROOT_INDEX_HEADER_SIZE,
+            got: bytes.len(),
+        });
+    }
+    if bytes.len() > MAX_CHUNK_SIZE {
+        return Err(WireError::OversizedChunk(bytes.len()));
+    }
+
+    let file_size = u64::from_le_bytes(bytes[1..9].try_into().unwrap());
+    let crc32c = u32::from_le_bytes(bytes[9..13].try_into().unwrap());
+    let slot_bytes = &bytes[ROOT_INDEX_HEADER_SIZE..];
+    if slot_bytes.len() % HASH_LEN != 0 {
+        return Err(WireError::BadSlotByteLength(slot_bytes.len()));
+    }
+    let slots: Vec<[u8; HASH_LEN]> = slot_bytes
+        .chunks_exact(HASH_LEN)
+        .map(|c| {
+            let mut h = [0u8; HASH_LEN];
+            h.copy_from_slice(c);
+            h
+        })
+        .collect();
+    Ok(RootIndex {
+        file_size,
+        crc32c,
+        slots,
+    })
+}
+
+/// Decode a non-root index chunk into its slot vector.
+pub fn decode_non_root_index(bytes: &[u8]) -> Result, WireError> { + if bytes.is_empty() { + return Err(WireError::Empty); + } + if bytes[0] != VERSION { + return Err(WireError::BadVersion(bytes[0])); + } + if bytes.len() > MAX_CHUNK_SIZE { + return Err(WireError::OversizedChunk(bytes.len())); + } + let slot_bytes = &bytes[NON_ROOT_INDEX_HEADER_SIZE..]; + if slot_bytes.len() % HASH_LEN != 0 { + return Err(WireError::BadSlotByteLength(slot_bytes.len())); + } + let slots: Vec<[u8; HASH_LEN]> = slot_bytes + .chunks_exact(HASH_LEN) + .map(|c| { + let mut h = [0u8; HASH_LEN]; + h.copy_from_slice(c); + h + }) + .collect(); + Ok(slots) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn slot_capacities_match_spec() { + assert_eq!(ROOT_INDEX_SLOT_CAP, 30); + assert_eq!(NON_ROOT_INDEX_SLOT_CAP, 31); + assert_eq!(DATA_PAYLOAD_MAX, 998); + assert_eq!(NEED_LIST_ENTRY_CAP, 124); + } + + #[test] + fn data_chunk_roundtrip() { + let payload = b"hello world"; + let encoded = encode_data_chunk(0xAB, payload); + assert_eq!(encoded[0], VERSION); + assert_eq!(encoded[1], 0xAB); + assert_eq!(&encoded[2..], payload); + assert_eq!(decode_data_chunk(&encoded).unwrap(), payload); + } + + #[test] + fn data_chunk_max_payload() { + let payload = vec![0xFFu8; DATA_PAYLOAD_MAX]; + let encoded = encode_data_chunk(0, &payload); + assert_eq!(encoded.len(), MAX_CHUNK_SIZE); + assert_eq!(decode_data_chunk(&encoded).unwrap(), &payload[..]); + } + + #[test] + fn data_chunk_empty_payload() { + // Theoretically allowed by the format but never produced by the canonical + // sender (data chunks always carry at least 1 byte). 
+ let encoded = encode_data_chunk(0, b""); + assert_eq!(encoded.len(), DATA_HEADER_SIZE); + assert_eq!(decode_data_chunk(&encoded).unwrap(), b""); + } + + #[test] + fn data_chunk_rejects_bad_version() { + assert_eq!( + decode_data_chunk(&[0x01, 0xAA, 0xBB]), + Err(WireError::BadVersion(0x01)) + ); + } + + #[test] + fn data_chunk_rejects_empty() { + assert_eq!(decode_data_chunk(&[]), Err(WireError::Empty)); + } + + #[test] + fn data_chunk_rejects_truncated_header() { + assert_eq!( + decode_data_chunk(&[VERSION]), + Err(WireError::Truncated { + needed: DATA_HEADER_SIZE, + got: 1 + }) + ); + } + + #[test] + fn root_index_roundtrip_with_slots() { + let slots: Vec<[u8; 32]> = (0..30).map(|i| [i as u8; 32]).collect(); + let encoded = encode_root_index(123_456_789_u64, 0xDEAD_BEEF_u32, &slots); + assert_eq!(encoded.len(), ROOT_INDEX_HEADER_SIZE + 30 * 32); + let decoded = decode_root_index(&encoded).unwrap(); + assert_eq!(decoded.file_size, 123_456_789); + assert_eq!(decoded.crc32c, 0xDEAD_BEEF); + assert_eq!(decoded.slots, slots); + } + + #[test] + fn root_index_empty_slots() { + let encoded = encode_root_index(0, 0, &[]); + assert_eq!(encoded.len(), ROOT_INDEX_HEADER_SIZE); + let decoded = decode_root_index(&encoded).unwrap(); + assert_eq!(decoded.file_size, 0); + assert_eq!(decoded.crc32c, 0); + assert!(decoded.slots.is_empty()); + } + + #[test] + fn root_index_rejects_bad_slot_alignment() { + let mut bytes = encode_root_index(100, 0, &[[0u8; 32]; 1]); + bytes.pop(); // drop one byte → slot bytes are 31, not multiple of 32 + assert!(matches!( + decode_root_index(&bytes), + Err(WireError::BadSlotByteLength(_)) + )); + } + + #[test] + fn root_index_rejects_truncated_header() { + let bytes = vec![VERSION, 0, 0, 0]; // less than 13 bytes + assert!(matches!( + decode_root_index(&bytes), + Err(WireError::Truncated { .. 
}) + )); + } + + #[test] + fn root_index_rejects_oversized() { + let bytes = vec![VERSION; MAX_CHUNK_SIZE + 1]; + assert!(matches!( + decode_root_index(&bytes), + Err(WireError::OversizedChunk(_)) + )); + } + + #[test] + fn non_root_index_roundtrip() { + let slots: Vec<[u8; 32]> = (0..31).map(|i| [(i * 3) as u8; 32]).collect(); + let encoded = encode_non_root_index(&slots); + assert_eq!(encoded.len(), NON_ROOT_INDEX_HEADER_SIZE + 31 * 32); + let decoded = decode_non_root_index(&encoded).unwrap(); + assert_eq!(decoded, slots); + } + + #[test] + fn non_root_index_partial_slots() { + let slots: Vec<[u8; 32]> = vec![[1u8; 32], [2u8; 32], [3u8; 32]]; + let encoded = encode_non_root_index(&slots); + assert_eq!(encoded.len(), NON_ROOT_INDEX_HEADER_SIZE + 3 * 32); + let decoded = decode_non_root_index(&encoded).unwrap(); + assert_eq!(decoded, slots); + } + + #[test] + fn non_root_index_rejects_bad_alignment() { + let mut bytes = encode_non_root_index(&[[0u8; 32]]); + bytes.pop(); + assert!(matches!( + decode_non_root_index(&bytes), + Err(WireError::BadSlotByteLength(_)) + )); + } + + #[test] + fn non_root_index_rejects_bad_version() { + let mut bytes = vec![0u8; 33]; + bytes[0] = 0x01; + assert_eq!( + decode_non_root_index(&bytes), + Err(WireError::BadVersion(0x01)) + ); + } +} diff --git a/peeroxide-cli/src/cmd/init.rs b/peeroxide-cli/src/cmd/init.rs new file mode 100644 index 0000000..2ac54bb --- /dev/null +++ b/peeroxide-cli/src/cmd/init.rs @@ -0,0 +1,491 @@ +use std::path::{Path, PathBuf}; + +use clap::Args; + +use crate::manpage; + +/// Context from global CLI flags needed by the init command. 
+pub struct InitContext { + /// Global --config path override + pub config_path: Option, +} + +#[derive(Args)] +pub struct InitArgs { + /// Overwrite existing config file + #[arg(long, conflicts_with = "update")] + force: bool, + + /// Update specific fields in existing config without overwriting other settings + #[arg(long, conflicts_with = "force")] + update: bool, + + /// Set network.public = true in the generated config (adds default public HyperDHT bootstrap nodes at runtime) + #[arg(long)] + public: bool, + + /// Bootstrap node addresses to set in config (repeatable) + #[arg(long, action = clap::ArgAction::Append)] + bootstrap: Vec, + + /// Generate and install man pages instead of config. + /// If PATH is omitted, defaults to /usr/local/share/man/. + #[arg(long, value_name = "PATH", num_args = 0..=1, default_missing_value = "/usr/local/share/man/", conflicts_with_all = ["force", "update", "public", "bootstrap"])] + man_pages: Option, +} + +pub fn run(args: InitArgs, ctx: InitContext) -> i32 { + if let Some(man_path) = args.man_pages { + return run_man_pages(&man_path); + } + run_config(args, ctx) +} + +// ── Mode 2: Man pages ──────────────────────────────────────────────────────── + +fn run_man_pages(base_path: &Path) -> i32 { + let man1_dir = base_path.join("man1"); + if let Err(e) = std::fs::create_dir_all(&man1_dir) { + eprintln!( + "error: cannot create directory {}: {e}\n\n\ + Try: sudo peeroxide init --man-pages {}\n\ + Or specify a writable path: peeroxide init --man-pages ~/.local/share/man/", + man1_dir.display(), + base_path.display() + ); + return 1; + } + + let pages = manpage::generate_all(); + let mut generated_filenames: std::collections::HashSet = + std::collections::HashSet::new(); + + for (name, content) in &pages { + let filename = format!("{name}.1"); + generated_filenames.insert(std::ffi::OsString::from(&filename)); + let path = man1_dir.join(&filename); + if let Err(e) = std::fs::write(&path, content) { + eprintln!( + "error: failed to 
write {}: {e}\n\n\ + Try: sudo peeroxide init --man-pages {}\n\ + Or specify a writable path: peeroxide init --man-pages ~/.local/share/man/", + path.display(), + base_path.display() + ); + return 1; + } + eprintln!("{}", path.display()); + } + + // Clean up stale peeroxide-*.1 pages from previous installations (e.g. renamed commands). + if let Ok(entries) = std::fs::read_dir(&man1_dir) { + for entry in entries.flatten() { + let name = entry.file_name(); + let name_str = name.to_string_lossy(); + if name_str.starts_with("peeroxide") + && name_str.ends_with(".1") + && !generated_filenames.contains(&name) + && std::fs::remove_file(entry.path()).is_ok() + { + eprintln!("removed stale: {}", entry.path().display()); + } + } + } + + eprintln!( + "Generated {} man page(s) in {}", + pages.len(), + man1_dir.display() + ); + 0 +} + +// ── Mode 1: Config initialization ─────────────────────────────────────────── + +fn run_config(args: InitArgs, ctx: InitContext) -> i32 { + let config_path = resolve_config_path(&ctx.config_path); + + if config_path.is_dir() { + eprintln!( + "error: {} is a directory, not a file\n\ + Specify a file path: --config {}/config.toml", + config_path.display(), + config_path.display() + ); + return 1; + } + + if args.update { + return run_update(&config_path, &args); + } + + // Fresh creation mode + if config_path.exists() && !args.force { + println!("config already exists at {}", config_path.display()); + return 0; + } + + // Ensure parent directory exists + if let Some(parent) = config_path.parent() { + if !parent.as_os_str().is_empty() { + if let Err(e) = std::fs::create_dir_all(parent) { + eprintln!("error: cannot create directory: {e}"); + return 1; + } + } + } + + let content = generate_config_content(args.public, &args.bootstrap); + if let Err(e) = std::fs::write(&config_path, &content) { + eprintln!("error: failed to write config: {e}"); + return 1; + } + eprintln!("Config written to {}", config_path.display()); + 0 +} + +fn 
run_update(config_path: &Path, args: &InitArgs) -> i32 {
+    if !config_path.exists() {
+        eprintln!(
+            "error: no config to update at {}\n\
+            Run `peeroxide init` first to create a config file.",
+            config_path.display()
+        );
+        return 1;
+    }
+
+    let has_public = args.public;
+    let has_bootstrap = !args.bootstrap.is_empty();
+
+    if !has_public && !has_bootstrap {
+        eprintln!("error: nothing to update; specify --public or --bootstrap");
+        return 1;
+    }
+
+    let content = match std::fs::read_to_string(config_path) {
+        Ok(c) => c,
+        Err(e) => {
+            eprintln!("error: cannot read config file {}: {e}", config_path.display());
+            return 1;
+        }
+    };
+
+    let mut doc = match content.parse::<toml_edit::DocumentMut>() {
+        Ok(d) => d,
+        Err(e) => {
+            eprintln!("error: invalid TOML in {}: {e}", config_path.display());
+            return 1;
+        }
+    };
+
+    if let Some(item) = doc.get("network") {
+        if !item.is_table() && !item.is_inline_table() && !item.is_none() {
+            eprintln!(
+                "error: 'network' in {} is not a table; cannot update fields",
+                config_path.display()
+            );
+            return 1;
+        }
+    }
+
+    // Ensure [network] exists as a standard table (not inline) before inserting keys
+    if doc.get("network").is_none() {
+        doc["network"] = toml_edit::Item::Table(toml_edit::Table::new());
+    }
+
+    if has_public {
+        let old_decor = doc
+            .get("network")
+            .and_then(|n| n.get("public"))
+            .and_then(|item| item.as_value())
+            .map(|v| (v.decor().prefix().cloned(), v.decor().suffix().cloned()));
+
+        doc["network"]["public"] = toml_edit::value(true);
+
+        if let Some((prefix, suffix)) = old_decor {
+            if let Some(val) = doc["network"]["public"].as_value_mut() {
+                if let Some(p) = prefix {
+                    val.decor_mut().set_prefix(p);
+                }
+                if let Some(s) = suffix {
+                    val.decor_mut().set_suffix(s);
+                }
+            }
+        }
+    }
+
+    if has_bootstrap {
+        let old_decor = doc
+            .get("network")
+            .and_then(|n| n.get("bootstrap"))
+            .and_then(|item| item.as_value())
+            .map(|v| (v.decor().prefix().cloned(), v.decor().suffix().cloned()));
+
+        let arr: toml_edit::Array = 
args.bootstrap.iter().collect();
+        doc["network"]["bootstrap"] = toml_edit::value(arr);
+
+        if let Some((prefix, suffix)) = old_decor {
+            if let Some(val) = doc["network"]["bootstrap"].as_value_mut() {
+                if let Some(p) = prefix {
+                    val.decor_mut().set_prefix(p);
+                }
+                if let Some(s) = suffix {
+                    val.decor_mut().set_suffix(s);
+                }
+            }
+        }
+    }
+
+    if let Err(e) = std::fs::write(config_path, doc.to_string()) {
+        eprintln!("error: failed to write config: {e}");
+        return 1;
+    }
+    eprintln!("Config updated at {}", config_path.display());
+    0
+}
+
+fn resolve_config_path(cli_config: &Option<String>) -> PathBuf {
+    if let Some(path) = cli_config {
+        return PathBuf::from(path);
+    }
+
+    if let Ok(env_path) = std::env::var("PEEROXIDE_CONFIG") {
+        return PathBuf::from(env_path);
+    }
+
+    if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") {
+        return PathBuf::from(xdg).join("peeroxide").join("config.toml");
+    }
+
+    if let Some(home) = dirs::home_dir() {
+        return home.join(".config").join("peeroxide").join("config.toml");
+    }
+
+    PathBuf::from(".config/peeroxide/config.toml")
+}
+
+fn generate_config_content(public: bool, bootstrap: &[String]) -> String {
+    let mut content = String::from(
+        "# Peeroxide configuration file\n\
+        # Place at ~/.config/peeroxide/config.toml or set PEEROXIDE_CONFIG env var\n\
+        \n\
+        [network]\n\
+        # public = true tells runtime subcommands to add the default public HyperDHT bootstrap nodes.\n\
+        # When public is unset, runtime subcommands auto-fill the default public bootstrap nodes anyway\n\
+        # if the resolved bootstrap list would otherwise be empty.\n",
+    );
+
+    if public {
+        content.push_str("public = true\n");
+    } else {
+        content.push_str("# public = false\n");
+    }
+
+    content.push_str("\n# Bootstrap node addresses (host:port). 
CLI --bootstrap overrides this list at runtime.\n# An empty list auto-fills with the default public bootstrap nodes unless --no-public is set.\n");
+
+    if bootstrap.is_empty() {
+        content.push_str("# bootstrap = [\"bootstrap1.example.com:49737\"]\n");
+    } else {
+        let entries: Vec<String> = bootstrap.iter().map(|b| format!("\"{b}\"")).collect();
+        content.push_str(&format!("bootstrap = [{}]\n", entries.join(", ")));
+    }
+
+    content.push_str(
+        "\n[node]\n\
+        # Bind port for the DHT node (default: 49737)\n\
+        # port = 49737\n\
+        \n\
+        # Bind address (default: 0.0.0.0)\n\
+        # host = \"0.0.0.0\"\n\
+        \n\
+        # How often to log stats in seconds (default: 60)\n\
+        # stats_interval = 60\n\
+        \n\
+        # Max announcement records stored (default: 65536)\n\
+        # max_records = 65536\n\
+        \n\
+        # Max entries per LRU cache (default: 65536)\n\
+        # max_lru_size = 65536\n\
+        \n\
+        # Max peer announcements per topic (default: 20)\n\
+        # max_per_key = 20\n\
+        \n\
+        # TTL for announcement records in seconds (default: 1200)\n\
+        # max_record_age = 1200\n\
+        \n\
+        # TTL for LRU cache entries in seconds (default: 1200)\n\
+        # max_lru_age = 1200\n\
+        \n\
+        [announce]\n\
+        # (No configurable options currently)\n\
+        \n\
+        [cp]\n\
+        # (No configurable options currently)\n",
+    );
+
+    content
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::config::ConfigFile;
+
+    #[test]
+    fn generated_config_default_is_valid_toml() {
+        let content = generate_config_content(false, &[]);
+        let parsed: ConfigFile = toml::from_str(&content).unwrap();
+        assert!(parsed.network.public.is_none());
+        assert!(parsed.network.bootstrap.is_none());
+        assert!(parsed.node.port.is_none());
+    }
+
+    #[test]
+    fn generated_config_with_public_sets_field() {
+        let content = generate_config_content(true, &[]);
+        let parsed: ConfigFile = toml::from_str(&content).unwrap();
+        assert_eq!(parsed.network.public, Some(true));
+    }
+
+    #[test]
+    fn generated_config_with_bootstrap_sets_field() {
+        let content = 
generate_config_content(false, &["10.0.0.1:49737".to_string()]); + let parsed: ConfigFile = toml::from_str(&content).unwrap(); + assert_eq!( + parsed.network.bootstrap, + Some(vec!["10.0.0.1:49737".to_string()]) + ); + } + + #[test] + fn resolve_config_path_uses_cli_override() { + let path = resolve_config_path(&Some("/tmp/custom.toml".to_string())); + assert_eq!(path, PathBuf::from("/tmp/custom.toml")); + } + + #[test] + fn update_preserves_inline_table_siblings() { + let src = r#"network = { public = false, bootstrap = ["old:1234"] }"#; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + doc["network"]["public"] = toml_edit::value(true); + let result = doc.to_string(); + + assert!(result.contains("true"), "public should be set to true"); + assert!(result.contains("old:1234"), "bootstrap should be preserved, got: {result}"); + } + + #[test] + fn update_auto_creates_network_table() { + let src = "[node]\nport = 49737\n"; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + if doc.get("network").is_none() { + doc["network"] = toml_edit::Item::Table(toml_edit::Table::new()); + } + doc["network"]["public"] = toml_edit::value(true); + let result = doc.to_string(); + + assert!( + result.contains("[network]"), + "should create standard [network] table, got: {result}" + ); + assert!(result.contains("public = true"), "public should be set, got: {result}"); + assert!(result.contains("port = 49737"), "existing content preserved, got: {result}"); + } + + #[test] + fn update_preserves_leading_comments() { + let src = "[network]\n# Whether public\npublic = false\n# Bootstrap nodes\nbootstrap = [\"old:1\"]\n"; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + doc["network"]["public"] = toml_edit::value(true); + let result = doc.to_string(); + + assert!(result.contains("# Whether public"), "leading comment should be preserved, got: {result}"); + assert!(result.contains("# Bootstrap nodes"), "other comments preserved, got: {result}"); + 
assert!(result.contains("old:1"), "bootstrap preserved, got: {result}"); + } + + #[test] + fn update_preserves_trailing_comment() { + let src = "[network]\npublic = false # keep this\nbootstrap = [\"old:1\"]\n"; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + let old_decor = doc["network"]["public"] + .as_value() + .map(|v| (v.decor().prefix().cloned(), v.decor().suffix().cloned())); + + doc["network"]["public"] = toml_edit::value(true); + + if let Some((prefix, suffix)) = old_decor { + if let Some(val) = doc["network"]["public"].as_value_mut() { + if let Some(p) = prefix { + val.decor_mut().set_prefix(p); + } + if let Some(s) = suffix { + val.decor_mut().set_suffix(s); + } + } + } + + let result = doc.to_string(); + assert!(result.contains("# keep this"), "trailing comment should be preserved, got: {result}"); + } + + #[test] + fn update_creates_standard_table_when_network_missing() { + let src = "[node]\nport = 49737\n"; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + if doc.get("network").is_none() { + doc["network"] = toml_edit::Item::Table(toml_edit::Table::new()); + } + doc["network"]["public"] = toml_edit::value(true); + let result = doc.to_string(); + + assert!( + result.contains("[network]"), + "should create [network] table header, got: {result}" + ); + assert!( + !result.contains("network = {"), + "should NOT create inline table, got: {result}" + ); + assert!(result.contains("port = 49737"), "existing content preserved, got: {result}"); + } + + #[test] + fn update_preserves_value_prefix_spacing() { + let src = "[network]\nbootstrap = [\"a:1\", \"b:2\"] # keep\n"; + let mut doc: toml_edit::DocumentMut = src.parse().unwrap(); + + let old_decor = doc["network"]["bootstrap"] + .as_value() + .map(|v| (v.decor().prefix().cloned(), v.decor().suffix().cloned())); + + let arr: toml_edit::Array = ["x:9", "y:10"].iter().copied().collect(); + doc["network"]["bootstrap"] = toml_edit::value(arr); + + if let Some((prefix, suffix)) = 
old_decor { + if let Some(val) = doc["network"]["bootstrap"].as_value_mut() { + if let Some(p) = prefix { + val.decor_mut().set_prefix(p); + } + if let Some(s) = suffix { + val.decor_mut().set_suffix(s); + } + } + } + + let result = doc.to_string(); + assert!( + result.contains("= ["), + "prefix spacing between = and value should be preserved, got: {result}" + ); + assert!( + result.contains("# keep"), + "trailing comment should be preserved, got: {result}" + ); + } +} diff --git a/peeroxide-cli/src/cmd/mod.rs b/peeroxide-cli/src/cmd/mod.rs index 28585a5..0fc6b17 100644 --- a/peeroxide-cli/src/cmd/mod.rs +++ b/peeroxide-cli/src/cmd/mod.rs @@ -1,13 +1,13 @@ pub mod announce; -pub mod config; +pub mod chat; pub mod cp; pub mod deaddrop; +pub mod init; pub mod lookup; pub mod node; pub mod ping; use peeroxide_dht::hyperdht::HyperDhtConfig; -use peeroxide_dht::hyperdht_messages::{FIREWALL_CONSISTENT, FIREWALL_OPEN}; use peeroxide_dht::rpc::DhtConfig; use crate::config::ResolvedConfig; @@ -25,19 +25,50 @@ pub fn parse_topic(input: &str) -> [u8; 32] { peeroxide::discovery_key(input.as_bytes()) } +/// Resolve the bootstrap list using additive semantics: +/// +/// 1. Start with bootstrap addresses from ResolvedConfig (CLI --bootstrap or config file). +/// 2. If `public` is Some(true), add DEFAULT_BOOTSTRAP. +/// 3. If the list is still empty, add DEFAULT_BOOTSTRAP (auto-public default). +/// 4. If `public` is Some(false) (--no-public or config public=false), remove +/// DEFAULT_BOOTSTRAP entries by value. 
+pub fn resolve_bootstrap(cfg: &ResolvedConfig) -> Vec { + let default_bootstrap: Vec = peeroxide::DEFAULT_BOOTSTRAP + .iter() + .map(|s| (*s).to_string()) + .collect(); + + let mut bootstrap = cfg.bootstrap.clone(); + + if cfg.public == Some(true) { + tracing::debug!("--public: adding default bootstrap nodes"); + for addr in &default_bootstrap { + if !bootstrap.contains(addr) { + bootstrap.push(addr.clone()); + } + } + } + + if bootstrap.is_empty() { + tracing::debug!("no bootstrap configured, using public defaults (auto-public)"); + bootstrap = default_bootstrap.clone(); + } + + if cfg.public == Some(false) { + tracing::debug!("--no-public: removing default bootstrap nodes"); + bootstrap.retain(|addr| !default_bootstrap.contains(addr)); + } + + tracing::info!(nodes = %bootstrap.join(", "), count = bootstrap.len(), "bootstrap resolved"); + + bootstrap +} + pub fn build_dht_config(cfg: &ResolvedConfig) -> HyperDhtConfig { - let bootstrap = if cfg.bootstrap.is_empty() && cfg.public { - peeroxide::DEFAULT_BOOTSTRAP - .iter() - .map(|s| (*s).to_string()) - .collect() - } else { - cfg.bootstrap.clone() - }; + let bootstrap = resolve_bootstrap(cfg); let mut dht_cfg = DhtConfig::default(); dht_cfg.bootstrap = bootstrap; - dht_cfg.firewalled = !cfg.public || cfg.firewalled; let mut hyper_cfg = HyperDhtConfig::default(); hyper_cfg.dht = dht_cfg; hyper_cfg @@ -106,51 +137,146 @@ mod tests { #[test] fn build_dht_config_uses_defaults_when_public_no_bootstrap() { let cfg = ResolvedConfig { - public: true, - firewalled: false, + public: Some(true), bootstrap: vec![], node: Default::default(), }; let dht_cfg = build_dht_config(&cfg); assert!(!dht_cfg.dht.bootstrap.is_empty()); - assert!(!dht_cfg.dht.firewalled); } #[test] fn build_dht_config_uses_provided_bootstrap() { let cfg = ResolvedConfig { - public: true, - firewalled: false, + public: Some(true), bootstrap: vec!["1.2.3.4:49737".to_string()], node: Default::default(), }; let dht_cfg = build_dht_config(&cfg); - 
assert_eq!(dht_cfg.dht.bootstrap, vec!["1.2.3.4:49737"]); + assert!(dht_cfg.dht.bootstrap.contains(&"1.2.3.4:49737".to_string())); } #[test] - fn build_dht_config_firewalled_when_not_public() { + fn no_public_with_no_bootstrap_produces_empty() { let cfg = ResolvedConfig { - public: false, - firewalled: false, + public: Some(false), bootstrap: vec![], node: Default::default(), }; let dht_cfg = build_dht_config(&cfg); - assert!(dht_cfg.dht.firewalled); assert!( dht_cfg.dht.bootstrap.is_empty(), - "isolated mode should have no bootstrap nodes" + "--no-public with no custom bootstrap should produce empty list" ); } + // ── Additive bootstrap resolution scenarios ──────────────────────────── + + #[test] + fn bare_command_no_flags_auto_public() { + let cfg = ResolvedConfig { + public: None, + bootstrap: vec![], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + let default: Vec = peeroxide::DEFAULT_BOOTSTRAP.iter().map(|s| s.to_string()).collect(); + assert_eq!(bootstrap, default, "bare command with no config should auto-public"); + } + + #[test] + fn explicit_public_adds_defaults() { + let cfg = ResolvedConfig { + public: Some(true), + bootstrap: vec![], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + let default: Vec = peeroxide::DEFAULT_BOOTSTRAP.iter().map(|s| s.to_string()).collect(); + assert_eq!(bootstrap, default); + } + + #[test] + fn no_public_removes_defaults() { + let cfg = ResolvedConfig { + public: Some(false), + bootstrap: vec![], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert!(bootstrap.is_empty(), "--no-public with no custom bootstrap → empty"); + } + + #[test] + fn custom_bootstrap_only() { + let cfg = ResolvedConfig { + public: None, + bootstrap: vec!["x:1234".to_string()], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert_eq!(bootstrap, vec!["x:1234"]); + } + + #[test] + fn public_with_custom_bootstrap() { + let cfg = 
ResolvedConfig { + public: Some(true), + bootstrap: vec!["x:1234".to_string()], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert!(bootstrap.contains(&"x:1234".to_string())); + let default: Vec = peeroxide::DEFAULT_BOOTSTRAP.iter().map(|s| s.to_string()).collect(); + for addr in &default { + assert!(bootstrap.contains(addr), "public should add default bootstrap"); + } + } + + #[test] + fn no_public_with_custom_bootstrap() { + let cfg = ResolvedConfig { + public: Some(false), + bootstrap: vec!["x:1234".to_string()], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert_eq!(bootstrap, vec!["x:1234"], "--no-public keeps custom, removes defaults"); + } + + #[test] + fn config_public_true_with_custom_bootstrap() { + let cfg = ResolvedConfig { + public: Some(true), + bootstrap: vec!["y:5678".to_string()], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert!(bootstrap.contains(&"y:5678".to_string())); + let default: Vec = peeroxide::DEFAULT_BOOTSTRAP.iter().map(|s| s.to_string()).collect(); + for addr in &default { + assert!(bootstrap.contains(addr)); + } + } + + #[test] + fn config_public_false_with_custom_bootstrap() { + let cfg = ResolvedConfig { + public: Some(false), + bootstrap: vec!["y:5678".to_string()], + node: Default::default(), + }; + let bootstrap = resolve_bootstrap(&cfg); + assert_eq!(bootstrap, vec!["y:5678"]); + } + // ── 3×6 Scenario Matrix: Bootstrap Type × Network Topology ──────────── // // This test enumerates every combination of: // Bootstrap types (B1-B3): - // B1: Public default (empty bootstrap + public=true → DEFAULT_BOOTSTRAP) - // B2: Explicit/custom (user-provided bootstrap addresses) - // B3: Isolated (empty bootstrap + public=false → empty, firewalled) + // B1: Public default (empty bootstrap + public=Some(true) → DEFAULT_BOOTSTRAP) + // B2: Explicit/custom (user-provided bootstrap addresses + public=Some(true)) + // B3: Isolated (empty 
bootstrap + public=Some(false) → empty) // // Network topologies (T1-T6): // T1: Both open @@ -161,42 +287,34 @@ mod tests { // T6: One behind CGNAT (FIREWALL_RANDOM — distinct firewall type) // // For each cell we assert: - // 1. Bootstrap config output (bootstrap list, firewalled flag) + // 1. Bootstrap config output (bootstrap list presence) // 2. Connection-path decision (should_direct_connect result) // 3. Combined expected behavior (discovery feasible + connection path) - /// Bootstrap mode B1: public=true, no explicit bootstrap → uses DEFAULT_BOOTSTRAP fn b1_config() -> ResolvedConfig { ResolvedConfig { - public: true, - firewalled: false, + public: Some(true), bootstrap: vec![], node: Default::default(), } } - /// Bootstrap mode B2: public=true, explicit bootstrap provided fn b2_config() -> ResolvedConfig { ResolvedConfig { - public: true, - firewalled: false, + public: Some(true), bootstrap: vec!["10.0.0.1:49737".to_string()], node: Default::default(), } } - /// Bootstrap mode B3: isolated (public=false, no bootstrap) fn b3_config() -> ResolvedConfig { ResolvedConfig { - public: false, - firewalled: false, + public: Some(false), bootstrap: vec![], node: Default::default(), } } - /// Topology parameters for should_direct_connect. - /// (relayed, remote_firewall, remote_holepunchable, same_host) struct TopologyParams { relayed: bool, firewall: u64, @@ -204,23 +322,15 @@ mod tests { same_host: bool, } - /// Expected outcomes for a matrix cell. struct MatrixExpectation { - /// Bootstrap list non-empty (discovery is possible) has_bootstrap: bool, - /// Node is firewalled (affects announce behavior) - firewalled: bool, - /// should_direct_connect result direct_connect: bool, - /// Human-readable expected behavior behavior: &'static str, } - /// Full 3×6 scenario matrix test. 
#[test] fn scenario_matrix_3x6_cross_product() { - // Define topology parameters for T1-T6 - let topologies: [(& str, TopologyParams); 6] = [ + let topologies: [(&str, TopologyParams); 6] = [ ( "T1: both open", TopologyParams { @@ -277,53 +387,39 @@ mod tests { ), ]; - // Define expected outcomes for each (bootstrap_type, topology) pair. - // Format: (bootstrap_name, config_fn, expected_per_topology) type MatrixRow = (&'static str, fn() -> ResolvedConfig, [MatrixExpectation; 6]); let matrix: [MatrixRow; 3] = [ ( "B1: public default", b1_config as fn() -> ResolvedConfig, [ - // T1: both open → direct connect, discovery via public DHT MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: true, behavior: "direct connect via public DHT", }, - // T2: sender fw, receiver open → direct (receiver OPEN) MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: true, behavior: "direct connect (receiver is open)", }, - // T3: sender open, receiver fw → holepunch MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (receiver firewalled, holepunchable)", }, - // T4: both fw, same host → direct (same_host bypass) MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: true, behavior: "direct connect (same host bypass)", }, - // T5: both fw, different networks → holepunch (same decision as T3) MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (both firewalled, receiver holepunchable)", }, - // T6: CGNAT/symmetric NAT → holepunch (low success rate) MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (CGNAT/symmetric NAT, FIREWALL_RANDOM)", }, @@ -335,37 +431,31 @@ mod tests { [ MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: true, behavior: "direct connect via custom bootstrap", }, MatrixExpectation { has_bootstrap: true, - 
firewalled: false, direct_connect: true, behavior: "direct connect (receiver is open)", }, MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (receiver firewalled, holepunchable)", }, MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: true, behavior: "direct connect (same host bypass)", }, MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (both firewalled, receiver holepunchable)", }, MatrixExpectation { has_bootstrap: true, - firewalled: false, direct_connect: false, behavior: "holepunch (CGNAT/symmetric NAT, FIREWALL_RANDOM)", }, @@ -375,42 +465,33 @@ mod tests { "B3: isolated", b3_config as fn() -> ResolvedConfig, [ - // Isolated mode: no bootstrap → no DHT discovery possible. - // Connection path decision is still valid but moot since - // peers cannot discover each other without bootstrap. MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: true, behavior: "no discovery (isolated); direct if manually connected", }, MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: true, behavior: "no discovery (isolated); direct if manually connected", }, MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: false, behavior: "no discovery (isolated); would holepunch if connected", }, MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: true, behavior: "no discovery (isolated); same host bypass", }, MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: false, behavior: "no discovery (isolated); would holepunch if connected", }, MatrixExpectation { has_bootstrap: false, - firewalled: true, direct_connect: false, behavior: "no discovery (isolated); would holepunch if connected", }, @@ -418,7 +499,6 @@ mod tests { ), ]; - // Run all 18 cases for (b_name, config_fn, expectations) in &matrix { let cfg = config_fn(); let 
dht_cfg = build_dht_config(&cfg); @@ -426,7 +506,6 @@ mod tests { for (i, (t_name, topo)) in topologies.iter().enumerate() { let exp = &expectations[i]; - // Assert bootstrap config assert_eq!( !dht_cfg.dht.bootstrap.is_empty(), exp.has_bootstrap, @@ -435,15 +514,6 @@ mod tests { dht_cfg.dht.bootstrap, ); - assert_eq!( - dht_cfg.dht.firewalled, - exp.firewalled, - "[{b_name} × {t_name}] firewalled mismatch: expected {}, got {}", - exp.firewalled, - dht_cfg.dht.firewalled, - ); - - // Assert connection path decision let direct = should_direct_connect( topo.relayed, topo.firewall, @@ -459,115 +529,24 @@ mod tests { } } - /// Verifies that isolated mode (B3) with no bootstrap produces a config - /// that makes DHT discovery impossible — the expected graceful degradation. #[test] fn isolated_mode_no_discovery_semantics() { let cfg = b3_config(); let dht_cfg = build_dht_config(&cfg); - // No bootstrap → no DHT nodes to query → no discovery assert!( dht_cfg.dht.bootstrap.is_empty(), "isolated mode must have empty bootstrap" ); - // Firewalled → won't accept incoming connections - assert!( - dht_cfg.dht.firewalled, - "isolated mode must be firewalled" - ); - // This means: announce will have no nodes to announce to, - // lookup will have no nodes to query, and incoming connections - // are blocked. The peer is effectively unreachable. } - /// Verifies the CGNAT topology (T6) uses FIREWALL_RANDOM, which is the - /// correct representation per Node.js reference (symmetric NAT = random - /// port allocation = FIREWALL_RANDOM). #[test] fn cgnat_represented_as_firewall_random() { - // CGNAT/symmetric NAT: each new connection gets a different external - // port, making port prediction impossible. Node.js classifies this as - // FIREWALL_RANDOM. Verify the constant value matches expectation. 
assert_eq!(FIREWALL_RANDOM, 3); assert_eq!(FIREWALL_UNKNOWN, 0); assert_eq!(FIREWALL_OPEN, 1); assert_eq!(FIREWALL_CONSISTENT, 2); - // With FIREWALL_RANDOM + relayed + holepunchable: holepunch is attempted assert!(!should_direct_connect(true, FIREWALL_RANDOM, true, false)); - // But CGNAT holepunch success rate is low in practice — this is - // documented as a known limitation of symmetric NAT traversal. - } - - #[test] - fn public_flag_sets_firewall_open_in_swarm_config() { - use peeroxide::SwarmConfig; - - let public_cfg = ResolvedConfig { - public: true, - firewalled: false, - bootstrap: vec![], - node: Default::default(), - }; - let private_cfg = ResolvedConfig { - public: false, - firewalled: false, - bootstrap: vec![], - node: Default::default(), - }; - - let dht_config = build_dht_config(&public_cfg); - let mut swarm_config = SwarmConfig::default(); - swarm_config.dht = dht_config; - if public_cfg.public { - swarm_config.firewall = FIREWALL_OPEN; - } - assert_eq!( - swarm_config.firewall, FIREWALL_OPEN, - "public=true must set SwarmConfig.firewall to FIREWALL_OPEN" - ); - - let dht_config = build_dht_config(&private_cfg); - let mut swarm_config = SwarmConfig::default(); - swarm_config.dht = dht_config; - if private_cfg.public { - swarm_config.firewall = FIREWALL_OPEN; - } - assert_eq!( - swarm_config.firewall, 0, - "public=false must leave SwarmConfig.firewall at default (UNKNOWN=0)" - ); - } - - #[test] - fn firewalled_flag_sets_firewall_consistent_in_swarm_config() { - use peeroxide::SwarmConfig; - - let firewalled_cfg = ResolvedConfig { - public: false, - firewalled: true, - bootstrap: vec!["10.0.0.1:49737".to_string()], - node: Default::default(), - }; - - let dht_config = build_dht_config(&firewalled_cfg); - assert!( - dht_config.dht.firewalled, - "--firewalled must set dht.firewalled=true" - ); - - let mut swarm_config = SwarmConfig::default(); - swarm_config.dht = dht_config; - if firewalled_cfg.public { - swarm_config.firewall = FIREWALL_OPEN; - } 
else if firewalled_cfg.firewalled { - swarm_config.firewall = FIREWALL_CONSISTENT; - } - assert_eq!( - swarm_config.firewall, FIREWALL_CONSISTENT, - "--firewalled must set SwarmConfig.firewall to FIREWALL_CONSISTENT (2)" - ); - assert_eq!(FIREWALL_CONSISTENT, 2); } } diff --git a/peeroxide-cli/src/cmd/node.rs b/peeroxide-cli/src/cmd/node.rs index f78e997..d8b25f6 100644 --- a/peeroxide-cli/src/cmd/node.rs +++ b/peeroxide-cli/src/cmd/node.rs @@ -7,6 +7,7 @@ use tokio::signal; use std::time::Duration; use crate::config::ResolvedConfig; +use super::resolve_bootstrap; #[derive(Args)] pub struct NodeArgs { @@ -70,16 +71,9 @@ pub async fn run(args: NodeArgs, cfg: &ResolvedConfig) -> i32 { persistent.max_lru_age = Duration::from_secs(v); } - let bootstrap: Vec = if cfg.bootstrap.is_empty() && cfg.public { - peeroxide::DEFAULT_BOOTSTRAP - .iter() - .map(|s| (*s).to_string()) - .collect() - } else { - cfg.bootstrap.clone() - }; + let bootstrap = resolve_bootstrap(cfg); - let is_networked = cfg.public || !bootstrap.is_empty(); + let is_networked = cfg.public == Some(true) || !bootstrap.is_empty(); let mut dht_cfg = DhtConfig::default(); dht_cfg.bootstrap = bootstrap; diff --git a/peeroxide-cli/src/config.rs b/peeroxide-cli/src/config.rs index de880b3..dced86a 100644 --- a/peeroxide-cli/src/config.rs +++ b/peeroxide-cli/src/config.rs @@ -7,7 +7,6 @@ pub struct GlobalFlags { pub config_path: Option, pub no_default_config: bool, pub public: Option, - pub firewalled: bool, pub bootstrap: Option>, } @@ -50,8 +49,7 @@ pub struct CpConfig {} #[derive(Debug, Clone)] pub struct ResolvedConfig { - pub public: bool, - pub firewalled: bool, + pub public: Option, pub bootstrap: Vec, pub node: NodeConfig, } @@ -60,6 +58,7 @@ pub fn load_config(flags: &GlobalFlags) -> Result { let file_config = if let Some(ref path) = flags.config_path { let contents = std::fs::read_to_string(path) .map_err(|e| format!("cannot read config file {path}: {e}"))?; + tracing::info!(path, "config loaded"); Some( 
toml::from_str::(&contents) .map_err(|e| format!("invalid config file {path}: {e}"))?, @@ -67,30 +66,39 @@ pub fn load_config(flags: &GlobalFlags) -> Result { } else if let Some(path) = env_config_path() { let contents = std::fs::read_to_string(&path) .map_err(|e| format!("cannot read config file {}: {e}", path.display()))?; + tracing::info!(path = %path.display(), "config loaded via $PEEROXIDE_CONFIG"); Some( toml::from_str::(&contents) .map_err(|e| format!("invalid config file {}: {e}", path.display()))?, ) } else if !flags.no_default_config { - default_config_path() - .and_then(|p| std::fs::read_to_string(&p).ok()) - .and_then(|contents| toml::from_str::(&contents).ok()) + match default_config_path() { + Some(p) => { + let contents = std::fs::read_to_string(&p).ok(); + if let Some(ref contents) = contents { + tracing::info!(path = %p.display(), "config loaded"); + Some( + toml::from_str::(contents) + .map_err(|e| format!("invalid config file {}: {e}", p.display()))?, + ) + } else { + tracing::debug!("no config file found at default location"); + None + } + } + None => { + tracing::debug!("no default config path available"); + None + } + } } else { + tracing::debug!("config file loading skipped (--no-default-config)"); None }; let file_config = file_config.unwrap_or_default(); - let mut public = flags - .public - .or(file_config.network.public) - .unwrap_or(false); - - // --firewalled explicitly overrides any config-derived public=true. - // You cannot be both public and firewalled simultaneously. 
- if flags.firewalled { - public = false; - } + let public = flags.public.or(file_config.network.public); let bootstrap = flags .bootstrap @@ -100,7 +108,6 @@ pub fn load_config(flags: &GlobalFlags) -> Result { Ok(ResolvedConfig { public, - firewalled: flags.firewalled, bootstrap, node: file_config.node, }) @@ -110,6 +117,46 @@ fn env_config_path() -> Option { std::env::var("PEEROXIDE_CONFIG").ok().map(PathBuf::from) } +/// Returns a footer string for help output showing the active or expected config path. +pub fn config_path_footer() -> String { + if let Some(env_path) = env_config_path() { + return if env_path.exists() { + format!("Config: {} (via $PEEROXIDE_CONFIG)", env_path.display()) + } else { + format!( + "Config: {} (via $PEEROXIDE_CONFIG, not found)", + env_path.display() + ) + }; + } + + if let Some(path) = default_config_path() { + return format!("Config: {}", path.display()); + } + + match expected_default_path() { + Some(path) => format!( + "Config: {} (not found; create with 'peeroxide init')", + path.display() + ), + None => "Config: not found (create with 'peeroxide init')".to_string(), + } +} + +/// Returns the default config path without checking if the file exists. 
+fn expected_default_path() -> Option { + if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") { + return Some(PathBuf::from(xdg).join("peeroxide").join("config.toml")); + } + if let Some(config_dir) = dirs::config_dir() { + return Some(config_dir.join("peeroxide").join("config.toml")); + } + if let Some(home) = dirs::home_dir() { + return Some(home.join(".config").join("peeroxide").join("config.toml")); + } + None +} + fn default_config_path() -> Option { if let Some(xdg) = std::env::var_os("XDG_CONFIG_HOME") { let p = PathBuf::from(xdg).join("peeroxide").join("config.toml"); @@ -181,11 +228,10 @@ max_lru_age = 1200 config_path: None, no_default_config: true, public: Some(true), - firewalled: false, bootstrap: Some(vec!["1.2.3.4:49737".to_string()]), }; let cfg = load_config(&flags).unwrap(); - assert!(cfg.public); + assert_eq!(cfg.public, Some(true)); assert_eq!(cfg.bootstrap, vec!["1.2.3.4:49737"]); } @@ -195,29 +241,10 @@ max_lru_age = 1200 config_path: None, no_default_config: true, public: None, - firewalled: false, bootstrap: None, }; let cfg = load_config(&flags).unwrap(); - assert!(!cfg.public); + assert_eq!(cfg.public, None); assert!(cfg.bootstrap.is_empty()); } - - #[test] - fn firewalled_flag_overrides_config_public() { - let dir = tempfile::tempdir().unwrap(); - let config_path = dir.path().join("config.toml"); - std::fs::write(&config_path, "[network]\npublic = true\n").unwrap(); - - let flags = GlobalFlags { - config_path: Some(config_path.to_str().unwrap().to_string()), - no_default_config: false, - public: None, - firewalled: true, - bootstrap: None, - }; - let cfg = load_config(&flags).unwrap(); - assert!(!cfg.public, "--firewalled must force public=false even when config says public=true"); - assert!(cfg.firewalled); - } } diff --git a/peeroxide-cli/src/main.rs b/peeroxide-cli/src/main.rs index 2cbc77b..af627d5 100644 --- a/peeroxide-cli/src/main.rs +++ b/peeroxide-cli/src/main.rs @@ -1,14 +1,25 @@ #![deny(clippy::all)] -use 
clap::{CommandFactory, Parser, Subcommand}; +use clap::{CommandFactory, FromArgMatches, Parser, Subcommand}; use tracing_subscriber::EnvFilter; mod cmd; mod config; mod manpage; +// Shown by `peeroxide --version` (long form). `-V` keeps showing just +// the bare semver, which is what scripts expect. Clap automatically +// prefixes `--version` output with the binary name, so starting this +// const with the version number yields the standard `peeroxide X.Y.Z` +// header followed by the banner. +const LONG_VERSION: &str = concat!( + env!("CARGO_PKG_VERSION"), + "\n\n", + include_str!("../../docs/ascii_art.txt"), +); + #[derive(Parser)] -#[command(name = "peeroxide", version, about = "P2P networking CLI for the Hyperswarm-compatible network")] +#[command(name = "peeroxide", version, long_version = LONG_VERSION, about = "P2P networking CLI for the Hyperswarm-compatible network")] struct Cli { #[command(subcommand)] command: Option, @@ -21,30 +32,27 @@ struct Cli { #[arg(long, global = true)] no_default_config: bool, - /// Mark this node as publicly reachable + /// Use the public HyperDHT bootstrap network #[arg(long, global = true, conflicts_with = "no_public")] public: bool, - /// Mark this node as NOT publicly reachable (override config) + /// Do not use the public HyperDHT bootstrap network #[arg(long, global = true, conflicts_with = "public")] no_public: bool, - /// Force this node to report as firewalled (FIREWALL_CONSISTENT). - /// Useful for testing firewall-specific connection paths. 
- #[arg(long, global = true, conflicts_with = "public")] - firewalled: bool, - /// Bootstrap node addresses (host:port or ip:port), repeatable #[arg(long, global = true, action = clap::ArgAction::Append)] bootstrap: Vec, - /// Generate man pages to the specified directory - #[arg(long, value_name = "DIR")] - generate_man: Option, + /// Increase output verbosity (-v info, -vv debug) + #[arg(short = 'v', long, global = true, action = clap::ArgAction::Count)] + verbose: u8, } #[derive(Subcommand)] enum Commands { + /// Initialize config file or install man pages + Init(cmd::init::InitArgs), /// Run a long-running DHT coordination (bootstrap) node Node(cmd::node::NodeArgs), /// Query the DHT for peers announcing a topic @@ -58,32 +66,56 @@ enum Commands { #[command(subcommand)] command: cmd::cp::CpCommands, }, - /// Configuration management - Config { + /// Dead Drop: anonymous store-and-forward via the DHT + #[command(name = "dd")] + Dd { #[command(subcommand)] - command: cmd::config::ConfigCommands, - }, - /// Anonymous store-and-forward via the DHT - Deaddrop { - #[command(subcommand)] - command: cmd::deaddrop::DeaddropCommands, + command: cmd::deaddrop::DdCommands, }, + /// Anonymous verifiable P2P chat + Chat(cmd::chat::ChatArgs), } -fn main() { +fn apply_config_footer(cmd: clap::Command, footer: &str) -> clap::Command { + let sub_names: Vec = cmd + .get_subcommands() + .map(|s| s.get_name().to_string()) + .collect(); + let mut cmd = cmd; + for name in sub_names { + let f = footer.to_string(); + cmd = cmd.mut_subcommand(name, |sub| apply_config_footer(sub, &f)); + } + cmd.after_help(footer.to_string()) +} + +fn init_tracing(verbose: u8) { + let filter = if std::env::var("RUST_LOG").is_ok() { + EnvFilter::from_default_env() + } else { + match verbose { + 0 => EnvFilter::new("warn"), + 1 => EnvFilter::new("peeroxide=info,warn"), + _ => EnvFilter::new("peeroxide=debug,info"), + } + }; tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) + 
.with_env_filter(filter) .with_writer(std::io::stderr) .init(); +} - let cli = Cli::parse(); +fn main() { + let footer = config::config_path_footer(); + let cmd = apply_config_footer(Cli::command(), &footer); + let mut help_cmd = cmd.clone(); + let matches = cmd.get_matches(); + let cli = Cli::from_arg_matches(&matches).unwrap_or_else(|e: clap::Error| e.exit()); - if let Some(dir) = cli.generate_man { - std::process::exit(generate_manpages(&dir)); - } + init_tracing(cli.verbose); let Some(command) = cli.command else { - Cli::command().print_help().ok(); + help_cmd.print_help().ok(); eprintln!(); std::process::exit(2); }; @@ -91,7 +123,12 @@ fn main() { let rt = tokio::runtime::Runtime::new().expect("failed to create tokio runtime"); let exit_code = rt.block_on(async { match command { - Commands::Config { command } => cmd::config::run(command).await, + Commands::Init(args) => { + let ctx = cmd::init::InitContext { + config_path: cli.config, + }; + cmd::init::run(args, ctx) + } command => { let global = config::GlobalFlags { config_path: cli.config, @@ -103,7 +140,6 @@ fn main() { } else { None }, - firewalled: cli.firewalled, bootstrap: if cli.bootstrap.is_empty() { None } else { @@ -125,31 +161,12 @@ fn main() { Commands::Announce(args) => cmd::announce::run(args, &cfg).await, Commands::Ping(args) => cmd::ping::run(args, &cfg).await, Commands::Cp { command } => cmd::cp::run(command, &cfg).await, - Commands::Deaddrop { command } => cmd::deaddrop::run(command, &cfg).await, - Commands::Config { .. 
} => unreachable!(), + Commands::Dd { command } => cmd::deaddrop::run(command, &cfg).await, + Commands::Chat(args) => cmd::chat::run(args, &cfg).await, + Commands::Init(_) => unreachable!(), } } } }); std::process::exit(exit_code); } - -fn generate_manpages(dir: &std::path::Path) -> i32 { - if let Err(e) = std::fs::create_dir_all(dir) { - eprintln!("error: cannot create directory {}: {e}", dir.display()); - return 1; - } - - let pages = manpage::generate_all(); - for (name, content) in &pages { - let path = dir.join(format!("{name}.1")); - if let Err(e) = std::fs::write(&path, content) { - eprintln!("error: failed to write {}: {e}", path.display()); - return 1; - } - eprintln!("{}", path.display()); - } - - eprintln!("Generated {} man page(s) in {}", pages.len(), dir.display()); - 0 -} diff --git a/peeroxide-cli/src/manpage.rs b/peeroxide-cli/src/manpage.rs index 3f12110..35ddea4 100644 --- a/peeroxide-cli/src/manpage.rs +++ b/peeroxide-cli/src/manpage.rs @@ -3,7 +3,7 @@ use clap::CommandFactory; use std::io::Write; -const CONSOLIDATED: &[&str] = &["peeroxide-cp", "peeroxide-config", "peeroxide-deaddrop"]; +const CONSOLIDATED: &[&str] = &["peeroxide-cp", "peeroxide-dd", "peeroxide-chat"]; /// Generate all man pages and return them as (filename_stem, content) pairs. pub fn generate_all() -> Vec<(String, Vec)> { @@ -91,6 +91,17 @@ fn render_consolidated_page(cmd: clap::Command, name: &str) -> Vec { write_consolidated_synopsis(&mut buf, &cmd, name); man.render_description_section(&mut buf).unwrap(); + + // If the parent command has its own non-global, non-hidden args + // (e.g. peeroxide chat carries --debug / --probe / --line-mode), + // surface them in an OPTIONS section before listing subcommands. 
+ let parent_has_own_args = cmd + .get_arguments() + .any(|a| !a.is_hide_set() && !is_global_arg(a) && !a.is_positional()); + if parent_has_own_args { + man.render_options_section(&mut buf).unwrap(); + } + write_consolidated_commands(&mut buf, &cmd, name); if let Some(examples) = examples_for(name) { @@ -111,11 +122,24 @@ fn render_consolidated_page(cmd: clap::Command, name: &str) -> Vec { fn write_consolidated_synopsis(buf: &mut Vec, cmd: &clap::Command, parent_name: &str) { buf.write_all(b".SH SYNOPSIS\n").unwrap(); let invocation_base = parent_name.replace('-', " "); + write_synopsis_recursive(buf, cmd, &invocation_base); +} + +fn write_synopsis_recursive(buf: &mut Vec, cmd: &clap::Command, invocation: &str) { for sub in cmd.get_subcommands() { if sub.is_hide_set() || sub.get_name() == "help" { continue; } - writeln!(buf, ".B {invocation_base} {}", sub.get_name()).unwrap(); + let sub_invocation = format!("{invocation} {}", sub.get_name()); + + if sub.get_subcommands().next().is_some() { + // Subgroup: recurse to enumerate its leaves; do not emit a + // synopsis line for the group itself. 
+ write_synopsis_recursive(buf, sub, &sub_invocation); + continue; + } + + writeln!(buf, ".B {sub_invocation}").unwrap(); let mut opts = Vec::new(); for arg in sub.get_arguments().filter(|a| !a.is_hide_set() && !is_global_arg(a)) { if arg.is_positional() { @@ -141,13 +165,32 @@ fn write_consolidated_synopsis(buf: &mut Vec, cmd: &clap::Command, parent_na fn write_consolidated_commands(buf: &mut Vec, cmd: &clap::Command, parent_name: &str) { buf.write_all(b".SH COMMANDS\n").unwrap(); + write_commands_recursive(buf, cmd, parent_name, ""); +} + +fn write_commands_recursive( + buf: &mut Vec, + cmd: &clap::Command, + parent_name: &str, + path: &str, +) { for sub in cmd.get_subcommands() { if sub.is_hide_set() || sub.get_name() == "help" { continue; } let sub_key = format!("{parent_name}-{}", sub.get_name()); - writeln!(buf, ".SS {}", sub.get_name()).unwrap(); + let display_path = if path.is_empty() { + sub.get_name().to_string() + } else { + format!("{path} {}", sub.get_name()) + }; + + let is_group = sub.get_subcommands().next().is_some(); + writeln!(buf, ".SS {display_path}").unwrap(); + + // Render a description for this command/group. Prefer the + // long_about_for override; fall back to the clap short about. if let Some(long) = long_about_for(&sub_key) { for line in long.lines() { if line.trim().is_empty() { @@ -160,15 +203,21 @@ fn write_consolidated_commands(buf: &mut Vec, cmd: &clap::Command, parent_na writeln!(buf, "{about}").unwrap(); } - let args: Vec<_> = sub - .get_arguments() - .filter(|a| !a.is_hide_set() && !is_global_arg(a)) - .collect(); - if !args.is_empty() { - buf.write_all(b".PP\n").unwrap(); - for arg in args { - write_arg_tp(buf, arg); + // For leaves, emit the argument list. Groups don't have their + // own args (their leaves do), so skip. 
+ if !is_group { + let args: Vec<_> = sub + .get_arguments() + .filter(|a| !a.is_hide_set() && !is_global_arg(a)) + .collect(); + if !args.is_empty() { + buf.write_all(b".PP\n").unwrap(); + for arg in args { + write_arg_tp(buf, arg); + } } + } else { + write_commands_recursive(buf, sub, &sub_key, &display_path); } } } @@ -180,8 +229,8 @@ fn is_global_arg(arg: &clap::Arg) -> bool { | "no_default_config" | "public" | "no_public" - | "firewalled" | "bootstrap" + | "verbose" | "help" ) } @@ -260,18 +309,18 @@ fn long_about_for(name: &str) -> Option<&'static str> { The tool connects to the public Hyperswarm DHT by default, or to custom \ bootstrap nodes specified via --bootstrap flags or the configuration file. \ All subcommands share a common set of global options for network configuration.\n\n\ - Use --public to mark this node as publicly reachable (not behind NAT), \ - --no-public to force NAT mode, or --firewalled to simulate a consistently \ - firewalled node for testing firewall-specific connection paths.", + Use --public to include the public HyperDHT bootstrap nodes, or --no-public \ + to exclude them. If no bootstrap nodes are configured and --no-public is not \ + given, the public bootstrap is used automatically.", ), "peeroxide-node" => Some( "Run a long-lived DHT coordination (bootstrap) node that participates in the \ distributed hash table routing layer. Bootstrap nodes help new peers discover \ the network and facilitate Kademlia routing table population.\n\n\ A node listens for incoming DHT RPC requests and maintains routing state. \ - Use --public to mark the node as publicly reachable (required for production \ - bootstrap nodes). The --port flag binds to a specific UDP port for consistent \ - addressing.\n\n\ + Use --public to include the public HyperDHT bootstrap nodes (required for \ + production bootstrap nodes to join the network). 
The --port flag binds to a \ + specific UDP port for consistent addressing.\n\n\ The node runs until terminated by SIGTERM or SIGINT.", ), "peeroxide-lookup" => Some( @@ -307,7 +356,8 @@ fn long_about_for(name: &str) -> Option<&'static str> { Noise-encrypted connection and PING/PONG echo exchange.\n\n\ — Look up the topic in the DHT, then ping all discovered peers.\n\n\ In bootstrap check mode (no target), the resolved bootstrap list comes from \ - the config file, --bootstrap flags, or public defaults (with --public). The \ + the config file, --bootstrap flags, or public defaults (with --public or by \ + default when no other bootstrap is configured). The \ output includes per-node reachability and routing table size, your reflexive \ public address, a NAT type classification (open, consistent, random, or \ multi-homed), and the total unique peers discovered across all bootstraps.\n\n\ @@ -347,16 +397,30 @@ fn long_about_for(name: &str) -> Option<&'static str> { renamed to the final path only after the full transfer succeeds and the size \ is validated.", ), - "peeroxide-deaddrop" => Some( - "Anonymous store-and-forward messaging via the DHT's mutable record storage. \ - Messages are encrypted with a passphrase-derived key and stored as mutable \ - DHT records that any peer can retrieve without knowing the sender's identity.\n\n\ - The dead drop uses a chunked binary format with CRC32c integrity checks. \ - Messages are limited to approximately 1000 bytes per chunk (with multi-chunk \ - support for larger payloads).", + "peeroxide-dd" => Some( + "Dead Drop: anonymous store-and-forward messaging via the DHT. Two wire protocols \ + ship in this binary, distinguished by their leading version byte.\n\n\ + Version 1 (0x01) is the original single-chain format: chunks form a \ + linked list of mutable DHT records (~1 KB each), each pointing to the next. \ + Simple, capped near 60 MB of payload, suitable for short messages. 
Still used \ + when the sender passes --v1 on dd put.\n\n\ + Version 2 (0x02) is a tree-indexed protocol: data chunks are stored \ + content-addressed via immutable_put, and a tree of mutable index records \ + names them. The receiver fetches the index tree breadth-first in parallel \ + and reconstructs the file in DFS order. Default protocol for dd put. \ + The soft depth cap of 4 supports up to about 27 GB at the current 998-byte \ + chunk size; depth 5+ would extend that further but is rejected at PUT time \ + to keep tree-walk latency bounded.\n\n\ + dd get detects the protocol from the first byte of the root record \ + and runs the matching v1 or v2 fetch path automatically; there is no --v1 \ + flag on the get side.\n\n\ + Both protocols periodically refresh their records to keep them alive in the \ + DHT (records age out of node storage after about 20 minutes by default). \ + A passphrase-derived keypair can be used so both sender and receiver agree \ + on the pickup key without exchanging it directly.", ), - "peeroxide-deaddrop-leave" => Some( - "Leave an anonymous message at a dead drop location in the DHT. The message \ + "peeroxide-dd-put" => Some( + "Store an anonymous message at a dead drop location in the DHT. The message \ is encrypted with a passphrase-derived keypair and stored as a mutable DHT \ record.\n\n\ The passphrase can be provided inline with --passphrase or prompted \ @@ -368,7 +432,7 @@ fn long_about_for(name: &str) -> Option<&'static str> { peers. Records persist in the DHT as long as nodes cache them (typically hours \ to days depending on network conditions).", ), - "peeroxide-deaddrop-pickup" => Some( + "peeroxide-dd-get" => Some( "Retrieve a message from a dead drop location in the DHT. 
The pickup key \ can be a 64-character hex public key, a passphrase string (if less than 64 \ hex chars), or derived interactively.\n\n\ @@ -378,21 +442,151 @@ fn long_about_for(name: &str) -> Option<&'static str> { The retrieved message is written to stdout (or to a file with --output). If \ no message is found at the specified location, or if decryption fails (wrong \ passphrase), an error is reported.\n\n\ - The pickup operation is read-only and does not modify or consume the stored \ - record -- the same message can be picked up multiple times by different peers.", + The get operation is read-only and does not modify or consume the stored \ + record -- the same message can be retrieved multiple times by different peers.", + ), + + "peeroxide-init" => Some( + "Initialize a peeroxide config file or install man pages. This command has \ + two mutually exclusive modes:\n\n\ + Config mode (default): Creates a commented TOML config file with sane defaults \ + at ~/.config/peeroxide/config.toml (or the path given by --config). Use --force \ + to overwrite an existing config, or --update to patch specific fields without \ + disturbing other settings.\n\n\ + Man page mode (--man-pages): Generates and installs roff man pages into the \ + specified directory (default: /usr/local/share/man/man1/). No config is touched \ + in this mode.", + ), + "peeroxide-chat" => Some( + "End-to-end-encrypted peer-to-peer chat over the Hyperswarm DHT. No central \ + server, no account signup, no message storage beyond the ephemeral DHT.\n\n\ + Identity is a local Ed25519 keypair stored per profile under \ + ~/.config/peeroxide/chat/profiles/. A separate process-wide \ + ~/.config/peeroxide/chat/known_users cache records the most-recent \ + screen name observed for each pubkey, shared across all profiles on the \ + machine.\n\n\ + Two conversation shapes are supported. Channels are public or private \ + group rooms keyed by a channel name (plus an optional group salt for \ + privacy). 
Direct messages are 1:1 between two identity public keys; the \ + DM channel key is derived deterministically from the pair, so both sides \ + arrive at the same key without prior coordination.\n\n\ + Discovery uses the DHT announce/lookup rendezvous pattern across rotating \ + epoch+bucket topics, so an observer cannot trivially correlate one feed \ + across long time windows. Message records are encrypted with \ + XSalsa20-Poly1305 and signed with the author's Ed25519 key; readers verify \ + both the chain-of-prev-hashes and the per-message signature before \ + releasing a message to the UI.\n\n\ + The TUI auto-activates when both stdin and stdout are terminals; otherwise \ + chat runs in line mode (one message per line on stdout). Force line mode \ + with --line-mode or PEEROXIDE_LINE_MODE=1.", + ), + "peeroxide-chat-join" => Some( + "Join a channel. Interactive TUI by default on a terminal; line mode \ + otherwise (one message per line on stdout).\n\n\ + Channel name is positional. Pass \\fB--group \\fR (or read the salt \ + from a file with \\fB--keyfile \\fR) to join a private channel \ + whose discovery topic is derived from both the channel name and the \ + salt -- two people who don't share the salt cannot find each other or \ + decrypt each other's messages.\n\n\ + By default the session also publishes the local profile's Nexus record \ + and refreshes friend Nexus data in the background; suppress with \ + \\fB--no-nexus\\fR / \\fB--no-friends\\fR, or use \\fB--stealth\\fR for both \ + plus \\fB--read-only\\fR.\n\n\ + Stdin EOF exits the session by default. Pass \\fB--stay-after-eof\\fR to \ + keep the session listening after the input stream closes -- useful when \ + piping a transcript and then watching for replies.", + ), + "peeroxide-chat-dm" => Some( + "Open a direct-message session with another identity. 
Interactive TUI by \ + default; line mode otherwise.\n\n\ + The recipient is resolved in this order: a 64-char hex public key, \ + \\fB@SHORTKEY\\fR (the first 8 hex characters of a pubkey), \ + \\fBNAME@SHORTKEY\\fR (validates the screen name against the known_users \ + cache), a bare 8-char shortkey, a friend alias from the current profile, \ + or a screen name from the shared known_users cache. The DM channel key \ + is derived deterministically from your identity pubkey and theirs, so \ + you both arrive at the same key without coordination.\n\n\ + Pass \\fB--message \\fR to seed an initial inbox-invite lure for the \ + recipient -- their inbox monitor surfaces a notification with this text \ + so they know who is reaching out and on what topic.\n\n\ + Other session flags mirror \\fBchat join\\fR (\\fB--no-nexus\\fR, \ + \\fB--no-friends\\fR, \\fB--read-only\\fR, \\fB--stealth\\fR, \ + \\fB--feed-lifetime\\fR, \\fB--batch-size\\fR, \\fB--batch-wait-ms\\fR, \ + \\fB--stay-after-eof\\fR, \\fB--no-inbox\\fR, \\fB--inbox-poll-interval\\fR). \ + \\fB--group\\fR / \\fB--keyfile\\fR do NOT apply to DMs (the channel key \ + is derived from the participants).", + ), + "peeroxide-chat-inbox" => Some( + "Monitor the local profile's inbox for new invites (DMs from new senders \ + and private-channel invites). Prints each new invite to stdout as it \ + arrives; does NOT enter the interactive chat -- use \\fBchat dm\\fR or \ + \\fBchat join\\fR to act on an invite.\n\n\ + Each poll scans the current and previous inbox epochs across all 4 \ + buckets in parallel (8 DHT lookups). 
\\fB--poll-interval\\fR sets the \ + cycle length in seconds; values below 1 are clamped to 1.\n\n\ + \\fB--no-nexus\\fR and \\fB--no-friends\\fR are accepted for flag-surface \ + parity with \\fBchat join\\fR / \\fBchat dm\\fR but have no effect here -- \ + the inbox CLI does not run nexus publish or friend refresh tasks.", + ), + "peeroxide-chat-whoami" => Some( + "Print the current profile's identity: profile name, full 64-char identity \ + public key, screen name (if set), and the topic hash other peers would \ + use to discover this identity's Nexus record.", + ), + "peeroxide-chat-profiles" => Some( + "Manage local identity profiles. Each profile is a directory under \ + \\fB~/.config/peeroxide/chat/profiles//\\fR containing the Ed25519 \ + seed, an optional screen name and bio, and a friend list. The \\fBdefault\\fR \ + profile is auto-created on first run and cannot be deleted.", + ), + "peeroxide-chat-profiles-list" => Some( + "List all locally-known profile names.", + ), + "peeroxide-chat-profiles-create" => Some( + "Create a new profile with a freshly generated Ed25519 keypair. If \ + \\fB--screen-name\\fR is omitted, a vendor name is generated deterministically \ + from the public key and stored in the profile.", ), - "peeroxide-config" => Some( - "Manage peeroxide configuration files. The config subcommands help with \ - initial setup and inspection of the TOML-based configuration.\n\n\ - peeroxide reads its configuration from ~/.config/peeroxide/config.toml by \ - default. Override the path with --config or the PEEROXIDE_CONFIG environment \ - variable. Use --no-default-config to ignore the config file entirely.", + "peeroxide-chat-profiles-delete" => Some( + "Delete a profile and all of its local state (seed, screen name, bio, \ + friend list). The \\fBdefault\\fR profile is rejected.", ), - "peeroxide-config-init" => Some( - "Generate a commented configuration file with sane defaults. 
The output is \ - valid TOML with all options commented out, ready for customization.\n\n\ - By default the config is printed to stdout. Use --output to write directly \ - to a file (parent directories are created if needed).", + "peeroxide-chat-friends" => Some( + "Manage the current profile's friend list. Friends are saved by identity \ + public key with an optional local alias and the last-seen screen name / \ + bio fetched from their Nexus. Friend Nexus data is refreshed periodically \ + during chat sessions.", + ), + "peeroxide-chat-friends-list" => Some( + "List the friends recorded under the current profile, with their aliases, \ + screen names, and shortened public keys.", + ), + "peeroxide-chat-friends-add" => Some( + "Add a friend to the current profile. The key argument follows the same \ + resolution rules as \\fBchat dm\\fR's recipient. If \\fB--alias\\fR is omitted, \ + the alias is auto-filled from the known_users cache (or a generated vendor \ + name if no cached screen name is available).", + ), + "peeroxide-chat-friends-remove" => Some( + "Remove a friend from the current profile's friend list. The key argument \ + follows the same resolution rules as \\fBchat friends add\\fR.", + ), + "peeroxide-chat-friends-refresh" => Some( + "Perform a one-shot DHT refresh of the friend Nexus records for the \ + \\fBdefault\\fR profile. Does not accept \\fB--profile\\fR.", + ), + "peeroxide-chat-nexus" => Some( + "Manage the current profile's Nexus record (a public-key-addressed mutable \ + DHT record carrying your screen name and bio).\n\n\ + By default \\fBchat nexus\\fR performs a one-shot publish of the current \ + profile's Nexus. With \\fB--set-name\\fR or \\fB--set-bio\\fR (or both), \ + the new values are written to the profile first; if neither \\fB--publish\\fR \ + nor \\fB--daemon\\fR is supplied with the setters, the command exits after \ + writing without publishing. \\fB--publish\\fR forces a one-shot publish. 
\ + \\fB--daemon\\fR runs continuously, publishing your own Nexus every 480 \ + seconds and refreshing all friend Nexus records every 600 seconds.\n\n\ + \\fB--lookup \\fR short-circuits all other modes: fetch and print \ + the named identity's Nexus record (screen name + bio).", ), _ => None, } @@ -524,36 +718,104 @@ fn examples_for(name: &str) -> Option<&'static [(&'static str, &'static str)]> { "Receive to stdout:", ), ]), - "peeroxide-deaddrop" => Some(&[ + "peeroxide-dd" => Some(&[ + ( + "echo 'secret message' | peeroxide dd put - --passphrase s3cret", + "Put a v2 message at a dead drop with an inline passphrase (read from stdin):", + ), ( - "echo 'secret message' | peeroxide deaddrop leave - --passphrase s3cret", - "Leave a message with an inline passphrase (read from stdin):", + "peeroxide dd put ./msg.txt --interactive-passphrase", + "Put a file at a dead drop with a prompted passphrase (hidden input):", ), ( - "peeroxide deaddrop leave ./msg.txt --interactive-passphrase", - "Leave a file with a prompted passphrase (hidden input):", + "peeroxide dd put ./large.tar --passphrase s3cret --v1", + "Force the legacy v1 single-chain protocol on put:", ), ( - "peeroxide deaddrop pickup --passphrase s3cret", - "Pick up a message using the same passphrase:", + "peeroxide dd put ./file.bin --passphrase s3cret --no-progress", + "Suppress the progress bar (useful in scripts or when stderr is not a TTY):", ), ( - "peeroxide deaddrop pickup --interactive-passphrase --output ./msg.txt", - "Pick up with prompted passphrase, write to file:", + "peeroxide dd put ./file.bin --passphrase s3cret --json", + "Emit JSON-Lines progress events on stdout (suitable for scripting):", ), ( - "peeroxide deaddrop pickup a1b2c3...64chars", - "Pick up using a raw hex public key:", + "peeroxide dd get --passphrase s3cret", + "Get a message from a dead drop using the same passphrase (auto-detects v1/v2):", + ), + ( + "peeroxide dd get --interactive-passphrase --output ./msg.txt", + "Get with 
prompted passphrase, write to file:", + ), + ( + "peeroxide dd get a1b2c3...64chars --output ./out.bin --json", + "Get using a raw hex public key and emit JSON progress (requires --output):", ), ]), - "peeroxide-config" => Some(&[ + + "peeroxide-chat" => Some(&[ ( - "peeroxide config init", - "Print a default config file to stdout:", + "peeroxide chat join general", + "Join a public channel named \"general\":", ), ( - "peeroxide config init --output ~/.config/peeroxide/config.toml", - "Write config to the default location:", + "peeroxide chat join dev-room --group s3cret-salt", + "Join a private channel (only peers with the same salt can find each other):", + ), + ( + "peeroxide chat dm @a1b2c3d4 --message 'hey, got a minute?'", + "Open a DM to a peer by 8-char shortkey, leaving an inbox lure:", + ), + ( + "peeroxide chat inbox --poll-interval 30", + "Watch the local inbox for new invites, polling every 30 seconds:", + ), + ( + "peeroxide chat nexus --set-name 'Alice' --set-bio 'building stuff'", + "Update your screen name and bio in your profile (no DHT publish):", + ), + ( + "peeroxide chat nexus --set-name 'Alice' --publish", + "Update your screen name and immediately publish to the DHT:", + ), + ( + "peeroxide chat nexus --daemon", + "Run a background Nexus refresher (publish self every 480s, refresh friends every 600s):", + ), + ( + "peeroxide chat profiles create work --screen-name 'Alice (work)'", + "Create a second profile with its own identity keypair:", + ), + ( + "peeroxide chat friends add @a1b2c3d4 --alias bob", + "Add a friend to the current profile under a local alias:", + ), + ]), + + "peeroxide-init" => Some(&[ + ( + "peeroxide init", + "Create a default config file at ~/.config/peeroxide/config.toml:", + ), + ( + "peeroxide init --public --bootstrap node1.example.com:49737", + "Create a config with public mode and custom bootstrap:", + ), + ( + "peeroxide init --force", + "Overwrite an existing config file:", + ), + ( + "peeroxide init --update 
--public", + "Enable public mode in an existing config without changing other settings:", + ), + ( + "peeroxide init --man-pages", + "Install man pages to /usr/local/share/man/man1/:", + ), + ( + "peeroxide init --man-pages ~/.local/share/man/", + "Install man pages to a custom directory:", ), ]), _ => None, @@ -562,8 +824,8 @@ fn examples_for(name: &str) -> Option<&'static [(&'static str, &'static str)]> { fn exit_status_for(name: &str) -> Option<&'static str> { match name { - "peeroxide" | "peeroxide-node" | "peeroxide-lookup" | "peeroxide-announce" - | "peeroxide-cp" | "peeroxide-config" | "peeroxide-deaddrop" => Some( + "peeroxide" | "peeroxide-init" | "peeroxide-node" | "peeroxide-lookup" + | "peeroxide-announce" | "peeroxide-cp" | "peeroxide-dd" | "peeroxide-chat" => Some( ".TP\n\\fB0\\fR\nSuccess.\n\ .TP\n\\fB1\\fR\nFailure or partial failure.\n\ .TP\n\\fB2\\fR\nUsage error (invalid arguments).\n\ @@ -582,14 +844,16 @@ fn exit_status_for(name: &str) -> Option<&'static str> { fn see_also_for(name: &str) -> Option<&'static [&'static str]> { match name { "peeroxide" => Some(&[ + "peeroxide-init", "peeroxide-node", "peeroxide-lookup", "peeroxide-announce", "peeroxide-ping", "peeroxide-cp", - "peeroxide-config", - "peeroxide-deaddrop", + "peeroxide-dd", + "peeroxide-chat", ]), + "peeroxide-init" => Some(&["peeroxide"]), "peeroxide-node" => Some(&["peeroxide"]), "peeroxide-lookup" => Some(&["peeroxide-announce", "peeroxide"]), "peeroxide-announce" => Some(&["peeroxide-lookup", "peeroxide-ping", "peeroxide"]), @@ -599,9 +863,10 @@ fn see_also_for(name: &str) -> Option<&'static [&'static str]> { "peeroxide-lookup", "peeroxide", ]), - "peeroxide-cp" => Some(&["peeroxide-deaddrop", "peeroxide"]), - "peeroxide-config" => Some(&["peeroxide"]), - "peeroxide-deaddrop" => Some(&["peeroxide-cp", "peeroxide"]), + "peeroxide-cp" => Some(&["peeroxide-dd", "peeroxide"]), + + "peeroxide-dd" => Some(&["peeroxide-cp", "peeroxide"]), + "peeroxide-chat" => Some(&["peeroxide", 
"peeroxide-init"]), _ => None, } } diff --git a/peeroxide-cli/tests/chat_integration.rs b/peeroxide-cli/tests/chat_integration.rs new file mode 100644 index 0000000..14790f7 --- /dev/null +++ b/peeroxide-cli/tests/chat_integration.rs @@ -0,0 +1,889 @@ +//! Integration tests for `peeroxide chat` — multi-instance DHT interaction. +//! +//! Tests in this file exercise the full chat system including: +//! - Profile CRUD (no network) +//! - Nexus publish + lookup (local DHT cluster) +//! - Message exchange between two instances (local DHT cluster) +//! - Read-only mode verification +//! +//! Run with: `cargo test -p peeroxide-cli --test chat_integration` + +#![deny(clippy::all)] + +use std::io::{BufRead, BufReader, Write}; +use std::process::{Child, Command, Stdio}; +use std::time::Duration; + +use libudx::UdxRuntime; +use peeroxide_dht::hyperdht::{self, HyperDhtConfig, HyperDhtError, HyperDhtHandle, ServerEvent}; +use peeroxide_dht::rpc::DhtConfig; + +fn bin_path() -> std::path::PathBuf { + assert_cmd::cargo::cargo_bin("peeroxide") +} + +async fn spawn_bootstrap() -> (u16, BootstrapNode) { + let rt = UdxRuntime::new().unwrap(); + let mut dht_cfg = DhtConfig::default(); + dht_cfg.bootstrap = vec![]; + dht_cfg.port = 0; + dht_cfg.host = "127.0.0.1".to_string(); + dht_cfg.firewalled = false; + + let mut cfg = HyperDhtConfig::default(); + cfg.dht = dht_cfg; + + let (task, handle, rx) = hyperdht::spawn(&rt, cfg).await.unwrap(); + let port = handle.local_port().await.unwrap(); + + (port, BootstrapNode { _rt: rt, _task: task, _handle: handle, _rx: rx }) +} + +struct BootstrapNode { + _rt: UdxRuntime, + _task: tokio::task::JoinHandle>, + _handle: HyperDhtHandle, + _rx: tokio::sync::mpsc::UnboundedReceiver, +} + +async fn spawn_dht_cluster(n: usize) -> (Vec, Vec) { + assert!(n >= 2, "cluster requires at least 2 nodes"); + + let (first_port, first_node) = spawn_bootstrap().await; + let mut ports = vec![first_port]; + let mut nodes = vec![first_node]; + + for _ in 1..n { + let rt 
= UdxRuntime::new().unwrap(); + let mut dht_cfg = DhtConfig::default(); + dht_cfg.bootstrap = vec![format!("127.0.0.1:{first_port}")]; + dht_cfg.port = 0; + dht_cfg.host = "127.0.0.1".to_string(); + dht_cfg.firewalled = false; + + let mut cfg = HyperDhtConfig::default(); + cfg.dht = dht_cfg; + + let (task, handle, rx) = hyperdht::spawn(&rt, cfg).await.unwrap(); + handle.bootstrapped().await.unwrap(); + let port = handle.local_port().await.unwrap(); + + ports.push(port); + nodes.push(BootstrapNode { _rt: rt, _task: task, _handle: handle, _rx: rx }); + } + + tokio::time::sleep(Duration::from_secs(2)).await; + + (ports, nodes) +} + +fn kill_child(child: &mut Child) { + let _ = child.kill(); + let _ = child.wait(); +} + +fn setup_profile_home(screen_name: &str) -> tempfile::TempDir { + let dir = tempfile::tempdir().unwrap(); + let profiles_dir = dir.path().join(".config/peeroxide/chat/profiles/default"); + + std::fs::create_dir_all(&profiles_dir).unwrap(); + + let seed: [u8; 32] = rand::random(); + std::fs::write(profiles_dir.join("seed"), seed).unwrap(); + std::fs::write(profiles_dir.join("name"), screen_name).unwrap(); + + dir +} + +// ── Test: chat --help ────────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_chat_help() { + let output = tokio::task::spawn_blocking(|| { + Command::new(bin_path()) + .args(["chat", "--help"]) + .output() + .expect("failed to run chat --help") + }) + .await + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("join"), "help should mention 'join'"); + assert!(stdout.contains("dm"), "help should mention 'dm'"); + assert!(stdout.contains("inbox"), "help should mention 'inbox'"); + assert!(stdout.contains("whoami"), "help should mention 'whoami'"); + assert!(stdout.contains("profiles"), "help should mention 'profiles'"); + assert!(stdout.contains("nexus"), "help should mention 'nexus'"); + assert!(stdout.contains("friends"), 
"help should mention 'friends'"); +} + +// ── Test: profile CRUD ───────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_chat_profiles_create_list_delete() { + let dir = tempfile::tempdir().unwrap(); + let home = dir.path().to_str().unwrap().to_string(); + + let home_create = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_create) + .args(["chat", "profiles", "create", "alice", "--screen-name", "Alice"]) + .output() + .expect("failed to run profiles create") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!( + output.status.success(), + "profiles create failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(stdout.contains("Created profile 'alice'"), "got: {stdout}"); + assert!(stdout.contains("Public key:"), "got: {stdout}"); + + let home_list = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_list) + .args(["chat", "profiles", "list"]) + .output() + .expect("failed to run profiles list") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(output.status.success()); + assert!(stdout.contains("alice"), "profile list should contain 'alice', got: {stdout}"); + + let home_delete = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_delete) + .args(["chat", "profiles", "delete", "alice"]) + .output() + .expect("failed to run profiles delete") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(output.status.success()); + assert!(stdout.contains("Deleted profile 'alice'"), "got: {stdout}"); + + let home_verify = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_verify) + .args(["chat", "profiles", "list"]) + .output() + .expect("failed 
to run profiles list after delete") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(output.status.success()); + assert!(!stdout.contains("alice"), "deleted profile should not appear, got: {stdout}"); +} + +// ── Test: whoami ──────────────────────────────────────────────────────────────── + +#[tokio::test] +async fn test_chat_whoami() { + let home_dir = setup_profile_home("TestUser"); + let home = home_dir.path().to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home) + .args(["chat", "whoami"]) + .output() + .expect("failed to run whoami") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!( + output.status.success(), + "whoami failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(stdout.contains("Profile: default"), "got: {stdout}"); + assert!(stdout.contains("Public key:"), "got: {stdout}"); + assert!(stdout.contains("Screen name: TestUser"), "got: {stdout}"); + assert!(stdout.contains("Nexus topic:"), "got: {stdout}"); +} + +// ── Test: nexus set-name and set-bio (local, no network) ────────────────────── + +#[tokio::test] +async fn test_chat_nexus_set_name_and_bio() { + let home_dir = setup_profile_home("OldName"); + let home = home_dir.path().to_str().unwrap().to_string(); + + let home_name = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_name) + .args(["chat", "nexus", "--set-name", "NewName"]) + .output() + .expect("failed to run nexus --set-name") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "nexus --set-name failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Screen name updated to: NewName"), "got: {stdout}"); + + let home_bio = home.clone(); + let output = tokio::task::spawn_blocking(move || 
{ + Command::new(bin_path()) + .env("HOME", &home_bio) + .args(["chat", "nexus", "--set-bio", "A test bio"]) + .output() + .expect("failed to run nexus --set-bio") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "nexus --set-bio failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Bio updated"), "got: {stdout}"); + + let home_verify = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_verify) + .args(["chat", "whoami"]) + .output() + .expect("failed to run whoami after set") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("Screen name: NewName"), "got: {stdout}"); +} + +// ── Test: nexus publish + lookup round-trip ───────────────────────────────────── + +#[tokio::test] +async fn test_chat_nexus_publish_and_lookup() { + let result = tokio::time::timeout(Duration::from_secs(60), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + + let pub_home = setup_profile_home("NexusAlice"); + let pub_home_str = pub_home.path().to_str().unwrap().to_string(); + + let pub_home_whoami = pub_home_str.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &pub_home_whoami) + .args(["chat", "whoami"]) + .output() + .expect("failed to run whoami") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + let pubkey_line = stdout + .lines() + .find(|l| l.starts_with("Public key:")) + .expect("no Public key line"); + let pubkey = pubkey_line.trim_start_matches("Public key:").trim().to_string(); + assert_eq!(pubkey.len(), 64, "pubkey should be 64 hex chars"); + + let pub_home_publish = pub_home_str.clone(); + let bs_addr_pub = bs_addr.clone(); + let output = tokio::task::spawn_blocking(move || { + 
Command::new(bin_path()) + .env("HOME", &pub_home_publish) + .args([ + "--no-default-config", + "chat", "nexus", "--publish", + "--bootstrap", &bs_addr_pub, + ]) + .output() + .expect("failed to run nexus --publish") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "nexus publish failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("nexus published"), + "expected 'nexus published' in stderr, got: {stderr}" + ); + + tokio::time::sleep(Duration::from_secs(2)).await; + + let lookup_home = tempfile::tempdir().unwrap(); + let lookup_home_str = lookup_home.path().to_str().unwrap().to_string(); + let bs_addr_lookup = bs_addr.clone(); + let pubkey_lookup = pubkey.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &lookup_home_str) + .args([ + "--no-default-config", + "chat", "nexus", "--lookup", &pubkey_lookup, + "--bootstrap", &bs_addr_lookup, + ]) + .output() + .expect("failed to run nexus --lookup") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr_lookup = String::from_utf8_lossy(&output.stderr); + assert!( + output.status.success(), + "nexus lookup failed: {stderr_lookup}" + ); + assert!( + stdout.contains("Name: NexusAlice"), + "expected 'Name: NexusAlice' in stdout, got: {stdout}\nstderr: {stderr_lookup}" + ); + }) + .await; + + assert!(result.is_ok(), "test_chat_nexus_publish_and_lookup timed out"); +} + +// ── Test: two instances exchange a message ────────────────────────────────────── + +#[tokio::test] +#[ignore = "requires multi-node DHT — local cluster cannot propagate announcements for discovery"] +async fn test_chat_message_exchange() { + let result = tokio::time::timeout(Duration::from_secs(90), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + + let alice_home = 
setup_profile_home("Alice"); + let bob_home = setup_profile_home("Bob"); + + let alice_home_str = alice_home.path().to_str().unwrap().to_string(); + let bob_home_str = bob_home.path().to_str().unwrap().to_string(); + + let bs_alice = bs_addr.clone(); + let mut alice = Command::new(bin_path()) + .env("HOME", &alice_home_str) + .args([ + "--no-default-config", + "chat", "join", "test-chat-exchange", + "--bootstrap", &bs_alice, + "--no-nexus", "--no-friends", + "--feed-lifetime", "60", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn Alice's chat join"); + + let alice_stderr = alice.stderr.take().unwrap(); + let alice_stderr_reader = BufReader::new(alice_stderr); + let alice_live = tokio::task::spawn_blocking(move || { + for line in alice_stderr_reader.lines() { + let line = line.unwrap_or_default(); + if line.contains("— live —") { + return true; + } + } + false + }); + + let alice_ready = tokio::time::timeout(Duration::from_secs(30), alice_live).await; + assert!( + matches!(alice_ready, Ok(Ok(true))), + "Alice did not reach live state" + ); + + let bs_bob = bs_addr.clone(); + let mut bob = Command::new(bin_path()) + .env("HOME", &bob_home_str) + .args([ + "--no-default-config", + "chat", "join", "test-chat-exchange", + "--bootstrap", &bs_bob, + "--no-nexus", "--no-friends", + "--feed-lifetime", "60", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn Bob's chat join"); + + let bob_stderr = bob.stderr.take().unwrap(); + let bob_stderr_reader = BufReader::new(bob_stderr); + let bob_live = tokio::task::spawn_blocking(move || { + for line in bob_stderr_reader.lines() { + let line = line.unwrap_or_default(); + if line.contains("— live —") { + return true; + } + } + false + }); + + let bob_ready = tokio::time::timeout(Duration::from_secs(30), bob_live).await; + assert!( + matches!(bob_ready, Ok(Ok(true))), + "Bob did not reach live 
state" + ); + + tokio::time::sleep(Duration::from_secs(3)).await; + + let alice_stdin = alice.stdin.as_mut().expect("no stdin for Alice"); + writeln!(alice_stdin, "hello from alice").expect("failed to write to Alice stdin"); + alice_stdin.flush().expect("failed to flush Alice stdin"); + + let bob_stdout = bob.stdout.take().unwrap(); + let bob_stdout_reader = BufReader::new(bob_stdout); + let received = tokio::task::spawn_blocking(move || { + for line in bob_stdout_reader.lines() { + let line = line.unwrap_or_default(); + if line.contains("hello from alice") { + return Some(line); + } + } + None + }); + + let msg_result = tokio::time::timeout(Duration::from_secs(45), received).await; + + kill_child(&mut alice); + kill_child(&mut bob); + + match msg_result { + Ok(Ok(Some(line))) => { + assert!( + line.contains("hello from alice"), + "received line should contain the message: {line}" + ); + assert!( + line.contains('[') && line.contains(']'), + "message should have display formatting: {line}" + ); + } + Ok(Ok(None)) => { + panic!("Bob's stdout closed without receiving Alice's message"); + } + Ok(Err(e)) => { + panic!("Bob's reader thread panicked: {e}"); + } + Err(_) => { + panic!("Timed out waiting for Bob to receive Alice's message"); + } + } + }) + .await; + + assert!(result.is_ok(), "test_chat_message_exchange timed out"); +} + +// ── Test: burst of rapid messages from one sender arrives in chain order ──────── + +#[tokio::test] +#[ignore = "requires multi-node DHT — local cluster cannot propagate announcements for discovery"] +async fn test_chat_burst_ordering() { + const BURST_SIZE: usize = 50; + + let result = tokio::time::timeout(Duration::from_secs(180), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + + let alice_home = setup_profile_home("Alice"); + let bob_home = setup_profile_home("Bob"); + let alice_home_str = alice_home.path().to_str().unwrap().to_string(); + let bob_home_str = 
bob_home.path().to_str().unwrap().to_string(); + + let bs_alice = bs_addr.clone(); + let mut alice = Command::new(bin_path()) + .env("HOME", &alice_home_str) + .args([ + "--no-default-config", + "chat", "join", "test-chat-burst", + "--bootstrap", &bs_alice, + "--no-nexus", "--no-friends", + "--feed-lifetime", "60", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn Alice"); + + let alice_stderr = BufReader::new(alice.stderr.take().unwrap()); + let alice_live = tokio::task::spawn_blocking(move || { + for line in alice_stderr.lines() { + if line.unwrap_or_default().contains("— live —") { + return true; + } + } + false + }); + assert!( + matches!( + tokio::time::timeout(Duration::from_secs(30), alice_live).await, + Ok(Ok(true)) + ), + "Alice did not reach live state" + ); + + let bs_bob = bs_addr.clone(); + let mut bob = Command::new(bin_path()) + .env("HOME", &bob_home_str) + .args([ + "--no-default-config", + "chat", "join", "test-chat-burst", + "--bootstrap", &bs_bob, + "--read-only", + "--no-nexus", "--no-friends", + "--feed-lifetime", "60", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn Bob"); + + let bob_stderr = BufReader::new(bob.stderr.take().unwrap()); + let bob_live = tokio::task::spawn_blocking(move || { + for line in bob_stderr.lines() { + if line.unwrap_or_default().contains("— live —") { + return true; + } + } + false + }); + assert!( + matches!( + tokio::time::timeout(Duration::from_secs(30), bob_live).await, + Ok(Ok(true)) + ), + "Bob did not reach live state" + ); + + tokio::time::sleep(Duration::from_secs(3)).await; + + let alice_stdin = alice.stdin.as_mut().expect("no stdin for Alice"); + for i in 1..=BURST_SIZE { + writeln!(alice_stdin, "burst-line-{i:03}") + .expect("failed to write burst line"); + } + alice_stdin.flush().expect("failed to flush Alice stdin"); + + let bob_stdout = 
BufReader::new(bob.stdout.take().unwrap()); + let collector = tokio::task::spawn_blocking(move || { + let mut seen: Vec = Vec::new(); + for line in bob_stdout.lines() { + let line = line.unwrap_or_default(); + if let Some(idx) = line + .rsplit_once("burst-line-") + .and_then(|(_, tail)| tail.get(..3)) + .and_then(|s| s.parse::().ok()) + { + seen.push(idx); + if seen.len() >= BURST_SIZE { + break; + } + } + } + seen + }); + + let collect_result = + tokio::time::timeout(Duration::from_secs(120), collector).await; + + kill_child(&mut alice); + kill_child(&mut bob); + + let seen = collect_result + .expect("timed out collecting burst lines") + .expect("collector thread panicked"); + + assert_eq!( + seen.len(), + BURST_SIZE, + "expected {BURST_SIZE} lines, got {}", + seen.len() + ); + let expected: Vec = (1..=BURST_SIZE).collect(); + assert_eq!(seen, expected, "messages arrived out of order: {seen:?}"); + }) + .await; + + assert!(result.is_ok(), "test_chat_burst_ordering timed out"); +} + +// ── Test: read-only mode does not post or announce ────────────────────────────── + +#[tokio::test] +async fn test_chat_read_only_no_post() { + let result = tokio::time::timeout(Duration::from_secs(30), async { + let (port, _bs) = spawn_bootstrap().await; + let bs_addr = format!("127.0.0.1:{port}"); + + let home_dir = setup_profile_home("ReadOnlyUser"); + let home = home_dir.path().to_str().unwrap().to_string(); + + let bs_clone = bs_addr.clone(); + let mut child = Command::new(bin_path()) + .env("HOME", &home) + .args([ + "--no-default-config", + "chat", "join", "readonly-test-channel", + "--bootstrap", &bs_clone, + "--read-only", + "--no-nexus", "--no-friends", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn read-only chat"); + + let stderr = child.stderr.take().unwrap(); + let stderr_reader = BufReader::new(stderr); + let live_check = tokio::task::spawn_blocking(move || { + for line in stderr_reader.lines() { 
+ let line = line.unwrap_or_default(); + if line.contains("— live —") { + return true; + } + } + false + }); + + let ready = tokio::time::timeout(Duration::from_secs(20), live_check).await; + assert!(matches!(ready, Ok(Ok(true))), "read-only instance did not reach live state"); + + if let Some(ref mut stdin) = child.stdin { + let _ = writeln!(stdin, "this should not post"); + let _ = stdin.flush(); + } + + tokio::time::sleep(Duration::from_secs(2)).await; + + kill_child(&mut child); + }) + .await; + + assert!(result.is_ok(), "test_chat_read_only_no_post timed out"); +} + +// ── Test: cannot delete default profile ───────────────────────────────────────── + +#[tokio::test] +async fn test_chat_cannot_delete_default_profile() { + let dir = tempfile::tempdir().unwrap(); + let home = dir.path().to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home) + .args(["chat", "profiles", "delete", "default"]) + .output() + .expect("failed to run profiles delete default") + }) + .await + .unwrap(); + + assert!(!output.status.success(), "should fail to delete default profile"); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("cannot delete the default profile"), + "expected error about default profile, got: {stderr}" + ); +} + +// ── Test: friends add and list ────────────────────────────────────────────────── + +#[tokio::test] +async fn test_chat_friends_add_list() { + let home_dir = setup_profile_home("FriendlyUser"); + let home = home_dir.path().to_str().unwrap().to_string(); + + let fake_pubkey = "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"; + let home_add = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_add) + .args(["chat", "friends", "add", fake_pubkey, "--alias", "TestBuddy"]) + .output() + .expect("failed to run friends add") + }) + .await + .unwrap(); + + let stdout = 
String::from_utf8_lossy(&output.stdout); + assert!( + output.status.success(), + "friends add failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(stdout.contains("Added friend"), "got: {stdout}"); + + let home_list = home.clone(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &home_list) + .args(["chat", "friends", "list"]) + .output() + .expect("failed to run friends list") + }) + .await + .unwrap(); + + let stdout = String::from_utf8_lossy(&output.stdout); + assert!(output.status.success()); + assert!( + stdout.contains("TestBuddy"), + "friends list should show alias 'TestBuddy', got: {stdout}" + ); +} + +// ── Test: piped stdin auto-detects line mode without --line-mode ──────────────── +// +// Regression for the case where `cat msgs | peeroxide chat join …` from a +// shell would crash because the interactive TUI was selected (stdout was a +// TTY) but stdin was a pipe — crossterm's `EventStream` cannot read events +// from a non-TTY stdin. +// +// In a cargo test subprocess both stdout and stdin are pipes, so this test +// can't fully reproduce the "TTY stdout + pipe stdin" shell scenario without +// pulling in a pty crate. What it DOES verify: +// +// - Spawning the binary with piped stdin and no `--line-mode` flag does +// not crash (clean exit status 0). +// - Lines piped to stdin are consumed; `/quit` triggers graceful shutdown. +// +// That guards against future regressions in the line-mode path itself and in +// the stdin-handling code. The TTY-stdout-plus-pipe-stdin shell scenario +// should still be smoke-tested manually after touching `make_ui`. 
+#[tokio::test] +async fn test_chat_join_piped_stdin_auto_line_mode() { + let result = tokio::time::timeout(Duration::from_secs(30), async { + let (port, _bs) = spawn_bootstrap().await; + let bs_addr = format!("127.0.0.1:{port}"); + + let home_dir = setup_profile_home("PipedStdinUser"); + let home = home_dir.path().to_str().unwrap().to_string(); + + let bs_clone = bs_addr.clone(); + let mut child = Command::new(bin_path()) + .env("HOME", &home) + .args([ + "--no-default-config", + "chat", + "join", + "piped-stdin-test", + "--bootstrap", + &bs_clone, + "--read-only", + "--no-nexus", + "--no-friends", + // Deliberately NO --line-mode — proving auto-detection works. + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn chat join with piped stdin"); + + // Wait until the chat is live before feeding stdin so we don't race + // the startup. Crucially, the stderr drain must KEEP running for the + // lifetime of the child — if we drop the BufReader after spotting + // "— live —" the child's next stderr write hits EPIPE and the + // binary panics. We use a oneshot to surface the live signal while + // a background thread silently drains the remainder. + let stderr = child.stderr.take().unwrap(); + let (live_tx, live_rx) = tokio::sync::oneshot::channel::(); + let _stderr_drain = std::thread::spawn(move || { + let stderr_reader = BufReader::new(stderr); + let mut live_tx = Some(live_tx); + for line in stderr_reader.lines() { + let line = line.unwrap_or_default(); + if line.contains("— live —") { + if let Some(tx) = live_tx.take() { + let _ = tx.send(true); + } + } + // After signalling live, keep reading & discarding so the + // child's stderr pipe doesn't fill or close. + } + if let Some(tx) = live_tx.take() { + // Stream ended without seeing "— live —". 
+ let _ = tx.send(false); + } + }); + + let saw_live = match tokio::time::timeout(Duration::from_secs(20), live_rx).await { + Ok(Ok(b)) => b, + _ => false, + }; + assert!( + saw_live, + "piped-stdin instance did not reach live state — auto line-mode may have failed" + ); + + // Also drain stdout in the background to keep that pipe healthy too. + let stdout_handle = child.stdout.take().unwrap(); + let _stdout_drain = std::thread::spawn(move || { + let r = BufReader::new(stdout_handle); + for _line in r.lines().map_while(Result::ok) {} + }); + + // Feed `/quit` and expect a graceful exit. + { + let mut stdin = child.stdin.take().expect("child has no stdin"); + writeln!(stdin, "/quit").expect("failed to write /quit to stdin"); + stdin.flush().expect("failed to flush stdin"); + // Drop stdin to signal EOF as well — line-mode's default + // behaviour also exits cleanly on stdin EOF. + } + + // Wait for graceful exit. If the binary crashed (panic / abort) + // we'd see a non-zero status; if it hung we'd time out. 
+ let status = tokio::task::spawn_blocking(move || child.wait()) + .await + .expect("join wait task") + .expect("child wait"); + assert!( + status.success(), + "piped-stdin chat exited non-zero: {status:?}" + ); + }) + .await; + + assert!( + result.is_ok(), + "test_chat_join_piped_stdin_auto_line_mode timed out — binary likely hung instead of exiting on /quit" + ); +} diff --git a/peeroxide-cli/tests/live_commands.rs b/peeroxide-cli/tests/live_commands.rs index 82e34ec..ab4ae76 100644 --- a/peeroxide-cli/tests/live_commands.rs +++ b/peeroxide-cli/tests/live_commands.rs @@ -106,23 +106,23 @@ async fn test_live_announce_then_lookup() { } #[tokio::test] -#[ignore = "requires internet — deaddrop roundtrip on public HyperDHT"] -async fn test_live_deaddrop_roundtrip() { +#[ignore = "requires internet — dd roundtrip on public HyperDHT"] +async fn test_live_dd_roundtrip() { let result = tokio::time::timeout(Duration::from_secs(60), async { let dir = tempfile::tempdir().unwrap(); let msg_path = dir.path().join("live-msg.txt"); - std::fs::write(&msg_path, b"live deaddrop test message").unwrap(); + std::fs::write(&msg_path, b"live dd test message").unwrap(); let msg_path_str = msg_path.to_str().unwrap().to_string(); let mut leave_child = Command::new(bin_path()) .args([ "--no-default-config", "--public", - "deaddrop", "leave", &msg_path_str, "--ttl", "45", + "dd", "put", &msg_path_str, "--ttl", "45", ]) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .expect("failed to spawn deaddrop leave"); + .expect("failed to spawn dd put"); let stdout = leave_child.stdout.take().unwrap(); let pickup_key = tokio::task::spawn_blocking(move || { @@ -139,7 +139,7 @@ async fn test_live_deaddrop_roundtrip() { .await .unwrap(); - let pickup_key = pickup_key.expect("deaddrop leave did not output pickup key"); + let pickup_key = pickup_key.expect("dd put did not output pickup key"); tokio::time::sleep(Duration::from_secs(3)).await; @@ -147,12 +147,12 @@ async fn 
test_live_deaddrop_roundtrip() { Command::new(bin_path()) .args([ "--no-default-config", "--public", - "deaddrop", "pickup", &pickup_key, + "dd", "get", &pickup_key, "--timeout", "30", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup") + .expect("failed to run dd get") }) .await .unwrap(); @@ -164,17 +164,17 @@ async fn test_live_deaddrop_roundtrip() { assert!( pickup_output.status.success(), - "live deaddrop pickup failed: {pickup_stderr}" + "live dd get failed: {pickup_stderr}" ); assert_eq!( - pickup_stdout.as_ref(), "live deaddrop test message", - "pickup content mismatch.\nstdout: {pickup_stdout}\nstderr: {pickup_stderr}" + pickup_stdout.as_ref(), "live dd test message", + "get content mismatch.\nstdout: {pickup_stdout}\nstderr: {pickup_stderr}" ); }) .await; - assert!(result.is_ok(), "test_live_deaddrop_roundtrip timed out after 60s"); + assert!(result.is_ok(), "test_live_dd_roundtrip timed out after 60s"); } #[tokio::test] diff --git a/peeroxide-cli/tests/local_commands.rs b/peeroxide-cli/tests/local_commands.rs index 235a6f6..3ae7ad3 100644 --- a/peeroxide-cli/tests/local_commands.rs +++ b/peeroxide-cli/tests/local_commands.rs @@ -167,7 +167,7 @@ async fn test_announce_then_lookup() { let mut announce = Command::new(bin_path()) .args([ - "--no-default-config", "--public", + "--no-default-config", "--no-public", "announce", "local-test-announce-lookup", "--bootstrap", &bs_addr, "--duration", "20", @@ -183,7 +183,7 @@ async fn test_announce_then_lookup() { let output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", + "--no-default-config", "--no-public", "lookup", "local-test-announce-lookup", "--bootstrap", &bs_addr_clone, "--json", @@ -253,10 +253,10 @@ async fn test_config_file_loading() { assert!(result.is_ok(), "test_config_file_loading timed out"); } -// ── Test: deaddrop leave then pickup (local DHT) ──────────────────────────── +// ── Test: dd put then get (local DHT) 
─────────────────────────────────────── #[tokio::test] -async fn test_deaddrop_local_roundtrip() { +async fn test_dd_local_roundtrip() { let result = tokio::time::timeout(Duration::from_secs(45), async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); @@ -264,22 +264,22 @@ async fn test_deaddrop_local_roundtrip() { let input_path = dir.path().join("input.txt"); let output_path = dir.path().join("output.txt"); - let msg = b"local deaddrop test payload"; + let msg = b"local dd test payload"; std::fs::write(&input_path, msg).unwrap(); let input_path_str = input_path.to_str().unwrap().to_string(); let bs_addr_clone = bs_addr.clone(); let mut leave = Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "leave", &input_path_str, + "--no-default-config", "--no-public", + "dd", "put", &input_path_str, "--bootstrap", &bs_addr_clone, "--ttl", "35", ]) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .expect("failed to spawn deaddrop leave"); + .expect("failed to spawn dd put"); let stdout = leave.stdout.take().unwrap(); let pickup_key = tokio::task::spawn_blocking(move || { @@ -296,7 +296,7 @@ async fn test_deaddrop_local_roundtrip() { .await .unwrap(); - let pickup_key = pickup_key.expect("deaddrop leave did not output a pickup key"); + let pickup_key = pickup_key.expect("dd put did not output a pickup key"); tokio::time::sleep(Duration::from_secs(5)).await; @@ -305,15 +305,15 @@ async fn test_deaddrop_local_roundtrip() { let pickup_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "pickup", &pickup_key, + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, "--bootstrap", &bs_addr_clone2, "--output", &output_path_str, "--timeout", "20", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup") + .expect("failed to run dd get") }) .await .unwrap(); @@ -323,7 +323,7 @@ async fn 
test_deaddrop_local_roundtrip() { let stderr = String::from_utf8_lossy(&pickup_output.stderr); assert!( pickup_output.status.success(), - "deaddrop pickup failed: {stderr}" + "dd get failed: {stderr}" ); let received = std::fs::read(&output_path).expect("output file not found"); @@ -331,7 +331,31 @@ async fn test_deaddrop_local_roundtrip() { }) .await; - assert!(result.is_ok(), "test_deaddrop_local_roundtrip timed out"); + assert!(result.is_ok(), "test_dd_local_roundtrip timed out"); +} + +#[tokio::test] +async fn test_dd_get_json_requires_output() { + let result = tokio::time::timeout(Duration::from_secs(10), async { + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["dd", "get", "--json", "--passphrase", "x"]) + .output() + .expect("failed to run dd get --json") + }) + .await + .unwrap(); + + assert!(!output.status.success(), "command unexpectedly succeeded"); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("--output") || stderr.contains("required"), + "expected validation message mentioning --output, got: {stderr}" + ); + }) + .await; + + assert!(result.is_ok(), "test_dd_get_json_requires_output timed out"); } // ── Test: --help works for all subcommands ────────────────────────────────── @@ -339,7 +363,7 @@ async fn test_deaddrop_local_roundtrip() { #[tokio::test] async fn test_help_all_subcommands() { let result = tokio::time::timeout(Duration::from_secs(10), async { - let subcommands = ["node", "lookup", "announce", "ping", "cp", "deaddrop"]; + let subcommands = ["init", "node", "lookup", "announce", "ping", "cp", "dd"]; for subcmd in subcommands { let subcmd_owned = subcmd.to_string(); @@ -365,50 +389,542 @@ async fn test_help_all_subcommands() { ); } }) - .await; + .await; + + assert!(result.is_ok(), "test_help_all_subcommands timed out"); +} + +// ── Test: init creates config file ────────────────────────────────────────── + +#[tokio::test] +async fn test_init_creates_config() { + 
let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("peeroxide").join("config.toml"); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["--config", &config_str, "init"]) + .output() + .expect("failed to run init") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + assert!(config_path.exists(), "config file not created"); + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!(content.contains("[network]"), "config missing [network] section"); + assert!(content.contains("[node]"), "config missing [node] section"); +} + +// ── Test: init with --public sets public in config ────────────────────────── + +#[tokio::test] +async fn test_init_public_flag() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["--config", &config_str, "init", "--public"]) + .output() + .expect("failed to run init --public") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --public failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!( + content.contains("public = true"), + "config should contain 'public = true', got:\n{content}" + ); +} + +// ── Test: init existing config without --force is no-op ───────────────────── + +#[tokio::test] +async fn test_init_existing_no_force() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "[network]\npublic = true\n").unwrap(); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + 
Command::new(bin_path()) + .args(["--config", &config_str, "init"]) + .output() + .expect("failed to run init (existing)") + }) + .await + .unwrap(); + + assert!(output.status.success()); + let stdout = String::from_utf8_lossy(&output.stdout); + assert!( + stdout.contains("config already exists"), + "expected 'config already exists' message, got: {stdout}" + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert_eq!(content, "[network]\npublic = true\n", "config should not be modified"); +} + +// ── Test: init --force overwrites existing config ─────────────────────────── + +#[tokio::test] +async fn test_init_force_overwrites() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "old content").unwrap(); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["--config", &config_str, "init", "--force"]) + .output() + .expect("failed to run init --force") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --force failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!(content.contains("[network]"), "config should be regenerated"); + assert_ne!(content, "old content", "config should be overwritten"); +} + +// ── Test: init --update patches fields ────────────────────────────────────── + +#[tokio::test] +async fn test_init_update_patches() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "[network]\n# public = false\n\n[node]\nport = 49737\n").unwrap(); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["--config", &config_str, "init", "--update", "--public"]) + .output() + .expect("failed to run 
init --update") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --update failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!( + content.contains("public = true"), + "config should have public = true after update, got:\n{content}" + ); + assert!( + content.contains("port = 49737"), + "config should preserve existing port setting, got:\n{content}" + ); +} + +// ── Test: init --update on nonexistent config errors ──────────────────────── + +#[tokio::test] +async fn test_init_update_no_config_errors() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("nonexistent.toml"); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["--config", &config_str, "init", "--update", "--public"]) + .output() + .expect("failed to run init --update (nonexistent)") + }) + .await + .unwrap(); + + assert!( + !output.status.success(), + "init --update on nonexistent config should fail" + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("no config to update"), + "expected 'no config to update' error, got: {stderr}" + ); +} + +#[tokio::test] +async fn test_init_update_no_flags_errors() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "[network]\npublic = false\n").unwrap(); + + let config_str = config_path.to_str().unwrap().to_string(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--config", &config_str, "--update"]) + .output() + .expect("failed to run init --update") + }) + .await + .unwrap(); + + assert!( + !output.status.success(), + "init --update with no flags should fail (exit non-zero)" + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + 
stderr.contains("nothing to update"), + "expected 'nothing to update' error, got: {stderr}" + ); +} + +#[tokio::test] +async fn test_init_update_preserves_trailing_comments() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write( + &config_path, + "[network]\npublic = false # important note\nbootstrap = [\"keep:1\"] # node list\n", + ) + .unwrap(); + + let config_str = config_path.to_str().unwrap().to_string(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--config", &config_str, "--update", "--public"]) + .output() + .expect("failed to run init --update") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --update failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!( + content.contains("# important note"), + "trailing comment on updated key should be preserved, got: {content}" + ); + assert!( + content.contains("# node list"), + "trailing comment on untouched key should be preserved, got: {content}" + ); + assert!( + content.contains("true"), + "public should be updated to true, got: {content}" + ); + assert!( + content.contains("keep:1"), + "bootstrap should be untouched, got: {content}" + ); +} + +// ── Test: init --man-pages generates manpages ─────────────────────────────── + +#[tokio::test] +async fn test_init_man_pages() { + let dir = tempfile::tempdir().unwrap(); + let dir_str = dir.path().to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--man-pages", &dir_str]) + .output() + .expect("failed to run init --man-pages") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --man-pages failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let man1_dir = dir.path().join("man1"); + assert!(man1_dir.exists(), "man1/ subdirectory not 
created"); + + let expected_pages = [ + "peeroxide.1", + "peeroxide-init.1", + "peeroxide-node.1", + "peeroxide-lookup.1", + "peeroxide-announce.1", + "peeroxide-ping.1", + "peeroxide-cp.1", + "peeroxide-dd.1", + ]; + + for page in &expected_pages { + let path = man1_dir.join(page); + assert!(path.exists(), "missing manpage: {page}"); + let content = std::fs::read(&path).unwrap(); + assert!(!content.is_empty(), "empty manpage: {page}"); + } +} + +// ── Test: init --man-pages removes stale pages ───────────────────────────── + +#[tokio::test] +async fn test_init_man_pages_removes_stale() { + let dir = tempfile::tempdir().unwrap(); + let man1_dir = dir.path().join("man1"); + std::fs::create_dir_all(&man1_dir).unwrap(); + + std::fs::write(man1_dir.join("peeroxide-deaddrop.1"), b"stale").unwrap(); + std::fs::write(man1_dir.join("peeroxide-config.1"), b"stale").unwrap(); + std::fs::write(man1_dir.join("unrelated.1"), b"keep").unwrap(); + + let dir_str = dir.path().to_str().unwrap().to_string(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--man-pages", &dir_str]) + .output() + .expect("failed to run init --man-pages") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init --man-pages failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + assert!( + !man1_dir.join("peeroxide-deaddrop.1").exists(), + "stale peeroxide-deaddrop.1 should have been removed" + ); + assert!( + !man1_dir.join("peeroxide-config.1").exists(), + "stale peeroxide-config.1 should have been removed" + ); + assert!( + man1_dir.join("unrelated.1").exists(), + "non-peeroxide files should be preserved" + ); + assert!( + man1_dir.join("peeroxide-dd.1").exists(), + "current peeroxide-dd.1 should exist" + ); +} + +// ── Test: init --man-pages conflicts with config flags ────────────────────── + +#[tokio::test] +async fn test_init_man_pages_conflicts_with_force() { + let output = tokio::task::spawn_blocking(|| { + 
Command::new(bin_path()) + .args(["init", "--man-pages", "/tmp", "--force"]) + .output() + .expect("failed to run init") + }) + .await + .unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("cannot be used with") || stderr.contains("conflict"), + "expected conflict error, got: {stderr}" + ); +} + +#[tokio::test] +async fn test_init_man_pages_conflicts_with_update() { + let output = tokio::task::spawn_blocking(|| { + Command::new(bin_path()) + .args(["init", "--man-pages", "/tmp", "--update"]) + .output() + .expect("failed to run init") + }) + .await + .unwrap(); + + assert!(!output.status.success()); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("cannot be used with") || stderr.contains("conflict"), + "expected conflict error, got: {stderr}" + ); +} + +// ── Test: init respects PEEROXIDE_CONFIG env ──────────────────────────────── + +#[tokio::test] +async fn test_init_respects_peeroxide_config_env() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("custom.toml"); + let config_str = config_path.to_str().unwrap().to_string(); + + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("PEEROXIDE_CONFIG", &config_str) + .args(["init"]) + .output() + .expect("failed to run init") + }) + .await + .unwrap(); + + assert!( + output.status.success(), + "init with PEEROXIDE_CONFIG failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + assert!(config_path.exists(), "config not created at PEEROXIDE_CONFIG path"); +} + +// ── Test: init --man-pages default path (no argument) ─────────────────────── + +#[tokio::test] +async fn test_init_man_pages_default_path() { + let dir = tempfile::tempdir().unwrap(); + let dir_str = dir.path().to_str().unwrap().to_string(); + + // When --man-pages is given WITH a path, it uses that path (already tested). 
+ // This test verifies the flag accepts no value (uses default_missing_value). + // We can't test writing to /usr/local/share/man/ in CI, so we verify the + // flag parses without a value by checking it doesn't fail with "missing value". + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .env("HOME", &dir_str) + .args(["init", "--man-pages"]) + .output() + .expect("failed to run init --man-pages") + }) + .await + .unwrap(); + + // It will likely fail due to permissions on /usr/local/share/man/, + // but it should NOT fail with a clap parsing error. + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + !stderr.contains("error: a value is required"), + "--man-pages should accept zero arguments, got: {stderr}" + ); +} + +// ── Test: init --update preserves inline table fields ──────────────────────── + +#[tokio::test] +async fn test_init_update_preserves_inline_table() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write( + &config_path, + r#"network = { public = false, bootstrap = ["keep:1234"] }"#, + ) + .unwrap(); + + let config_str = config_path.to_str().unwrap().to_string(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--config", &config_str, "--update", "--public"]) + .output() + .expect("failed to run init --update") + }) + .await + .unwrap(); - assert!(result.is_ok(), "test_help_all_subcommands timed out"); + assert!( + output.status.success(), + "init --update failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let content = std::fs::read_to_string(&config_path).unwrap(); + assert!( + content.contains("keep:1234"), + "bootstrap should be preserved in inline table, got: {content}" + ); + assert!( + content.contains("true"), + "public should be set to true, got: {content}" + ); } -// ── Test: --generate-man produces manpages ────────────────────────────────── +// ── Test: init rejects 
directory as config path ───────────────────────────── #[tokio::test] -async fn test_generate_man() { +async fn test_init_rejects_directory_path() { let dir = tempfile::tempdir().unwrap(); let dir_str = dir.path().to_str().unwrap().to_string(); let output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) - .args(["--generate-man", &dir_str]) + .args(["init", "--config", &dir_str]) .output() - .expect("failed to run --generate-man") + .expect("failed to run init") }) .await .unwrap(); assert!( - output.status.success(), - "--generate-man failed: {}", - String::from_utf8_lossy(&output.stderr) + !output.status.success(), + "init should fail when --config points to a directory" + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("is a directory"), + "error should mention directory, got: {stderr}" ); +} - let expected_pages = [ - "peeroxide.1", - "peeroxide-node.1", - "peeroxide-lookup.1", - "peeroxide-announce.1", - "peeroxide-ping.1", - "peeroxide-cp.1", - "peeroxide-config.1", - "peeroxide-deaddrop.1", - ]; +// ── Test: init --update rejects non-table network value ───────────────────── - for page in &expected_pages { - let path = dir.path().join(page); - assert!(path.exists(), "missing manpage: {page}"); - let content = std::fs::read(&path).unwrap(); - assert!(!content.is_empty(), "empty manpage: {page}"); - } +#[tokio::test] +async fn test_init_update_rejects_non_table_network() { + let dir = tempfile::tempdir().unwrap(); + let config_path = dir.path().join("config.toml"); + std::fs::write(&config_path, "network = \"oops\"\n").unwrap(); + + let config_str = config_path.to_str().unwrap().to_string(); + let output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args(["init", "--config", &config_str, "--update", "--public"]) + .output() + .expect("failed to run init --update") + }) + .await + .unwrap(); + + assert!( + !output.status.success(), + "init --update should fail on non-table network 
value" + ); + let stderr = String::from_utf8_lossy(&output.stderr); + assert!( + stderr.contains("not a table"), + "error should mention non-table, got: {stderr}" + ); } // ── Test: global --help ───────────────────────────────────────────────────── @@ -426,12 +942,13 @@ async fn test_global_help() { assert!(output.status.success()); let stdout = String::from_utf8_lossy(&output.stdout); + assert!(stdout.contains("init")); assert!(stdout.contains("node")); assert!(stdout.contains("lookup")); assert!(stdout.contains("announce")); assert!(stdout.contains("ping")); assert!(stdout.contains("cp")); - assert!(stdout.contains("deaddrop")); + assert!(stdout.contains("dd")); } // ── Test: ping direct with --json produces valid NDJSON ───────────────────── @@ -630,12 +1147,12 @@ async fn test_ping_by_topic() { // // LIMITATION: On same-host, `should_direct_connect` always returns true // (same_host=true), so ALL these tests take the direct-connect path regardless -// of --public/--firewalled flags. They do NOT verify topology-specific +// of --public/--no-public flags. They do NOT verify topology-specific // relay/holepunch behavior (T3/T5/T6). Topology-specific connection path // decisions are covered by the unit-level 3×6 scenario matrix in cmd/mod.rs. // -// The flag combinations (--public, --firewalled, default) verify that the -// CLI correctly passes firewall config through to the swarm without causing +// The flag combinations (--public, --no-public, default) verify that the +// CLI correctly passes bootstrap config through to the swarm without causing // connection failures. Actual firewall-differentiated behavior requires // multi-host or network-namespace testing. 
@@ -659,7 +1176,7 @@ async fn test_cp_local_roundtrip() { let src_str = src_path.to_str().unwrap().to_string(); let mut sender = Command::new(bin_path()) .args([ - "--no-default-config", "--public", + "--no-default-config", "--no-public", "cp", "send", &src_str, "--bootstrap", &bs_for_send, ]) @@ -696,7 +1213,7 @@ async fn test_cp_local_roundtrip() { let recv_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", + "--no-default-config", "--no-public", "cp", "recv", &topic, &dest_str, "--bootstrap", &bs_for_recv, @@ -733,13 +1250,13 @@ async fn test_cp_local_roundtrip() { assert!(result.is_ok(), "test_cp_local_roundtrip timed out"); } -async fn cp_roundtrip_with_flags(sender_public: bool, receiver_public: bool, test_name: &str) { +async fn cp_roundtrip_with_flags(sender_no_public: bool, receiver_no_public: bool, test_name: &str) { let (port, _bs) = spawn_bootstrap().await; let bs_addr = format!("127.0.0.1:{port}"); let dir = tempfile::tempdir().unwrap(); let src_path = dir.path().join("testfile.txt"); - let payload = b"firewall scenario test payload\n"; + let payload = b"bootstrap scenario test payload\n"; std::fs::write(&src_path, payload).unwrap(); let dest_path = dir.path().join("received.txt"); @@ -748,8 +1265,8 @@ async fn cp_roundtrip_with_flags(sender_public: bool, receiver_public: bool, tes let src_str = src_path.to_str().unwrap().to_string(); let mut send_args: Vec<&str> = vec!["--no-default-config"]; - if sender_public { - send_args.push("--public"); + if sender_no_public { + send_args.push("--no-public"); } send_args.extend(["cp", "send", &src_str, "--bootstrap"]); @@ -786,8 +1303,8 @@ async fn cp_roundtrip_with_flags(sender_public: bool, receiver_public: bool, tes let tn2 = test_name.to_string(); let mut recv_args: Vec = vec!["--no-default-config".to_string()]; - if receiver_public { - recv_args.push("--public".to_string()); + if receiver_no_public { + 
recv_args.push("--no-public".to_string()); } recv_args.extend([ "cp".to_string(), @@ -833,7 +1350,7 @@ async fn cp_roundtrip_with_flags(sender_public: bool, receiver_public: bool, tes #[tokio::test] async fn test_cp_sender_default_receiver_public() { let result = tokio::time::timeout(Duration::from_secs(45), async { - cp_roundtrip_with_flags(false, true, "sender_default_receiver_public").await; + cp_roundtrip_with_flags(false, true, "sender_default_receiver_no_public").await; }) .await; assert!(result.is_ok(), "test_cp_sender_default_receiver_public timed out"); @@ -842,7 +1359,7 @@ async fn test_cp_sender_default_receiver_public() { #[tokio::test] async fn test_cp_sender_public_receiver_default() { let result = tokio::time::timeout(Duration::from_secs(45), async { - cp_roundtrip_with_flags(true, false, "sender_public_receiver_default").await; + cp_roundtrip_with_flags(true, false, "sender_no_public_receiver_default").await; }) .await; assert!(result.is_ok(), "test_cp_sender_public_receiver_default timed out"); @@ -860,14 +1377,14 @@ async fn test_cp_both_default_same_host() { // ── Isolated mode: no bootstrap, graceful failure ──────────────────────────── #[tokio::test] -async fn test_cp_firewalled_flag_roundtrip() { +async fn test_cp_no_public_flag_roundtrip() { let result = tokio::time::timeout(Duration::from_secs(45), async { let (port, _bs) = spawn_bootstrap().await; let bs_addr = format!("127.0.0.1:{port}"); let dir = tempfile::tempdir().unwrap(); let src_path = dir.path().join("testfile.txt"); - let payload = b"firewalled flag e2e test\n"; + let payload = b"no-public flag e2e test\n"; std::fs::write(&src_path, payload).unwrap(); let dest_path = dir.path().join("received.txt"); @@ -877,14 +1394,14 @@ async fn test_cp_firewalled_flag_roundtrip() { let mut sender = Command::new(bin_path()) .args([ - "--no-default-config", "--firewalled", + "--no-default-config", "--no-public", "cp", "send", &src_str, "--bootstrap", &bs_for_send, ]) .stdout(Stdio::piped()) 
.stderr(Stdio::piped()) .spawn() - .expect("failed to spawn cp send with --firewalled"); + .expect("failed to spawn cp send with --no-public"); let stdout = sender.stdout.take().unwrap(); let topic = tokio::task::spawn_blocking(move || { @@ -901,7 +1418,7 @@ async fn test_cp_firewalled_flag_roundtrip() { .await .unwrap(); - let topic = topic.expect("cp send --firewalled did not output topic"); + let topic = topic.expect("cp send --no-public did not output topic"); tokio::time::sleep(Duration::from_secs(5)).await; @@ -910,7 +1427,7 @@ async fn test_cp_firewalled_flag_roundtrip() { let recv_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--firewalled", + "--no-default-config", "--no-public", "cp", "recv", &topic, &dest_str, "--bootstrap", &bs_for_recv, @@ -919,7 +1436,7 @@ async fn test_cp_firewalled_flag_roundtrip() { "--timeout", "30", ]) .output() - .expect("failed to run cp recv with --firewalled") + .expect("failed to run cp recv with --no-public") }) .await .unwrap(); @@ -929,18 +1446,18 @@ async fn test_cp_firewalled_flag_roundtrip() { let stderr = String::from_utf8_lossy(&recv_output.stderr); assert!( recv_output.status.success(), - "cp recv --firewalled failed: {stderr}" + "cp recv --no-public failed: {stderr}" ); let received = std::fs::read(&dest_path) .unwrap_or_else(|_| panic!("output file not found\nstderr: {stderr}")); assert_eq!( received, payload, - "file content mismatch with --firewalled flag.\nstderr: {stderr}" + "file content mismatch with --no-public flag.\nstderr: {stderr}" ); }) .await; - assert!(result.is_ok(), "test_cp_firewalled_flag_roundtrip timed out"); + assert!(result.is_ok(), "test_cp_no_public_flag_roundtrip timed out"); } #[tokio::test] @@ -981,7 +1498,7 @@ async fn test_cp_isolated_no_bootstrap_times_out() { } #[tokio::test] -async fn test_deaddrop_passphrase_roundtrip() { +async fn test_dd_passphrase_roundtrip() { let result = tokio::time::timeout(Duration::from_secs(60), 
async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); @@ -989,7 +1506,7 @@ async fn test_deaddrop_passphrase_roundtrip() { let input_path = dir.path().join("input.txt"); let output_path = dir.path().join("output.txt"); - let msg = b"passphrase deaddrop roundtrip payload"; + let msg = b"passphrase dd roundtrip payload"; std::fs::write(&input_path, msg).unwrap(); let input_path_str = input_path.to_str().unwrap().to_string(); @@ -998,11 +1515,11 @@ async fn test_deaddrop_passphrase_roundtrip() { let mut leave_cmd = Command::new(bin_path()); leave_cmd .args([ - "--no-default-config", "--public", - "deaddrop", "leave", &input_path_str, + "--no-default-config", "--no-public", + "dd", "put", &input_path_str, "--bootstrap", &bs_addr_clone, "--ttl", "40", - "--passphrase", "deaddrop-test-pass-abc", + "--passphrase", "dd-test-pass-abc", ]) .stdout(Stdio::piped()) .stderr(Stdio::piped()); @@ -1014,7 +1531,7 @@ async fn test_deaddrop_passphrase_roundtrip() { unsafe { leave_cmd.pre_exec(|| { setsid(); Ok(()) }); } } - let mut leave = leave_cmd.spawn().expect("failed to spawn deaddrop leave --passphrase"); + let mut leave = leave_cmd.spawn().expect("failed to spawn dd put --passphrase"); let stdout = leave.stdout.take().unwrap(); let pickup_key = tokio::task::spawn_blocking(move || { @@ -1031,7 +1548,7 @@ async fn test_deaddrop_passphrase_roundtrip() { .await .unwrap(); - let pickup_key = pickup_key.expect("deaddrop leave --passphrase did not output pickup key"); + let pickup_key = pickup_key.expect("dd put --passphrase did not output pickup key"); tokio::time::sleep(Duration::from_secs(5)).await; @@ -1040,15 +1557,15 @@ async fn test_deaddrop_passphrase_roundtrip() { let pickup_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "pickup", &pickup_key, + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, "--bootstrap", 
&bs_addr_clone2, "--output", &output_path_str, "--timeout", "20", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup after passphrase leave") + .expect("failed to run dd get after passphrase put") }) .await .unwrap(); @@ -1058,19 +1575,19 @@ async fn test_deaddrop_passphrase_roundtrip() { let stderr = String::from_utf8_lossy(&pickup_output.stderr); assert!( pickup_output.status.success(), - "pickup after passphrase leave failed: {stderr}" + "get after passphrase put failed: {stderr}" ); let received = std::fs::read(&output_path).expect("output file not found"); - assert_eq!(received, msg, "payload mismatch after passphrase leave.\nstderr: {stderr}"); + assert_eq!(received, msg, "payload mismatch after passphrase put.\nstderr: {stderr}"); }) .await; - assert!(result.is_ok(), "test_deaddrop_passphrase_roundtrip timed out"); + assert!(result.is_ok(), "test_dd_passphrase_roundtrip timed out"); } #[tokio::test] -async fn test_deaddrop_large_payload() { +async fn test_dd_large_payload() { let result = tokio::time::timeout(Duration::from_secs(60), async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); @@ -1085,15 +1602,15 @@ async fn test_deaddrop_large_payload() { let bs_addr_clone = bs_addr.clone(); let mut leave = Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "leave", &input_path_str, + "--no-default-config", "--no-public", + "dd", "put", &input_path_str, "--bootstrap", &bs_addr_clone, "--ttl", "40", ]) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .expect("failed to spawn deaddrop leave (large payload)"); + .expect("failed to spawn dd put (large payload)"); let stdout = leave.stdout.take().unwrap(); let pickup_key = tokio::task::spawn_blocking(move || { @@ -1110,7 +1627,7 @@ async fn test_deaddrop_large_payload() { .await .unwrap(); - let pickup_key = pickup_key.expect("deaddrop leave (large) did not output pickup key"); + let pickup_key = 
pickup_key.expect("dd put (large) did not output pickup key"); tokio::time::sleep(Duration::from_secs(5)).await; @@ -1119,15 +1636,15 @@ async fn test_deaddrop_large_payload() { let pickup_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "pickup", &pickup_key, + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, "--bootstrap", &bs_addr_clone2, "--output", &output_path_str, "--timeout", "25", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup (large payload)") + .expect("failed to run dd get (large payload)") }) .await .unwrap(); @@ -1137,7 +1654,7 @@ async fn test_deaddrop_large_payload() { let stderr = String::from_utf8_lossy(&pickup_output.stderr); assert!( pickup_output.status.success(), - "deaddrop pickup (large payload) failed: {stderr}" + "dd get (large payload) failed: {stderr}" ); let received = std::fs::read(&output_path).expect("output file not found (large payload)"); @@ -1150,22 +1667,22 @@ async fn test_deaddrop_large_payload() { }) .await; - assert!(result.is_ok(), "test_deaddrop_large_payload timed out"); + assert!(result.is_ok(), "test_dd_large_payload timed out"); } #[tokio::test] -async fn test_deaddrop_stdin_stdout() { +async fn test_dd_stdin_stdout() { let result = tokio::time::timeout(Duration::from_secs(60), async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); - let msg = b"stdin-to-stdout deaddrop test payload"; + let msg = b"stdin-to-stdout dd test payload"; let bs_addr_clone = bs_addr.clone(); let mut leave = Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "leave", "-", + "--no-default-config", "--no-public", + "dd", "put", "-", "--bootstrap", &bs_addr_clone, "--ttl", "40", ]) @@ -1173,7 +1690,7 @@ async fn test_deaddrop_stdin_stdout() { .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() - .expect("failed to spawn deaddrop leave 
(stdin)"); + .expect("failed to spawn dd put (stdin)"); let mut leave_stdin = leave.stdin.take().unwrap(); let msg_clone = msg.to_vec(); @@ -1195,7 +1712,7 @@ async fn test_deaddrop_stdin_stdout() { }); let (_, pickup_key_result) = tokio::join!(stdin_writer, key_reader); - let pickup_key = pickup_key_result.unwrap().expect("deaddrop leave (stdin) did not output pickup key"); + let pickup_key = pickup_key_result.unwrap().expect("dd put (stdin) did not output pickup key"); tokio::time::sleep(Duration::from_secs(5)).await; @@ -1203,14 +1720,14 @@ async fn test_deaddrop_stdin_stdout() { let pickup_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "pickup", &pickup_key, + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, "--bootstrap", &bs_addr_clone2, "--timeout", "20", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup (stdout mode)") + .expect("failed to run dd get (stdout mode)") }) .await .unwrap(); @@ -1220,7 +1737,7 @@ async fn test_deaddrop_stdin_stdout() { let stderr = String::from_utf8_lossy(&pickup_output.stderr); assert!( pickup_output.status.success(), - "deaddrop pickup (stdout mode) failed: {stderr}" + "dd get (stdout mode) failed: {stderr}" ); assert_eq!( @@ -1230,11 +1747,11 @@ async fn test_deaddrop_stdin_stdout() { }) .await; - assert!(result.is_ok(), "test_deaddrop_stdin_stdout timed out"); + assert!(result.is_ok(), "test_dd_stdin_stdout timed out"); } #[tokio::test] -async fn test_deaddrop_pickup_timeout() { +async fn test_dd_get_timeout() { let result = tokio::time::timeout(Duration::from_secs(30), async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); @@ -1244,21 +1761,21 @@ async fn test_deaddrop_pickup_timeout() { let pickup_output = tokio::task::spawn_blocking(move || { Command::new(bin_path()) .args([ - "--no-default-config", "--public", - "deaddrop", "pickup", nonexistent_key, + 
"--no-default-config", "--no-public", + "dd", "get", nonexistent_key, "--bootstrap", &bs_addr, "--timeout", "5", "--no-ack", ]) .output() - .expect("failed to run deaddrop pickup (timeout test)") + .expect("failed to run dd get (timeout test)") }) .await .unwrap(); assert!( !pickup_output.status.success(), - "pickup of nonexistent key should fail, but exited 0" + "get of nonexistent key should fail, but exited 0" ); let stderr = String::from_utf8_lossy(&pickup_output.stderr); @@ -1269,11 +1786,11 @@ async fn test_deaddrop_pickup_timeout() { }) .await; - assert!(result.is_ok(), "test_deaddrop_pickup_timeout timed out"); + assert!(result.is_ok(), "test_dd_get_timeout timed out"); } #[tokio::test] -async fn test_deaddrop_wrong_passphrase_fails() { +async fn test_dd_wrong_passphrase_fails() { let result = tokio::time::timeout(Duration::from_secs(60), async { let (ports, _cluster) = spawn_dht_cluster(3).await; let bs_addr = format!("127.0.0.1:{}", ports[0]); @@ -1289,8 +1806,8 @@ async fn test_deaddrop_wrong_passphrase_fails() { let mut leave_cmd = Command::new(bin_path()); leave_cmd .args([ - "--no-default-config", "--public", - "deaddrop", "leave", &input_path_str, + "--no-default-config", "--no-public", + "dd", "put", &input_path_str, "--bootstrap", &bs_addr_clone, "--ttl", "40", "--passphrase", "correct-secret-passphrase", @@ -1305,7 +1822,7 @@ async fn test_deaddrop_wrong_passphrase_fails() { unsafe { leave_cmd.pre_exec(|| { setsid(); Ok(()) }); } } - let mut leave = leave_cmd.spawn().expect("failed to spawn deaddrop leave (wrong passphrase test)"); + let mut leave = leave_cmd.spawn().expect("failed to spawn dd put (wrong passphrase test)"); let stdout = leave.stdout.take().unwrap(); let leave_key_result = tokio::task::spawn_blocking(move || { @@ -1322,7 +1839,7 @@ async fn test_deaddrop_wrong_passphrase_fails() { .await .unwrap(); - assert!(leave_key_result.is_some(), "deaddrop leave did not output a key"); + assert!(leave_key_result.is_some(), "dd put did not 
output a key");
 
         tokio::time::sleep(Duration::from_secs(3)).await;
@@ -1331,14 +1848,14 @@
         let pickup_output = tokio::task::spawn_blocking(move || {
             Command::new(bin_path())
                 .args([
-                    "--no-default-config", "--public",
-                    "deaddrop", "pickup", wrong_key,
+                    "--no-default-config", "--no-public",
+                    "dd", "get", wrong_key,
                     "--bootstrap", &bs_addr_clone2,
                     "--timeout", "8",
                     "--no-ack",
                 ])
                 .output()
-                .expect("failed to run deaddrop pickup (wrong passphrase test)")
+                .expect("failed to run dd get (wrong passphrase test)")
         })
         .await
         .unwrap();
@@ -1347,10 +1864,328 @@
         assert!(
             !pickup_output.status.success(),
-            "pickup with wrong key should fail, but succeeded"
+            "get with wrong key should fail, but succeeded"
+        );
+    })
+    .await;
+
+    assert!(result.is_ok(), "test_dd_wrong_passphrase_fails timed out");
+}
+
+#[tokio::test]
+async fn test_dd_v2_multi_index() {
+    let result = tokio::time::timeout(Duration::from_secs(90), async {
+        let (ports, _cluster) = spawn_dht_cluster(3).await;
+        let bs_addr = format!("127.0.0.1:{}", ports[0]);
+        let dir = tempfile::tempdir().unwrap();
+        let input_path = dir.path().join("large.bin");
+        let output_path = dir.path().join("out.bin");
+
+        let msg: Vec<u8> = (0..30_000u32).map(|i| (i % 251) as u8).collect();
+        std::fs::write(&input_path, &msg).unwrap();
+
+        let input_str = input_path.to_str().unwrap().to_string();
+        let bs_clone = bs_addr.clone();
+        let mut leave = Command::new(bin_path())
+            .args([
+                "--no-default-config", "--no-public",
+                "dd", "put", &input_str,
+                "--bootstrap", &bs_clone,
+                "--ttl", "60",
+            ])
+            .stdout(Stdio::piped())
+            .stderr(Stdio::piped())
+            .spawn()
+            .expect("failed to spawn dd put");
+
+        let stdout = leave.stdout.take().unwrap();
+        let pickup_key = tokio::task::spawn_blocking(move || {
+            let reader = BufReader::new(stdout);
+            for line in reader.lines() {
+                let line = line.unwrap_or_default();
+                let t = 
line.trim().to_string(); + if t.len() == 64 && t.chars().all(|c| c.is_ascii_hexdigit()) { + return Some(t); + } + } + None + }) + .await + .unwrap() + .expect("no pickup key from dd put"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + let out_str = output_path.to_str().unwrap().to_string(); + let bs_clone2 = bs_addr.clone(); + let get_output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, + "--bootstrap", &bs_clone2, + "--output", &out_str, + "--timeout", "40", + "--no-ack", + ]) + .output() + .expect("failed to run dd get") + }) + .await + .unwrap(); + + kill_child(&mut leave); + + let stderr = String::from_utf8_lossy(&get_output.stderr); + assert!( + get_output.status.success(), + "dd get (multi-index) failed: {stderr}" + ); + + let received = std::fs::read(&output_path).expect("output file not found"); + assert_eq!(received, msg, "payload mismatch. stderr: {stderr}"); + }) + .await; + assert!(result.is_ok(), "test_dd_v2_multi_index timed out"); +} + +#[tokio::test] +async fn test_dd_v2_empty_file() { + let result = tokio::time::timeout(Duration::from_secs(60), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + let dir = tempfile::tempdir().unwrap(); + let input_path = dir.path().join("empty.bin"); + let output_path = dir.path().join("out.bin"); + + std::fs::write(&input_path, b"").unwrap(); + + let input_str = input_path.to_str().unwrap().to_string(); + let bs_clone = bs_addr.clone(); + let mut leave = Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "put", &input_str, + "--bootstrap", &bs_clone, + "--ttl", "40", + ]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn dd put (empty)"); + + let stdout = leave.stdout.take().unwrap(); + let pickup_key = tokio::task::spawn_blocking(move || { + let reader = 
BufReader::new(stdout); + for line in reader.lines() { + let line = line.unwrap_or_default(); + let t = line.trim().to_string(); + if t.len() == 64 && t.chars().all(|c| c.is_ascii_hexdigit()) { + return Some(t); + } + } + None + }) + .await + .unwrap() + .expect("no pickup key from dd put (empty)"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + let out_str = output_path.to_str().unwrap().to_string(); + let bs_clone2 = bs_addr.clone(); + let get_output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, + "--bootstrap", &bs_clone2, + "--output", &out_str, + "--timeout", "20", + "--no-ack", + ]) + .output() + .expect("failed to run dd get (empty)") + }) + .await + .unwrap(); + + kill_child(&mut leave); + + let stderr = String::from_utf8_lossy(&get_output.stderr); + assert!( + get_output.status.success(), + "dd get (empty) failed: {stderr}" + ); + + let received = std::fs::read(&output_path).expect("output file not found"); + assert!(received.is_empty(), "expected empty output, got {} bytes. 
stderr: {stderr}", received.len()); + }) + .await; + assert!(result.is_ok(), "test_dd_v2_empty_file timed out"); +} + +#[tokio::test] +async fn test_dd_v1_flag_roundtrip() { + let result = tokio::time::timeout(Duration::from_secs(60), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + let dir = tempfile::tempdir().unwrap(); + let input_path = dir.path().join("v1input.txt"); + let output_path = dir.path().join("v1out.txt"); + + let msg = b"v1 flag roundtrip test payload"; + std::fs::write(&input_path, msg).unwrap(); + + let input_str = input_path.to_str().unwrap().to_string(); + let bs_clone = bs_addr.clone(); + let mut leave = Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "put", &input_str, + "--v1", + "--bootstrap", &bs_clone, + "--ttl", "40", + ]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn dd put --v1"); + + let stdout = leave.stdout.take().unwrap(); + let pickup_key = tokio::task::spawn_blocking(move || { + let reader = BufReader::new(stdout); + for line in reader.lines() { + let line = line.unwrap_or_default(); + let t = line.trim().to_string(); + if t.len() == 64 && t.chars().all(|c| c.is_ascii_hexdigit()) { + return Some(t); + } + } + None + }) + .await + .unwrap() + .expect("no pickup key from dd put --v1"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + let out_str = output_path.to_str().unwrap().to_string(); + let bs_clone2 = bs_addr.clone(); + let get_output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, + "--bootstrap", &bs_clone2, + "--output", &out_str, + "--timeout", "20", + "--no-ack", + ]) + .output() + .expect("failed to run dd get after --v1 put") + }) + .await + .unwrap(); + + kill_child(&mut leave); + + let stderr = String::from_utf8_lossy(&get_output.stderr); + assert!( + 
get_output.status.success(), + "dd get after --v1 put failed: {stderr}" ); + + let received = std::fs::read(&output_path).expect("output file not found"); + assert_eq!(received, msg, "v1 flag roundtrip payload mismatch. stderr: {stderr}"); }) .await; + assert!(result.is_ok(), "test_dd_v1_flag_roundtrip timed out"); +} + +#[tokio::test] +async fn test_dd_v2_passphrase_roundtrip() { + let result = tokio::time::timeout(Duration::from_secs(60), async { + let (ports, _cluster) = spawn_dht_cluster(3).await; + let bs_addr = format!("127.0.0.1:{}", ports[0]); + let dir = tempfile::tempdir().unwrap(); + let input_path = dir.path().join("input.txt"); + let output_path = dir.path().join("output.txt"); + + let msg = b"v2 passphrase roundtrip payload"; + std::fs::write(&input_path, msg).unwrap(); + + let input_str = input_path.to_str().unwrap().to_string(); + let bs_clone = bs_addr.clone(); + + let mut leave_cmd = Command::new(bin_path()); + leave_cmd + .args([ + "--no-default-config", "--no-public", + "dd", "put", &input_str, + "--bootstrap", &bs_clone, + "--ttl", "40", + "--passphrase", "v2-test-passphrase-xyz", + ]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + + #[cfg(unix)] + { + use std::os::unix::process::CommandExt as _; + unsafe extern "C" { fn setsid() -> i32; } + unsafe { leave_cmd.pre_exec(|| { setsid(); Ok(()) }); } + } - assert!(result.is_ok(), "test_deaddrop_wrong_passphrase_fails timed out"); + let mut leave = leave_cmd.spawn().expect("failed to spawn dd put v2 passphrase"); + + let stdout = leave.stdout.take().unwrap(); + let pickup_key = tokio::task::spawn_blocking(move || { + let reader = BufReader::new(stdout); + for line in reader.lines() { + let line = line.unwrap_or_default(); + let t = line.trim().to_string(); + if t.len() == 64 && t.chars().all(|c| c.is_ascii_hexdigit()) { + return Some(t); + } + } + None + }) + .await + .unwrap() + .expect("no pickup key from dd put v2 passphrase"); + + tokio::time::sleep(Duration::from_secs(5)).await; + + let 
out_str = output_path.to_str().unwrap().to_string(); + let bs_clone2 = bs_addr.clone(); + let get_output = tokio::task::spawn_blocking(move || { + Command::new(bin_path()) + .args([ + "--no-default-config", "--no-public", + "dd", "get", &pickup_key, + "--bootstrap", &bs_clone2, + "--output", &out_str, + "--timeout", "20", + "--no-ack", + ]) + .output() + .expect("failed to run dd get (v2 passphrase)") + }) + .await + .unwrap(); + + kill_child(&mut leave); + + let stderr = String::from_utf8_lossy(&get_output.stderr); + assert!( + get_output.status.success(), + "dd get (v2 passphrase) failed: {stderr}" + ); + + let received = std::fs::read(&output_path).expect("output file not found"); + assert_eq!(received, msg, "v2 passphrase payload mismatch. stderr: {stderr}"); + }) + .await; + assert!(result.is_ok(), "test_dd_v2_passphrase_roundtrip timed out"); } diff --git a/peeroxide-dht/CHANGELOG.md b/peeroxide-dht/CHANGELOG.md index 17c633e..47e7d28 100644 --- a/peeroxide-dht/CHANGELOG.md +++ b/peeroxide-dht/CHANGELOG.md @@ -7,6 +7,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.3.0](https://github.com/Rightbracket/peeroxide/compare/peeroxide-dht-v1.2.0...peeroxide-dht-v1.3.0) - 2026-05-13 + +### Added + +- `WireCounters` struct — provides atomic, shareable counters for tracking total bytes sent and received. Includes `new()` for initialization and `snapshot()` for retrieving current totals. +- `Io::wire` field — public access to the IO layer's `WireCounters`. +- `Io::wire_counters()` — returns a handle to the IO layer's wire byte counters. +- `DhtHandle::wire_stats()` — returns a snapshot of cumulative wire bytes `(sent, received)` for the DHT node. +- `DhtHandle::wire_counters()` — returns a handle to the node-wide `WireCounters`. +- `HyperDhtHandle::wire_stats()` — returns a snapshot of total wire bytes processed by the DHT. 
+- `HyperDhtHandle::wire_counters()` — returns a handle to the shared wire byte counters for the running instance. + ## [1.2.0](https://github.com/Rightbracket/peeroxide/compare/peeroxide-dht-v1.1.0...peeroxide-dht-v1.2.0) - 2026-04-30 ### Added diff --git a/peeroxide-dht/Cargo.toml b/peeroxide-dht/Cargo.toml index 139e811..419b3b8 100644 --- a/peeroxide-dht/Cargo.toml +++ b/peeroxide-dht/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "peeroxide-dht" -version = "1.2.0" +version = "1.3.0" edition.workspace = true license.workspace = true rust-version.workspace = true diff --git a/peeroxide-dht/src/hyperdht.rs b/peeroxide-dht/src/hyperdht.rs index a579088..4b3dc8d 100644 --- a/peeroxide-dht/src/hyperdht.rs +++ b/peeroxide-dht/src/hyperdht.rs @@ -452,6 +452,26 @@ pub struct HyperDhtHandle { } impl HyperDhtHandle { + // ── WIRE STATS ──────────────────────────────────────────────────────────── + + /// Snapshot of cumulative wire bytes (sent, received) since this DHT + /// node started. Counts every UDP datagram exchanged at the IO layer + /// — queries, requests, replies, retries, relays, and any user-issued + /// puts/gets — regardless of which higher-level operation produced them. + /// + /// Useful for distinguishing "useful payload throughput" (what consumers + /// see) from "raw network throughput" (what the OS sees). The ratio + /// between them is the DHT's protocol amplification factor. + pub fn wire_stats(&self) -> (u64, u64) { + self.dht.wire_stats() + } + + /// Borrow the shared wire-counter handle for long-lived sampling. The + /// returned counters are `Arc` internally; cloning is cheap. + pub fn wire_counters(&self) -> crate::io::WireCounters { + self.dht.wire_counters() + } + // ── LOOKUP ──────────────────────────────────────────────────────────────── /// Query the DHT for peers advertising the target. 
@@ -2283,6 +2303,37 @@ mod tests { .await; } + #[tokio::test] + async fn wire_stats_starts_at_zero_and_is_addressable() { + let runtime = libudx::UdxRuntime::new().expect("runtime"); + let config = HyperDhtConfig { + dht: DhtConfig { + bootstrap: vec![], + port: 0, + ..DhtConfig::default() + }, + persistent: PersistentConfig::default(), + }; + let (join, handle, _rx) = spawn(&runtime, config).await.expect("spawn"); + let (sent, received) = handle.wire_stats(); + assert_eq!(sent, 0, "no traffic yet"); + assert_eq!(received, 0); + // Counters are shared via Arc — incrementing through `wire_counters()` + // must be visible via `wire_stats()`. + let counters = handle.wire_counters(); + counters + .bytes_sent + .fetch_add(123, std::sync::atomic::Ordering::Relaxed); + counters + .bytes_received + .fetch_add(456, std::sync::atomic::Ordering::Relaxed); + let (sent, received) = handle.wire_stats(); + assert_eq!(sent, 123); + assert_eq!(received, 456); + handle.destroy().await.expect("destroy"); + let _ = tokio::time::timeout(std::time::Duration::from_secs(5), join).await; + } + #[test] fn next_stream_id_is_unique() { let a = next_stream_id(); diff --git a/peeroxide-dht/src/io.rs b/peeroxide-dht/src/io.rs index 88ca12e..5d2e112 100644 --- a/peeroxide-dht/src/io.rs +++ b/peeroxide-dht/src/io.rs @@ -5,6 +5,7 @@ use std::collections::VecDeque; use std::net::SocketAddr; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -76,6 +77,32 @@ pub struct IoStats { pub retries: u64, } +/// Wire-byte counters shared between the IO layer and consumers (e.g. progress +/// reporters in `peeroxide-cli`). Increments are `Relaxed` — these are +/// observability metrics, not synchronization primitives. +/// +/// The counters track every UDP datagram the IO layer hands to or receives +/// from the OS sockets, regardless of which protocol layer originated it +/// (queries, requests, replies, relays, retries — all counted). 
+#[derive(Debug, Clone, Default)]
+pub struct WireCounters {
+    pub bytes_sent: Arc<AtomicU64>,
+    pub bytes_received: Arc<AtomicU64>,
+}
+
+impl WireCounters {
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn snapshot(&self) -> (u64, u64) {
+        (
+            self.bytes_sent.load(Ordering::Relaxed),
+            self.bytes_received.load(Ordering::Relaxed),
+        )
+    }
+}
+
 /// Which socket was used for a message.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum SocketKind {
@@ -251,6 +278,7 @@ pub struct Io {
     firewalled: bool,
     pub ephemeral: bool,
     pub stats: IoStats,
+    pub wire: WireCounters,
     table: Arc>,
     destroying: bool,
 }
@@ -293,11 +321,17 @@
             firewalled: config.firewalled,
             ephemeral: config.ephemeral,
             stats: IoStats::default(),
+            wire: WireCounters::default(),
             table,
             destroying: false,
         })
     }
 
+    /// Get a clone of the wire-byte counters. Cheap (Arc clone).
+    pub fn wire_counters(&self) -> WireCounters {
+        self.wire.clone()
+    }
+
     pub async fn server_local_addr(&self) -> IoResult<SocketAddr> {
         self.server_socket.local_addr().await.map_err(IoError::from)
     }
@@ -323,6 +357,9 @@
             msg = self.client_rx.recv() => (msg?, SocketKind::Client),
             msg = self.server_rx.recv() => (msg?, SocketKind::Server),
         };
+        self.wire
+            .bytes_received
+            .fetch_add(datagram.data.len() as u64, Ordering::Relaxed);
         tracing::debug!(
             from = %datagram.addr,
             len = datagram.data.len(),
@@ -637,10 +674,12 @@
             SocketKind::Server => &self.server_socket,
         };
 
+        let buffer_len = buffer.len() as u64;
         if let Err(e) = socket.send_to(&buffer, addr) {
             tracing::warn!(err = %e, "relay: send_to failed");
             return false;
         }
+        self.wire.bytes_sent.fetch_add(buffer_len, Ordering::Relaxed);
         true
     }
@@ -734,8 +773,11 @@
             SocketKind::Server => &self.server_socket,
         };
 
+        let bytes_len = bytes.len() as u64;
         if let Err(e) = socket.send_to(&bytes, addr) {
             tracing::warn!(err = %e, "send_reply_internal: send_to failed");
+        } else {
+            self.wire.bytes_sent.fetch_add(bytes_len, Ordering::Relaxed);
         }
     }
 
@@ -755,8 +797,11 @@
SocketKind::Server => &self.server_socket, }; + let buffer_len = buffer.len() as u64; if let Err(e) = socket.send_to(&buffer, addr) { tracing::warn!(err = %e, "send_inflight_at: send_to failed"); + } else { + self.wire.bytes_sent.fetch_add(buffer_len, Ordering::Relaxed); } } diff --git a/peeroxide-dht/src/rpc.rs b/peeroxide-dht/src/rpc.rs index 05f2a42..4eb0d85 100644 --- a/peeroxide-dht/src/rpc.rs +++ b/peeroxide-dht/src/rpc.rs @@ -278,6 +278,23 @@ struct DeferredReply { #[derive(Clone)] pub struct DhtHandle { cmd_tx: mpsc::UnboundedSender, + wire: crate::io::WireCounters, +} + +impl DhtHandle { + /// Snapshot of cumulative wire bytes (sent, received) since the DHT + /// started. Counts every UDP datagram exchanged by this node, including + /// retries, queries, replies, and relays. + pub fn wire_stats(&self) -> (u64, u64) { + self.wire.snapshot() + } + + /// Borrow the shared wire-counter handle. Useful when you want a long- + /// lived reference (e.g. for periodic sampling from a UI thread) without + /// going through `wire_stats()` repeatedly. + pub fn wire_counters(&self) -> crate::io::WireCounters { + self.wire.clone() + } } impl DhtHandle { @@ -1546,8 +1563,9 @@ pub async fn spawn( addr_samples: Vec::new(), }; + let wire = node.io.wire_counters(); let handle = tokio::spawn(node.run()); - let dht_handle = DhtHandle { cmd_tx }; + let dht_handle = DhtHandle { cmd_tx, wire }; Ok((handle, dht_handle)) } diff --git a/peeroxide/CHANGELOG.md b/peeroxide/CHANGELOG.md index 4e9a679..53180ba 100644 --- a/peeroxide/CHANGELOG.md +++ b/peeroxide/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed + +- Bumped `peeroxide-dht` dependency from 1.2.0 to 1.3.0. This update adds new public wire-byte counter accessors to `HyperDhtHandle` and `DhtHandle`. See `peeroxide-dht/CHANGELOG.md` for the full list of new additive symbols. 
+ ## [1.2.0](https://github.com/Rightbracket/peeroxide/compare/peeroxide-v1.1.0...peeroxide-v1.2.0) - 2026-04-30 ### Added diff --git a/peeroxide/Cargo.toml b/peeroxide/Cargo.toml index 5ff5002..f09252c 100644 --- a/peeroxide/Cargo.toml +++ b/peeroxide/Cargo.toml @@ -15,7 +15,7 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -peeroxide-dht = { path = "../peeroxide-dht", version = "1.2.0" } +peeroxide-dht = { path = "../peeroxide-dht", version = "1.3.0" } libudx = { path = "../libudx", version = "1.2.0" } tokio = { workspace = true } tracing = { workspace = true }