From 72384ce63a178b3bf7ea7df6fc3e68690d08c1f4 Mon Sep 17 00:00:00 2001 From: Franz Heinzmann Date: Mon, 15 Apr 2024 12:49:06 +0200 Subject: [PATCH] feat: node discovery via DNS (#2045) ## Description This enables global node discovery over DNS, i.e. dialing nodes by just their node id. Current setup is as follows: * When dialing a node only by its NodeId, the new `DnsDiscovery` service is invoked. It will lookup a TXT record at (by default) `_iroh_node.b32encodednodeid.testdns.iroh.link` over regular DNS or DNS-over-http. Right now the Cloudflare DNS servers are configured. At `testdns.iroh.link` we run a custom [DNS server](https://github.com/n0-computer/iroh-dns-server/tree/main) * Nodes publish their Derp address to this DNS server through Pkarr signed packets. This is an intermediate step, we decided that the publishing by default should not happen by the nodes directly but mediated through the Derp servers. Work for the latter happens in #2052 This PR thus allows for the following: ```sh # terminal/computer 1 $ iroh console --start Iroh is running Node ID: qp2znfedwdij4llc5noizwfemfgba7bzxozvr4bp7hfsdmwqbpua $ blob add ./myfile ... Blob: o5uanh5s2zwn2sucy47puqidsfx2advxos7kajq3ajwitcwobhba ... # terminal/computer 2 iroh console --start blob get o5uanh5s2zwn2sucy47puqidsfx2advxos7kajq3ajwitcwobhba --node qp2znfedwdij4llc5noizwfemfgba7bzxozvr4bp7hfsdmwqbpua ``` ## Notes & open questions * Misses node configuration in the CLI for the node origin domain (right now hardcoded to `testdns.iroh.link`). How do we want to expose this - CLI flag? Or in the config file? I'd say the latter. * Offload publishing to the Derpers - see #2052 * Right now the records published via pkarr have a TTL of 30s - the iroh-dns-server will use that TTL as-is when serving the records over DNS. both can/should change? * We can also *very* easily allow to lookup nodes not only by NodeId, but by any domain name. In the `iroh-dns` crate I included an example `resolve` that does just that. 
By setting a `CNAME` record you can even use any domain and simply point to the record hosted at the `testdns.iroh.link` server. So if, on your custom domain, you added a record like this ``` _iroh_node.frando.n0.computer CNAME _iroh_node.qp2znfedwdij4llc5noizwfemfgba7bzxozvr4bp7hfsdmwqbpua.iroh.link. ``` You can use this with the example to resolve to the node id and derp addresses: ``` cargo run --example resolve -- domain frando.n0.computer ``` ## Change checklist - [x] Self-review. - [x] Documentation updates if relevant. - [x] Tests if relevant. Closes #1248 --------- Co-authored-by: Kasey Co-authored-by: Asmir Avdicevic Co-authored-by: Ruediger Klaehn --- Cargo.lock | 620 +++++++++++++------- Cargo.toml | 1 + iroh-base/src/node_addr.rs | 14 +- iroh-cli/src/commands/blob.rs | 5 - iroh-dns-server/Cargo.toml | 54 ++ iroh-dns-server/README.md | 38 ++ iroh-dns-server/config.dev.toml | 18 + iroh-dns-server/config.prod.toml | 13 + iroh-dns-server/examples/convert.rs | 33 ++ iroh-dns-server/examples/publish.rs | 106 ++++ iroh-dns-server/examples/resolve.rs | 77 +++ iroh-dns-server/src/config.rs | 133 +++++ iroh-dns-server/src/dns.rs | 277 +++++++++ iroh-dns-server/src/dns/node_authority.rs | 190 ++++++ iroh-dns-server/src/http.rs | 258 ++++++++ iroh-dns-server/src/http/doh.rs | 77 +++ iroh-dns-server/src/http/doh/extract.rs | 250 ++++++++ iroh-dns-server/src/http/doh/response.rs | 144 +++++ iroh-dns-server/src/http/error.rs | 101 ++++ iroh-dns-server/src/http/extract.rs | 1 + iroh-dns-server/src/http/pkarr.rs | 52 ++ iroh-dns-server/src/http/rate_limiting.rs | 40 ++ iroh-dns-server/src/http/tls.rs | 184 ++++++ iroh-dns-server/src/lib.rs | 73 +++ iroh-dns-server/src/main.rs | 36 ++ iroh-dns-server/src/metrics.rs | 64 ++ iroh-dns-server/src/server.rs | 107 ++++ iroh-dns-server/src/state.rs | 12 + iroh-dns-server/src/store.rs | 190 ++++++ iroh-dns-server/src/store/signed_packets.rs | 98 ++++ iroh-dns-server/src/util.rs | 151 +++++ iroh-net/Cargo.toml | 10 +- 
iroh-net/src/discovery.rs | 365 +++++++++++- iroh-net/src/discovery/dns.rs | 70 +++ iroh-net/src/discovery/pkarr_publish.rs | 186 ++++++ iroh-net/src/dns.rs | 9 +- iroh-net/src/dns/node_info.rs | 362 ++++++++++++ iroh-net/src/magic_endpoint.rs | 5 + iroh-net/src/magicsock.rs | 5 + iroh-net/src/test_utils.rs | 104 ++++ iroh/Cargo.toml | 1 - iroh/src/node.rs | 2 +- iroh/src/node/builder.rs | 53 +- 43 files changed, 4354 insertions(+), 235 deletions(-) create mode 100644 iroh-dns-server/Cargo.toml create mode 100644 iroh-dns-server/README.md create mode 100644 iroh-dns-server/config.dev.toml create mode 100644 iroh-dns-server/config.prod.toml create mode 100644 iroh-dns-server/examples/convert.rs create mode 100644 iroh-dns-server/examples/publish.rs create mode 100644 iroh-dns-server/examples/resolve.rs create mode 100644 iroh-dns-server/src/config.rs create mode 100644 iroh-dns-server/src/dns.rs create mode 100644 iroh-dns-server/src/dns/node_authority.rs create mode 100644 iroh-dns-server/src/http.rs create mode 100644 iroh-dns-server/src/http/doh.rs create mode 100644 iroh-dns-server/src/http/doh/extract.rs create mode 100644 iroh-dns-server/src/http/doh/response.rs create mode 100644 iroh-dns-server/src/http/error.rs create mode 100644 iroh-dns-server/src/http/extract.rs create mode 100644 iroh-dns-server/src/http/pkarr.rs create mode 100644 iroh-dns-server/src/http/rate_limiting.rs create mode 100644 iroh-dns-server/src/http/tls.rs create mode 100644 iroh-dns-server/src/lib.rs create mode 100644 iroh-dns-server/src/main.rs create mode 100644 iroh-dns-server/src/metrics.rs create mode 100644 iroh-dns-server/src/server.rs create mode 100644 iroh-dns-server/src/state.rs create mode 100644 iroh-dns-server/src/store.rs create mode 100644 iroh-dns-server/src/store/signed_packets.rs create mode 100644 iroh-dns-server/src/util.rs create mode 100644 iroh-net/src/discovery/dns.rs create mode 100644 iroh-net/src/discovery/pkarr_publish.rs create mode 100644 
iroh-net/src/dns/node_info.rs diff --git a/Cargo.lock b/Cargo.lock index ffa2ae6e3a..473c489a4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,6 +130,12 @@ version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + [[package]] name = "arrayref" version = "0.3.7" @@ -148,8 +154,24 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", + "asn1-rs-derive 0.4.0", + "asn1-rs-impl 0.1.0", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +dependencies = [ + "asn1-rs-derive 0.5.0", + "asn1-rs-impl 0.2.0", "displaydoc", "nom", "num-traits", @@ -167,36 +189,37 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] -name = "asn1-rs-impl" -version = "0.1.0" +name = "asn1-rs-derive" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.53", + "synstructure 0.13.1", ] [[package]] -name = "async-stream" -version = "0.3.5" +name = "asn1-rs-impl" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "async-stream-impl" -version = "0.3.5" +name = "asn1-rs-impl" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", @@ -242,18 +265,20 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", + "axum-macros", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.2.0", + "hyper-util", "itoa", "matchit", "memchr", @@ -262,27 +287,71 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 1.1.0", + "http-body 1.0.0", + 
"http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.53", +] + +[[package]] +name = "axum-server" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1ad46c3ec4e12f4a4b6835e173ba21c25e484c9d02b49770bf006ce5367c036" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.2.0", + "hyper-util", + "pin-project-lite", + "rustls", + "rustls-pemfile 2.1.2", + "tokio", + "tokio-rustls", + "tower", + "tower-service", ] [[package]] @@ -352,6 +421,15 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +[[package]] +name = "base64-url" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb9fb9fb058cc3063b5fc88d9a21eefa2735871498a04e1650da76ed511c8569" +dependencies = [ + "base64 0.21.7", +] + [[package]] name = "base64ct" version = "1.6.0" @@ -673,43 +751,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "console-api" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787" -dependencies = [ - "futures-core", - "prost", - "prost-types", - "tonic", - "tracing-core", -] - -[[package]] -name = "console-subscriber" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e" -dependencies = [ - "console-api", - "crossbeam-channel", - 
"crossbeam-utils", - "futures-task", - "hdrhistogram", - "humantime", - "prost-types", - "serde", - "serde_json", - "thread_local", - "tokio", - "tokio-stream", - "tonic", - "tracing", - "tracing-core", - "tracing-subscriber", -] - [[package]] name = "const-oid" version = "0.9.6" @@ -782,15 +823,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "crc32fast" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" -dependencies = [ - "cfg-if", -] - [[package]] name = "criterion" version = "0.5.1" @@ -803,7 +835,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools 0.10.5", + "itertools", "num-traits", "once_cell", "oorandom", @@ -824,7 +856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools 0.10.5", + "itertools", ] [[package]] @@ -1066,7 +1098,21 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs 0.6.1", "displaydoc", "nom", "num-bigint", @@ -1467,16 +1513,6 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" -[[package]] -name = "flate2" -version = "1.0.28" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - [[package]] name = "flume" version = "0.11.0" @@ -1504,6 +1540,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "forwarded-header-value" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" +dependencies = [ + "nonempty", + "thiserror", +] + [[package]] name = "futures" version = "0.3.30" @@ -1740,6 +1786,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "h2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 1.1.0", + "indexmap 2.2.5", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "2.4.0" @@ -1781,10 +1846,7 @@ version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ - "base64 0.21.7", "byteorder", - "flate2", - "nom", "num-traits", ] @@ -1843,9 +1905,14 @@ dependencies = [ "ipnet", "once_cell", "rand", + "ring 0.16.20", + "rustls", + "rustls-pemfile 1.0.4", + "serde", "thiserror", "tinyvec", "tokio", + "tokio-rustls", "tracing", "url", ] @@ -1865,9 +1932,35 @@ dependencies = [ "parking_lot", "rand", "resolv-conf", + "rustls", + "serde", "smallvec", "thiserror", "tokio", + "tokio-rustls", + "tracing", +] + +[[package]] +name = "hickory-server" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbbb45bc4dcb456445732c705e3cfdc7393b8bcae5c36ecec36b9d76bd67cb5" +dependencies = [ + "async-trait", + "bytes", + "cfg-if", + 
"enum-as-inner", + "futures-util", + "hickory-proto", + "hickory-resolver", + "rustls", + "serde", + "thiserror", + "time", + "tokio", + "tokio-rustls", + "tokio-util", "tracing", ] @@ -2009,12 +2102,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" version = "0.14.28" @@ -2025,7 +2112,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -2048,6 +2135,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.4", "http 1.1.0", "http-body 1.0.0", "httparse", @@ -2073,18 +2161,6 @@ dependencies = [ "tokio-rustls", ] -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper 0.14.28", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - [[package]] name = "hyper-util" version = "0.1.3" @@ -2258,7 +2334,6 @@ dependencies = [ "bao-tree", "bytes", "clap", - "console-subscriber", "derive_more", "flume", "futures", @@ -2435,6 +2510,52 @@ dependencies = [ "walkdir", ] +[[package]] +name = "iroh-dns-server" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "axum-server", + "base64-url", + "bytes", + "clap", + "derive_more", + "dirs-next", + "futures", + "governor", + "hickory-proto", + "hickory-resolver", + "hickory-server", + "http 1.1.0", + "iroh-metrics", + "iroh-net", + "lru", + "parking_lot", + "pkarr", + "rcgen 0.12.1", + "redb 2.0.0", + "regex", + "rustls", + "rustls-pemfile 1.0.4", + "serde", + "struct_iterable", + "strum 0.26.2", + "tokio", + "tokio-rustls", + "tokio-rustls-acme", + "tokio-stream", + "tokio-util", + "toml 0.8.12", + "tower-http", + 
"tower_governor", + "tracing", + "tracing-subscriber", + "url", + "z32", +] + [[package]] name = "iroh-gossip" version = "0.13.0" @@ -2503,6 +2624,7 @@ version = "0.13.0" dependencies = [ "aead", "anyhow", + "axum", "backoff", "bytes", "clap", @@ -2516,7 +2638,9 @@ dependencies = [ "futures", "governor", "hex", + "hickory-proto", "hickory-resolver", + "hostname", "http 1.1.0", "http-body-util", "hyper 1.2.0", @@ -2533,6 +2657,7 @@ dependencies = [ "num_enum", "once_cell", "parking_lot", + "pkarr", "postcard", "pretty_assertions", "proptest", @@ -2548,13 +2673,14 @@ dependencies = [ "ring 0.17.8", "rtnetlink", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.4", "rustls-webpki", "serde", "serde_json", "serde_with", "smallvec", "socket2", + "strum 0.26.2", "stun-rs", "surge-ping", "testdir", @@ -2572,7 +2698,8 @@ dependencies = [ "webpki-roots", "windows 0.51.1", "wmi", - "x509-parser", + "x509-parser 0.15.1", + "z32", ] [[package]] @@ -2657,15 +2784,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - [[package]] name = "itoa" version = "1.0.10" @@ -2741,6 +2859,15 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -2948,6 +3075,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonempty" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" + [[package]] name = "nonzero_ext" version = "0.3.0" @@ -3131,7 +3264,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", +] + +[[package]] +name = "oid-registry" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +dependencies = [ + "asn1-rs 0.6.1", ] [[package]] @@ -3259,16 +3401,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" -[[package]] -name = "pem" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" -dependencies = [ - "base64 0.21.7", - "serde", -] - [[package]] name = "pem" version = "3.0.3" @@ -3371,6 +3503,23 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkarr" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4025a211a70a716314d4ea6464aed150f696deb81651bebf62f874cee5aac7" +dependencies = [ + "bytes", + "ed25519-dalek", + "rand", + "reqwest", + "self_cell", + "simple-dns", + "thiserror", + "url", + "z32", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -3678,38 +3827,6 @@ dependencies = [ "unarray", ] -[[package]] -name = "prost" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-derive" -version = "0.12.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" -dependencies = [ - "anyhow", - "itertools 0.11.0", - "proc-macro2", - "quote", - "syn 2.0.53", -] - -[[package]] -name = "prost-types" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" -dependencies = [ - "prost", -] - [[package]] name = "quanta" version = "0.12.2" @@ -3913,7 +4030,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 3.0.3", + "pem", "ring 0.16.20", "time", "yasna", @@ -3925,7 +4042,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" dependencies = [ - "pem 3.0.3", + "pem", "ring 0.17.8", "time", "yasna", @@ -4061,7 +4178,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", @@ -4074,11 +4191,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-rustls", @@ -4254,7 +4371,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.4", "schannel", "security-framework", ] @@ -4268,6 +4385,22 @@ dependencies = [ "base64 0.21.7", ] +[[package]] +name = "rustls-pemfile" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +dependencies = [ + "base64 0.22.0", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -4461,6 +4594,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_spanned" version = "0.6.5" @@ -4606,6 +4749,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "simple-dns" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01607fe2e61894468c6dc0b26103abb073fb08b79a3d9e4b6d76a1a341549958" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "slab" version = "0.4.9" @@ -4915,6 +5067,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "synstructure" version = "0.12.6" @@ -4927,6 +5085,17 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.53", +] + [[package]] name = "sysinfo" version = "0.26.9" @@ -5104,20 +5273,9 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - 
"tracing", "windows-sys 0.48.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.2.0" @@ -5141,19 +5299,22 @@ dependencies = [ [[package]] name = "tokio-rustls-acme" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb6f50b5523d014ba161512c37457acb16fd8218c883c7152e0a67ab763f2d4" +checksum = "2ebc06d846f8367f24c3a8882328707d1a5e507ef4f40943723ddbe2c17b9f24" dependencies = [ "async-trait", + "axum-server", "base64 0.21.7", "chrono", "futures", "log", - "pem 2.0.1", - "rcgen 0.11.3", + "num-bigint", + "pem", + "proc-macro2", + "rcgen 0.12.1", "reqwest", - "ring 0.16.20", + "ring 0.17.8", "rustls", "serde", "serde_json", @@ -5162,7 +5323,7 @@ dependencies = [ "tokio-rustls", "url", "webpki-roots", - "x509-parser", + "x509-parser 0.16.0", ] [[package]] @@ -5265,47 +5426,33 @@ dependencies = [ ] [[package]] -name = "tonic" -version = "0.10.2" +name = "tower" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64 0.21.7", - "bytes", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-timeout", - "percent-encoding", + "futures-core", + "futures-util", "pin-project", - "prost", + "pin-project-lite", "tokio", - "tokio-stream", - "tower", "tower-layer", "tower-service", "tracing", ] [[package]] -name = "tower" -version = "0.4.13" +name = "tower-http" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", + "bitflags 2.5.0", + "bytes", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", "pin-project-lite", - "rand", - "slab", - "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5323,6 +5470,22 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +[[package]] +name = "tower_governor" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3790eac6ad3fb8d9d96c2b040ae06e2517aa24b067545d1078b96ae72f7bb9a7" +dependencies = [ + "axum", + "forwarded-header-value", + "governor", + "http 1.1.0", + "pin-project", + "thiserror", + "tower", + "tracing", +] + [[package]] name = "tracing" version = "0.1.40" @@ -6003,12 +6166,29 @@ version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", "data-encoding", - "der-parser", + "der-parser 8.2.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.6.1", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs 0.6.1", + "data-encoding", + "der-parser 9.0.0", + "lazy_static", + "nom", + "oid-registry 0.7.0", "rusticata-macros", "thiserror", "time", @@ -6044,6 +6224,12 @@ dependencies = [ "time", ] +[[package]] +name = "z32" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"edb37266251c28b03d08162174a91c3a092e3bd4f476f8205ee1c507b78b7bdc" + [[package]] name = "zerocopy" version = "0.7.32" diff --git a/Cargo.toml b/Cargo.toml index d945bda86c..24dc3874d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ members = [ "iroh", "iroh-bytes", "iroh-base", + "iroh-dns-server", "iroh-gossip", "iroh-metrics", "iroh-net", diff --git a/iroh-base/src/node_addr.rs b/iroh-base/src/node_addr.rs index bf54e1293e..5a29122382 100644 --- a/iroh-base/src/node_addr.rs +++ b/iroh-base/src/node_addr.rs @@ -4,7 +4,7 @@ use anyhow::Context; use serde::{Deserialize, Serialize}; use url::Url; -use crate::key::PublicKey; +use crate::key::{NodeId, PublicKey}; /// A peer and it's addressing information. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -63,6 +63,12 @@ impl From<(PublicKey, Option, &[SocketAddr])> for NodeAddr { } } +impl From for NodeAddr { + fn from(node_id: NodeId) -> Self { + NodeAddr::new(node_id) + } +} + /// Addressing information to connect to a peer. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct AddrInfo { @@ -142,6 +148,12 @@ impl FromStr for RelayUrl { } } +impl From for Url { + fn from(value: RelayUrl) -> Self { + value.0 + } +} + /// Dereference to the wrapped [`Url`]. /// /// Note that [`DerefMut`] is not implemented on purpose, so this type has more flexibility diff --git a/iroh-cli/src/commands/blob.rs b/iroh-cli/src/commands/blob.rs index 6030dbd1cb..621d6c6b98 100644 --- a/iroh-cli/src/commands/blob.rs +++ b/iroh-cli/src/commands/blob.rs @@ -236,11 +236,6 @@ impl BlobCommands { return Err(anyhow::anyhow!("The input arguments refer to a collection of blobs and output is set to STDOUT. 
Only single blobs may be passed in this case.")); } - if node_addr.info.is_empty() { - return Err(anyhow::anyhow!( - "no relay url provided and no direct addresses provided" - )); - } let tag = match tag { Some(tag) => SetTagOption::Named(Tag::from(tag)), None => SetTagOption::Auto, diff --git a/iroh-dns-server/Cargo.toml b/iroh-dns-server/Cargo.toml new file mode 100644 index 0000000000..96afd9d4bf --- /dev/null +++ b/iroh-dns-server/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "iroh-dns-server" +version = "0.13.0" +edition = "2021" +description = "A pkarr relay and DNS server" +license = "MIT OR Apache-2.0" +authors = ["Frando ", "n0 team"] +repository = "https://github.com/n0-computer/iroh" +keywords = ["networking", "pkarr", "dns", "dns-server", "iroh"] +readme = "README.md" + +[dependencies] +anyhow = "1.0.80" +async-trait = "0.1.77" +axum = { version = "0.7.4", features = ["macros"] } +axum-server = { version = "0.6.0", features = ["tls-rustls"] } +base64-url = "2.0.2" +bytes = "1.5.0" +clap = { version = "4.5.1", features = ["derive"] } +derive_more = { version = "1.0.0-beta.1", features = ["debug", "display", "into", "from"] } +dirs-next = "2.0.0" +futures = "0.3.30" +governor = "0.6.3" +hickory-proto = "0.24.0" +hickory-server = { version = "0.24.0", features = ["dns-over-rustls"] } +http = "1.0.0" +iroh-metrics = { version = "0.13.0", path = "../iroh-metrics" } +lru = "0.12.3" +parking_lot = "0.12.1" +pkarr = { version = "1.1.2", features = [ "async", "relay"], default_features = false } +rcgen = "0.12.1" +redb = "2.0.0" +regex = "1.10.3" +rustls = "0.21" +rustls-pemfile = "1" +serde = { version = "1.0.197", features = ["derive"] } +struct_iterable = "0.1.1" +strum = { version = "0.26.1", features = ["derive"] } +tokio = { version = "1.36.0", features = ["full"] } +tokio-rustls = "0.24" +tokio-rustls-acme = { version = "0.3", features = ["axum"] } +tokio-stream = "0.1.14" +tokio-util = "0.7.10" +toml = "0.8.10" +tower-http = { version = "0.5.2", 
features = ["cors", "trace"] }
+tower_governor = "0.3.2"
+tracing = "0.1.40"
+tracing-subscriber = "0.3.18"
+url = "2.5.0"
+z32 = "1.1.1"
+
+[dev-dependencies]
+hickory-resolver = "0.24.0"
+iroh-net = { version = "0.13.0", path = "../iroh-net" }
diff --git a/iroh-dns-server/README.md b/iroh-dns-server/README.md
new file mode 100644
index 0000000000..e6f68c7784
--- /dev/null
+++ b/iroh-dns-server/README.md
@@ -0,0 +1,38 @@
+# iroh-dns-server
+
+A server that functions as a [pkarr](https://github.com/Nuhvi/pkarr/) relay and
+[DNS](https://en.wikipedia.org/wiki/Domain_Name_System) server.
+
+This server compiles to a binary `iroh-dns-server`. It needs a config file, of
+which there are two examples included:
+
+- [`config.dev.toml`](./config.dev.toml) - suitable for local development
+- [`config.prod.toml`](./config.prod.toml) - suitable for production, after
+  adjusting the domain names and IP addresses
+
+The server will expose the following services:
+
+- A DNS server listening on UDP and TCP for DNS queries
+- An HTTP and/or HTTPS server which provides the following routes:
+  - `/pkarr`: `GET` and `PUT` for pkarr signed packets
+  - `/dns-query`: Answer DNS queries over
+    [DNS-over-HTTPS](https://datatracker.ietf.org/doc/html/rfc8484)
+
+All received and valid pkarr signed packets will be served over DNS. The pkarr
+packet origin will be appended with the origin as configured by this server.
+
+# License
+
+This project is licensed under either of
+
+- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+  http://www.apache.org/licenses/LICENSE-2.0)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this project by you, as defined in the Apache-2.0 license,
+shall be dual licensed as above, without any additional terms or conditions.
diff --git a/iroh-dns-server/config.dev.toml b/iroh-dns-server/config.dev.toml new file mode 100644 index 0000000000..80db595573 --- /dev/null +++ b/iroh-dns-server/config.dev.toml @@ -0,0 +1,18 @@ +[http] +port = 8080 +bind_addr = "127.0.0.1" + +[https] +port = 8443 +bind_addr = "127.0.0.1" +domains = ["localhost"] +cert_mode = "self_signed" + +[dns] +port = 5300 +bind_addr = "127.0.0.1" +default_soa = "dns1.irohdns.example hostmaster.irohdns.example 0 10800 3600 604800 3600" +default_ttl = 900 +origins = ["irohdns.example.", "."] +rr_a = "127.0.0.1" +rr_ns = "ns1.irohdns.example." diff --git a/iroh-dns-server/config.prod.toml b/iroh-dns-server/config.prod.toml new file mode 100644 index 0000000000..8dde5fb6ba --- /dev/null +++ b/iroh-dns-server/config.prod.toml @@ -0,0 +1,13 @@ +[https] +port = 443 +domains = ["irohdns.example.org"] +cert_mode = "lets_encrypt" +letsencrypt_prod = true + +[dns] +port = 53 +default_soa = "dns1.irohdns.example.org hostmaster.irohdns.example.org 0 10800 3600 604800 3600" +default_ttl = 30 +origins = ["irohdns.example.org", "."] +rr_a = "203.0.10.10" +rr_ns = "ns1.irohdns.example.org." 
diff --git a/iroh-dns-server/examples/convert.rs b/iroh-dns-server/examples/convert.rs new file mode 100644 index 0000000000..401b28f585 --- /dev/null +++ b/iroh-dns-server/examples/convert.rs @@ -0,0 +1,33 @@ +use std::str::FromStr; + +use clap::Parser; +use iroh_net::NodeId; + +#[derive(Debug, Parser)] +struct Cli { + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, Parser)] +enum Command { + NodeToPkarr { node_id: String }, + PkarrToNode { z32_pubkey: String }, +} + +fn main() -> anyhow::Result<()> { + let args = Cli::parse(); + match args.command { + Command::NodeToPkarr { node_id } => { + let node_id = NodeId::from_str(&node_id)?; + let public_key = pkarr::PublicKey::try_from(*node_id.as_bytes())?; + println!("{}", public_key.to_z32()) + } + Command::PkarrToNode { z32_pubkey } => { + let public_key = pkarr::PublicKey::try_from(z32_pubkey.as_str())?; + let node_id = NodeId::from_bytes(public_key.as_bytes())?; + println!("{}", node_id) + } + } + Ok(()) +} diff --git a/iroh-dns-server/examples/publish.rs b/iroh-dns-server/examples/publish.rs new file mode 100644 index 0000000000..3bad9bb9f5 --- /dev/null +++ b/iroh-dns-server/examples/publish.rs @@ -0,0 +1,106 @@ +use std::str::FromStr; + +use anyhow::{bail, Result}; +use clap::{Parser, ValueEnum}; +use iroh_net::{ + discovery::{ + dns::N0_DNS_NODE_ORIGIN, + pkarr_publish::{PkarrRelayClient, N0_DNS_PKARR_RELAY}, + }, + dns::node_info::{to_z32, NodeInfo, IROH_TXT_NAME}, + key::SecretKey, + NodeId, +}; +use url::Url; + +const LOCALHOST_PKARR: &str = "http://localhost:8080/pkarr"; +const EXAMPLE_ORIGIN: &str = "irohdns.example"; + +#[derive(ValueEnum, Clone, Debug, Default, Copy, strum::Display)] +#[strum(serialize_all = "kebab-case")] +pub enum Env { + /// Use the pkarr relay run by number0. + #[default] + Default, + /// Use a relay listening at http://localhost:8080 + Dev, +} + +/// Publish a record to an irohdns server. 
+/// +/// You have to set the IROH_SECRET environment variable to the node secret for which to publish. +#[derive(Parser, Debug)] +struct Cli { + /// Environment to publish to. + #[clap(value_enum, short, long, default_value_t = Env::Default)] + env: Env, + /// Pkarr Relay URL. If set, the --env option will be ignored. + #[clap(long, conflicts_with = "env")] + pkarr_relay: Option, + /// Home relay server to publish for this node + relay_url: Url, + /// Create a new node secret if IROH_SECRET is unset. Only for development / debugging. + #[clap(short, long)] + create: bool, +} + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + let args = Cli::parse(); + + let secret_key = match std::env::var("IROH_SECRET") { + Ok(s) => SecretKey::from_str(&s)?, + Err(_) if args.create => { + let s = SecretKey::generate(); + println!("Generated a new node secret. To reuse, set"); + println!("IROH_SECRET={s}"); + s + } + Err(_) => { + bail!("Environtment variable IROH_SECRET is not set. 
To create a new secret, use the --create option.") + } + }; + + let node_id = secret_key.public(); + let pkarr_relay = match (args.pkarr_relay, args.env) { + (Some(pkarr_relay), _) => pkarr_relay, + (None, Env::Default) => N0_DNS_PKARR_RELAY.parse().expect("valid url"), + (None, Env::Dev) => LOCALHOST_PKARR.parse().expect("valid url"), + }; + + println!("announce {node_id}:"); + println!(" relay={}", args.relay_url); + println!(); + println!("publish to {pkarr_relay} ..."); + + let pkarr = PkarrRelayClient::new(pkarr_relay); + let node_info = NodeInfo::new(node_id, Some(args.relay_url)); + let signed_packet = node_info.to_pkarr_signed_packet(&secret_key, 30)?; + pkarr.publish(&signed_packet).await?; + + println!("signed packet published."); + println!("resolve with:"); + + match args.env { + Env::Default => { + println!(" cargo run --example resolve -- node {}", node_id); + println!(" dig {} TXT", fmt_domain(&node_id, N0_DNS_NODE_ORIGIN)) + } + Env::Dev => { + println!( + " cargo run --example resolve -- --env dev node {}", + node_id + ); + println!( + " dig @localhost -p 5300 {} TXT", + fmt_domain(&node_id, EXAMPLE_ORIGIN) + ) + } + } + Ok(()) +} + +fn fmt_domain(node_id: &NodeId, origin: &str) -> String { + format!("{}.{}.{}", IROH_TXT_NAME, to_z32(node_id), origin) +} diff --git a/iroh-dns-server/examples/resolve.rs b/iroh-dns-server/examples/resolve.rs new file mode 100644 index 0000000000..b9464ab38c --- /dev/null +++ b/iroh-dns-server/examples/resolve.rs @@ -0,0 +1,77 @@ +use std::net::SocketAddr; + +use clap::{Parser, ValueEnum}; +use hickory_resolver::{ + config::{NameServerConfig, Protocol, ResolverConfig}, + AsyncResolver, +}; +use iroh_net::{ + discovery::dns::N0_DNS_NODE_ORIGIN, + dns::{node_info::TxtAttrs, DnsResolver}, + NodeId, +}; + +const LOCALHOST_DNS: &str = "127.0.0.1:5300"; +const EXAMPLE_ORIGIN: &str = "irohdns.example"; + +#[derive(ValueEnum, Clone, Debug, Default)] +pub enum Env { + /// Use the system's nameservers with origin domain 
dns.iroh.link + #[default] + Default, + /// Use a localhost DNS server listening on port 5300 + Dev, +} + +#[derive(Debug, Parser)] +struct Cli { + #[clap(value_enum, short, long, default_value_t = Env::Default)] + env: Env, + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, Parser)] +enum Command { + /// Resolve node info by node id. + Node { node_id: NodeId }, + /// Resolve node info by domain. + Domain { domain: String }, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let args = Cli::parse(); + let (resolver, origin) = match args.env { + Env::Default => ( + iroh_net::dns::default_resolver().clone(), + N0_DNS_NODE_ORIGIN, + ), + Env::Dev => ( + resolver_with_nameserver(LOCALHOST_DNS.parse()?), + EXAMPLE_ORIGIN, + ), + }; + let resolved = match args.command { + Command::Node { node_id } => { + TxtAttrs::::lookup_by_id(&resolver, &node_id, origin).await? + } + Command::Domain { domain } => { + TxtAttrs::::lookup_by_domain(&resolver, &domain).await? + } + }; + println!("resolved node {}", resolved.node_id()); + for (key, values) in resolved.attrs() { + for value in values { + println!(" {key}={value}"); + } + } + Ok(()) +} + +fn resolver_with_nameserver(nameserver: SocketAddr) -> DnsResolver { + let mut config = ResolverConfig::new(); + let nameserver_config = NameServerConfig::new(nameserver, Protocol::Udp); + config.add_name_server(nameserver_config); + AsyncResolver::tokio(config, Default::default()) +} diff --git a/iroh-dns-server/src/config.rs b/iroh-dns-server/src/config.rs new file mode 100644 index 0000000000..4f50fbc46b --- /dev/null +++ b/iroh-dns-server/src/config.rs @@ -0,0 +1,133 @@ +//! 
Configuration for the server + +use anyhow::{anyhow, Context, Result}; +use serde::{Deserialize, Serialize}; +use std::{ + env, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::{Path, PathBuf}, +}; + +use crate::{ + dns::DnsConfig, + http::{CertMode, HttpConfig, HttpsConfig}, +}; + +const DEFAULT_METRICS_ADDR: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9117); + +/// Server configuration +/// +/// The config is usually loaded from a file with [`Self::load`]. +/// +/// The struct also implements [`Default`] which creates a config suitable for local development +/// and testing. +#[derive(Debug, Serialize, Deserialize)] +pub struct Config { + /// Config for the HTTP server + /// + /// If set to `None` no HTTP server will be started. + pub http: Option, + /// Config for the HTTPS server + /// + /// If set to `None` no HTTPS server will be started. + pub https: Option, + /// Config for the DNS server. + pub dns: DnsConfig, + /// Config for the metrics server. + /// + /// The metrics server is started by default. To disable the metrics server, set to + /// `Some(MetricsConfig::disabled())`. + pub metrics: Option, +} + +/// The config for the metrics server. +#[derive(Debug, Serialize, Deserialize)] +pub struct MetricsConfig { + /// Set to true to disable the metrics server. + pub disabled: bool, + /// Optionally set a custom address to bind to. + pub bind_addr: Option, +} + +impl MetricsConfig { + /// Disable the metrics server. + pub fn disabled() -> Self { + Self { + disabled: true, + bind_addr: None, + } + } +} + +impl Config { + /// Load the config from a file. + pub async fn load(path: impl AsRef) -> Result { + let s = tokio::fs::read_to_string(path.as_ref()) + .await + .with_context(|| format!("failed to read {}", path.as_ref().to_string_lossy()))?; + let config: Config = toml::from_str(&s)?; + Ok(config) + } + + /// Get the data directory. 
+ pub fn data_dir() -> Result { + let dir = if let Some(val) = env::var_os("IROH_DNS_DATA_DIR") { + PathBuf::from(val) + } else { + let path = dirs_next::data_dir().ok_or_else(|| { + anyhow!("operating environment provides no directory for application data") + })?; + path.join("iroh-dns") + }; + Ok(dir) + } + + /// Get the path to the store database file. + pub fn signed_packet_store_path() -> Result { + Ok(Self::data_dir()?.join("signed-packets-1.db")) + } + + /// Get the address where the metrics server should be bound, if set. + pub(crate) fn metrics_addr(&self) -> Option { + match &self.metrics { + None => Some(DEFAULT_METRICS_ADDR), + Some(conf) => match conf.disabled { + true => None, + false => Some(conf.bind_addr.unwrap_or(DEFAULT_METRICS_ADDR)), + }, + } + } +} + +impl Default for Config { + fn default() -> Self { + Self { + http: Some(HttpConfig { + port: 8080, + bind_addr: None, + }), + https: Some(HttpsConfig { + port: 8443, + bind_addr: None, + domains: vec!["localhost".to_string()], + cert_mode: CertMode::SelfSigned, + letsencrypt_contact: None, + letsencrypt_prod: None, + }), + dns: DnsConfig { + port: 5300, + bind_addr: None, + origins: vec!["irohdns.example.".to_string(), ".".to_string()], + + default_soa: "irohdns.example hostmaster.irohdns.example 0 10800 3600 604800 3600" + .to_string(), + default_ttl: 900, + + rr_a: Some(Ipv4Addr::LOCALHOST), + rr_aaaa: None, + rr_ns: Some("ns1.irohdns.example.".to_string()), + }, + metrics: None, + } + } +} diff --git a/iroh-dns-server/src/dns.rs b/iroh-dns-server/src/dns.rs new file mode 100644 index 0000000000..2faca9df6f --- /dev/null +++ b/iroh-dns-server/src/dns.rs @@ -0,0 +1,277 @@ +//! 
Implementation of a DNS name server for iroh node announces + +use std::{ + collections::BTreeMap, + io, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::Arc, + time::Duration, +}; + +use anyhow::{anyhow, Result}; +use async_trait::async_trait; +use bytes::Bytes; +use hickory_server::{ + authority::{Catalog, MessageResponse, ZoneType}, + proto::{ + self, + rr::{ + rdata::{self}, + RData, Record, RecordSet, RecordType, RrKey, + }, + serialize::{binary::BinEncoder, txt::RDataParser}, + }, + resolver::Name, + server::{Request, RequestHandler, ResponseHandler, ResponseInfo}, + store::in_memory::InMemoryAuthority, +}; + +use iroh_metrics::inc; +use proto::{op::ResponseCode, rr::LowerName}; +use serde::{Deserialize, Serialize}; +use tokio::{ + net::{TcpListener, UdpSocket}, + sync::broadcast, +}; + +use crate::{metrics::Metrics, store::ZoneStore}; + +use self::node_authority::NodeAuthority; + +mod node_authority; + +const DEFAULT_NS_TTL: u32 = 60 * 60 * 12; // 12h +const DEFAULT_SOA_TTL: u32 = 60 * 60 * 24 * 14; // 14d +const DEFAULT_A_TTL: u32 = 60 * 60; // 1h + +/// DNS server settings +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct DnsConfig { + /// The port to serve a local UDP DNS server at + pub port: u16, + /// The IPv4 or IPv6 address to bind the UDP DNS server. + /// Uses `0.0.0.0` if unspecified. + pub bind_addr: Option, + /// SOA record data for any authoritative DNS records + pub default_soa: String, + /// Default time to live for returned DNS records (TXT & SOA) + pub default_ttl: u32, + /// Domain used for serving the `_iroh_node..` DNS TXT entry + pub origins: Vec, + + /// `A` record to set for all origins + pub rr_a: Option, + /// `AAAA` record to set for all origins + pub rr_aaaa: Option, + /// `NS` record to set for all origins + pub rr_ns: Option, +} + +/// A DNS server that serves pkarr signed packets. 
+pub struct DnsServer { + local_addr: SocketAddr, + server: hickory_server::ServerFuture, +} + +impl DnsServer { + /// Spawn the server. + pub async fn spawn(config: DnsConfig, dns_handler: DnsHandler) -> Result { + const TCP_TIMEOUT: Duration = Duration::from_millis(1000); + let mut server = hickory_server::ServerFuture::new(dns_handler); + + let bind_addr = SocketAddr::new( + config.bind_addr.unwrap_or(Ipv4Addr::UNSPECIFIED.into()), + config.port, + ); + + let socket = UdpSocket::bind(bind_addr).await?; + + let socket_addr = socket.local_addr()?; + + server.register_socket(socket); + server.register_listener(TcpListener::bind(bind_addr).await?, TCP_TIMEOUT); + tracing::info!("DNS server listening on {}", bind_addr); + + Ok(Self { + server, + local_addr: socket_addr, + }) + } + + /// Get the local address of the UDP/TCP socket. + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } + + /// Shutdown the server an wait for all tasks to complete. + pub async fn shutdown(mut self) -> Result<()> { + self.server.shutdown_gracefully().await?; + Ok(()) + } + + /// Wait for all tasks to complete. + /// + /// Runs forever unless tasks fail. + pub async fn run_until_done(mut self) -> Result<()> { + self.server.block_until_done().await?; + Ok(()) + } +} + +/// State for serving DNS +#[derive(Clone, derive_more::Debug)] +pub struct DnsHandler { + #[debug("Catalog")] + catalog: Arc, +} + +impl DnsHandler { + /// Create a DNS server given some settings, a connection to the DB for DID-by-username lookups + /// and the server DID to serve under `_did.`. 
+ pub fn new(zone_store: ZoneStore, config: &DnsConfig) -> Result { + let origins = config + .origins + .iter() + .map(Name::from_utf8) + .collect::, _>>()?; + + let (static_authority, serial) = create_static_authority(&origins, config)?; + let authority = NodeAuthority::new(zone_store, static_authority, origins, serial)?; + let authority = Arc::new(authority); + + let mut catalog = Catalog::new(); + for origin in authority.origins() { + catalog.upsert(LowerName::from(origin), Box::new(Arc::clone(&authority))); + } + + Ok(Self { + catalog: Arc::new(catalog), + }) + } + + /// Handle a DNS request + pub async fn answer_request(&self, request: Request) -> Result { + tracing::info!(?request, "Got DNS request"); + + let (tx, mut rx) = broadcast::channel(1); + let response_handle = Handle(tx); + + self.handle_request(&request, response_handle).await; + + tracing::debug!("Done handling request, trying to resolve response"); + Ok(rx.recv().await?) + } +} + +#[async_trait::async_trait] +impl RequestHandler for DnsHandler { + async fn handle_request( + &self, + request: &Request, + response_handle: R, + ) -> ResponseInfo { + inc!(Metrics, dns_requests); + match request.protocol() { + hickory_server::server::Protocol::Udp => inc!(Metrics, dns_requests_udp), + hickory_server::server::Protocol::Https => inc!(Metrics, dns_requests_https), + _ => {} + } + + let res = self.catalog.handle_request(request, response_handle).await; + match &res.response_code() { + ResponseCode::NoError => match res.answer_count() { + 0 => inc!(Metrics, dns_lookup_notfound), + _ => inc!(Metrics, dns_lookup_success), + }, + ResponseCode::NXDomain => inc!(Metrics, dns_lookup_notfound), + _ => inc!(Metrics, dns_lookup_error), + } + res + } +} + +/// A handle to the channel over which the response to a DNS request will be sent +#[derive(Debug, Clone)] +pub struct Handle(pub broadcast::Sender); + +#[async_trait] +impl ResponseHandler for Handle { + async fn send_response<'a>( + &mut self, + response: 
MessageResponse< + '_, + 'a, + impl Iterator + Send + 'a, + impl Iterator + Send + 'a, + impl Iterator + Send + 'a, + impl Iterator + Send + 'a, + >, + ) -> io::Result { + let mut bytes = Vec::with_capacity(512); + let info = { + let mut encoder = BinEncoder::new(&mut bytes); + response.destructive_emit(&mut encoder)? + }; + + let bytes = Bytes::from(bytes); + self.0.send(bytes).unwrap(); + + Ok(info) + } +} + +fn create_static_authority( + origins: &[Name], + config: &DnsConfig, +) -> Result<(InMemoryAuthority, u32)> { + let soa = RData::parse( + RecordType::SOA, + config.default_soa.split_ascii_whitespace(), + None, + )? + .into_soa() + .map_err(|_| anyhow!("Couldn't parse SOA: {}", config.default_soa))?; + let serial = soa.serial(); + let mut records = BTreeMap::new(); + for name in origins { + push_record( + &mut records, + serial, + Record::from_rdata(name.clone(), DEFAULT_SOA_TTL, RData::SOA(soa.clone())), + ); + if let Some(addr) = config.rr_a { + push_record( + &mut records, + serial, + Record::from_rdata(name.clone(), DEFAULT_A_TTL, RData::A(addr.into())), + ); + } + if let Some(addr) = config.rr_aaaa { + push_record( + &mut records, + serial, + Record::from_rdata(name.clone(), DEFAULT_A_TTL, RData::AAAA(addr.into())), + ); + } + if let Some(ns) = &config.rr_ns { + let ns = Name::parse(ns, Some(&Name::root()))?; + push_record( + &mut records, + serial, + Record::from_rdata(name.clone(), DEFAULT_NS_TTL, RData::NS(rdata::NS(ns))), + ); + } + } + + let static_authority = InMemoryAuthority::new(Name::root(), records, ZoneType::Primary, false) + .map_err(|e| anyhow!(e))?; + + Ok((static_authority, serial)) +} + +fn push_record(records: &mut BTreeMap, serial: u32, record: Record) { + let key = RrKey::new(record.name().clone().into(), record.record_type()); + let mut record_set = RecordSet::new(record.name(), record.record_type(), serial); + record_set.insert(record, serial); + records.insert(key, record_set); +} diff --git 
a/iroh-dns-server/src/dns/node_authority.rs b/iroh-dns-server/src/dns/node_authority.rs new file mode 100644 index 0000000000..67b498fc55 --- /dev/null +++ b/iroh-dns-server/src/dns/node_authority.rs @@ -0,0 +1,190 @@ +use std::{fmt, sync::Arc}; + +use anyhow::{bail, ensure, Result}; +use async_trait::async_trait; +use hickory_proto::{ + op::ResponseCode, + rr::{LowerName, Name, RecordType}, +}; +use hickory_server::{ + authority::{ + AuthLookup, Authority, LookupError, LookupOptions, LookupRecords, MessageRequest, + UpdateResult, ZoneType, + }, + server::RequestInfo, + store::in_memory::InMemoryAuthority, +}; + +use tracing::{debug, trace}; + +use crate::{ + store::ZoneStore, + util::{record_set_append_origin, PublicKeyBytes}, +}; + +#[derive(derive_more::Debug)] +pub struct NodeAuthority { + serial: u32, + origins: Vec, + #[debug("InMemoryAuthority")] + static_authority: InMemoryAuthority, + zones: ZoneStore, + // TODO: This is used by Authority::origin + // Find out what exactly this is used for - we don't have a primary origin. 
+ first_origin: LowerName, +} + +impl NodeAuthority { + pub fn new( + zones: ZoneStore, + static_authority: InMemoryAuthority, + origins: Vec, + serial: u32, + ) -> Result { + ensure!(!origins.is_empty(), "at least one origin is required"); + let first_origin = LowerName::from(&origins[0]); + Ok(Self { + static_authority, + origins, + serial, + zones, + first_origin, + }) + } + + pub fn origins(&self) -> impl Iterator { + self.origins.iter() + } + + pub fn serial(&self) -> u32 { + self.serial + } +} + +#[async_trait] +impl Authority for NodeAuthority { + type Lookup = AuthLookup; + + fn zone_type(&self) -> ZoneType { + ZoneType::Primary + } + + fn is_axfr_allowed(&self) -> bool { + false + } + + async fn update(&self, _update: &MessageRequest) -> UpdateResult { + Err(ResponseCode::NotImp) + } + + fn origin(&self) -> &LowerName { + &self.first_origin + } + + async fn lookup( + &self, + name: &LowerName, + record_type: RecordType, + lookup_options: LookupOptions, + ) -> Result { + match record_type { + RecordType::SOA | RecordType::NS => { + self.static_authority + .lookup(name, record_type, lookup_options) + .await + } + _ => match split_and_parse_pkarr(name, &self.origins) { + Err(err) => { + trace!(%name, ?err, "name is not a pkarr zone"); + debug!("resolve static: name {name}"); + self.static_authority + .lookup(name, record_type, lookup_options) + .await + } + Ok((name, pubkey, origin)) => { + debug!(%origin, "resolve pkarr: {name} {pubkey}"); + match self + .zones + .resolve(&pubkey, &name, record_type) + .await + .map_err(err_refused)? 
+ { + Some(pkarr_set) => { + let new_origin = Name::parse(&pubkey.to_z32(), Some(&origin)) + .map_err(err_refused)?; + let record_set = + record_set_append_origin(&pkarr_set, &new_origin, self.serial()) + .map_err(err_refused)?; + let records = LookupRecords::new(lookup_options, Arc::new(record_set)); + let answers = AuthLookup::answers(records, None); + Ok(answers) + } + None => Err(err_nx_domain("not found")), + } + } + }, + } + } + + async fn search( + &self, + request_info: RequestInfo<'_>, + lookup_options: LookupOptions, + ) -> Result { + debug!("searching NodeAuthority for: {}", request_info.query); + let lookup_name = request_info.query.name(); + let record_type: RecordType = request_info.query.query_type(); + match record_type { + RecordType::SOA => { + self.static_authority + .lookup(self.origin(), record_type, lookup_options) + .await + } + RecordType::AXFR => Err(LookupError::from(ResponseCode::Refused)), + _ => self.lookup(lookup_name, record_type, lookup_options).await, + } + } + + async fn get_nsec_records( + &self, + _name: &LowerName, + _lookup_options: LookupOptions, + ) -> Result { + Ok(AuthLookup::default()) + } +} + +fn split_and_parse_pkarr( + name: impl Into, + allowed_origins: &[Name], +) -> Result<(Name, PublicKeyBytes, Name)> { + let name = name.into(); + trace!("resolve {name}"); + for origin in allowed_origins.iter() { + trace!("try {origin}"); + if !origin.zone_of(&name) { + continue; + } + if name.num_labels() < origin.num_labels() + 1 { + bail!("invalid name"); + } + trace!("parse {origin}"); + let labels = name.iter().rev(); + let mut labels_without_origin = labels.skip(origin.num_labels() as usize); + let pkey_label = labels_without_origin.next().expect("length checked above"); + let pkey_str = std::str::from_utf8(pkey_label)?; + let pkey = PublicKeyBytes::from_z32(pkey_str)?; + let remaining_name = Name::from_labels(labels_without_origin)?; + return Ok((remaining_name, pkey, origin.clone())); + } + bail!("name does not match any 
origin"); +} + +fn err_refused(e: impl fmt::Debug) -> LookupError { + trace!("lookup failed (refused): {e:?}"); + LookupError::from(ResponseCode::Refused) +} +fn err_nx_domain(e: impl fmt::Debug) -> LookupError { + trace!("lookup failed (nxdomain): {e:?}"); + LookupError::from(ResponseCode::NXDomain) +} diff --git a/iroh-dns-server/src/http.rs b/iroh-dns-server/src/http.rs new file mode 100644 index 0000000000..34a1af8161 --- /dev/null +++ b/iroh-dns-server/src/http.rs @@ -0,0 +1,258 @@ +//! HTTP server part of iroh-dns-server + +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Instant, +}; + +use anyhow::{bail, Context, Result}; +use axum::{ + extract::{ConnectInfo, Request}, + handler::Handler, + http::Method, + middleware::{self, Next}, + response::IntoResponse, + routing::get, + Router, +}; +use iroh_metrics::{inc, inc_by}; +use serde::{Deserialize, Serialize}; +use tokio::{net::TcpListener, task::JoinSet}; +use tower_http::{ + cors::{self, CorsLayer}, + trace::TraceLayer, +}; +use tracing::{info, span, warn, Level}; + +mod doh; +mod error; +mod pkarr; +mod rate_limiting; +mod tls; + +use crate::state::AppState; +use crate::{config::Config, metrics::Metrics}; + +pub use self::tls::CertMode; + +/// Config for the HTTP server +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct HttpConfig { + /// Port to bind to + pub port: u16, + /// Optionally set a custom bind address (will use 0.0.0.0 if unset) + pub bind_addr: Option, +} + +/// Config for the HTTPS server +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct HttpsConfig { + /// Port to bind to + pub port: u16, + /// Optionally set a custom bind address (will use 0.0.0.0 if unset) + pub bind_addr: Option, + /// The list of domains for which SSL certificates should be created. 
+ pub domains: Vec, + /// The mode of SSL certificate creation + pub cert_mode: CertMode, + /// Letsencrypt contact email address (required if using [`CertMode::LetsEncrypt`]) + pub letsencrypt_contact: Option, + /// Whether to use the letsenrypt production servers (only applies to [`CertMode::LetsEncrypt`]) + pub letsencrypt_prod: Option, +} + +/// The HTTP(S) server part of iroh-dns-server +pub struct HttpServer { + tasks: JoinSet>, + http_addr: Option, + https_addr: Option, +} + +impl HttpServer { + /// Spawn the server + pub async fn spawn( + http_config: Option, + https_config: Option, + state: AppState, + ) -> Result { + if http_config.is_none() && https_config.is_none() { + bail!("Either http or https config is required"); + } + + let app = create_app(state); + + let mut tasks = JoinSet::new(); + + // launch http + let http_addr = if let Some(config) = http_config { + let bind_addr = SocketAddr::new( + config.bind_addr.unwrap_or(Ipv4Addr::UNSPECIFIED.into()), + config.port, + ); + let app = app.clone(); + let listener = TcpListener::bind(bind_addr).await?.into_std()?; + let bound_addr = listener.local_addr()?; + let fut = axum_server::from_tcp(listener) + .serve(app.into_make_service_with_connect_info::()); + info!("HTTP server listening on {bind_addr}"); + tasks.spawn(fut); + Some(bound_addr) + } else { + None + }; + + // launch https + let https_addr = if let Some(config) = https_config { + let bind_addr = SocketAddr::new( + config.bind_addr.unwrap_or(Ipv4Addr::UNSPECIFIED.into()), + config.port, + ); + let acceptor = { + let cache_path = Config::data_dir()? + .join("cert_cache") + .join(config.cert_mode.to_string()); + tokio::fs::create_dir_all(&cache_path) + .await + .with_context(|| { + format!("failed to create cert cache dir at {cache_path:?}") + })?; + config + .cert_mode + .build( + config.domains, + cache_path, + config.letsencrypt_contact, + config.letsencrypt_prod.unwrap_or(false), + ) + .await? 
+ }; + let listener = TcpListener::bind(bind_addr).await?.into_std()?; + let bound_addr = listener.local_addr()?; + let fut = axum_server::from_tcp(listener) + .acceptor(acceptor) + .serve(app.into_make_service_with_connect_info::()); + info!("HTTPS server listening on {bind_addr}"); + tasks.spawn(fut); + Some(bound_addr) + } else { + None + }; + + Ok(HttpServer { + tasks, + http_addr, + https_addr, + }) + } + + /// Get the bound address of the HTTP socket. + pub fn http_addr(&self) -> Option { + self.http_addr + } + + /// Get the bound address of the HTTPS socket. + pub fn https_addr(&self) -> Option { + self.https_addr + } + + /// Shutdown the server and wait for all tasks to complete. + pub async fn shutdown(mut self) -> Result<()> { + // TODO: Graceful cancellation. + self.tasks.abort_all(); + self.run_until_done().await?; + Ok(()) + } + + /// Wait for all tasks to complete. + /// + /// Runs forever unless tasks fail. + pub async fn run_until_done(mut self) -> Result<()> { + let mut final_res: anyhow::Result<()> = Ok(()); + while let Some(res) = self.tasks.join_next().await { + match res { + Ok(Ok(())) => {} + Err(err) if err.is_cancelled() => {} + Ok(Err(err)) => { + warn!(?err, "task failed"); + final_res = Err(anyhow::Error::from(err)); + } + Err(err) => { + warn!(?err, "task panicked"); + final_res = Err(err.into()); + } + } + } + final_res + } +} + +pub(crate) fn create_app(state: AppState) -> Router { + // configure cors middleware + let cors = CorsLayer::new() + // allow `GET` and `POST` when accessing the resource + .allow_methods([Method::GET, Method::POST, Method::PUT]) + // allow requests from any origin + .allow_origin(cors::Any); + + // configure tracing middleware + let trace = TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { + let conn_info = request + .extensions() + .get::>() + .expect("connectinfo extension to be present"); + let span = span!( + Level::DEBUG, + "http_request", + method = ?request.method(), + uri = 
?request.uri(), + src = %conn_info.0, + ); + span + }); + + // configure rate limiting middleware + let rate_limit = rate_limiting::create(); + + // configure routes + // + // only the pkarr::put route gets a rate limit + let router = Router::new() + .route("/dns-query", get(doh::get).post(doh::post)) + .route( + "/pkarr/:key", + get(pkarr::get).put(pkarr::put.layer(rate_limit)), + ) + .route("/healthcheck", get(|| async { "OK" })) + .route("/", get(|| async { "Hi!" })) + .with_state(state); + + // configure app + router + .layer(cors) + .layer(trace) + .route_layer(middleware::from_fn(metrics_middleware)) +} + +/// Record request metrics. +/// +// TODO: +// * Request duration would be much better tracked as a histogram. +// * It would be great to attach labels to the metrics, so that the recorded metrics +// can filter by method etc. +// +// See also +// https://github.com/tokio-rs/axum/blob/main/examples/prometheus-metrics/src/main.rs#L114 +async fn metrics_middleware(req: Request, next: Next) -> impl IntoResponse { + let start = Instant::now(); + let response = next.run(req).await; + let latency = start.elapsed().as_millis(); + let status = response.status(); + inc_by!(Metrics, http_requests_duration_ms, latency as u64); + inc!(Metrics, http_requests); + if status.is_success() { + inc!(Metrics, http_requests_success); + } else { + inc!(Metrics, http_requests_error); + } + response +} diff --git a/iroh-dns-server/src/http/doh.rs b/iroh-dns-server/src/http/doh.rs new file mode 100644 index 0000000000..caa785ff14 --- /dev/null +++ b/iroh-dns-server/src/http/doh.rs @@ -0,0 +1,77 @@ +//! 
DNS over HTTPS + +// This module is mostly copied from +// https://github.com/fission-codes/fission-server/blob/main/fission-server/src/routes/doh.rs + +use anyhow::anyhow; +use axum::{ + extract::State, + response::{IntoResponse, Response}, + Json, +}; +use hickory_server::proto::{self, serialize::binary::BinDecodable}; +use http::{ + header::{CACHE_CONTROL, CONTENT_TYPE}, + HeaderValue, StatusCode, +}; + +use crate::state::AppState; + +use super::error::AppResult; + +mod extract; +mod response; + +use self::extract::{DnsMimeType, DnsRequestBody, DnsRequestQuery}; + +/// GET handler for resolving DoH queries +pub async fn get( + State(state): State<AppState>, + DnsRequestQuery(request, accept_type): DnsRequestQuery, +) -> AppResult<Response> { + let message_bytes = state.dns_handler.answer_request(request).await?; + let message = proto::op::Message::from_bytes(&message_bytes).map_err(|e| anyhow!(e))?; + + let min_ttl = message.answers().iter().map(|rec| rec.ttl()).min(); + + let mut response = match accept_type { + DnsMimeType::Message => (StatusCode::OK, message_bytes).into_response(), + DnsMimeType::Json => { + let response = self::response::DnsResponse::from_message(message)?; + (StatusCode::OK, Json(response)).into_response() + } + }; + + response + .headers_mut() + .insert(CONTENT_TYPE, accept_type.to_header_value()); + + if let Some(min_ttl) = min_ttl { + let maxage = + HeaderValue::from_str(&format!("s-maxage={min_ttl}")).map_err(|e| anyhow!(e))?; + response.headers_mut().insert(CACHE_CONTROL, maxage); + } + + Ok(response) +} + +/// POST handler for resolving DoH queries +pub async fn post( + State(state): State<AppState>, + DnsRequestBody(request): DnsRequestBody, +) -> Response { + let response = match state.dns_handler.answer_request(request).await { + Ok(response) => response, + Err(err) => return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(), + }; + + ( + StatusCode::OK, + [(CONTENT_TYPE, DnsMimeType::Message.to_string())], + response, + ) + .into_response()
+} + +// TODO: Port tests from +// https://github.com/fission-codes/fission-server/blob/main/fission-server/src/routes/doh.rs diff --git a/iroh-dns-server/src/http/doh/extract.rs b/iroh-dns-server/src/http/doh/extract.rs new file mode 100644 index 0000000000..819791c563 --- /dev/null +++ b/iroh-dns-server/src/http/doh/extract.rs @@ -0,0 +1,250 @@ +//! Extractors for DNS-over-HTTPS requests + +// This module is mostly copied from +// https://github.com/fission-codes/fission-server/blob/394de877fad021260c69fdb1edd7bb4b2f98108c/fission-server/src/extract/doh.rs + +use async_trait::async_trait; +use axum::{ + extract::{ConnectInfo, FromRequest, FromRequestParts, Query}, + http::Request, +}; +use bytes::Bytes; +use hickory_server::{ + authority::MessageRequest, + proto::{ + self, + serialize::binary::{BinDecodable, BinDecoder, BinEncodable, BinEncoder}, + }, + server::{Protocol, Request as DNSRequest}, +}; +use http::{header, request::Parts, HeaderValue, StatusCode}; +use serde::Deserialize; +use std::{ + fmt::{self, Display, Formatter}, + net::SocketAddr, + str::FromStr, +}; +use tracing::info; + +use crate::http::error::AppError; + +/// A DNS packet encoding type +#[derive(Debug)] +pub enum DnsMimeType { + /// application/dns-message + Message, + /// application/dns-json + Json, +} + +impl Display for DnsMimeType { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + DnsMimeType::Message => write!(f, "application/dns-message"), + DnsMimeType::Json => write!(f, "application/dns-json"), + } + } +} + +impl DnsMimeType { + /// Turn this mime type to an `Accept` HTTP header value + pub fn to_header_value(&self) -> HeaderValue { + HeaderValue::from_static(match self { + Self::Message => "application/dns-message", + Self::Json => "application/dns-json", + }) + } +} + +#[derive(Debug, Deserialize)] +struct DnsMessageQuery { + dns: String, +} + +// See: https://developers.google.com/speed/public-dns/docs/doh/json#supported_parameters +#[derive(Debug, 
Deserialize)] +pub struct DnsQuery { + /// Record name to look up, e.g. example.com + pub name: String, + /// Record type, e.g. A/AAAA/TXT, etc. + #[serde(rename = "type")] + pub record_type: Option<String>, + /// Used to disable DNSSEC validation + pub cd: Option<bool>, + /// Desired content type. E.g. "application/dns-message" or "application/dns-json" + #[allow(dead_code)] + pub ct: Option<String>, + /// Whether to return DNSSEC entries such as RRSIG, NSEC or NSEC3 + #[serde(rename = "do")] + pub dnssec_ok: Option<bool>, + /// Privacy setting for how your IP address is forwarded to authoritative nameservers + #[allow(dead_code)] + pub edns_client_subnet: Option<String>, + /// Some url-safe random characters to pad your messages for privacy (to avoid being fingerprinted by encrypted message length) + #[allow(dead_code)] + pub random_padding: Option<String>, + /// Whether to provide answers for all records up to the root + #[serde(rename = "rd")] + pub recursion_desired: Option<bool>, +} + +/// A DNS request encoded in the query string +#[derive(Debug)] +pub struct DnsRequestQuery(pub(crate) DNSRequest, pub(crate) DnsMimeType); + +/// A DNS request encoded in the body +#[derive(Debug)] +pub struct DnsRequestBody(pub(crate) DNSRequest); + +#[async_trait] +impl<S> FromRequestParts<S> for DnsRequestQuery +where + S: Send + Sync, +{ + type Rejection = AppError; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> { + let ConnectInfo(src_addr) = ConnectInfo::<SocketAddr>::from_request_parts(parts, state).await?; + + match parts.headers.get(header::ACCEPT) { + Some(content_type) if content_type == "application/dns-message" => { + handle_dns_message_query(parts, state, src_addr).await + } + Some(content_type) if content_type == "application/dns-json" => { + handle_dns_json_query(parts, state, src_addr).await + } + Some(content_type) if content_type == "application/x-javascript" => { + handle_dns_json_query(parts, state, src_addr).await + } + None => handle_dns_message_query(parts, state, src_addr).await, + _ => 
Err(AppError::with_status(StatusCode::NOT_ACCEPTABLE)), + } + } +} + +#[async_trait] +impl FromRequest for DnsRequestBody +where + S: Send + Sync, +{ + type Rejection = AppError; + + async fn from_request(req: axum::extract::Request, state: &S) -> Result { + let (mut parts, body) = req.into_parts(); + + let ConnectInfo(src_addr) = ConnectInfo::from_request_parts(&mut parts, state).await?; + + let req = Request::from_parts(parts, body); + + let body = Bytes::from_request(req, state) + .await + .map_err(|_| AppError::with_status(StatusCode::INTERNAL_SERVER_ERROR))?; + + let request = decode_request(&body, src_addr)?; + + Ok(DnsRequestBody(request)) + } +} + +async fn handle_dns_message_query( + parts: &mut Parts, + state: &S, + src_addr: SocketAddr, +) -> Result +where + S: Send + Sync, +{ + let Query(params) = Query::::from_request_parts(parts, state).await?; + + let buf = base64_url::decode(params.dns.as_bytes()) + .map_err(|err| AppError::new(StatusCode::BAD_REQUEST, Some(err)))?; + + let request = decode_request(&buf, src_addr)?; + + Ok(DnsRequestQuery(request, DnsMimeType::Message)) +} + +async fn handle_dns_json_query( + parts: &mut Parts, + state: &S, + src_addr: SocketAddr, +) -> Result +where + S: Send + Sync, +{ + let Query(dns_query) = Query::::from_request_parts(parts, state).await?; + + let request = encode_query_as_request(dns_query, src_addr)?; + + Ok(DnsRequestQuery(request, DnsMimeType::Json)) +} + +/// Exposed to make it usable internally... +pub(crate) fn encode_query_as_request( + question: DnsQuery, + src_addr: SocketAddr, +) -> Result { + let query_type = if let Some(record_type) = question.record_type { + record_type + .parse::() + .map(proto::rr::RecordType::from) + .or_else(|_| FromStr::from_str(&record_type.to_uppercase())) + .map_err(|err| AppError::new(StatusCode::BAD_REQUEST, Some(err)))? 
+ } else { + proto::rr::RecordType::A + }; + + let name = proto::rr::Name::from_utf8(question.name) + .map_err(|err| AppError::new(StatusCode::BAD_REQUEST, Some(err)))?; + + let query = proto::op::Query::query(name, query_type); + + let mut message = proto::op::Message::new(); + + message + .add_query(query) + .set_message_type(proto::op::MessageType::Query) + .set_op_code(proto::op::OpCode::Query) + .set_checking_disabled(question.cd.unwrap_or(false)) + .set_recursion_desired(question.recursion_desired.unwrap_or(true)) + .set_recursion_available(true) + .set_authentic_data(question.dnssec_ok.unwrap_or(false)); + + // This is kind of a hack, but the only way I can find to + // create a MessageRequest is by decoding a buffer of bytes, + // so we encode the message into a buffer and then decode it + let mut buf = Vec::with_capacity(4096); + let mut encoder = BinEncoder::new(&mut buf); + + message + .emit(&mut encoder) + .map_err(|err| AppError::new(StatusCode::BAD_REQUEST, Some(err)))?; + + let request = decode_request(&buf, src_addr)?; + + Ok(request) +} + +fn decode_request(bytes: &[u8], src_addr: SocketAddr) -> Result { + let mut decoder = BinDecoder::new(bytes); + + match MessageRequest::read(&mut decoder) { + Ok(message) => { + info!("received message {message:?}"); + if message.message_type() != proto::op::MessageType::Query { + return Err(AppError::new( + StatusCode::BAD_REQUEST, + Some("Invalid message type: expected query"), + )); + } + + let request = DNSRequest::new(message, src_addr, Protocol::Https); + + Ok(request) + } + Err(err) => Err(AppError::new( + StatusCode::BAD_REQUEST, + Some(format!("Invalid DNS message: {}", err)), + )), + } +} diff --git a/iroh-dns-server/src/http/doh/response.rs b/iroh-dns-server/src/http/doh/response.rs new file mode 100644 index 0000000000..f4cee805c0 --- /dev/null +++ b/iroh-dns-server/src/http/doh/response.rs @@ -0,0 +1,144 @@ +//! 
DNS Response + +// This module is mostly copied from +// https://github.com/fission-codes/fission-server/blob/394de877fad021260c69fdb1edd7bb4b2f98108c/fission-core/src/dns.rs + +use anyhow::{anyhow, ensure, Result}; +use hickory_proto as proto; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +/// JSON representation of a DNS response +/// See: +pub struct DnsResponse { + /// Standard DNS response code + #[serde(rename = "Status")] + pub status: u32, + /// Whether the response was truncated + #[serde(rename = "TC")] + pub tc: bool, + /// Whether recursion was desired + #[serde(rename = "RD")] + pub rd: bool, + /// Whether recursion was available + #[serde(rename = "RA")] + pub ra: bool, + /// Whether the response was validated with DNSSEC + #[serde(rename = "AD")] + pub ad: bool, + /// Whether the client asked to disable DNSSEC validation + #[serde(rename = "CD")] + pub cd: bool, + /// The questions that this request answers + #[serde(rename = "Question")] + pub question: Vec, + /// The answers to the request + #[serde(rename = "Answer")] + #[serde(skip_serializing_if = "Vec::is_empty")] + pub answer: Vec, + /// An optional comment + #[serde(rename = "Comment")] + pub comment: Option, + /// IP Address / scope prefix-length of the client + /// See: + pub edns_client_subnet: Option, +} + +impl DnsResponse { + /// Create a new JSON response from a DNS message + pub fn from_message(message: proto::op::Message) -> Result { + ensure!( + message.message_type() == proto::op::MessageType::Response, + "Expected message type to be response" + ); + + ensure!( + message.query_count() == message.queries().len() as u16, + "Query count mismatch" + ); + + ensure!( + message.answer_count() == message.answers().len() as u16, + "Answer count mismatch" + ); + + let status: u32 = + >::from(message.response_code()) as u32; + + let question: Vec<_> = message + .queries() + .iter() + .map(DohQuestionJson::from_query) + .collect(); + + let answer: Vec<_> = 
message + .answers() + .iter() + .map(DohRecordJson::from_record) + .collect::>()?; + + Ok(DnsResponse { + status, + tc: message.truncated(), + rd: message.recursion_desired(), + ra: message.recursion_available(), + ad: message.authentic_data(), + cd: message.checking_disabled(), + question, + answer, + comment: None, + edns_client_subnet: None, + }) + } +} + +#[derive(Debug, Serialize, Deserialize)] +/// JSON representation of a DNS question +pub struct DohQuestionJson { + /// FQDN with trailing dot + pub name: String, + /// Standard DNS RR type + #[serde(rename = "type")] + pub question_type: u16, +} + +impl DohQuestionJson { + /// Create a new JSON question from a DNS query + pub fn from_query(query: &proto::op::Query) -> Self { + Self { + name: query.name().to_string(), + question_type: query.query_type().into(), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +/// JSON representation of a DNS record +pub struct DohRecordJson { + /// FQDN with trailing dot + pub name: String, + /// Standard DNS RR type + #[serde(rename = "type")] + pub record_type: u16, + /// Time-to-live, in seconds + #[serde(rename = "TTL")] + pub ttl: u32, + /// Record data + pub data: String, +} + +impl DohRecordJson { + /// Create a new JSON record from a DNS record + pub fn from_record(record: &proto::rr::Record) -> Result { + let data = record + .data() + .ok_or_else(|| anyhow!("Missing record data"))?; + + Ok(Self { + name: record.name().to_string(), + record_type: record.record_type().into(), + ttl: record.ttl(), + data: data.to_string(), + }) + } +} diff --git a/iroh-dns-server/src/http/error.rs b/iroh-dns-server/src/http/error.rs new file mode 100644 index 0000000000..7f8ab542d8 --- /dev/null +++ b/iroh-dns-server/src/http/error.rs @@ -0,0 +1,101 @@ +use axum::{ + extract::rejection::{ExtensionRejection, QueryRejection}, + http::StatusCode, + response::IntoResponse, + Json, +}; +use serde::{Deserialize, Serialize}; + +pub type AppResult = Result; + +#[derive(Debug, Clone, 
Serialize, Deserialize)] +pub struct AppError { + #[serde(with = "serde_status_code")] + status: StatusCode, + detail: Option, +} + +impl Default for AppError { + fn default() -> Self { + Self { + status: StatusCode::INTERNAL_SERVER_ERROR, + detail: None, + } + } +} + +impl AppError { + pub fn with_status(status: StatusCode) -> AppError { + Self { + status, + detail: None, + } + } + + /// Create a new [`AppError`]. + pub fn new(status_code: StatusCode, message: Option) -> AppError { + Self { + status: status_code, + // title: Self::canonical_reason_to_string(&status_code), + detail: message.map(|m| m.to_string()), + } + } +} + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + let json = Json(self.clone()); + (self.status, json).into_response() + } +} + +impl From for AppError { + fn from(value: anyhow::Error) -> Self { + Self { + status: StatusCode::INTERNAL_SERVER_ERROR, + detail: Some(value.to_string()), + } + } +} + +impl From for AppError { + fn from(value: QueryRejection) -> Self { + Self::new(StatusCode::BAD_REQUEST, Some(value)) + } +} + +impl From for AppError { + fn from(value: ExtensionRejection) -> Self { + Self::new(StatusCode::BAD_REQUEST, Some(value)) + } +} + +/// Serialize/Deserializer for status codes. +/// +/// This is needed because status code according to JSON API spec must +/// be the status code as a STRING. +/// +/// We could have used http_serde, but it encodes the status code as a NUMBER. +pub mod serde_status_code { + use http::StatusCode; + use serde::{de::Unexpected, Deserialize, Deserializer, Serialize, Serializer}; + + /// Serialize [StatusCode]s. + pub fn serialize(status: &StatusCode, ser: S) -> Result { + String::serialize(&status.as_u16().to_string(), ser) + } + + /// Deserialize [StatusCode]s. 
+ pub fn deserialize<'de, D>(de: D) -> Result + where + D: Deserializer<'de>, + { + let str = String::deserialize(de)?; + StatusCode::from_bytes(str.as_bytes()).map_err(|_| { + serde::de::Error::invalid_value( + Unexpected::Str(str.as_str()), + &"A valid http status code", + ) + }) + } +} diff --git a/iroh-dns-server/src/http/extract.rs b/iroh-dns-server/src/http/extract.rs new file mode 100644 index 0000000000..cff0e9089e --- /dev/null +++ b/iroh-dns-server/src/http/extract.rs @@ -0,0 +1 @@ +mod json; diff --git a/iroh-dns-server/src/http/pkarr.rs b/iroh-dns-server/src/http/pkarr.rs new file mode 100644 index 0000000000..a0e81fef09 --- /dev/null +++ b/iroh-dns-server/src/http/pkarr.rs @@ -0,0 +1,52 @@ +use anyhow::Result; +use axum::extract::Path; +use axum::{extract::State, response::IntoResponse}; +use bytes::Bytes; + +use http::{header, StatusCode}; + +use tracing::info; + +use crate::util::PublicKeyBytes; +use crate::{state::AppState, store::PacketSource}; + +use super::error::AppError; + +pub async fn put( + State(state): State, + Path(key): Path, + body: Bytes, +) -> Result { + let key = pkarr::PublicKey::try_from(key.as_str()) + .map_err(|e| AppError::new(StatusCode::BAD_REQUEST, Some(format!("invalid key: {e}"))))?; + let label = &key.to_z32()[..10]; + let signed_packet = pkarr::SignedPacket::from_relay_response(key, body).map_err(|e| { + AppError::new( + StatusCode::BAD_REQUEST, + Some(format!("invalid body payload: {e}")), + ) + })?; + + let updated = state + .store + .insert(signed_packet, PacketSource::PkarrPublish) + .await?; + info!(key = %label, ?updated, "pkarr upsert"); + Ok(StatusCode::NO_CONTENT) +} + +pub async fn get( + State(state): State, + Path(pubkey): Path, +) -> Result { + let pubkey = PublicKeyBytes::from_z32(&pubkey) + .map_err(|e| AppError::new(StatusCode::BAD_REQUEST, Some(format!("invalid key: {e}"))))?; + let signed_packet = state + .store + .get_signed_packet(&pubkey) + .await? 
+ .ok_or_else(|| AppError::with_status(StatusCode::NOT_FOUND))?; + let body = signed_packet.as_relay_request(); + let headers = [(header::CONTENT_TYPE, "application/x-pkarr-signed-packet")]; + Ok((headers, body)) +} diff --git a/iroh-dns-server/src/http/rate_limiting.rs b/iroh-dns-server/src/http/rate_limiting.rs new file mode 100644 index 0000000000..991ff6e3b0 --- /dev/null +++ b/iroh-dns-server/src/http/rate_limiting.rs @@ -0,0 +1,40 @@ +use std::time::Duration; + +use governor::{clock::QuantaInstant, middleware::NoOpMiddleware}; +use tower_governor::{ + governor::GovernorConfigBuilder, key_extractor::PeerIpKeyExtractor, GovernorLayer, +}; + +/// Create the default rate-limiting layer. +/// +/// This spawns a background thread to clean up the rate limiting cache. +pub fn create() -> GovernorLayer<'static, PeerIpKeyExtractor, NoOpMiddleware<QuantaInstant>> { + // Configure rate limiting: + // * allow bursts with up to two requests per IP address + // * replenish one element every four seconds + let governor_conf = GovernorConfigBuilder::default() + // .use_headers() + .per_second(4) + .burst_size(2) + .finish() + .expect("failed to build rate-limiting governor"); + + // The governor layer needs a reference that outlives the layer. + // The tower_governor crate recommends in its examples to use Box::leak here. + // In the unreleased v0.4 of tower_governor this was changed to use an Arc instead.
+ // https://github.com/benwis/tower-governor/pull/27 + let governor_conf = Box::leak(Box::new(governor_conf)); + + // The governor needs a background task for garbage collection (to clear expired records) + let gc_interval = Duration::from_secs(60); + let governor_limiter = governor_conf.limiter().clone(); + std::thread::spawn(move || loop { + std::thread::sleep(gc_interval); + tracing::debug!("rate limiting storage size: {}", governor_limiter.len()); + governor_limiter.retain_recent(); + }); + + GovernorLayer { + config: &*governor_conf, + } +} diff --git a/iroh-dns-server/src/http/tls.rs b/iroh-dns-server/src/http/tls.rs new file mode 100644 index 0000000000..4e079aefd4 --- /dev/null +++ b/iroh-dns-server/src/http/tls.rs @@ -0,0 +1,184 @@ +use std::{ + borrow::Cow, + io, + path::{Path, PathBuf}, + sync::{Arc, OnceLock}, +}; + +use anyhow::{bail, Context, Result}; +use axum_server::{ + accept::Accept, + tls_rustls::{RustlsAcceptor, RustlsConfig}, +}; +use futures::{future::BoxFuture, FutureExt}; +use serde::{Deserialize, Serialize}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_rustls_acme::{axum::AxumAcceptor, caches::DirCache, AcmeConfig}; +use tokio_stream::StreamExt; +use tracing::{debug, error, info_span, Instrument}; + +/// The mode how SSL certificates should be created. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, strum::Display)] +#[serde(rename_all = "snake_case")] +pub enum CertMode { + /// Certs are loaded from a the `cert_cache` path + Manual, + /// ACME with LetsEncrypt servers + LetsEncrypt, + /// Create self-signed certificates and store them in the `cert_cache` path + SelfSigned, +} + +impl CertMode { + /// Build the [`TlsAcceptor`] for this mode. 
+ pub(crate) async fn build( + &self, + domains: Vec, + cert_cache: PathBuf, + letsencrypt_contact: Option, + letsencrypt_prod: bool, + ) -> Result { + Ok(match self { + CertMode::Manual => TlsAcceptor::manual(domains, cert_cache).await?, + CertMode::SelfSigned => TlsAcceptor::self_signed(domains).await?, + CertMode::LetsEncrypt => { + let contact = + letsencrypt_contact.context("contact is required for letsencrypt cert mode")?; + TlsAcceptor::letsencrypt(domains, &contact, letsencrypt_prod, cert_cache)? + } + }) + } +} + +/// TLS Certificate Authority acceptor. +#[derive(Clone)] +pub enum TlsAcceptor { + LetsEncrypt(AxumAcceptor), + Manual(RustlsAcceptor), +} + +impl Accept + for TlsAcceptor +{ + type Stream = tokio_rustls::server::TlsStream; + type Service = S; + type Future = BoxFuture<'static, io::Result<(Self::Stream, Self::Service)>>; + + fn accept(&self, stream: I, service: S) -> Self::Future { + match self { + Self::LetsEncrypt(a) => a.accept(stream, service).boxed(), + Self::Manual(a) => a.accept(stream, service).boxed(), + } + } +} + +impl TlsAcceptor { + async fn self_signed(domains: Vec) -> Result { + let tls_cert = rcgen::generate_simple_self_signed(domains)?; + let config = RustlsConfig::from_der( + vec![tls_cert.serialize_der()?], + tls_cert.serialize_private_key_der(), + ) + .await?; + let acceptor = RustlsAcceptor::new(config); + Ok(Self::Manual(acceptor)) + } + + async fn manual(domains: Vec, dir: PathBuf) -> Result { + let config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth(); + if domains.len() != 1 { + bail!("Multiple domains in manual mode are not supported"); + } + let keyname = escape_hostname(&domains[0]); + let cert_path = dir.join(format!("{keyname}.crt")); + let key_path = dir.join(format!("{keyname}.key")); + + let (certs, secret_key) = tokio::task::spawn_blocking(move || { + let certs = load_certs(cert_path)?; + let key = load_secret_key(key_path)?; + anyhow::Ok((certs, key)) + }) + .await??; + + let 
config = config.with_single_cert(certs, secret_key)?; + let config = RustlsConfig::from_config(Arc::new(config)); + let acceptor = RustlsAcceptor::new(config); + Ok(Self::Manual(acceptor)) + } + + fn letsencrypt( + domains: Vec, + contact: &str, + is_production: bool, + dir: PathBuf, + ) -> Result { + let config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth(); + let mut state = AcmeConfig::new(domains) + .contact([format!("mailto:{contact}")]) + .cache_option(Some(DirCache::new(dir))) + .directory_lets_encrypt(is_production) + .state(); + + let config = config.with_cert_resolver(state.resolver()); + let acceptor = state.acceptor(); + + tokio::spawn( + async move { + loop { + match state.next().await.unwrap() { + Ok(ok) => debug!("acme event: {:?}", ok), + Err(err) => error!("error: {:?}", err), + } + } + } + .instrument(info_span!("acme")), + ); + let config = Arc::new(config); + let acceptor = AxumAcceptor::new(acceptor, config); + Ok(Self::LetsEncrypt(acceptor)) + } +} + +fn load_certs(filename: impl AsRef) -> Result> { + let certfile = std::fs::File::open(filename).context("cannot open certificate file")?; + let mut reader = std::io::BufReader::new(certfile); + + let certs = rustls_pemfile::certs(&mut reader)? + .iter() + .map(|v| rustls::Certificate(v.clone())) + .collect(); + + Ok(certs) +} + +fn load_secret_key(filename: impl AsRef) -> Result { + let keyfile = std::fs::File::open(filename.as_ref()).context("cannot open secret key file")?; + let mut reader = std::io::BufReader::new(keyfile); + + loop { + match rustls_pemfile::read_one(&mut reader).context("cannot parse secret key .pem file")? 
{ + Some(rustls_pemfile::Item::RSAKey(key)) => return Ok(rustls::PrivateKey(key)), + Some(rustls_pemfile::Item::PKCS8Key(key)) => return Ok(rustls::PrivateKey(key)), + Some(rustls_pemfile::Item::ECKey(key)) => return Ok(rustls::PrivateKey(key)), + None => break, + _ => {} + } + } + + bail!( + "no keys found in {} (encrypted keys not supported)", + filename.as_ref().display() + ); +} + +static UNSAFE_HOSTNAME_CHARACTERS: OnceLock = OnceLock::new(); + +fn escape_hostname(hostname: &str) -> Cow<'_, str> { + let regex = UNSAFE_HOSTNAME_CHARACTERS + .get_or_init(|| regex::Regex::new(r"[^a-zA-Z0-9-\.]").expect("valid regex")); + regex.replace_all(hostname, "") +} diff --git a/iroh-dns-server/src/lib.rs b/iroh-dns-server/src/lib.rs new file mode 100644 index 0000000000..2374b86e02 --- /dev/null +++ b/iroh-dns-server/src/lib.rs @@ -0,0 +1,73 @@ +//! A DNS server and pkarr relay + +#![deny(missing_docs, rustdoc::broken_intra_doc_links)] + +pub mod config; +pub mod dns; +pub mod http; +pub mod metrics; +pub mod server; +pub mod state; +mod store; +mod util; + +#[cfg(test)] +mod tests { + use std::net::SocketAddr; + + use anyhow::Result; + use hickory_resolver::{ + config::{NameServerConfig, Protocol, ResolverConfig}, + AsyncResolver, + }; + use iroh_net::{ + discovery::pkarr_publish::PkarrRelayClient, + dns::{ + node_info::{lookup_by_id, NodeInfo}, + DnsResolver, + }, + key::SecretKey, + }; + use url::Url; + + use crate::server::Server; + + #[tokio::test] + async fn integration_smoke() -> Result<()> { + tracing_subscriber::fmt::init(); + let (server, nameserver, http_url) = Server::spawn_for_tests().await?; + + let pkarr_relay = { + let mut url = http_url.clone(); + url.set_path("/pkarr"); + url + }; + + let origin = "irohdns.example."; + + let secret_key = SecretKey::generate(); + let node_id = secret_key.public(); + let relay_url: Url = "https://relay.example.".parse()?; + let pkarr = PkarrRelayClient::new(pkarr_relay); + let node_info = NodeInfo::new(node_id, 
Some(relay_url.clone())); + let signed_packet = node_info.to_pkarr_signed_packet(&secret_key, 30)?; + + pkarr.publish(&signed_packet).await?; + + let resolver = test_resolver(nameserver); + let res = lookup_by_id(&resolver, &node_id, origin).await?; + + assert_eq!(res.node_id, node_id); + assert_eq!(res.info.relay_url.map(Url::from), Some(relay_url)); + + server.shutdown().await?; + Ok(()) + } + + fn test_resolver(nameserver: SocketAddr) -> DnsResolver { + let mut config = ResolverConfig::new(); + let nameserver_config = NameServerConfig::new(nameserver, Protocol::Udp); + config.add_name_server(nameserver_config); + AsyncResolver::tokio(config, Default::default()) + } +} diff --git a/iroh-dns-server/src/main.rs b/iroh-dns-server/src/main.rs new file mode 100644 index 0000000000..6a7f88d673 --- /dev/null +++ b/iroh-dns-server/src/main.rs @@ -0,0 +1,36 @@ +#![allow(unused_imports)] + +use anyhow::Result; +use axum::{routing::get, Router}; +use clap::Parser; +use futures::{Future, FutureExt}; +use iroh_dns_server::{ + config::Config, metrics::init_metrics, server::run_with_config_until_ctrl_c, +}; +use std::net::{Ipv4Addr, SocketAddr}; +use std::path::PathBuf; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; +use tracing::{debug, debug_span, error, error_span, Instrument, Span}; + +#[derive(Parser, Debug)] +struct Cli { + /// Path to config file + #[clap(short, long)] + config: Option, +} + +#[tokio::main] +async fn main() -> Result<()> { + tracing_subscriber::fmt::init(); + let args = Cli::parse(); + + let config = if let Some(path) = args.config { + Config::load(path).await? + } else { + Config::default() + }; + + init_metrics(); + run_with_config_until_ctrl_c(config).await +} diff --git a/iroh-dns-server/src/metrics.rs b/iroh-dns-server/src/metrics.rs new file mode 100644 index 0000000000..7b07ac07a0 --- /dev/null +++ b/iroh-dns-server/src/metrics.rs @@ -0,0 +1,64 @@ +//! 
Metrics support for the server + +use iroh_metrics::core::{Core, Counter, Metric}; +use struct_iterable::Iterable; + +/// Metrics for iroh-dns-server +#[derive(Debug, Clone, Iterable)] +#[allow(missing_docs)] +pub struct Metrics { + pub pkarr_publish_update: Counter, + pub pkarr_publish_noop: Counter, + pub pkarr_publish_error: Counter, + pub dns_requests: Counter, + pub dns_requests_udp: Counter, + pub dns_requests_https: Counter, + pub dns_lookup_success: Counter, + pub dns_lookup_notfound: Counter, + pub dns_lookup_error: Counter, + pub http_requests: Counter, + pub http_requests_success: Counter, + pub http_requests_error: Counter, + pub http_requests_duration_ms: Counter, + pub store_packets_inserted: Counter, + pub store_packets_removed: Counter, + pub store_packets_updated: Counter, +} + +impl Default for Metrics { + fn default() -> Self { + Self { + pkarr_publish_update: Counter::new("Number of pkarr relay puts that updated the state"), + pkarr_publish_noop: Counter::new( + "Number of pkarr relay puts that did not update the state", + ), + pkarr_publish_error: Counter::new("Number of pkarr relay puts that failed"), + dns_requests: Counter::new("DNS requests (total)"), + dns_requests_udp: Counter::new("DNS requests via UDP"), + dns_requests_https: Counter::new("DNS requests via HTTPS (DoH)"), + dns_lookup_success: Counter::new("DNS lookup responses with at least one answer"), + dns_lookup_notfound: Counter::new("DNS lookup responses with no answers"), + dns_lookup_error: Counter::new("DNS lookup responses which failed"), + http_requests: Counter::new("Number of HTTP requests"), + http_requests_success: Counter::new("Number of HTTP requests with a 2xx status code"), + http_requests_error: Counter::new("Number of HTTP requests with a non-2xx status code"), + http_requests_duration_ms: Counter::new("Total duration of all HTTP requests"), + store_packets_inserted: Counter::new("Signed packets inserted into the store"), + store_packets_removed: 
Counter::new("Signed packets removed from the store"), + store_packets_updated: Counter::new("Number of updates to existing packets"), + } + } +} + +impl Metric for Metrics { + fn name() -> &'static str { + "dns_server" + } +} + +/// Init the metrics collection core. +pub fn init_metrics() { + Core::init(|reg, metrics| { + metrics.insert(Metrics::new(reg)); + }); +} diff --git a/iroh-dns-server/src/server.rs b/iroh-dns-server/src/server.rs new file mode 100644 index 0000000000..2b952a8c51 --- /dev/null +++ b/iroh-dns-server/src/server.rs @@ -0,0 +1,107 @@ +//! The main server which combines the DNS and HTTP(S) servers. + +use anyhow::Result; +use iroh_metrics::metrics::start_metrics_server; +use tracing::info; + +use crate::{ + config::Config, + dns::{DnsHandler, DnsServer}, + http::HttpServer, + state::AppState, + store::ZoneStore, +}; + +/// Spawn the server and run until the `Ctrl-C` signal is received, then shutdown. +pub async fn run_with_config_until_ctrl_c(config: Config) -> Result<()> { + let store = ZoneStore::persistent(Config::signed_packet_store_path()?)?; + let server = Server::spawn(config, store).await?; + tokio::signal::ctrl_c().await?; + info!("shutdown"); + server.shutdown().await?; + Ok(()) +} + +/// The iroh-dns server. +pub struct Server { + http_server: HttpServer, + dns_server: DnsServer, + metrics_task: tokio::task::JoinHandle>, +} + +impl Server { + /// Spawn the server. 
+ /// + /// This will spawn several background tasks: + /// * A DNS server task + /// * A HTTP server task, if `config.http` is not empty + /// * A HTTPS server task, if `config.https` is not empty + pub async fn spawn(config: Config, store: ZoneStore) -> Result { + let dns_handler = DnsHandler::new(store.clone(), &config.dns)?; + + let state = AppState { store, dns_handler }; + + let metrics_addr = config.metrics_addr(); + let metrics_task = tokio::task::spawn(async move { + if let Some(addr) = metrics_addr { + start_metrics_server(addr).await?; + } + Ok(()) + }); + let http_server = HttpServer::spawn(config.http, config.https, state.clone()).await?; + let dns_server = DnsServer::spawn(config.dns, state.dns_handler.clone()).await?; + Ok(Self { + http_server, + dns_server, + metrics_task, + }) + } + + /// Cancel the server tasks and wait for all tasks to complete. + pub async fn shutdown(self) -> Result<()> { + self.metrics_task.abort(); + let (res1, res2) = tokio::join!(self.dns_server.shutdown(), self.http_server.shutdown(),); + res1?; + res2?; + Ok(()) + } + + /// Wait for all tasks to complete. + /// + /// This will run forever unless all tasks close with an error, or `Self::cancel` is called. + pub async fn run_until_error(self) -> Result<()> { + tokio::select! { + res = self.dns_server.run_until_done() => res?, + res = self.http_server.run_until_done() => res?, + } + self.metrics_task.abort(); + Ok(()) + } + + /// Spawn a server suitable for testing. + /// + /// This will run the DNS and HTTP servers, but not the HTTPS server. + /// + /// It returns the server handle, the [`SocketAddr`] of the DNS server and the [`Url`] of the + /// HTTP server. 
+ #[cfg(test)] + pub async fn spawn_for_tests() -> Result<(Self, std::net::SocketAddr, url::Url)> { + use crate::config::MetricsConfig; + use std::net::{IpAddr, Ipv4Addr}; + + let mut config = Config::default(); + config.dns.port = 0; + config.dns.bind_addr = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); + config.http.as_mut().unwrap().port = 0; + config.http.as_mut().unwrap().bind_addr = Some(IpAddr::V4(Ipv4Addr::LOCALHOST)); + config.https = None; + config.metrics = Some(MetricsConfig::disabled()); + + let store = ZoneStore::in_memory()?; + let server = Self::spawn(config, store).await?; + let dns_addr = server.dns_server.local_addr(); + let http_addr = server.http_server.http_addr().expect("http is set"); + let http_url = format!("http://{http_addr}").parse()?; + Ok((server, dns_addr, http_url)) + } +} diff --git a/iroh-dns-server/src/state.rs b/iroh-dns-server/src/state.rs new file mode 100644 index 0000000000..9063d40b3d --- /dev/null +++ b/iroh-dns-server/src/state.rs @@ -0,0 +1,12 @@ +//! Shared state and store for the iroh-dns-server + +use crate::{dns::DnsHandler, store::ZoneStore}; + +/// The shared app state. +#[derive(Clone)] +pub struct AppState { + /// The pkarr DNS store + pub store: ZoneStore, + /// Handler for DNS requests + pub dns_handler: DnsHandler, +} diff --git a/iroh-dns-server/src/store.rs b/iroh-dns-server/src/store.rs new file mode 100644 index 0000000000..5877d00906 --- /dev/null +++ b/iroh-dns-server/src/store.rs @@ -0,0 +1,190 @@ +//! Pkarr packet store used to resolve DNS queries. 
+ +use std::{collections::BTreeMap, num::NonZeroUsize, path::Path, sync::Arc}; + +use anyhow::Result; +use hickory_proto::rr::{Name, RecordSet, RecordType, RrKey}; +use iroh_metrics::inc; +use lru::LruCache; +use parking_lot::Mutex; +use pkarr::SignedPacket; + +use crate::{ + metrics::Metrics, + util::{signed_packet_to_hickory_records_without_origin, PublicKeyBytes}, +}; + +use self::signed_packets::SignedPacketStore; + +mod signed_packets; + +/// Cache up to 1 million pkarr zones by default +pub const DEFAULT_CACHE_CAPACITY: usize = 1024 * 1024; + +/// Where a new pkarr packet comes from +pub enum PacketSource { + /// Received via HTTPS relay PUT + PkarrPublish, +} + +/// A store for pkarr signed packets. +/// +/// Packets are stored in the persistent [`SignedPacketStore`], and cached on-demand in an in-memory LRU +/// cache used for resolving DNS queries. +#[derive(Debug, Clone)] +pub struct ZoneStore { + cache: Arc>, + store: Arc, +} + +impl ZoneStore { + /// Create a persistent store + pub fn persistent(path: impl AsRef) -> Result { + let packet_store = SignedPacketStore::persistent(path)?; + Ok(Self::new(packet_store)) + } + + /// Create an in-memory store. + pub fn in_memory() -> Result { + let packet_store = SignedPacketStore::in_memory()?; + Ok(Self::new(packet_store)) + } + + /// Create a new zone store. + pub fn new(store: SignedPacketStore) -> Self { + let zone_cache = ZoneCache::new(DEFAULT_CACHE_CAPACITY); + Self { + store: Arc::new(store), + cache: Arc::new(Mutex::new(zone_cache)), + } + } + + /// Resolve a DNS query. + // allow unused async: this will be async soon. + #[allow(clippy::unused_async)] + pub async fn resolve( + &self, + pubkey: &PublicKeyBytes, + name: &Name, + record_type: RecordType, + ) -> Result>> { + if let Some(rset) = self.cache.lock().resolve(pubkey, name, record_type) { + return Ok(Some(rset)); + } + + if let Some(packet) = self.store.get(pubkey)? 
{ + return self + .cache + .lock() + .insert_and_resolve(&packet, name, record_type); + }; + + // This would be where mainline discovery could be added. + + Ok(None) + } + + /// Get the latest signed packet for a pubkey. + // allow unused async: this will be async soon. + #[allow(clippy::unused_async)] + pub async fn get_signed_packet(&self, pubkey: &PublicKeyBytes) -> Result> { + self.store.get(pubkey) + } + + /// Insert a signed packet into the cache and the store. + /// + /// Returns whether this produced an update, i.e. whether the packet is the newest for its + /// pubkey. + // allow unused async: this will be async soon. + #[allow(clippy::unused_async)] + pub async fn insert(&self, signed_packet: SignedPacket, _source: PacketSource) -> Result { + let pubkey = PublicKeyBytes::from_signed_packet(&signed_packet); + if self.store.upsert(signed_packet)? { + inc!(Metrics, pkarr_publish_update); + self.cache.lock().remove(&pubkey); + Ok(true) + } else { + inc!(Metrics, pkarr_publish_noop); + Ok(false) + } + } +} + +#[derive(Debug)] +struct ZoneCache { + cache: LruCache, +} + +impl ZoneCache { + fn new(cap: usize) -> Self { + let cache = LruCache::new(NonZeroUsize::new(cap).expect("capacity must be larger than 0")); + Self { cache } + } + + fn resolve( + &mut self, + pubkey: &PublicKeyBytes, + name: &Name, + record_type: RecordType, + ) -> Option> { + self.cache + .get(pubkey) + .and_then(|zone| zone.resolve(name, record_type)) + } + + fn insert_and_resolve( + &mut self, + signed_packet: &SignedPacket, + name: &Name, + record_type: RecordType, + ) -> Result>> { + let pubkey = PublicKeyBytes::from_signed_packet(signed_packet); + self.insert(signed_packet)?; + Ok(self.resolve(&pubkey, name, record_type)) + } + + fn insert(&mut self, signed_packet: &SignedPacket) -> Result<()> { + let pubkey = PublicKeyBytes::from_signed_packet(signed_packet); + if self + .cache + .peek(&pubkey) + .map(|old| old.is_newer_than(signed_packet)) + .unwrap_or(false) + { + return Ok(()); + } 
+ self.cache + .put(pubkey, CachedZone::from_signed_packet(signed_packet)?); + Ok(()) + } + + fn remove(&mut self, pubkey: &PublicKeyBytes) { + self.cache.pop(pubkey); + } +} + +#[derive(Debug)] +struct CachedZone { + timestamp: u64, + records: BTreeMap>, +} + +impl CachedZone { + fn from_signed_packet(signed_packet: &SignedPacket) -> Result { + let (_label, records) = + signed_packet_to_hickory_records_without_origin(signed_packet, |_| true)?; + Ok(Self { + records, + timestamp: *signed_packet.timestamp(), + }) + } + + fn is_newer_than(&self, signed_packet: &SignedPacket) -> bool { + self.timestamp > *signed_packet.timestamp() + } + + fn resolve(&self, name: &Name, record_type: RecordType) -> Option> { + let key = RrKey::new(name.into(), record_type); + self.records.get(&key).cloned() + } +} diff --git a/iroh-dns-server/src/store/signed_packets.rs b/iroh-dns-server/src/store/signed_packets.rs new file mode 100644 index 0000000000..1ed03598d2 --- /dev/null +++ b/iroh-dns-server/src/store/signed_packets.rs @@ -0,0 +1,98 @@ +use std::path::Path; + +use anyhow::Result; +use iroh_metrics::inc; +use pkarr::SignedPacket; +use redb::{backends::InMemoryBackend, Database, ReadableTable, TableDefinition}; + +use crate::{metrics::Metrics, util::PublicKeyBytes}; + +pub type SignedPacketsKey = [u8; 32]; +const SIGNED_PACKETS_TABLE: TableDefinition<&SignedPacketsKey, &[u8]> = + TableDefinition::new("signed-packets-1"); + +#[derive(Debug)] +pub struct SignedPacketStore { + db: Database, +} + +impl SignedPacketStore { + pub fn persistent(path: impl AsRef) -> Result { + if let Some(parent) = path.as_ref().parent() { + std::fs::create_dir_all(parent)?; + } + let db = Database::builder().create(path)?; + Self::open(db) + } + + pub fn in_memory() -> Result { + let db = Database::builder().create_with_backend(InMemoryBackend::new())?; + Self::open(db) + } + + pub fn open(db: Database) -> Result { + let write_tx = db.begin_write()?; + { + let _table = 
write_tx.open_table(SIGNED_PACKETS_TABLE)?; + } + write_tx.commit()?; + Ok(Self { db }) + } + + pub fn upsert(&self, packet: SignedPacket) -> Result { + let key = PublicKeyBytes::from_signed_packet(&packet); + let tx = self.db.begin_write()?; + let mut replaced = false; + { + let mut table = tx.open_table(SIGNED_PACKETS_TABLE)?; + if let Some(existing) = get_packet(&table, &key)? { + if existing.more_recent_than(&packet) { + return Ok(false); + } else { + replaced = true; + } + } + let value = packet.as_bytes(); + table.insert(key.as_bytes(), &value[..])?; + } + tx.commit()?; + if replaced { + inc!(Metrics, store_packets_updated); + } else { + inc!(Metrics, store_packets_inserted); + } + Ok(true) + } + + pub fn get(&self, key: &PublicKeyBytes) -> Result> { + let tx = self.db.begin_read()?; + let table = tx.open_table(SIGNED_PACKETS_TABLE)?; + get_packet(&table, key) + } + + pub fn remove(&self, key: &PublicKeyBytes) -> Result { + let tx = self.db.begin_write()?; + let updated = { + let mut table = tx.open_table(SIGNED_PACKETS_TABLE)?; + let did_remove = table.remove(key.as_bytes())?.is_some(); + #[allow(clippy::let_and_return)] + did_remove + }; + tx.commit()?; + if updated { + inc!(Metrics, store_packets_removed) + } + Ok(updated) + } +} + +fn get_packet( + table: &impl ReadableTable<&'static SignedPacketsKey, &'static [u8]>, + key: &PublicKeyBytes, +) -> Result> { + let Some(row) = table.get(key.as_ref())? 
else { + return Ok(None); + }; + let packet = SignedPacket::from_bytes(row.value().to_vec().into(), false)?; + Ok(Some(packet)) +} diff --git a/iroh-dns-server/src/util.rs b/iroh-dns-server/src/util.rs new file mode 100644 index 0000000000..0b64f34b2e --- /dev/null +++ b/iroh-dns-server/src/util.rs @@ -0,0 +1,151 @@ +use core::fmt; +use std::{ + collections::{btree_map, BTreeMap}, + str::FromStr, + sync::Arc, +}; + +use anyhow::{anyhow, Result}; +use hickory_proto::{ + op::Message, + rr::{ + domain::{IntoLabel, Label}, + Name, Record, RecordSet, RecordType, RrKey, + }, + serialize::binary::BinDecodable, +}; +use pkarr::SignedPacket; + +#[derive(derive_more::From, derive_more::Into, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct PublicKeyBytes([u8; 32]); + +impl PublicKeyBytes { + pub fn from_z32(s: &str) -> Result { + let bytes = z32::decode(s.as_bytes())?; + let bytes: [u8; 32] = bytes.try_into().map_err(|_| anyhow!("invalid length"))?; + Ok(Self(bytes)) + } + + pub fn to_z32(&self) -> String { + z32::encode(&self.0) + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.0 + } + + pub fn as_bytes(&self) -> &[u8; 32] { + &self.0 + } + + pub fn from_signed_packet(packet: &SignedPacket) -> Self { + Self(packet.public_key().to_bytes()) + } +} + +impl fmt::Display for PublicKeyBytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_z32()) + } +} + +impl fmt::Debug for PublicKeyBytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PublicKeyBytes({})", self.to_z32()) + } +} + +impl From for PublicKeyBytes { + fn from(value: pkarr::PublicKey) -> Self { + Self(value.to_bytes()) + } +} + +impl TryFrom for pkarr::PublicKey { + type Error = anyhow::Error; + fn try_from(value: PublicKeyBytes) -> Result { + pkarr::PublicKey::try_from(value.0).map_err(anyhow::Error::from) + } +} + +impl FromStr for PublicKeyBytes { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + Self::from_z32(s) + } +} + +impl 
AsRef<[u8; 32]> for PublicKeyBytes { + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } +} + +pub fn signed_packet_to_hickory_message(signed_packet: &SignedPacket) -> Result { + let encoded = signed_packet.encoded_packet(); + let message = Message::from_bytes(&encoded)?; + Ok(message) +} + +pub fn signed_packet_to_hickory_records_without_origin( + signed_packet: &SignedPacket, + filter: impl Fn(&Record) -> bool, +) -> Result<(Label, BTreeMap>)> { + let common_zone = Label::from_utf8(&signed_packet.public_key().to_z32())?; + let mut message = signed_packet_to_hickory_message(signed_packet)?; + let answers = message.take_answers(); + let mut output: BTreeMap> = BTreeMap::new(); + for mut record in answers.into_iter() { + // disallow SOA and NS records + if matches!(record.record_type(), RecordType::SOA | RecordType::NS) { + continue; + } + // expect the z32 encoded pubkey as root name + let name = record.name(); + if name.num_labels() < 1 { + continue; + } + let zone = name.iter().last().unwrap().into_label()?; + if zone != common_zone { + continue; + } + if !filter(&record) { + continue; + } + + let name_without_zone = + Name::from_labels(name.iter().take(name.num_labels() as usize - 1))?; + record.set_name(name_without_zone); + + let rrkey = RrKey::new(record.name().into(), record.record_type()); + match output.entry(rrkey) { + btree_map::Entry::Vacant(e) => { + let set: RecordSet = record.into(); + e.insert(Arc::new(set)); + } + btree_map::Entry::Occupied(mut e) => { + let set = e.get_mut(); + let serial = set.serial(); + // safe because we just created the arc and are sync iterating + Arc::get_mut(set).unwrap().insert(record, serial); + } + } + } + Ok((common_zone, output)) +} + +pub fn record_set_append_origin( + input: &RecordSet, + origin: &Name, + serial: u32, +) -> Result { + let new_name = input.name().clone().append_name(origin)?; + let mut output = RecordSet::new(&new_name, input.record_type(), serial); + // TODO: less clones + for record in 
input.records_without_rrsigs() { + let mut record = record.clone(); + record.set_name(new_name.clone()); + output.insert(record, serial); + } + Ok(output) +} diff --git a/iroh-net/Cargo.toml b/iroh-net/Cargo.toml index 44213368f8..1208aea005 100644 --- a/iroh-net/Cargo.toml +++ b/iroh-net/Cargo.toml @@ -27,6 +27,9 @@ flume = "0.11" futures = "0.3.25" governor = "0.6.0" hex = "0.4.3" +hickory-proto = "0.24.0" +hickory-resolver = "0.24.0" +hostname = "0.3.1" http = "1" http-body-util = "0.1.0" hyper = { version = "1", features = ["server", "client", "http1"] } @@ -37,6 +40,7 @@ libc = "0.2.139" num_enum = "0.7" once_cell = "1.18.0" parking_lot = "0.12.1" +pkarr = { version = "1.1.3", default-features = false, features = ["async", "relay"] } postcard = { version = "1", default-features = false, features = ["alloc", "use-std", "experimental-derive"] } quinn = "0.10" quinn-proto = "0.10.5" @@ -56,15 +60,15 @@ thiserror = "1" time = "0.3.20" tokio = { version = "1", features = ["io-util", "macros", "sync", "rt", "net", "fs", "io-std", "signal", "process"] } tokio-rustls = { version = "0.24" } -tokio-rustls-acme = { version = "0.2" } +tokio-rustls-acme = { version = "0.3" } tokio-util = { version = "0.7", features = ["io-util", "io", "codec"] } tracing = "0.1" -hickory-resolver = "0.24.0" url = { version = "2.4", features = ["serde"] } watchable = "1.1.2" webpki = { package = "rustls-webpki", version = "0.101.4", features = ["std"] } webpki-roots = "0.25" x509-parser = "0.15" +z32 = "1.0.3" # iroh-relay clap = { version = "4", features = ["derive"], optional = true } @@ -76,6 +80,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = tr # metrics iroh-metrics = { version = "0.13.0", path = "../iroh-metrics", default-features = false } +strum = { version = "0.26.2", features = ["derive"] } [target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies] netlink-packet-core = "0.7.0" @@ -100,6 +105,7 @@ tokio = { version = "1", 
features = ["io-util", "sync", "rt", "net", "fs", "macr tracing-subscriber = { version = "0.3", features = ["env-filter"] } iroh-test = { path = "../iroh-test" } serde_json = "1.0.107" +axum = "0.7.4" [[bench]] name = "key" diff --git a/iroh-net/src/discovery.rs b/iroh-net/src/discovery.rs index d81e4fbdea..117ab689b3 100644 --- a/iroh-net/src/discovery.rs +++ b/iroh-net/src/discovery.rs @@ -10,6 +10,9 @@ use tracing::{debug, error_span, warn, Instrument}; use crate::{AddrInfo, MagicEndpoint, NodeId}; +pub mod dns; +pub mod pkarr_publish; + /// Node discovery for [`super::MagicEndpoint`]. /// /// The purpose of this trait is to hook up a node discovery mechanism that @@ -67,11 +70,16 @@ pub struct ConcurrentDiscovery { } impl ConcurrentDiscovery { - /// Create a new [`ConcurrentDiscovery`]. - pub fn new() -> Self { + /// Create a empty [`ConcurrentDiscovery`]. + pub fn empty() -> Self { Self::default() } + /// Create a new [`ConcurrentDiscovery`]. + pub fn from_services(services: Vec>) -> Self { + Self { services } + } + /// Add a [`Discovery`] service. 
pub fn add(&mut self, service: impl Discovery + 'static) { self.services.push(Box::new(service)); @@ -418,7 +426,7 @@ mod tests { let secret = SecretKey::generate(); let disco1 = EmptyDiscovery; let disco2 = disco_shared.create_discovery(secret.public()); - let mut disco = ConcurrentDiscovery::new(); + let mut disco = ConcurrentDiscovery::empty(); disco.add(disco1); disco.add(disco2); new_endpoint(secret, disco).await @@ -447,7 +455,7 @@ mod tests { let disco1 = EmptyDiscovery; let disco2 = disco_shared.create_lying_discovery(secret.public()); let disco3 = disco_shared.create_discovery(secret.public()); - let mut disco = ConcurrentDiscovery::new(); + let mut disco = ConcurrentDiscovery::empty(); disco.add(disco1); disco.add(disco2); disco.add(disco3); @@ -473,8 +481,7 @@ mod tests { let ep2 = { let secret = SecretKey::generate(); let disco1 = disco_shared.create_lying_discovery(secret.public()); - let mut disco = ConcurrentDiscovery::new(); - disco.add(disco1); + let disco = ConcurrentDiscovery::from_services(vec![Box::new(disco1)]); new_endpoint(secret, disco).await }; let ep1_addr = NodeAddr::new(ep1.node_id()); @@ -532,3 +539,349 @@ mod tests { .as_micros() as u64 } } + +/// This module contains end-to-end tests for DNS node discovery. +/// +/// The tests run a minimal test DNS server to resolve against, and a minimal pkarr relay to +/// publish to. The DNS and pkarr servers share their state. 
+#[cfg(test)] +mod test_dns_pkarr { + use std::net::SocketAddr; + use std::time::Duration; + + use anyhow::Result; + use iroh_base::key::SecretKey; + use tokio::task::JoinHandle; + use tokio_util::sync::CancellationToken; + use url::Url; + + use crate::{ + discovery::{dns::DnsDiscovery, pkarr_publish::PkarrPublisher, ConcurrentDiscovery}, + dns::node_info::{lookup_by_id, NodeInfo}, + relay::{RelayMap, RelayMode}, + test_utils::{ + dns_server::{create_dns_resolver, run_dns_server}, + run_relay_server, + }, + AddrInfo, MagicEndpoint, NodeAddr, + }; + + use self::{pkarr_relay::run_pkarr_relay, state::State}; + + #[tokio::test] + async fn dns_resolve() -> Result<()> { + let _logging_guard = iroh_test::logging::setup(); + + let cancel = CancellationToken::new(); + let origin = "testdns.example".to_string(); + let state = State::new(origin.clone()); + let (nameserver, dns_task) = run_dns_server(state.clone(), cancel.clone()).await?; + + let secret_key = SecretKey::generate(); + let node_info = NodeInfo::new( + secret_key.public(), + Some("https://relay.example".parse().unwrap()), + ); + let signed_packet = node_info.to_pkarr_signed_packet(&secret_key, 30)?; + state.upsert(signed_packet)?; + + let resolver = create_dns_resolver(nameserver)?; + let resolved = lookup_by_id(&resolver, &node_info.node_id, &origin).await?; + + assert_eq!(resolved, node_info.into()); + + cancel.cancel(); + dns_task.await??; + Ok(()) + } + + #[tokio::test] + async fn pkarr_publish_dns_resolve() -> Result<()> { + let _logging_guard = iroh_test::logging::setup(); + + let origin = "testdns.example".to_string(); + let cancel = CancellationToken::new(); + let timeout = Duration::from_secs(2); + + let (nameserver, pkarr_url, state, task) = + run_dns_and_pkarr_servers(origin.clone(), cancel.clone()).await?; + + let secret_key = SecretKey::generate(); + let node_id = secret_key.public(); + + let addr_info = AddrInfo { + relay_url: Some("https://relay.example".parse().unwrap()), + ..Default::default() + 
}; + + let resolver = create_dns_resolver(nameserver)?; + let publisher = PkarrPublisher::new(secret_key, pkarr_url); + // does not block, update happens in background task + publisher.update_addr_info(&addr_info); + // wait until our shared state received the update from pkarr publishing + state.on_node(&node_id, timeout).await?; + let resolved = lookup_by_id(&resolver, &node_id, &origin).await?; + + let expected = NodeAddr { + info: addr_info, + node_id, + }; + + assert_eq!(resolved, expected); + + cancel.cancel(); + task.await??; + Ok(()) + } + + const TEST_ALPN: &[u8] = b"TEST"; + + #[tokio::test] + async fn pkarr_publish_dns_discover() -> Result<()> { + let _logging_guard = iroh_test::logging::setup(); + + let origin = "testdns.example".to_string(); + let cancel = CancellationToken::new(); + let timeout = Duration::from_secs(2); + + let (nameserver, pkarr_url, state, task) = + run_dns_and_pkarr_servers(&origin, cancel.clone()).await?; + let (relay_map, _relay_url, _relay_guard) = run_relay_server().await?; + + let ep1 = ep_with_discovery(relay_map.clone(), nameserver, &origin, &pkarr_url).await?; + let ep2 = ep_with_discovery(relay_map, nameserver, &origin, &pkarr_url).await?; + + // wait until our shared state received the update from pkarr publishing + state.on_node(&ep1.node_id(), timeout).await?; + + // we connect only by node id! 
+ let res = ep2.connect(ep1.node_id().into(), TEST_ALPN).await; + assert!(res.is_ok(), "connection established"); + cancel.cancel(); + task.await??; + Ok(()) + } + + async fn ep_with_discovery( + relay_map: RelayMap, + nameserver: SocketAddr, + node_origin: &str, + pkarr_relay: &Url, + ) -> Result { + let secret_key = SecretKey::generate(); + let resolver = create_dns_resolver(nameserver)?; + let discovery = ConcurrentDiscovery::from_services(vec![ + Box::new(DnsDiscovery::new(node_origin.to_string())), + Box::new(PkarrPublisher::new(secret_key.clone(), pkarr_relay.clone())), + ]); + let ep = MagicEndpoint::builder() + .relay_mode(RelayMode::Custom(relay_map)) + .insecure_skip_relay_cert_verify(true) + .secret_key(secret_key) + .dns_resolver(resolver) + .alpns(vec![TEST_ALPN.to_vec()]) + .discovery(Box::new(discovery)) + .bind(0) + .await?; + Ok(ep) + } + + async fn run_dns_and_pkarr_servers( + origin: impl ToString, + cancel: CancellationToken, + ) -> Result<(SocketAddr, Url, State, JoinHandle>)> { + let state = State::new(origin.to_string()); + let (nameserver, dns_task) = run_dns_server(state.clone(), cancel.clone()).await?; + let (pkarr_url, pkarr_task) = run_pkarr_relay(state.clone(), cancel.clone()).await?; + let join_handle = tokio::task::spawn(async move { + dns_task.await??; + pkarr_task.await??; + Ok(()) + }); + Ok((nameserver, pkarr_url, state, join_handle)) + } + + mod state { + use anyhow::{bail, Result}; + use parking_lot::{Mutex, MutexGuard}; + use pkarr::SignedPacket; + use std::{ + collections::{hash_map, HashMap}, + future::Future, + ops::Deref, + sync::Arc, + time::Duration, + }; + + use crate::dns::node_info::{node_id_from_hickory_name, NodeInfo}; + use crate::test_utils::dns_server::QueryHandler; + use crate::NodeId; + + #[derive(Debug, Clone)] + pub struct State { + packets: Arc>>, + origin: String, + notify: Arc, + } + + impl State { + pub fn new(origin: String) -> Self { + Self { + packets: Default::default(), + origin, + notify: 
Arc::new(tokio::sync::Notify::new()), + } + } + + pub fn on_update(&self) -> tokio::sync::futures::Notified<'_> { + self.notify.notified() + } + + pub async fn on_node(&self, node: &NodeId, timeout: Duration) -> Result<()> { + let timeout = tokio::time::sleep(timeout); + tokio::pin!(timeout); + while self.get(node).is_none() { + tokio::select! { + _ = &mut timeout => bail!("timeout"), + _ = self.on_update() => {} + } + } + Ok(()) + } + + pub fn upsert(&self, signed_packet: SignedPacket) -> anyhow::Result { + let node_id = NodeId::from_bytes(&signed_packet.public_key().to_bytes())?; + let mut map = self.packets.lock(); + let updated = match map.entry(node_id) { + hash_map::Entry::Vacant(e) => { + e.insert(signed_packet); + true + } + hash_map::Entry::Occupied(mut e) => { + if signed_packet.more_recent_than(e.get()) { + e.insert(signed_packet); + true + } else { + false + } + } + }; + if updated { + self.notify.notify_waiters(); + } + Ok(updated) + } + + /// Returns a mutex guard, do not hold over await points + pub fn get(&self, node_id: &NodeId) -> Option + '_> { + let map = self.packets.lock(); + if map.contains_key(node_id) { + let guard = MutexGuard::map(map, |state| state.get_mut(node_id).unwrap()); + Some(guard) + } else { + None + } + } + + pub fn resolve_dns( + &self, + query: &hickory_proto::op::Message, + reply: &mut hickory_proto::op::Message, + ttl: u32, + ) -> Result<()> { + for query in query.queries() { + let Some(node_id) = node_id_from_hickory_name(query.name()) else { + continue; + }; + let packet = self.get(&node_id); + let Some(packet) = packet.as_ref() else { + continue; + }; + let node_info = NodeInfo::from_pkarr_signed_packet(packet)?; + for record in node_info.to_hickory_records(&self.origin, ttl)? 
{ + reply.add_answer(record); + } + } + Ok(()) + } + } + + impl QueryHandler for State { + fn resolve( + &self, + query: &hickory_proto::op::Message, + reply: &mut hickory_proto::op::Message, + ) -> impl Future> + Send { + const TTL: u32 = 30; + let res = self.resolve_dns(query, reply, TTL); + futures::future::ready(res) + } + } + } + + mod pkarr_relay { + use std::net::{Ipv4Addr, SocketAddr}; + + use anyhow::Result; + use axum::{ + extract::{Path, State}, + response::IntoResponse, + routing::put, + Router, + }; + use bytes::Bytes; + use tokio::task::JoinHandle; + use tokio_util::sync::CancellationToken; + use tracing::warn; + use url::Url; + + use super::State as AppState; + + pub async fn run_pkarr_relay( + state: AppState, + cancel: CancellationToken, + ) -> Result<(Url, JoinHandle>)> { + let bind_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, 0)); + let app = Router::new() + .route("/pkarr/:key", put(pkarr_put)) + .with_state(state); + let listener = tokio::net::TcpListener::bind(bind_addr).await?; + let bound_addr = listener.local_addr()?; + let url: Url = format!("http://{bound_addr}/pkarr") + .parse() + .expect("valid url"); + let join_handle = tokio::task::spawn(async move { + let serve = axum::serve(listener, app); + let serve = serve.with_graceful_shutdown(cancel.cancelled_owned()); + serve.await?; + Ok(()) + }); + Ok((url, join_handle)) + } + + async fn pkarr_put( + State(state): State, + Path(key): Path, + body: Bytes, + ) -> Result { + let key = pkarr::PublicKey::try_from(key.as_str())?; + let signed_packet = pkarr::SignedPacket::from_relay_response(key, body)?; + let _updated = state.upsert(signed_packet)?; + Ok(http::StatusCode::NO_CONTENT) + } + + #[derive(Debug)] + struct AppError(anyhow::Error); + impl> From for AppError { + fn from(value: T) -> Self { + Self(value.into()) + } + } + impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + warn!(err = ?self, "request failed"); + 
(http::StatusCode::INTERNAL_SERVER_ERROR, self.0.to_string()).into_response() + } + } + } +} diff --git a/iroh-net/src/discovery/dns.rs b/iroh-net/src/discovery/dns.rs new file mode 100644 index 0000000000..befb00cc48 --- /dev/null +++ b/iroh-net/src/discovery/dns.rs @@ -0,0 +1,70 @@ +//! DNS node discovery for iroh-net + +use crate::{ + discovery::{Discovery, DiscoveryItem}, + MagicEndpoint, NodeId, +}; +use anyhow::Result; +use futures::{future::FutureExt, stream::BoxStream, StreamExt}; + +use crate::dns; + +/// The n0 testing DNS node origin +pub const N0_DNS_NODE_ORIGIN: &str = "dns.iroh.link"; + +/// DNS node discovery +/// +/// When asked to resolve a [`NodeId`], this service performs a lookup in the Domain Name System (DNS). +/// +/// It uses the [`MagicEndpoint`]'s DNS resolver to query for `TXT` records under the domain +/// `_iroh..`: +/// +/// * `_iroh`: is the record name +/// * `` is the [`NodeId`] encoded in [`z-base-32`] format +/// * `` is the node origin domain as set in [`DnsDiscovery::new`]. +/// +/// Each TXT record returned from the query is expected to contain a string in the format `=`. +/// If a TXT record contains multiple character strings, they are concatenated first. +/// The supported attributes are: +/// * `relay=`: The URL of the home relay server of the node +/// +/// The DNS resolver defaults to using the nameservers configured on the host system, but can be changed +/// with [`crate::magic_endpoint::MagicEndpointBuilder::dns_resolver`]. +/// +/// [z-base-32]: https://philzimmermann.com/docs/human-oriented-base-32-encoding.txt +#[derive(Debug)] +pub struct DnsDiscovery { + origin_domain: String, +} + +impl DnsDiscovery { + /// Create a new DNS discovery. + pub fn new(origin_domain: String) -> Self { + Self { origin_domain } + } + + /// Create a new DNS discovery which uses the [`N0_DNS_NODE_ORIGIN`] origin domain. 
+ pub fn n0_dns() -> Self { + Self::new(N0_DNS_NODE_ORIGIN.to_string()) + } +} + +impl Discovery for DnsDiscovery { + fn resolve( + &self, + ep: MagicEndpoint, + node_id: NodeId, + ) -> Option>> { + let resolver = ep.dns_resolver().clone(); + let fut = async move { + let node_addr = + dns::node_info::lookup_by_id(&resolver, &node_id, &self.origin_domain).await?; + Ok(DiscoveryItem { + provenance: "dns", + last_updated: None, + addr_info: node_addr.info, + }) + }; + Some(fut.into_stream().boxed()) + } +} diff --git a/iroh-net/src/discovery/pkarr_publish.rs b/iroh-net/src/discovery/pkarr_publish.rs new file mode 100644 index 0000000000..a39cac4b5f --- /dev/null +++ b/iroh-net/src/discovery/pkarr_publish.rs @@ -0,0 +1,186 @@ +//! A discovery service which publishes node information to a [Pkarr] relay. +//! +//! This service only implements the [`Discovery::publish`] method and does not provide discovery. +//! It encodes the node information into a DNS packet in the format resolvable by the +//! [`super::dns::DnsDiscovery`]. +//! +//! [pkarr]: https://pkarr.org + +use std::sync::Arc; + +use anyhow::Result; +use pkarr::SignedPacket; +use tokio::{ + task::JoinHandle, + time::{Duration, Instant}, +}; +use tracing::{debug, info, warn}; +use url::Url; +use watchable::{Watchable, Watcher}; + +use crate::{discovery::Discovery, dns::node_info::NodeInfo, key::SecretKey, AddrInfo, NodeId}; + +/// The pkarr relay run by n0. +pub const N0_DNS_PKARR_RELAY: &str = "https://dns.iroh.link/pkarr"; + +/// Default TTL for the records in the pkarr signed packet +pub const DEFAULT_PKARR_TTL: u32 = 30; + +/// Interval in which we will republish our node info even if unchanged: 5 minutes. +pub const DEFAULT_REPUBLISH_INTERVAL: Duration = Duration::from_secs(60 * 5); + +/// Publish node info to a pkarr relay. 
+#[derive(derive_more::Debug, Clone)] +pub struct PkarrPublisher { + node_id: NodeId, + watchable: Watchable>, + join_handle: Arc>, +} + +impl PkarrPublisher { + /// Create a new config with a secret key and a pkarr relay URL. + /// + /// Will use [`DEFAULT_PKARR_TTL`] as the time-to-live value for the published packets. + /// Will republish info, even if unchanged, every [`DEFAULT_REPUBLISH_INTERVAL`]. + pub fn new(secret_key: SecretKey, pkarr_relay: Url) -> Self { + Self::with_options( + secret_key, + pkarr_relay, + DEFAULT_PKARR_TTL, + DEFAULT_REPUBLISH_INTERVAL, + ) + } + + /// Create a new [`PkarrPublisher`] with a custom time-to-live (ttl) value for the published + /// [`pkarr::SignedPacket`]s. + pub fn with_options( + secret_key: SecretKey, + pkarr_relay: Url, + ttl: u32, + republish_interval: std::time::Duration, + ) -> Self { + let node_id = secret_key.public(); + let pkarr_client = PkarrRelayClient::new(pkarr_relay); + let watchable = Watchable::default(); + let service = PublisherService { + ttl, + watcher: watchable.watch(), + secret_key, + pkarr_client, + republish_interval, + }; + let join_handle = tokio::task::spawn(service.run()); + Self { + watchable, + node_id, + join_handle: Arc::new(join_handle), + } + } + + /// Create a config that publishes to the n0 dns server through [`N0_DNS_PKARR_RELAY`]. + pub fn n0_dns(secret_key: SecretKey) -> Self { + let pkarr_relay: Url = N0_DNS_PKARR_RELAY.parse().expect("url is valid"); + Self::new(secret_key, pkarr_relay) + } + + /// Publish [`AddrInfo`] about this node to a pkarr relay. + /// + /// This is a nonblocking function, the actual update is performed in the background. 
+ pub fn update_addr_info(&self, info: &AddrInfo) { + let info = NodeInfo::new(self.node_id, info.relay_url.clone().map(Into::into)); + self.watchable.update(Some(info)).ok(); + } +} + +impl Discovery for PkarrPublisher { + fn publish(&self, info: &AddrInfo) { + self.update_addr_info(info); + } +} + +impl Drop for PkarrPublisher { + fn drop(&mut self) { + // this means we're dropping the last reference + if let Some(handle) = Arc::get_mut(&mut self.join_handle) { + handle.abort(); + } + } +} + +/// Publish node info to a pkarr relay. +#[derive(derive_more::Debug, Clone)] +struct PublisherService { + #[debug("SecretKey")] + secret_key: SecretKey, + #[debug("PkarrClient")] + pkarr_client: PkarrRelayClient, + watcher: Watcher>, + ttl: u32, + republish_interval: Duration, +} + +impl PublisherService { + async fn run(self) { + let mut failed_attemps = 0; + let republish = tokio::time::sleep(Duration::MAX); + tokio::pin!(republish); + loop { + if let Some(info) = self.watcher.get() { + if let Err(err) = self.publish_current(info).await { + warn!(?err, url = %self.pkarr_client.pkarr_relay , "Failed to publish to pkarr"); + failed_attemps += 1; + // Retry after increasing timeout + republish + .as_mut() + .reset(Instant::now() + Duration::from_secs(failed_attemps)); + } else { + failed_attemps = 0; + // Republish after fixed interval + republish + .as_mut() + .reset(Instant::now() + self.republish_interval); + } + } + // Wait until either the retry/republish timeout is reached, or the node info changed. + tokio::select! 
{ + res = self.watcher.watch_async() => match res { + Ok(()) => debug!("Publish node info to pkarr (info changed)"), + Err(_disconnected) => break, + }, + _ = &mut republish => debug!("Publish node info to pkarr (interval elapsed)"), + } + } + } + + async fn publish_current(&self, info: NodeInfo) -> Result<()> { + info!("Publish node info to pkarr"); + let signed_packet = info.to_pkarr_signed_packet(&self.secret_key, self.ttl)?; + self.pkarr_client.publish(&signed_packet).await?; + Ok(()) + } +} + +/// A pkarr client to publish [`pkarr::SignedPacket`]s to a pkarr relay. +#[derive(Debug, Clone)] +pub struct PkarrRelayClient { + inner: pkarr::PkarrClient, + pkarr_relay: Url, +} + +impl PkarrRelayClient { + /// Create a new client. + pub fn new(pkarr_relay: Url) -> Self { + Self { + inner: pkarr::PkarrClient::builder().build(), + pkarr_relay, + } + } + + /// Publish a [`SignedPacket`] + pub async fn publish(&self, signed_packet: &SignedPacket) -> anyhow::Result<()> { + self.inner + .relay_put(&self.pkarr_relay, signed_packet) + .await?; + Ok(()) + } +} diff --git a/iroh-net/src/dns.rs b/iroh-net/src/dns.rs index 60c1d6eabd..7cfb3117fb 100644 --- a/iroh-net/src/dns.rs +++ b/iroh-net/src/dns.rs @@ -8,6 +8,8 @@ use anyhow::Result; use hickory_resolver::{AsyncResolver, IntoName, TokioAsyncResolver, TryParseIp}; use once_cell::sync::Lazy; +pub mod node_info; + /// The DNS resolver type used throughout `iroh-net`. pub type DnsResolver = TokioAsyncResolver; @@ -22,6 +24,11 @@ pub fn default_resolver() -> &'static DnsResolver { &DNS_RESOLVER } +/// Get the DNS resolver used within iroh-net. +pub fn resolver() -> &'static TokioAsyncResolver { + Lazy::force(&DNS_RESOLVER) +} + /// Deprecated IPv6 site-local anycast addresses still configured by windows. 
/// /// Windows still configures these site-local addresses as soon even as an IPv6 loopback @@ -142,7 +149,7 @@ pub async fn lookup_ipv4_ipv6( } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::defaults::NA_RELAY_HOSTNAME; use super::*; diff --git a/iroh-net/src/dns/node_info.rs b/iroh-net/src/dns/node_info.rs new file mode 100644 index 0000000000..6964f90758 --- /dev/null +++ b/iroh-net/src/dns/node_info.rs @@ -0,0 +1,362 @@ +//! This module contains functions and structs to lookup node information from DNS +//! and to encode node information in Pkarr signed packets. + +use std::{collections::BTreeMap, fmt::Display, hash::Hash, str::FromStr}; + +use anyhow::{anyhow, ensure, Result}; +use hickory_proto::error::ProtoError; +use hickory_resolver::{Name, TokioAsyncResolver}; +use url::Url; + +use crate::{key::SecretKey, AddrInfo, NodeAddr, NodeId}; + +/// The DNS name for the iroh TXT record +pub const IROH_TXT_NAME: &str = "_iroh"; + +/// The attributes supported by iroh for `_iroh` DNS records +#[derive( + Debug, strum::Display, strum::AsRefStr, strum::EnumString, Hash, Eq, PartialEq, Ord, PartialOrd, +)] +#[strum(serialize_all = "kebab-case")] +pub enum IrohAttr { + /// `relay`: URL of home relay + Relay, +} + +/// Lookup node info by domain name +/// +/// The domain name must either contain an _iroh TXT record or be a CNAME record that leads to +/// an _iroh TXT record. +pub async fn lookup_by_domain(resolver: &TokioAsyncResolver, domain: &str) -> Result { + let attrs = TxtAttrs::::lookup_by_domain(resolver, domain).await?; + let info: NodeInfo = attrs.into(); + Ok(info.into()) +} + +/// Lookup node info by node id and origin domain name. +pub async fn lookup_by_id( + resolver: &TokioAsyncResolver, + node_id: &NodeId, + origin: &str, +) -> Result { + let attrs = TxtAttrs::::lookup_by_id(resolver, node_id, origin).await?; + let info: NodeInfo = attrs.into(); + Ok(info.into()) +} + +/// Encode a [`NodeId`] in [`z-base-32`] encoding. 
+/// +/// [z-base-32]: https://philzimmermann.com/docs/human-oriented-base-32-encoding.txt +pub fn to_z32(node_id: &NodeId) -> String { + z32::encode(node_id.as_bytes()) +} + +/// Parse a [`NodeId`] from [`z-base-32`] encoding. +/// +/// [z-base-32]: https://philzimmermann.com/docs/human-oriented-base-32-encoding.txt +pub fn from_z32(s: &str) -> Result { + let bytes = z32::decode(s.as_bytes()).map_err(|_| anyhow!("invalid z32"))?; + let bytes: &[u8; 32] = &bytes.try_into().map_err(|_| anyhow!("not 32 bytes long"))?; + let node_id = NodeId::from_bytes(bytes)?; + Ok(node_id) +} + +/// Node info contained in a DNS _iroh TXT record. +#[derive(derive_more::Debug, Clone, Eq, PartialEq)] +pub struct NodeInfo { + /// The node id + pub node_id: NodeId, + /// Home relay server for this node + #[debug("{:?}", self.relay_url.as_ref().map(|s| s.to_string()))] + pub relay_url: Option, +} + +impl From> for NodeInfo { + fn from(attrs: TxtAttrs) -> Self { + (&attrs).into() + } +} + +impl From<&TxtAttrs> for NodeInfo { + fn from(attrs: &TxtAttrs) -> Self { + let node_id = attrs.node_id(); + let attrs = attrs.attrs(); + let relay_url = attrs + .get(&IrohAttr::Relay) + .into_iter() + .flatten() + .next() + .and_then(|s| Url::parse(s).ok()); + Self { node_id, relay_url } + } +} + +impl From<&NodeInfo> for TxtAttrs { + fn from(info: &NodeInfo) -> Self { + let mut attrs = vec![]; + if let Some(relay_url) = &info.relay_url { + attrs.push((IrohAttr::Relay, relay_url.to_string())); + } + Self::from_parts(info.node_id, attrs.into_iter()) + } +} + +impl From for NodeAddr { + fn from(value: NodeInfo) -> Self { + NodeAddr { + node_id: value.node_id, + info: value.into(), + } + } +} + +impl From for AddrInfo { + fn from(value: NodeInfo) -> Self { + AddrInfo { + relay_url: value.relay_url.map(|u| u.into()), + direct_addresses: Default::default(), + } + } +} + +impl NodeInfo { + /// Create a new [`NodeInfo`] from its parts. 
+ pub fn new(node_id: NodeId, relay_url: Option) -> Self { + Self { node_id, relay_url } + } + + fn to_attrs(&self) -> TxtAttrs { + self.into() + } + + /// Try to parse a [`NodeInfo`] from a set of DNS records. + pub fn from_hickory_records(records: &[hickory_proto::rr::Record]) -> Result { + let attrs = TxtAttrs::from_hickory_records(records)?; + Ok(attrs.into()) + } + + /// Try to parse a [`NodeInfo`] from a [`pkarr::SignedPacket`]. + pub fn from_pkarr_signed_packet(packet: &pkarr::SignedPacket) -> Result { + let attrs = TxtAttrs::from_pkarr_signed_packet(packet)?; + Ok(attrs.into()) + } + + /// Create a [`pkarr::SignedPacket`] by constructing a DNS packet and + /// signing it with a [`SecretKey`]. + pub fn to_pkarr_signed_packet( + &self, + secret_key: &SecretKey, + ttl: u32, + ) -> Result { + self.to_attrs().to_pkarr_signed_packet(secret_key, ttl) + } + + /// Convert into a [`hickory_proto::rr::Record`] DNS record. + pub fn to_hickory_records( + &self, + origin: &str, + ttl: u32, + ) -> Result + 'static> { + let attrs = self.to_attrs(); + let records = attrs.to_hickory_records(origin, ttl)?; + Ok(records.collect::>().into_iter()) + } +} + +/// Parse a [`NodeId`] from iroh DNS name. +/// +/// Takes a [`hickory_proto::rr::Name`] DNS name and expects the first label to be `_iroh` +/// and the second label to be a z32 encoded [`NodeId`]. Does not care about subsequent labels. +pub(crate) fn node_id_from_hickory_name(name: &hickory_proto::rr::Name) -> Option { + if name.num_labels() < 2 { + return None; + } + let mut labels = name.iter(); + let label = std::str::from_utf8(labels.next().expect("num_labels checked")).ok()?; + if label != IROH_TXT_NAME { + return None; + } + let label = std::str::from_utf8(labels.next().expect("num_labels checked")).ok()?; + let node_id = from_z32(label).ok()?; + Some(node_id) +} + +/// Attributes parsed from `_iroh` TXT records. +/// +/// This struct is generic over the key type. 
When using with String, this will parse all +/// attributes. Can also be used with an enum, if it implements [`FromStr`] and [`Display`]. +#[derive(Debug)] +pub struct TxtAttrs { + node_id: NodeId, + attrs: BTreeMap>, +} + +impl TxtAttrs { + /// Create from a node id and an iterator of key-value pairs. + pub fn from_parts(node_id: NodeId, pairs: impl Iterator) -> Self { + let mut attrs: BTreeMap> = BTreeMap::new(); + for (k, v) in pairs { + attrs.entry(k).or_default().push(v); + } + Self { attrs, node_id } + } + + /// Create from a node id and an iterator of "{key}={value}" strings. + pub fn from_strings(node_id: NodeId, strings: impl Iterator) -> Result { + let mut attrs: BTreeMap> = BTreeMap::new(); + for s in strings { + let mut parts = s.split('='); + let (Some(key), Some(value)) = (parts.next(), parts.next()) else { + continue; + }; + let Ok(attr) = T::from_str(key) else { + continue; + }; + attrs.entry(attr).or_default().push(value.to_string()); + } + Ok(Self { attrs, node_id }) + } + + async fn lookup(resolver: &TokioAsyncResolver, name: Name) -> Result { + let name = ensure_iroh_txt_label(name)?; + let lookup = resolver.txt_lookup(name).await?; + let attrs = Self::from_hickory_records(lookup.as_lookup().records())?; + Ok(attrs) + } + + /// Lookup attributes for a node id and origin domain. + pub async fn lookup_by_id( + resolver: &TokioAsyncResolver, + node_id: &NodeId, + origin: &str, + ) -> Result { + let name = node_domain(node_id, origin)?; + TxtAttrs::lookup(resolver, name).await + } + + /// Lookup attributes for a domain. + pub async fn lookup_by_domain(resolver: &TokioAsyncResolver, domain: &str) -> Result { + let name = Name::from_str(domain)?; + TxtAttrs::lookup(resolver, name).await + } + + /// Get a reference to the parsed attributes. + pub fn attrs(&self) -> &BTreeMap> { + &self.attrs + } + + /// Get the node id. + pub fn node_id(&self) -> NodeId { + self.node_id + } + + /// Try to parse a from a [`pkarr::SignedPacket`]. 
+ pub fn from_pkarr_signed_packet(packet: &pkarr::SignedPacket) -> Result { + use pkarr::dns::{self, rdata::RData}; + let pubkey = packet.public_key(); + let pubkey_z32 = pubkey.to_z32(); + let node_id = NodeId::from(*pubkey.verifying_key()); + let zone = dns::Name::new(&pubkey_z32)?; + let inner = packet.packet(); + let txt_data = inner.answers.iter().filter_map(|rr| match &rr.rdata { + RData::TXT(txt) => match rr.name.without(&zone) { + Some(name) if name.to_string() == IROH_TXT_NAME => Some(txt), + Some(_) | None => None, + }, + _ => None, + }); + + let txt_strs = txt_data.filter_map(|s| String::try_from(s.clone()).ok()); + Self::from_strings(node_id, txt_strs) + } + + /// Try to parse a from a set of DNS records. + pub fn from_hickory_records(records: &[hickory_proto::rr::Record]) -> Result { + use hickory_proto::rr; + let mut records = records.iter().filter_map(|rr| match rr.data() { + Some(rr::RData::TXT(txt)) => { + node_id_from_hickory_name(rr.name()).map(|node_id| (node_id, txt)) + } + _ => None, + }); + let (node_id, first) = records.next().ok_or_else(|| { + anyhow!("invalid DNS answer: no TXT record with name _iroh.z32encodedpubkey found") + })?; + ensure!( + &records.all(|(n, _)| n == node_id), + "invalid DNS answer: all _iroh txt records must belong to the same node domain" + ); + let records = records.map(|(_, txt)| txt).chain(Some(first)); + let strings = records.map(ToString::to_string); + Self::from_strings(node_id, strings) + } + + fn to_txt_strings(&self) -> impl Iterator + '_ { + self.attrs + .iter() + .flat_map(move |(k, vs)| vs.iter().map(move |v| format!("{k}={v}"))) + } + + /// Convert into list of [`hickory_proto::rr::Record`]. 
+ pub fn to_hickory_records( + &self, + origin: &str, + ttl: u32, + ) -> Result + '_> { + use hickory_proto::rr; + let name = format!("{}.{}.{}", IROH_TXT_NAME, to_z32(&self.node_id), origin); + let name = rr::Name::from_utf8(name)?; + let records = self.to_txt_strings().map(move |s| { + let txt = rr::rdata::TXT::new(vec![s]); + let rdata = rr::RData::TXT(txt); + rr::Record::from_rdata(name.clone(), ttl, rdata) + }); + Ok(records) + } + + /// Create a [`pkarr::SignedPacket`] by constructing a DNS packet and + /// signing it with a [`SecretKey`]. + pub fn to_pkarr_signed_packet( + &self, + secret_key: &SecretKey, + ttl: u32, + ) -> Result { + let packet = self.to_pkarr_dns_packet(ttl)?; + let keypair = pkarr::Keypair::from_secret_key(&secret_key.to_bytes()); + let signed_packet = pkarr::SignedPacket::from_packet(&keypair, &packet)?; + Ok(signed_packet) + } + + fn to_pkarr_dns_packet(&self, ttl: u32) -> Result> { + use pkarr::dns::{self, rdata}; + let name = dns::Name::new(IROH_TXT_NAME)?.into_owned(); + + let mut packet = dns::Packet::new_reply(0); + for s in self.to_txt_strings() { + let mut txt = rdata::TXT::new(); + txt.add_string(&s)?; + let rdata = rdata::RData::TXT(txt.into_owned()); + packet.answers.push(dns::ResourceRecord::new( + name.clone(), + dns::CLASS::IN, + ttl, + rdata, + )); + } + Ok(packet) + } +} + +fn ensure_iroh_txt_label(name: Name) -> Result { + if name.iter().next() == Some(IROH_TXT_NAME.as_bytes()) { + Ok(name) + } else { + Name::parse(IROH_TXT_NAME, Some(&name)) + } +} + +fn node_domain(node_id: &NodeId, origin: &str) -> Result { + let domain = format!("{}.{}", to_z32(node_id), origin); + let domain = Name::from_str(&domain)?; + Ok(domain) +} diff --git a/iroh-net/src/magic_endpoint.rs b/iroh-net/src/magic_endpoint.rs index c9e4f69c54..6f71cffe01 100644 --- a/iroh-net/src/magic_endpoint.rs +++ b/iroh-net/src/magic_endpoint.rs @@ -545,6 +545,11 @@ impl MagicEndpoint { Ok(()) } + /// Get a reference to the DNS resolver used in this 
[`MagicEndpoint`]. + pub fn dns_resolver(&self) -> &DnsResolver { + self.msock.dns_resolver() + } + /// Close the QUIC endpoint and the magic socket. /// /// This will close all open QUIC connections with the provided error_code and reason. See diff --git a/iroh-net/src/magicsock.rs b/iroh-net/src/magicsock.rs index a35e179df9..bc9de2f1b3 100644 --- a/iroh-net/src/magicsock.rs +++ b/iroh-net/src/magicsock.rs @@ -1403,6 +1403,11 @@ impl MagicSock { self.inner.node_map.add_node_addr(addr); } + /// Get a reference to the DNS resolver used in this [`MagicSock`]. + pub fn dns_resolver(&self) -> &DnsResolver { + &self.inner.dns_resolver + } + /// Closes the connection. /// /// Only the first close does anything. Any later closes return nil. diff --git a/iroh-net/src/test_utils.rs b/iroh-net/src/test_utils.rs index 7a866d1f3e..652d81a09d 100644 --- a/iroh-net/src/test_utils.rs +++ b/iroh-net/src/test_utils.rs @@ -62,3 +62,107 @@ pub async fn run_relay_server() -> Result<(RelayMap, RelayUrl, CleanupDropGuard) Ok((m, url, CleanupDropGuard(tx))) } + +#[cfg(test)] +pub(crate) mod dns_server { + use std::net::{Ipv4Addr, SocketAddr}; + + use anyhow::{ensure, Result}; + use futures::{future::BoxFuture, Future}; + use hickory_proto::{ + op::{header::MessageType, Message}, + serialize::binary::BinDecodable, + }; + use hickory_resolver::{config::NameServerConfig, TokioAsyncResolver}; + use tokio::{net::UdpSocket, task::JoinHandle}; + use tokio_util::sync::CancellationToken; + use tracing::{debug, warn}; + + /// Trait used by [`run_dns_server`] for answering DNS queries. 
+ pub trait QueryHandler: Send + Sync + 'static { + fn resolve( + &self, + query: &Message, + reply: &mut Message, + ) -> impl Future> + Send; + } + + pub type QueryHandlerFunction = Box< + dyn Fn(&Message, &mut Message) -> BoxFuture<'static, Result<()>> + Send + Sync + 'static, + >; + impl QueryHandler for QueryHandlerFunction { + fn resolve( + &self, + query: &Message, + reply: &mut Message, + ) -> impl Future> + Send { + (self)(query, reply) + } + } + + /// Run a DNS server. + /// + /// Must pass a [`QueryHandler`] that answers queries. Can be a [`ResolveCallback`] or a struct. + pub async fn run_dns_server( + resolver: impl QueryHandler, + cancel: CancellationToken, + ) -> Result<(SocketAddr, JoinHandle>)> { + let bind_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, 0)); + let socket = UdpSocket::bind(bind_addr).await?; + let bound_addr = socket.local_addr()?; + let s = TestDnsServer { + socket, + cancel, + resolver, + }; + let join_handle = tokio::task::spawn(async move { s.run().await }); + Ok((bound_addr, join_handle)) + } + + /// Create a DNS resolver with a single nameserver. + pub fn create_dns_resolver(nameserver: SocketAddr) -> Result { + let mut config = hickory_resolver::config::ResolverConfig::new(); + let nameserver_config = + NameServerConfig::new(nameserver, hickory_resolver::config::Protocol::Udp); + config.add_name_server(nameserver_config); + let resolver = hickory_resolver::AsyncResolver::tokio(config, Default::default()); + Ok(resolver) + } + + struct TestDnsServer { + resolver: R, + socket: UdpSocket, + cancel: CancellationToken, + } + + impl TestDnsServer { + async fn run(self) -> Result<()> { + let mut buf = [0; 1450]; + loop { + tokio::select! 
{ + _ = self.cancel.cancelled() => break, + res = self.socket.recv_from(&mut buf) => { + let (len, from) = res?; + if let Err(err) = self.handle_datagram(from, &buf[..len]).await { + warn!(?err, %from, "failed to handle incoming datagram"); + } + } + }; + } + Ok(()) + } + + async fn handle_datagram(&self, from: SocketAddr, buf: &[u8]) -> Result<()> { + let packet = Message::from_bytes(buf)?; + debug!(queries = ?packet.queries(), %from, "received query"); + let mut reply = packet.clone(); + reply.set_message_type(MessageType::Response); + self.resolver.resolve(&packet, &mut reply).await?; + debug!(?reply, %from, "send reply"); + let buf = reply.to_vec()?; + let len = self.socket.send_to(&buf, from).await?; + ensure!(len == buf.len(), "failed to send complete packet"); + Ok(()) + } + } +} diff --git a/iroh/Cargo.toml b/iroh/Cargo.toml index 5c8a752ba2..0d70f3325d 100644 --- a/iroh/Cargo.toml +++ b/iroh/Cargo.toml @@ -63,7 +63,6 @@ test-utils = ["iroh-net/test-utils"] [dev-dependencies] anyhow = { version = "1" } bytes = "1" -console-subscriber = "0.2" genawaiter = { version = "0.99", features = ["futures03"] } iroh = { path = ".", features = ["test-utils"] } iroh-test = { path = "../iroh-test" } diff --git a/iroh/src/node.rs b/iroh/src/node.rs index e8a96d2fc3..663c8bf19f 100644 --- a/iroh/src/node.rs +++ b/iroh/src/node.rs @@ -42,7 +42,7 @@ mod builder; mod rpc; mod rpc_status; -pub use builder::{Builder, GcPolicy, StorageConfig}; +pub use builder::{Builder, GcPolicy, NodeDiscoveryConfig, StorageConfig}; pub use rpc_status::RpcStatus; type EventCallback = Box BoxFuture<'static, ()> + 'static + Sync + Send>; diff --git a/iroh/src/node/builder.rs b/iroh/src/node/builder.rs index 75b6fb26bb..029036c22b 100644 --- a/iroh/src/node/builder.rs +++ b/iroh/src/node/builder.rs @@ -16,7 +16,11 @@ use iroh_bytes::{ }; use iroh_gossip::net::{Gossip, GOSSIP_ALPN}; use iroh_net::{ - magic_endpoint::get_alpn, relay::RelayMode, util::AbortingJoinHandle, MagicEndpoint, + 
discovery::{dns::DnsDiscovery, pkarr_publish::PkarrPublisher, ConcurrentDiscovery, Discovery}, + magic_endpoint::get_alpn, + relay::RelayMode, + util::AbortingJoinHandle, + MagicEndpoint, }; use iroh_sync::net::SYNC_ALPN; use quic_rpc::{ @@ -80,6 +84,7 @@ where keylog: bool, relay_mode: RelayMode, gc_policy: GcPolicy, + node_discovery: NodeDiscoveryConfig, docs_store: iroh_sync::store::fs::Store, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: bool, @@ -94,6 +99,20 @@ pub enum StorageConfig { Persistent(PathBuf), } +/// Configuration for node discovery. +#[derive(Debug, Default)] +pub enum NodeDiscoveryConfig { + /// Use no node discovery mechanism. + None, + /// Use the default discovery mechanism. + /// + /// This enables the [`DnsDiscovery`] service. + #[default] + Default, + /// Use a custom discovery mechanism. + Custom(Box), +} + impl Default for Builder { fn default() -> Self { Self { @@ -106,6 +125,7 @@ impl Default for Builder { rpc_endpoint: Default::default(), gc_policy: GcPolicy::Disabled, docs_store: iroh_sync::store::Store::memory(), + node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, } @@ -129,6 +149,7 @@ impl Builder { rpc_endpoint: Default::default(), gc_policy: GcPolicy::Disabled, docs_store, + node_discovery: Default::default(), #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, } @@ -189,6 +210,7 @@ where relay_mode: self.relay_mode, gc_policy: self.gc_policy, docs_store, + node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: false, }) @@ -207,6 +229,7 @@ where relay_mode: self.relay_mode, gc_policy: self.gc_policy, docs_store: self.docs_store, + node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: self.insecure_skip_relay_cert_verify, } @@ -232,6 +255,7 @@ where relay_mode: self.relay_mode, gc_policy: 
self.gc_policy, docs_store: self.docs_store, + node_discovery: self.node_discovery, #[cfg(any(test, feature = "test-utils"))] insecure_skip_relay_cert_verify: self.insecure_skip_relay_cert_verify, }) @@ -259,6 +283,15 @@ where self } + /// Sets the node discovery mechanism. + /// + /// The default is [`NodeDiscoveryConfig::Default`]. Use [`NodeDiscoveryConfig::Custom`] to pass a + /// custom [`Discovery`]. + pub fn node_discovery(mut self, config: NodeDiscoveryConfig) -> Self { + self.node_discovery = config; + self + } + /// Binds the node service to a different socket. /// /// By default it binds to `127.0.0.1:11204`. @@ -306,6 +339,20 @@ where .max_concurrent_bidi_streams(MAX_STREAMS.try_into()?) .max_concurrent_uni_streams(0u32.into()); + let discovery: Option> = match self.node_discovery { + NodeDiscoveryConfig::None => None, + NodeDiscoveryConfig::Custom(discovery) => Some(discovery), + NodeDiscoveryConfig::Default => { + let discovery = ConcurrentDiscovery::from_services(vec![ + // Enable DNS discovery by default + Box::new(DnsDiscovery::n0_dns()), + // Enable pkarr publishing by default + Box::new(PkarrPublisher::n0_dns(self.secret_key.clone())), + ]); + Some(Box::new(discovery)) + } + }; + let endpoint = MagicEndpoint::builder() .secret_key(self.secret_key.clone()) .alpns(PROTOCOLS.iter().map(|p| p.to_vec()).collect()) @@ -313,6 +360,10 @@ where .transport_config(transport_config) .concurrent_connections(MAX_CONNECTIONS) .relay_mode(self.relay_mode); + let endpoint = match discovery { + Some(discovery) => endpoint.discovery(discovery), + None => endpoint, + }; #[cfg(any(test, feature = "test-utils"))] let endpoint =